Columns: filename (string, lengths 13 to 19), text (string, lengths 134 to 1.04M)
the-stack_0_4199
#!/usr/bin/env python # -*- coding: utf-8 -*- import featuretools as ft import pandas as pd import pytest from numpy import nan from cardea.data_loader import EntitySetLoader from cardea.problem_definition import MissedAppointment @pytest.fixture() def missed_appointment(): return MissedAppointment() @pytest.fixture() def es_loader(): return EntitySetLoader() @pytest.fixture() def cutoff_times(): return pd.DataFrame( {"instance_id": [10, 11, 12], "time": [7 / 22 / 2018, 8 / 21 / 2018, 9 / 16 / 2018], "label": ['noshow', 'noshow', 'fulfilled'] }) @pytest.fixture() def objects(es_loader): appointment_df = pd.DataFrame({"identifier": [10, 11, 12], "status": ['noshow', 'noshow', 'fulfilled'], "start": [7 / 22 / 2018, 8 / 21 / 2018, 9 / 16 / 2018], "participant": [120, 121, 122], "created": [7 / 22 / 2018, 8 / 21 / 2018, 9 / 16 / 2018]}) participant_df = pd.DataFrame({"object_id": [120, 121, 122], "actor": [0, 1, 2]}) patient_df = pd.DataFrame({"identifier": [0, 1, 2], "gender": ['female', 'female', 'male'], "birthDate": ['10/21/2000', '7/2/2000', '1/10/2000'], "active": ['True', 'True', 'nan']}) appointment = es_loader.create_object(appointment_df, 'Appointment') participant = es_loader.create_object(participant_df, 'Appointment_Participant') patient = es_loader.create_object(patient_df, 'Patient') return [appointment, participant, patient] @pytest.fixture() def es_success(objects, es_loader): es = ft.EntitySet(id="test") identifiers = es_loader.get_object_ids(objects) fhir_dict = es_loader.get_dataframes(objects) es_loader.create_entity(fhir_dict, identifiers, entity_set=es) relationships = es_loader.get_relationships(objects, list(fhir_dict.keys())) es_loader.create_relationships(relationships, entity_set=es) return es @pytest.fixture() def object_error_missing_label(es_loader): appointment_df = pd.DataFrame({"identifier": [10, 11, 12], "start": [7 / 22 / 2018, 8 / 21 / 2018, 9 / 16 / 2018], "participant": [120, 121, 122], "created": [7 / 22 / 2018, 8 / 21 / 2018, 9 / 16 / 2018]}) appointment = es_loader.create_object(appointment_df, 'Appointment') return appointment @pytest.fixture() def objects_error_missing_cutoff_label(es_loader): appointment_df = pd.DataFrame({"identifier": [10, 11, 12], "start": [7 / 22 / 2018, 8 / 21 / 2018, 9 / 16 / 2018], "status": ['noshow', 'noshow', 'fulfilled'], "participant": [120, 121, 122]}) appointment = es_loader.create_object(appointment_df, 'Appointment') return appointment @pytest.fixture() def entityset_error_missing_label(objects, object_error_missing_label, es_loader): es = ft.EntitySet(id="test") objects.extend([object_error_missing_label]) identifiers = es_loader.get_object_ids(objects) fhir_dict = es_loader.get_dataframes(objects) es_loader.create_entity(fhir_dict, identifiers, entity_set=es) relationships = es_loader.get_relationships(objects, list(fhir_dict.keys())) es_loader.create_relationships(relationships, entity_set=es) return es @pytest.fixture() def entityset_error_missing_cutoff_label(objects, objects_error_missing_cutoff_label, es_loader): es = ft.EntitySet(id="test") for object in objects: es_loader.create_entity(object, entity_set=es) for object in objects: es_loader.create_relationships(object, entity_set=es) es_loader.create_entity(objects_error_missing_cutoff_label, entity_set=es) es_loader.create_relationships(objects_error_missing_cutoff_label, entity_set=es) return es def test_generate_cutoff_times_success( es_success, missed_appointment, cutoff_times): _, _, generated_df = missed_appointment.generate_cutoff_times(es_success) 
generated_df.index = cutoff_times.index # both should have the same index generated_df = generated_df[cutoff_times.columns] # same columns order assert generated_df.equals(cutoff_times) def test_generate_cutoff_times_error( entityset_error_missing_label, missed_appointment): with pytest.raises(ValueError): missed_appointment.generate_cutoff_times( entityset_error_missing_label) def test_generate_cutoff_times_error_value(es_success, missed_appointment): es_success['Appointment'].df.loc[len(es_success['Appointment'].df)] = [ nan, nan, nan, nan, nan] with pytest.raises(ValueError): missed_appointment.generate_cutoff_times( es_success) def test_generate_cutoff_times_missing_cutoff_time( es_success, missed_appointment): es_success['Appointment'].delete_variables(['created']) with pytest.raises(ValueError): missed_appointment.generate_cutoff_times( es_success)
the-stack_0_4201
from django.shortcuts import render

from vdw.raw.sources.models import Source


def sources(request):
    sources = Source.objects.filter(published=True, archived=False)\
        .select_related('stats')
    return render(request, 'sources/sources.html', {
        'sources': sources,
    })
the-stack_0_4202
from setuptools import find_packages, setup

with open('README.md', 'r') as fh:
    long_description = fh.read()

setup(
    name='backoid',
    description='backoid',
    version="0.0.1",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=find_packages("src"),
    package_dir={"": "src"},
    install_requires=[
        'pymysql>=0.9.3',
        'azure-storage-blob',
        'pyyaml'
    ],
    classifiers=[
        "Natural Language :: English",
        "Operating System :: POSIX",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3.7",
    ],
    entry_points={
        'console_scripts': [
            'backoid = backoid.cli:main'
        ]
    }
)
the-stack_0_4204
# coding=utf-8 # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import absolute_import, division, print_function, unicode_literals import os from pants.base.file_system_project_tree import FileSystemProjectTree from pants_test.pants_run_integration_test import PantsRunIntegrationTest class FilemapIntegrationTest(PantsRunIntegrationTest): PATH_PREFIX = 'testprojects/tests/python/pants/file_sets/' TEST_EXCLUDE_FILES = { 'a.py', 'aa.py', 'aaa.py', 'ab.py', 'aabb.py', 'test_a.py', 'dir1/a.py', 'dir1/aa.py', 'dir1/aaa.py', 'dir1/ab.py', 'dir1/aabb.py', 'dir1/dirdir1/a.py', 'dir1/dirdir1/aa.py', 'dir1/dirdir1/ab.py' } def setUp(self): super(FilemapIntegrationTest, self).setUp() project_tree = FileSystemProjectTree(os.path.abspath(self.PATH_PREFIX), ['BUILD', '.*']) scan_set = set() def should_ignore(file): return file.endswith('.pyc') for root, dirs, files in project_tree.walk(''): scan_set.update({os.path.join(root, f) for f in files if not should_ignore(f)}) self.assertEqual(scan_set, self.TEST_EXCLUDE_FILES) def _mk_target(self, test_name): return '{}:{}'.format(self.PATH_PREFIX, test_name) def _extract_exclude_output(self, test_name): stdout_data = self.do_command('filemap', self._mk_target(test_name), success=True).stdout_data return {s.split(' ')[0].replace(self.PATH_PREFIX, '') for s in stdout_data.split('\n') if s.startswith(self.PATH_PREFIX)} def test_testprojects(self): self.do_command('filemap', 'testprojects::', success=True) def test_python_sources(self): run = self.do_command('filemap', 'testprojects/src/python/sources', success=True) self.assertIn('testprojects/src/python/sources/sources.py', run.stdout_data) def test_exclude_invalid_string(self): build_path = os.path.join(self.PATH_PREFIX, 'BUILD.invalid') build_content = '''python_library(name='exclude_strings_disallowed', sources=rglobs('*.py', exclude='aa.py'))''' with self.temporary_file_content(build_path, build_content): pants_run = self.do_command('filemap', self._mk_target('exclude_strings_disallowed'), success=False) self.assertRegexpMatches(pants_run.stderr_data, r'Excludes of type `.*` are not supported') def test_exclude_list_of_strings(self): test_out = self._extract_exclude_output('exclude_list_of_strings') self.assertEqual(self.TEST_EXCLUDE_FILES - {'aaa.py', 'dir1/aaa.py'}, test_out) def test_exclude_globs(self): test_out = self._extract_exclude_output('exclude_globs') self.assertEqual(self.TEST_EXCLUDE_FILES - {'aabb.py', 'dir1/dirdir1/aa.py'}, test_out) def test_exclude_strings(self): test_out = self._extract_exclude_output('exclude_strings') self.assertEqual(self.TEST_EXCLUDE_FILES - {'aa.py', 'ab.py'}, test_out) def test_exclude_set(self): test_out = self._extract_exclude_output('exclude_set') self.assertEqual(self.TEST_EXCLUDE_FILES - {'aaa.py', 'a.py'}, test_out) def test_exclude_rglobs(self): test_out = self._extract_exclude_output('exclude_rglobs') self.assertEqual(self.TEST_EXCLUDE_FILES - {'ab.py', 'aabb.py', 'dir1/ab.py', 'dir1/aabb.py', 'dir1/dirdir1/ab.py'}, test_out) def test_exclude_zglobs(self): test_out = self._extract_exclude_output('exclude_zglobs') self.assertEqual(self.TEST_EXCLUDE_FILES - {'ab.py', 'aabb.py', 'dir1/ab.py', 'dir1/aabb.py', 'dir1/dirdir1/ab.py'}, test_out) def test_exclude_composite(self): test_out = self._extract_exclude_output('exclude_composite') self.assertEqual(self.TEST_EXCLUDE_FILES - {'a.py', 'aaa.py', 'dir1/a.py', 'dir1/dirdir1/a.py'}, test_out) def 
test_implicit_sources(self): test_out = self._extract_exclude_output('implicit_sources') self.assertEqual({'a.py', 'aa.py', 'aaa.py', 'aabb.py', 'ab.py'}, test_out) test_out = self._extract_exclude_output('test_with_implicit_sources') self.assertEqual({'test_a.py'}, test_out)
the-stack_0_4207
import json import zipfile import os import sys import pytest from click.testing import CliRunner import mock from chalice import cli from chalice.cli import factory from chalice.config import Config from chalice.utils import record_deployed_values from chalice import local from chalice.constants import DEFAULT_APIGATEWAY_STAGE_NAME @pytest.fixture def runner(): return CliRunner() @pytest.fixture def mock_cli_factory(): cli_factory = mock.Mock(spec=factory.CLIFactory) cli_factory.create_config_obj.return_value = Config.create(project_dir='.') cli_factory.create_botocore_session.return_value = mock.sentinel.Session return cli_factory def assert_chalice_app_structure_created(dirname): app_contents = os.listdir(os.path.join(os.getcwd(), dirname)) assert 'app.py' in app_contents assert 'requirements.txt' in app_contents assert '.chalice' in app_contents assert '.gitignore' in app_contents def _run_cli_command(runner, function, args, cli_factory=None): # Handles passing in 'obj' so we can get commands # that use @pass_context to work properly. # click doesn't support this natively so we have to duplicate # what 'def cli(...)' is doing. if cli_factory is None: cli_factory = factory.CLIFactory('.') result = runner.invoke( function, args, obj={'project_dir': '.', 'debug': False, 'factory': cli_factory}) return result def test_create_new_project_creates_app(runner): with runner.isolated_filesystem(): result = runner.invoke(cli.new_project, ['testproject']) assert result.exit_code == 0 # The 'new-project' command creates a directory based on # the project name assert os.listdir(os.getcwd()) == ['testproject'] assert_chalice_app_structure_created(dirname='testproject') def test_create_project_with_prompted_app_name(runner): with runner.isolated_filesystem(): result = runner.invoke(cli.new_project, input='testproject') assert result.exit_code == 0 assert os.listdir(os.getcwd()) == ['testproject'] assert_chalice_app_structure_created(dirname='testproject') def test_error_raised_if_dir_already_exists(runner): with runner.isolated_filesystem(): os.mkdir('testproject') result = runner.invoke(cli.new_project, ['testproject']) assert result.exit_code == 1 assert 'Directory already exists: testproject' in result.output def test_can_load_project_config_after_project_creation(runner): with runner.isolated_filesystem(): result = runner.invoke(cli.new_project, ['testproject']) assert result.exit_code == 0 config = factory.CLIFactory('testproject').load_project_config() assert config == { 'version': '2.0', 'app_name': 'testproject', 'stages': { 'dev': {'api_gateway_stage': DEFAULT_APIGATEWAY_STAGE_NAME}, } } def test_default_new_project_adds_index_route(runner): with runner.isolated_filesystem(): result = runner.invoke(cli.new_project, ['testproject']) assert result.exit_code == 0 app = factory.CLIFactory('testproject').load_chalice_app() assert '/' in app.routes def test_gen_policy_command_creates_policy(runner): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = runner.invoke(cli.cli, ['gen-policy'], obj={}) assert result.exit_code == 0 # The output should be valid JSON. parsed_policy = json.loads(result.output) # We don't want to validate the specific parts of the policy # (that's tested elsewhere), but we'll check to make sure # it looks like a policy document. 
assert 'Version' in parsed_policy assert 'Statement' in parsed_policy def test_can_package_command(runner): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = _run_cli_command(runner, cli.package, ['outdir']) assert result.exit_code == 0, result.output assert os.path.isdir('outdir') dir_contents = os.listdir('outdir') assert 'sam.json' in dir_contents assert 'deployment.zip' in dir_contents def test_can_package_with_single_file(runner): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = _run_cli_command( runner, cli.package, ['--single-file', 'package.zip']) assert result.exit_code == 0, result.output assert os.path.isfile('package.zip') with zipfile.ZipFile('package.zip', 'r') as f: assert sorted(f.namelist()) == ['deployment.zip', 'sam.json'] def test_does_deploy_with_default_api_gateway_stage_name(runner, mock_cli_factory): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') # This isn't perfect as we're assuming we know how to # create the config_obj like the deploy() command does, # it should give us more confidence that the api gateway # stage defaults are still working. cli_factory = factory.CLIFactory('.') config = cli_factory.create_config_obj( chalice_stage_name='dev', autogen_policy=None, api_gateway_stage=None ) assert config.api_gateway_stage == DEFAULT_APIGATEWAY_STAGE_NAME def test_can_specify_api_gateway_stage(runner, mock_cli_factory): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = _run_cli_command(runner, cli.deploy, ['--api-gateway-stage', 'notdev'], cli_factory=mock_cli_factory) assert result.exit_code == 0 mock_cli_factory.create_config_obj.assert_called_with( autogen_policy=None, chalice_stage_name='dev', api_gateway_stage='notdev' ) def test_can_deploy_specify_connection_timeout(runner, mock_cli_factory): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = _run_cli_command(runner, cli.deploy, ['--connection-timeout', 100], cli_factory=mock_cli_factory) assert result.exit_code == 0 mock_cli_factory.create_botocore_session.assert_called_with( connection_timeout=100 ) def test_can_retrieve_url(runner, mock_cli_factory): deployed_values_dev = { "schema_version": "2.0", "resources": [ {"rest_api_url": "https://dev-url/", "name": "rest_api", "resource_type": "rest_api"}, ] } deployed_values_prod = { "schema_version": "2.0", "resources": [ {"rest_api_url": "https://prod-url/", "name": "rest_api", "resource_type": "rest_api"}, ] } with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') deployed_dir = os.path.join('.chalice', 'deployed') os.makedirs(deployed_dir) record_deployed_values( deployed_values_dev, os.path.join(deployed_dir, 'dev.json') ) record_deployed_values( deployed_values_prod, os.path.join(deployed_dir, 'prod.json') ) result = _run_cli_command(runner, cli.url, [], cli_factory=mock_cli_factory) assert result.exit_code == 0 assert result.output == 'https://dev-url/\n' prod_result = _run_cli_command(runner, cli.url, ['--stage', 'prod'], cli_factory=mock_cli_factory) assert prod_result.exit_code == 0 assert prod_result.output == 'https://prod-url/\n' def test_error_when_no_deployed_record(runner, mock_cli_factory): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') 
os.chdir('testproject') result = _run_cli_command(runner, cli.url, [], cli_factory=mock_cli_factory) assert result.exit_code == 2 assert 'not find' in result.output @pytest.mark.skipif(sys.version_info[0] == 3, reason=('Python Version 3 cannot create pipelines due to ' 'CodeBuild not having a Python 3.6 image. This ' 'mark can be removed when that image exists.')) def test_can_generate_pipeline_for_all(runner): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = _run_cli_command( runner, cli.generate_pipeline, ['pipeline.json']) assert result.exit_code == 0, result.output assert os.path.isfile('pipeline.json') with open('pipeline.json', 'r') as f: template = json.load(f) # The actual contents are tested in the unit # tests. Just a sanity check that it looks right. assert "AWSTemplateFormatVersion" in template assert "Outputs" in template def test_no_errors_if_override_codebuild_image(runner): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = _run_cli_command( runner, cli.generate_pipeline, ['-i', 'python:3.6.1', 'pipeline.json']) assert result.exit_code == 0, result.output assert os.path.isfile('pipeline.json') with open('pipeline.json', 'r') as f: template = json.load(f) # The actual contents are tested in the unit # tests. Just a sanity check that it looks right. image = template['Parameters']['CodeBuildImage']['Default'] assert image == 'python:3.6.1' def test_can_configure_github(runner): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') # The -i option is provided so we don't have to skip this # test on python3.6 result = _run_cli_command( runner, cli.generate_pipeline, ['--source', 'github', '-i' 'python:3.6.1', 'pipeline.json']) assert result.exit_code == 0, result.output assert os.path.isfile('pipeline.json') with open('pipeline.json', 'r') as f: template = json.load(f) # The template is already tested in the unit tests # for template generation. We just want a basic # sanity check to make sure things are mapped # properly. assert 'GithubOwner' in template['Parameters'] assert 'GithubRepoName' in template['Parameters'] def test_can_extract_buildspec_yaml(runner): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = _run_cli_command( runner, cli.generate_pipeline, ['--buildspec-file', 'buildspec.yml', '-i', 'python:3.6.1', 'pipeline.json']) assert result.exit_code == 0, result.output assert os.path.isfile('buildspec.yml') with open('buildspec.yml') as f: data = f.read() # The contents of this file are tested elsewhere, # we just want a basic sanity check here. 
assert 'chalice package' in data def test_env_vars_set_in_local(runner, mock_cli_factory, monkeypatch): local_server = mock.Mock(spec=local.LocalDevServer) mock_cli_factory.create_local_server.return_value = local_server mock_cli_factory.create_config_obj.return_value = Config.create( project_dir='.', environment_variables={'foo': 'bar'}) actual_env = {} monkeypatch.setattr(os, 'environ', actual_env) with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') _run_cli_command(runner, cli.local, [], cli_factory=mock_cli_factory) assert actual_env['foo'] == 'bar' def test_can_specify_profile_for_logs(runner, mock_cli_factory): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = _run_cli_command( runner, cli.logs, ['--profile', 'my-profile'], cli_factory=mock_cli_factory ) assert result.exit_code == 0 assert mock_cli_factory.profile == 'my-profile'
the-stack_0_4208
import os

import pandas as pd


def read_synchronisation_file(experiment_root):
    filepath = os.path.join(experiment_root, "labels", "synchronisation.csv")
    return pd.read_csv(filepath)


def convert_timestamps(experiment_root, timestamps, from_reference, to_reference):
    """
    Convert numeric timestamps (seconds for start of the video or posix timestamp) of a
    reference time (e.g. P3_eyetracker) to a different reference time (e.g. video time)

    Parameters
    ----------
    experiment_root: str
        Root of the current experiment (to find the right synchronisation matrix)
    timestamps: float or array like
        timestamps to be converted
    from_reference: str
        name of the reference of the original timestamps
    to_reference: str
        name of the reference time the timestamp has to be converted to

    Returns
    -------
    converted_timestamps: float or array like
        Timestamps given in to_reference time values
    """
    synchronisation_file = read_synchronisation_file(experiment_root)
    offset = synchronisation_file.loc[
        synchronisation_file["from"] == from_reference, to_reference].values[0]
    converted_timestamps = timestamps + offset
    return converted_timestamps


if __name__ == '__main__':
    exp_root = "/Volumes/DataDrive/igroups_recordings/igroups_experiment_8"
    print(convert_timestamps(exp_root, [1482326641, 1482326642], "P3_eyetracker", "video"))
the-stack_0_4209
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.

import unittest
from pyiron_base.job.template import PythonTemplateJob
from pyiron_base._tests import TestWithProject


class ToyJob(PythonTemplateJob):
    def __init__(self, project, job_name):
        super(ToyJob, self).__init__(project, job_name)
        self.input['input_energy'] = 100

    def run_static(self):
        with self.project_hdf5.open("output/generic") as h5out:
            h5out["energy_tot"] = self.input["input_energy"]
        self.status.finished = True


class TestProjectData(TestWithProject):

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        for i, c in enumerate("abcd"):
            j = cls.project.create_job(ToyJob, f"test_{c}")
            j.input['input_energy'] = i
            j.run()

    def setUp(self):
        self.table = self.project.create.table('test_table')
        self.table.filter_function = lambda j: j.name in ["test_a", "test_b"]
        self.table.add['name'] = lambda j: j.name
        self.table.run()

    def tearDown(self):
        self.project.remove_job(self.table.name)

    def test_filter(self):
        """Filter functions should restrict jobs included in the table."""
        df = self.table.get_dataframe()
        self.assertEqual(2, len(df), "Table not correctly filtered.")
        self.assertEqual(["test_a", "test_b"], df.name.to_list(),
                         "Table not correctly filtered.")

    def test_filter_reload(self):
        """Lambdas should work as filter functions even if read from HDF5."""
        try:
            table_loaded = self.project.load(self.table.name)
        except:
            self.fail("Error on reloading table with filter lambda.")


if __name__ == '__main__':
    unittest.main()
the-stack_0_4210
""" Calculations that deal with seismic moment tensors. Notes from Lay and Wallace Chapter 8: * Decomposition 1: Mij = isotropic + deviatoric * Decomposition 2: Mij = isotropic + 3 vector dipoles * Decomposition 3: Mij = isotropic + 3 double couples * Decomposition 4: Mij = isotropic + 3 CLVDs * Decomposition 5: Mij = isotropic + major DC + minor DC * Decomposition 6: Mij = isotropic + DC + CLVD The most useful in practice are Decomposition 1 and Decomposition 6. """ import numpy as np def get_MT(mrr, mtt, mpp, mrt, mrp, mtp): """Build a matrix from the six components of the moment tensor""" MT = np.array([[mrr, mrt, mrp], [mrt, mtt, mtp], [mrp, mtp, mpp]]); return MT; def diagonalize_MT(MT): """Return a diagonal matrix whose elements are the ordered eigenvalues.""" eigvals, eigvecs = np.linalg.eig(MT); eigvals = sorted(eigvals)[::-1]; return np.diag(eigvals); def get_deviatoric_MT(MT): """Get deviatoric MT (returns a matrix)""" iso_MT = get_iso_MT(MT); M_dev = np.subtract(MT, iso_MT); return M_dev; def get_iso_MT(MT): """Return the isotropic moment tensor (returns a matrix)""" x = (1 / 3) * np.trace(MT); iso_MT = np.multiply(np.eye(3), x); return iso_MT def get_clvd_dc_from_deviatoric_MT(MT): """ Return the dc and clvd components of a deviatoric MT, from Shearer Equation 9.14. Returns two matricies. """ eigenvalues = np.diag(MT); assert(eigenvalues[0] > eigenvalues[1] > eigenvalues[2]), ValueError("Deviatoric eigenvalues out of order.") dc_component = (1/2)*(eigenvalues[0]-eigenvalues[2]); clvd_component = eigenvalues[1]*(1/2); M_dc = np.diag([dc_component, 0, -dc_component]); M_clvd = np.diag([-clvd_component, 2*clvd_component, -clvd_component]); return M_clvd, M_dc; def decompose_iso_dc_clvd(MT): """ A useful function to decompose a full moment tensor into an isotropic part, a double-couple, and a CLVD component. Returns three matrices. """ diag_MT = diagonalize_MT(MT); # equivalent to a coordinate transformation M_iso = get_iso_MT(diag_MT); # get the trace M_dev = get_deviatoric_MT(diag_MT); M_dev = diagonalize_MT(M_dev); # diagonalized in the proper order M_clvd, M_dc = get_clvd_dc_from_deviatoric_MT(M_dev); return M_iso, M_clvd, M_dc; # def get_separate_scalar_moments(MT): # """return isotropic, clvd, and double couple moments. Not frequently used.""" # M_iso, M_clvd, M_dc = decompose_iso_dc_clvd(MT); # iso_moment = abs(M_iso[0][0]); # clvd_moment = abs(M_clvd[0][0]); # dc_moment = abs(M_dc[0][0]); # return iso_moment, clvd_moment, dc_moment; def get_total_scalar_moment(MT): """Shearer Equation 9.8: quadratic sum of element of moment tensor components, in newton-meters""" MT = np.divide(MT, 1e16); # done to prevent computer buffer overflow total = 0; for i in range(3): for j in range(3): total = total + MT[i][j]*MT[i][j]; Mo = (1/np.sqrt(2)) * np.sqrt(total); Mo = np.multiply(Mo, 1e16); return Mo; def get_percent_double_couple(MT): """Get the percent double couple and percent clvd moment from a deviatoric moment tensor. When isotropic term is involved, this can get more complicated and there are several approaches. See Shearer equation 9.17 for epsilon. See Vavrycuk, 2001 for other approaches when isotropic component is involved. """ m_dev = diagonalize_MT(get_deviatoric_MT(MT)); epsilon = np.diag(m_dev)[1] / np.max([np.abs(np.diag(m_dev)[0]), np.abs(np.diag(m_dev)[2])]); fraction = epsilon * 2; perc_clvd = 100 * (abs(fraction)); perc_dc = 100 - perc_clvd; return perc_dc, perc_clvd;
the-stack_0_4211
# Copyright 2019 ZTE corporation. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

from typing import Any, Mapping, NamedTuple, Optional, Sequence
from itertools import zip_longest

from . import utilities
from .models.data_format import DataFormat


def get_tensor_by_fuzzy_name(graph, name):
    if ':' in name:
        tensor = graph.get_tensor_by_name(name)
    else:
        tensor = graph.get_operation_by_name(name).outputs[0]
    return tensor


class Config(NamedTuple):
    input_names: Optional[Sequence[str]]
    data_formats: Sequence[Optional[DataFormat]]
    output_names: Optional[Sequence[str]]

    @staticmethod
    def from_json(value: Mapping[str, Any]) -> 'Config':
        return Config(input_names=value.get('input_names'),
                      data_formats=utilities.get_data_formats(value.get('input_formats')),
                      output_names=value.get('output_names'))

    @staticmethod
    def from_env(env: Mapping[str, str]) -> 'Config':
        return Config(input_names=utilities.split_by(env.get('INPUT_NAMES'), ','),
                      data_formats=utilities.get_data_formats(
                          utilities.split_by(env.get('INPUT_FORMATS'), ',')),
                      output_names=utilities.split_by(env.get('OUTPUT_NAMES'), ','))

    def get_input_tensors_from_graph(self, graph):
        if self.input_names is None:
            input_tensors = [operation.outputs[0]
                             for operation in graph.get_operations()
                             if operation.type == 'Placeholder']
        else:
            input_tensors = [get_tensor_by_fuzzy_name(graph, name) for name in self.input_names]
        return input_tensors

    def get_output_tensors_from_graph(self, graph):
        if self.output_names is None:
            output_tensors = [output_tensor
                              for operation in graph.get_operations()
                              if operation.type not in ['Assign', 'Const', 'Identity',
                                                        'IsVariableInitialized', 'NoOp',
                                                        'Placeholder', 'SaveV2',
                                                        'VarIsInitializedOp']
                              for output_tensor in operation.outputs
                              if not output_tensor.consumers()]
        else:
            output_tensors = [get_tensor_by_fuzzy_name(graph, name) for name in self.output_names]
        return output_tensors


def get_inputs(graph, config):
    return zip_longest(config.get_input_tensors_from_graph(graph), config.data_formats)
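A short sketch of building a Config from environment-style variables; the tensor names are illustrative, and it assumes utilities.split_by splits comma-separated strings and that 'channels_last' is a value utilities.get_data_formats understands (neither helper is shown here).

# Illustrative names: 'input_1:0' uses the explicit tensor syntax, while
# 'softmax' relies on get_tensor_by_fuzzy_name taking outputs[0] of that op.
env = {
    'INPUT_NAMES': 'input_1:0',
    'INPUT_FORMATS': 'channels_last',  # assumed to be accepted by get_data_formats
    'OUTPUT_NAMES': 'softmax',
}
config = Config.from_env(env)
# With a loaded tf.Graph `graph`, the configured tensors then resolve as:
#   inputs = list(get_inputs(graph, config))   # (tensor, data_format) pairs
#   outputs = config.get_output_tensors_from_graph(graph)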
the-stack_0_4212
#!/usr/bin/env python3
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Command line interface for mcuxpresso_builder."""

import argparse
import pathlib
import sys

from pw_build_mcuxpresso import components


def _parse_args() -> argparse.Namespace:
    """Setup argparse and parse command line args."""
    parser = argparse.ArgumentParser()

    subparsers = parser.add_subparsers(dest='command',
                                       metavar='<command>',
                                       required=True)

    project_parser = subparsers.add_parser(
        'project', help='output components of an MCUXpresso project')
    project_parser.add_argument('manifest_filename', type=pathlib.Path)
    project_parser.add_argument('--include', type=str, action='append')
    project_parser.add_argument('--exclude', type=str, action='append')
    project_parser.add_argument('--prefix', dest='path_prefix', type=str)

    return parser.parse_args()


def main():
    """Main command line function."""
    args = _parse_args()

    if args.command == 'project':
        components.project(args.manifest_filename,
                           include=args.include,
                           exclude=args.exclude,
                           path_prefix=args.path_prefix)

    sys.exit(0)


if __name__ == '__main__':
    main()
the-stack_0_4214
import cv2 import os import scipy as scp import scipy.misc import matplotlib from sklearn.cluster import KMeans import numpy as np import evaluationClass_tools as evTools import random from sklearn import svm from sklearn import preprocessing import pickle import triangle_detection as triang def oneClass(image_seg): rows, cols = image_seg.shape[:2] color = [0, 0, 0] for i in range(rows): for j in range(cols): if (image_seg[i][j][0] == 0 and image_seg[i][j][1] == 0 and image_seg[i][j][2] == 0): continue else: if (color[0] == 0 and color[1] == 0 and color[2] == [0]): color[0] = image_seg[i][j][0] color[1] = image_seg[i][j][1] color[2] = image_seg[i][j][2] continue if (image_seg[i][j][0] != color[0] or image_seg[i][j][1] != color[1] and image_seg[i][j][2] != color[2]): return False return True, color # def triangStats(img, singleColor = True): # imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # ret, imbw = cv2.threshold(imggray, 10, 255, 0) # _, contours, _ = cv2.findContours(imbw, 1, 2) # maxArea = 0; # Ax = Ay = Bx = By = Cx = Cy = 0 # areaCnt = 0 # maxCnt = None # idx = -1 # for cnt in contours: # idx += 1 # retval, triangle = cv2.minEnclosingTriangle(cnt) # if (triangle is None): # continue # areaCnt = cv2.contourArea(cnt) # if (areaCnt <= maxArea): # continue # maxArea = areaCnt # maxCnt = idx # Ax = triangle[0][0][0] # Ay = triangle[0][0][1] # Bx = triangle[1][0][0] # By = triangle[1][0][1] # Cx = triangle[2][0][0] # Cy = triangle[2][0][1] # if (maxArea < 0.1 * imggray.shape[0] * imggray.shape[1]): # return False, None, None, None # v1x = 0 # v1y = 0 # v2x = 0 # v2y = 0 # v3x = 0 # v3y = 0 # imgCnt = np.zeros((img.shape[0], img.shape[1], 3), np.uint8) # mask = np.zeros((img.shape[0], img.shape[1], 3), np.uint8) # cv2.drawContours(mask, contours, maxCnt, color=(255, 255, 255), thickness=cv2.FILLED) # color = [0, 0, 0] # contActivePixels = 0 # valret = True # for i in range(mask.shape[0]): # for j in range(mask.shape[1]): # if (mask[i, j, 0] == 255 and mask[i, j, 1] == 255 and mask[i, j, 2] == 255): # if(img[i, j, 0] != 0 or img[i, j, 1] != 0 or img[i, j, 2] != 0): # contActivePixels+=1 # if (color[0] == 0 and color[1] == 0 and color[2] == 0): # color[0] = int(img[i][j][0]) # color[1] = int(img[i][j][1]) # color[2] = int(img[i][j][2]) # else: # if (img[i][j][0] != color[0] or img[i][j][1] != color[1] or img[i][j][2] != color[2]): # valret = False # if(singleColor == True and valret == False): # return False, None, None, None # cv2.drawContours(imgCnt, contours, maxCnt, color=color, thickness=cv2.FILLED) # if (Cy < By and Cy < Ay): # v1y = Cy # v1x = Cx # if (Ax < Bx): # v2x = Ax # v2y = Ay # v3x = Bx # v3y = By # else: # v2x = Bx # v2y = By # v3x = Ax # v3y = Ay # elif (By < Cy and By < Ay): # v1y = By # v1x = Bx # if (Ax < Cx): # v2x = Ax # v2y = Ay # v3x = Cx # v3y = Cy # else: # v2x = Cx # v2y = Cy # v3x = Ax # v3y = Ay # else: # v1y = Ay # v1x = Ax # if (Bx < Cx): # v2x = Bx # v2y = By # v3x = Cx # v3y = Cy # else: # v2x = Cx # v2y = Cy # v3x = Bx # v3y = By # # (x,y),radius = cv2.minEnclosingCircle(cnt) # triangleArea = abs((v2x * (v1y - v3y) + v1x * (v3y - v2y) + v3x * (v2y - v1y)) / 2) # # print(f"({v1x},{v1y}) ({v2x},{v2y}) ({v3x},{v3y}) {maxArea} {triangleArea}") # # a=input('pare') # # center = (int(x),int(y)) # # radius = int(radius) # # cv2.circle(img,center,radius,(255,255,0),2) # #desc = [maxArea / triangleArea, 0 if v3y - v1y == 0 else (v2y - v1y) / (v3y - v1y), # #1 if v1x - v2x > 0 and v3x - v1x > 0 else 0, np.rad2deg(np.arctan( abs(v3y-v2y) / (v3x - v2x)))] # desc = 
[contActivePixels/triangleArea, np.rad2deg(np.arctan(abs(v3y - v2y) / (v3x - v2x))), 1 if v1x - v2x > 0 and v3x - v1x > 0 else 0 ] # return True, np.array([desc]), imgCnt, color def applySmv(desc, svmModel): return svmModel.predict(desc) def sortImgByNumberOfActivePixels(elem): return elem[1] def sortImgByFilledTriangPerc(elem): return elem[1] def allPxDominantStreet(image_seg, avgPavedPx, avgRockPx, avgNonPavedPx, th): rows,cols = image_seg.shape[:2] endLoop = 0 validNonZeroPx = 0 for i in range(rows): for j in range(cols): if (image_seg[i][j][0] == 0 and image_seg[i][j][1] == 0 and image_seg[i][j][2] == 0): continue if avgPavedPx >= th: if (image_seg[i][j][0] != 0 or image_seg[i][j][1] != 0 or image_seg[i][j][2] != 255): return False else: validNonZeroPx = validNonZeroPx + 1 if avgRockPx >= th: if (image_seg[i][j][0] != 255 or image_seg[i][j][1] != 0 or image_seg[i][j][2] != 0): return False else: validNonZeroPx = validNonZeroPx + 1 if avgNonPavedPx >= th: if (image_seg[i][j][0] != 0 or (image_seg[i][j][1] != 255 and image_seg[i][j][1] != 100) or image_seg[i][j][2] != 0): return False else: validNonZeroPx = validNonZeroPx + 1 if validNonZeroPx == 0: return False, 0 else: return True, validNonZeroPx file_smv_model = 'svm_model3.sav' svm_model = pickle.load(open(file_smv_model, 'rb')) baseGTtrainfile = 'gt_image_4_balanced_train.txt' baseGTvalfile = 'gt_image_4_balanced_val.txt' file_scaler = 'scaler3.sav' scaler = pickle.load(open(file_scaler, 'rb')) classes = evTools.ClassesDef.PAVED_NONPAVED_ROCK basedirretrain='retrain_SVM_balanced_novo_1' dir_test = 'C:\\Pesquisa\\codigos\\KittiSeg_shivam\\KittiSeg\\data\\dataset_Olinda_varHeading_fov90\\teste2\\'; dir_segmented = 'C:\\Pesquisa\\codigos\\KittiSeg_shivam\\KittiSeg\\RUNS\\'+basedirretrain+'\\results\\'; path_retraindataset = 'C:\\Pesquisa\\codigos\\KittiSeg_shivam\\KittiSeg\\data\\data_road\\training\\' dir_retraindataset = 'image_4retrain' dir_gt_retraindataset = 'retrain_SVM_balanced_novo' dirpath = 'RUNS\\'+basedirretrain txt_retrain_name = 'retrain_SVM_balanced_novo_2_train.txt' txt_val_retrain_name = 'retrain_SVM_balanced_novo_2_val.txt' txtBestResults = 'retrain_SVM_balanced_novo_updatedResults.txt'; firstRetrain = False try: fileBestResults = open(txtBestResults, 'r') except IOError: firstRetrain = True fileBestResults = open(txtBestResults, 'w') fileBestResults.close() fileBestResults = open(txtBestResults, 'r') fileBestResults.close() newBestResults = [] resFile = open(os.path.join(dirpath,'results.txt'),'r', encoding="utf8") line = resFile.readline() labelGT = '?' 
count = 0 streetsPaved = [] streetsRock = [] streetsNP = [] while line: streetname = line.replace('\t',' ') streetname = streetname.split(' [')[0] print('PROCESSING STREET: '+streetname) th = 0.99 fileBestResults = open(txtBestResults, 'r') lineBestResult = fileBestResults.readline() newLineBestResult = line bestResultUpdated = False while lineBestResult: streetnameBestResult = lineBestResult.replace('\t',' ') streetnameBestResult = streetnameBestResult.split(' [')[0] if streetname == streetnameBestResult: break lineBestResult = fileBestResults.readline() fileBestResults.close() # label, npav, nrock, nnonp, avgPavedPx, avgRockPx, avgNonPavedPx = evTools.getNumberOfImagesFromClass(line,classes,0) # labelBR, npavBR, nrockBR, nnonpBR, avgPavedPxBR, avgRockPxBR, avgNonPavedPxBR = evTools.getNumberOfImagesFromClass(lineBestResult,classes,0) # print(lineBestResult) # if (avgPavedPxBR >= th and avgPavedPxBR > avgPavedPx) or (avgRockPxBR >= th and avgRockPxBR > avgRockPx) or (avgNonPavedPxBR >= th and avgNonPavedPxBR >= avgNonPavedPx): # newLineBestResult = lineBestResult # if avgNonPavedPx < th and avgRockPx < th and avgPavedPx < th and avgNonPavedPxBR < th and avgRockPxBR < th and avgPavedPxBR < th: # line = resFile.readline() # newBestResults.append(newLineBestResult) # continue streetpath_test = os.path.join(dir_test,streetname) streetpath_seg = os.path.join(dir_segmented,streetname) for filename in os.listdir(streetpath_test): filename_seg = filename.replace('.png','_raw.png'); # print(filename_seg) dirToSaveResult = path_retraindataset+dir_gt_retraindataset+'\\'+streetname+'\\'; # currentImgForegPix = 0 # BRimgForegPix = 0 # try: # current_image_seg = scp.misc.imread(streetpath_seg+'\\'+filename_seg,mode='') # if avgNonPavedPx < th and avgRockPx < th and avgPavedPx < th: # currentimgOK = False # print('currentimgok = -false') # else: # currentimgOK, currentImgForegPix = allPxDominantStreet(current_image_seg, avgPavedPx, avgRockPx, avgNonPavedPx, th) # #currentimgOK = currentimgOK and evTools.good_res_image(current_image_seg) # print('currentimgok = '+str(currentimgOK)) # except Exception as e: # print('exc curr: '+ str(e)) # print('error to read curr img: '+os.path.join(streetpath_seg,filename_seg)) # currentimgOK = False # try: # BR_image_seg = scp.misc.imread(dirToSaveResult+filename_seg,mode='') # if avgNonPavedPxBR < th and avgRockPxBR < th and avgPavedPxBR < th: # BRimgOK = False # print('brimgok = -false') # else: # BRimgOK, BRimgForegPix = allPxDominantStreet(BR_image_seg, avgPavedPxBR, avgRockPxBR, avgNonPavedPxBR, th) # #BRimgOK = BRimgOK and evTools.good_res_image(BR_image_seg) # print('brimgok = '+str(BRimgOK)) # except Exception as e: # print('Exc: '+ str(e)) # print('error to read BR: '+os.path.join(dirToSaveResult+filename_seg)) # BRimgOK = False # if currentimgOK == False and BRimgOK == False: # print('continue') # continue try: current_image_seg = scp.misc.imread(streetpath_seg+'\\'+filename_seg,mode='') currentimgOK = True except Exception as e: currentimgOK = False try: BR_image_seg = scp.misc.imread(dirToSaveResult+filename_seg,mode='') BRimgOK = True except Exception as e: BRimgOK = False perfCurrentImage = 0 if currentimgOK: oneClassContour,descCurrImg, _, curImgTriang, colorCurrImg = triang.triangStats(current_image_seg) if(oneClassContour): goodCurImg = applySmv(scaler.transform(descCurrImg),svm_model) if(goodCurImg==1): perfCurrentImage = descCurrImg[0][0] perfBRImage = 0 if BRimgOK: oneClassContour, descBRImg,_, brImgTriang, colorBRImg = 
triang.triangStats(BR_image_seg) #goodBRImage = applySmv(scaler.transform(descBRImg),svm_model) #if(goodBRImage==1): perfBRImage = descBRImg[0][0] #if avgPavedPx >= th or avgPavedPxBR >= th: if perfCurrentImage > perfBRImage: if not os.path.exists(dirToSaveResult): os.makedirs(dirToSaveResult) print('image updated') print('path saved to file:') print('training/'+ dir_retraindataset +'/'+streetname+ '/' +filename+ ' '+'training/'+ dir_gt_retraindataset+'/'+streetname+ '/' +filename_seg+'\n') arr = ['training/'+ dir_retraindataset +'/'+streetname+ '/' +filename+ ' '+'training/'+ dir_gt_retraindataset+'/'+streetname+ '/' +filename_seg+'\n',perfCurrentImage] if (colorCurrImg[0] == 255 and colorCurrImg[1] == 0 and colorCurrImg[2] == 0): streetsPaved.append(arr) elif colorCurrImg[0] == 0 and colorCurrImg[1] == 255 and colorCurrImg[2] == 0: streetsNP.append(arr) else: streetsRock.append(arr) scp.misc.imsave(dirToSaveResult+filename_seg, curImgTriang) elif BRimgOK: arr = ['training/'+ dir_retraindataset +'/'+streetname+ '/' +filename+ ' '+'training/'+ dir_gt_retraindataset+'/'+streetname+ '/' +filename_seg+'\n',perfBRImage] if (colorBRImg[0] == 255 and colorBRImg[1] == 0 and colorBRImg[2] == 0): streetsPaved.append(arr) elif (colorBRImg[0] == 0 and colorBRImg[1] == 255 and colorBRImg[2] == 0): streetsNP.append(arr) else: streetsRock.append(arr) #scp.misc.imsave(dirToSaveResult+filename_seg, BR_image_seg) newBestResults.append(newLineBestResult) line = resFile.readline() fileBestResults = open(txtBestResults, 'w') for l in newBestResults: fileBestResults.write(l) fileBestResults.close() #trainfile = open(os.path.join(dirpath,'train4_retrainpercsemlateral3.txt'),'a') #valfile = open(os.path.join(dirpath,'val4_retrainpercsemlateral3.txt'),'a') trainfile = open(os.path.join(dirpath,txt_retrain_name),'w') valfile = open(os.path.join(dirpath,txt_val_retrain_name),'w') with open(baseGTtrainfile) as f: for line in f: trainfile.write(line) trainfile.write('\n') with open(baseGTvalfile) as f: for line in f: valfile.write(line) valfile.write('\n') #streetsPaved.sort(key=sortImgByFilledTriangPerc, reverse=True) #streetsNP.sort(key=sortImgByFilledTriangPerc, reverse=True) #streetsRock.sort(key=sortImgByFilledTriangPerc, reverse=True) print(len(streetsPaved)) print(len(streetsNP)) print(len(streetsRock)) #minClass = min(len(streetsPaved),len(streetsNP)) #minClass = min(minClass,len(streetsRock)) # for i in range(minClass): # if i % 3 != 0: # trainfile.write(streetsPaved[i][0]) # trainfile.write(streetsNP[i][0]) # #trainfile.write(streetsRock[i][0]) # else: # valfile.write(streetsPaved[i][0]) # valfile.write(streetsNP[i][0]) # #valfile.write(streetsRock[i][0]) # maxClass = max(len(streetsPaved),len(streetsNP)) # maxClass= max(maxClass,len(streetsRock)) # for i in range(maxClass): # if i % 3 != 0: # if i < len(streetsPaved): # trainfile.write(streetsPaved[i][0]) # print(f"train paved: {streetsPaved[i][0]}") # if i < len(streetsRock): # trainfile.write(streetsRock[i][0]) # print(f"train rock: {streetsRock[i][0]}") # if i < len(streetsNP): # trainfile.write(streetsNP[i][0]) # print(f"train nonpaved: {streetsNP[i][0]}") # else: # if i < len(streetsPaved): # valfile.write(streetsPaved[i][0]) # print(f"val paved: {streetsPaved[i][0]}") # if i < len(streetsRock): # valfile.write(streetsRock[i][0]) # print(f"val rock: {streetsRock[i][0]}") # if i < len(streetsNP): # valfile.write(streetsNP[i][0]) # print(f"val nonpaved: {streetsNP[i][0]}") minClass = min(len(streetsPaved)+len(streetsRock),len(streetsNP)) flagRock = 
False ipaved = 0 irock = 0 inonpaved = 0 for i in range(minClass): if i % 3 != 0: if( i % 2 == 0): trainfile.write(streetsNP[inonpaved][0]) inonpaved += 1 else: if(flagRock == True or ipaved >= len(streetsPaved)): trainfile.write(streetsRock[irock][0]) flagRock = False irock += 1 elif(flagRock == False or irock >= len(streetsRock)): trainfile.write(streetsPaved[ipaved][0]) flagRock = True ipaved += 1 else: if( i % 2 == 0): valfile.write(streetsNP[inonpaved][0]) inonpaved += 1 else: if(flagRock == True or ipaved >= len(streetsPaved)): valfile.write(streetsRock[irock][0]) flagRock = False irock += 1 elif(flagRock == False or irock >= len(streetsRock)): valfile.write(streetsPaved[ipaved][0]) flagRock = True ipaved += 1 trainfile.close(); valfile.close();
the-stack_0_4215
# encoding: utf-8 import datetime from django.test import TestCase from haystack import connections from haystack.inputs import AltParser, Exact from haystack.models import SearchResult from haystack.query import SQ, SearchQuerySet from ..core.models import AnotherMockModel, MockModel class SolrSearchQueryTestCase(TestCase): fixtures = ["base_data"] def setUp(self): super(SolrSearchQueryTestCase, self).setUp() self.sq = connections["solr"].get_query() def test_build_query_all(self): self.assertEqual(self.sq.build_query(), "*:*") def test_build_query_single_word(self): self.sq.add_filter(SQ(content="hello")) self.assertEqual(self.sq.build_query(), "(hello)") def test_build_query_boolean(self): self.sq.add_filter(SQ(content=True)) self.assertEqual(self.sq.build_query(), "(true)") def test_build_query_datetime(self): self.sq.add_filter(SQ(content=datetime.datetime(2009, 5, 8, 11, 28))) self.assertEqual(self.sq.build_query(), "(2009-05-08T11:28:00Z)") def test_build_query_multiple_words_and(self): self.sq.add_filter(SQ(content="hello")) self.sq.add_filter(SQ(content="world")) self.assertEqual(self.sq.build_query(), "((hello) AND (world))") def test_build_query_multiple_words_not(self): self.sq.add_filter(~SQ(content="hello")) self.sq.add_filter(~SQ(content="world")) self.assertEqual(self.sq.build_query(), "(NOT ((hello)) AND NOT ((world)))") def test_build_query_multiple_words_or(self): self.sq.add_filter(~SQ(content="hello")) self.sq.add_filter(SQ(content="hello"), use_or=True) self.assertEqual(self.sq.build_query(), "(NOT ((hello)) OR (hello))") def test_build_query_multiple_words_mixed(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(content="hello"), use_or=True) self.sq.add_filter(~SQ(content="world")) self.assertEqual( self.sq.build_query(), "(((why) OR (hello)) AND NOT ((world)))" ) def test_build_query_phrase(self): self.sq.add_filter(SQ(content="hello world")) self.assertEqual(self.sq.build_query(), "(hello AND world)") self.sq.add_filter(SQ(content__exact="hello world")) self.assertEqual( self.sq.build_query(), '((hello AND world) AND ("hello world"))' ) def test_build_query_boost(self): self.sq.add_filter(SQ(content="hello")) self.sq.add_boost("world", 5) self.assertEqual(self.sq.build_query(), "(hello) world^5") def test_correct_exact(self): self.sq.add_filter(SQ(content=Exact("hello world"))) self.assertEqual(self.sq.build_query(), '("hello world")') def test_build_query_multiple_filter_types(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(pub_date__lte=Exact("2009-02-10 01:59:00"))) self.sq.add_filter(SQ(author__gt="daniel")) self.sq.add_filter(SQ(created__lt=Exact("2009-02-12 12:13:00"))) self.sq.add_filter(SQ(title__gte="B")) self.sq.add_filter(SQ(id__in=[1, 2, 3])) self.sq.add_filter(SQ(rating__range=[3, 5])) self.assertEqual( self.sq.build_query(), '((why) AND pub_date:([* TO "2009-02-10 01:59:00"]) AND author:({"daniel" TO *}) AND created:({* TO "2009-02-12 12:13:00"}) AND title:(["B" TO *]) AND id:("1" OR "2" OR "3") AND rating:(["3" TO "5"]))', ) def test_build_complex_altparser_query(self): self.sq.add_filter(SQ(content=AltParser("dismax", "Don't panic", qf="text"))) self.sq.add_filter(SQ(pub_date__lte=Exact("2009-02-10 01:59:00"))) self.sq.add_filter(SQ(author__gt="daniel")) self.sq.add_filter(SQ(created__lt=Exact("2009-02-12 12:13:00"))) self.sq.add_filter(SQ(title__gte="B")) self.sq.add_filter(SQ(id__in=[1, 2, 3])) self.sq.add_filter(SQ(rating__range=[3, 5])) query = self.sq.build_query() self.assertTrue('(_query_:"{!dismax 
qf=text}Don\'t panic")' in query) self.assertTrue('pub_date:([* TO "2009-02-10 01:59:00"])' in query) self.assertTrue('author:({"daniel" TO *})' in query) self.assertTrue('created:({* TO "2009-02-12 12:13:00"})' in query) self.assertTrue('title:(["B" TO *])' in query) self.assertTrue('id:("1" OR "2" OR "3")' in query) self.assertTrue('rating:(["3" TO "5"])' in query) def test_build_query_multiple_filter_types_with_datetimes(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(pub_date__lte=datetime.datetime(2009, 2, 10, 1, 59, 0))) self.sq.add_filter(SQ(author__gt="daniel")) self.sq.add_filter(SQ(created__lt=datetime.datetime(2009, 2, 12, 12, 13, 0))) self.sq.add_filter(SQ(title__gte="B")) self.sq.add_filter(SQ(id__in=[1, 2, 3])) self.sq.add_filter(SQ(rating__range=[3, 5])) self.assertEqual( self.sq.build_query(), '((why) AND pub_date:([* TO "2009-02-10T01:59:00Z"]) AND author:({"daniel" TO *}) AND created:({* TO "2009-02-12T12:13:00Z"}) AND title:(["B" TO *]) AND id:("1" OR "2" OR "3") AND rating:(["3" TO "5"]))', ) def test_build_query_in_filter_multiple_words(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(title__in=["A Famous Paper", "An Infamous Article"])) self.assertEqual( self.sq.build_query(), '((why) AND title:("A Famous Paper" OR "An Infamous Article"))', ) def test_build_query_in_filter_datetime(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(pub_date__in=[datetime.datetime(2009, 7, 6, 1, 56, 21)])) self.assertEqual( self.sq.build_query(), '((why) AND pub_date:("2009-07-06T01:56:21Z"))' ) def test_build_query_in_with_set(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(title__in=set(["A Famous Paper", "An Infamous Article"]))) query = self.sq.build_query() self.assertTrue("(why)" in query) # Because ordering in Py3 is now random. if 'title:("A ' in query: self.assertTrue( 'title:("A Famous Paper" OR "An Infamous Article")' in query ) else: self.assertTrue( 'title:("An Infamous Article" OR "A Famous Paper")' in query ) def test_build_query_with_contains(self): self.sq.add_filter(SQ(content="circular")) self.sq.add_filter(SQ(title__contains="haystack")) self.assertEqual(self.sq.build_query(), "((circular) AND title:(*haystack*))") def test_build_query_with_endswith(self): self.sq.add_filter(SQ(content="circular")) self.sq.add_filter(SQ(title__endswith="haystack")) self.assertEqual(self.sq.build_query(), "((circular) AND title:(*haystack))") def test_build_query_wildcard_filter_types(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(title__startswith="haystack")) self.assertEqual(self.sq.build_query(), "((why) AND title:(haystack*))") def test_build_query_fuzzy_filter_types(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(title__fuzzy="haystack")) self.assertEqual(self.sq.build_query(), "((why) AND title:(haystack~))") def test_clean(self): self.assertEqual(self.sq.clean("hello world"), "hello world") self.assertEqual(self.sq.clean("hello AND world"), "hello and world") self.assertEqual( self.sq.clean( 'hello AND OR NOT TO + - && || ! ( ) { } [ ] ^ " ~ * ? : \ / world' ), 'hello and or not to \\+ \\- \\&& \\|| \\! \\( \\) \\{ \\} \\[ \\] \\^ \\" \\~ \\* \\? 
\\: \\\\ \\/ world', ) self.assertEqual( self.sq.clean("so please NOTe i am in a bAND and bORed"), "so please NOTe i am in a bAND and bORed", ) def test_build_query_with_models(self): self.sq.add_filter(SQ(content="hello")) self.sq.add_model(MockModel) self.assertEqual(self.sq.build_query(), "(hello)") self.sq.add_model(AnotherMockModel) self.assertEqual(self.sq.build_query(), "(hello)") def test_set_result_class(self): # Assert that we're defaulting to ``SearchResult``. self.assertTrue(issubclass(self.sq.result_class, SearchResult)) # Custom class. class IttyBittyResult(object): pass self.sq.set_result_class(IttyBittyResult) self.assertTrue(issubclass(self.sq.result_class, IttyBittyResult)) # Reset to default. self.sq.set_result_class(None) self.assertTrue(issubclass(self.sq.result_class, SearchResult)) def test_in_filter_values_list(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(title__in=MockModel.objects.values_list("id", flat=True))) self.assertEqual(self.sq.build_query(), '((why) AND title:("1" OR "2" OR "3"))') def test_narrow_sq(self): sqs = SearchQuerySet(using="solr").narrow(SQ(foo="moof")) self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.narrow_queries), 1) self.assertEqual(sqs.query.narrow_queries.pop(), "foo:(moof)") def test_query__in(self): sqs = SearchQuerySet(using="solr").filter(id__in=[1, 2, 3]) self.assertEqual(sqs.query.build_query(), 'id:("1" OR "2" OR "3")') def test_query__in_empty_list(self): """Confirm that an empty list avoids a Solr exception""" sqs = SearchQuerySet(using="solr").filter(id__in=[]) self.assertEqual(sqs.query.build_query(), "id:(!*:*)")
the-stack_0_4216
from select import select
from scapy.all import conf, ETH_P_ALL, MTU, plist


# Stop sniff() asynchronously
# Source: https://github.com/secdev/scapy/issues/989#issuecomment-380044430
def sniff(store=False, prn=None, lfilter=None,
          stop_event=None, refresh=.1, *args, **kwargs):
    """Sniff packets
    sniff([count=0,] [prn=None,] [store=1,] [offline=None,] [lfilter=None,] + L2ListenSocket args)
    store: whether to store sniffed packets or discard them
    prn: function to apply to each packet. If something is returned,
         it is displayed.
         ex: prn = lambda x: x.summary()
    lfilter: python function applied to each packet to determine
             if further action may be done
             ex: lfilter = lambda x: x.haslayer(Padding)
    stop_event: Event that stops the function when set
    refresh: check stop_event.set() every refresh seconds
    """
    s = conf.L2listen(type=ETH_P_ALL, *args, **kwargs)
    lst = []
    try:
        while True:
            if stop_event and stop_event.is_set():
                break
            sel = select([s], [], [], refresh)
            if s in sel[0]:  # the socket s is ready to be read from
                p = s.recv(MTU)  # receive one packet
                if p is None:
                    break
                if lfilter and not lfilter(p):
                    continue
                if store:
                    lst.append(p)
                if prn:
                    r = prn(p)
                    if r is not None:
                        print(r)
    except KeyboardInterrupt:
        pass
    finally:
        s.close()

    return plist.PacketList(lst, "Sniffed")
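A brief usage sketch for the stoppable sniffer above, run in a background thread; it assumes the process has the privileges scapy's L2 listener needs, and the thread and event names are illustrative.

import threading

stop_event = threading.Event()
sniffer = threading.Thread(
    target=sniff,
    kwargs={'prn': lambda pkt: pkt.summary(), 'stop_event': stop_event},
)
sniffer.start()
# ... later, from the main thread:
stop_event.set()   # sniff() notices this within `refresh` seconds
sniffer.join()     # note: the PacketList returned by sniff() is discarded here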
the-stack_0_4218
import logging as log

from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model

from django_keycloak.keycloak import Connect


class Command(BaseCommand):
    help = "Synchronize users with keycloak"

    def handle(self, *args, **options):
        keycloak = Connect()
        User = get_user_model()

        remote_users = set([user.get("id") for user in keycloak.get_users()])
        local_users = set(str(_u.id) for _u in User.objects.all())

        users_to_remove = local_users.difference(remote_users)
        users_to_add = remote_users.difference(local_users)

        # Delete users that are no longer in keycloak
        User.objects.filter(id__in=list(users_to_remove)).delete()

        log.info(
            "Removed {} users; there are {} new users in keycloak that do not"
            " exist locally".format(len(users_to_remove), len(users_to_add))
        )
the-stack_0_4220
import os
import logging
from functools import partial

import pandas as pd

from solarforecastarbiter.io.fetch import eia
from solarforecastarbiter.io.reference_observations import (
    common, default_forecasts)

from requests.exceptions import HTTPError

logger = logging.getLogger('reference_data')


def initialize_site_observations(api, site):
    """Creates an observation at the site.

    Parameters
    ----------
    api : solarforecastarbiter.io.api.APISession
        API Session object, authenticated for the Reference user.
    site : solarforecastarbiter.datamodel.Site
        The site object for which to create the Observations.

    Notes
    -----
    Currently only creates observations for net load [MW]
    (`f"EBA.{eia_site_id}.D.H"`), but EIA contains other variables that may
    be incorporated later (e.g. solar generation:
    `f"EBA.{eia_site_id}.NG.SUN.H"`).
    """
    sfa_var = "net_load"
    logger.info(f'Creating {sfa_var} at {site.name}')
    try:
        common.create_observation(api, site, sfa_var)
    except HTTPError as e:
        logger.error(f'Could not create Observation for "{sfa_var}" '
                     f'at EIA site {site.name}')
        logger.debug(f'Error: {e.response.text}')


def initialize_site_forecasts(api, site):
    """Creates a forecast at the site.

    Parameters
    ----------
    api : solarforecastarbiter.io.api.APISession
        API Session object, authenticated for the Reference user.
    site : solarforecastarbiter.datamodel.Site
        The site object for which to create the Observations.
    """
    common.create_forecasts(
        api, site, ["net_load"],
        default_forecasts.TEMPLATE_NETLOAD_PERSISTENCE_FORECASTS)


def fetch(api, site, start, end, *, eia_api_key):
    """Retrieve observation data for a EIA site between start and end.

    Parameters
    ----------
    api : solarforecastarbiter.io.APISession
        Unused but conforms to common.update_site_observations call
    site : solarforecastarbiter.datamodel.Site
        Site object with the appropriate metadata.
    start : datetime
        The beginning of the period to request data for.
    end : datetime
        The end of the period to request data for.
    eia_api_key : str
        API key for api.eia.gov

    Returns
    -------
    data : pandas.DataFrame
        All of the requested data as a single DataFrame.

    Notes
    -----
    Currently only fetches observations for net load [MW]
    (`f"EBA.{eia_site_id}.D.H"`), but EIA contains other variables that may
    be incorporated later (e.g. solar generation:
    `f"EBA.{eia_site_id}.NG.SUN.H"`).
    """
    try:
        site_extra_params = common.decode_extra_parameters(site)
    except ValueError:
        return pd.DataFrame()
    eia_site_id = site_extra_params['network_api_id']
    series_id = f"EBA.{eia_site_id}.D.H"  # hourly net load (demand)
    obs_df = eia.get_eia_data(
        series_id, eia_api_key, start, end
    )
    if obs_df.empty:
        logger.warning(f'Data for site {site.name} contained no '
                       f'entries from {start} to {end}.')
        return pd.DataFrame()
    obs_df = obs_df.rename(columns={"value": "net_load"})
    return obs_df


def update_observation_data(api, sites, observations, start, end):
    """Retrieve data from the network, and then format and post it to each
    observation at the site.

    Parameters
    ----------
    api : solarforecastarbiter.io.api.APISession
        An active Reference user session.
    sites: list of solarforecastarbiter.datamodel.Site
        List of all reference sites.
    observations: list of solarforecastarbiter.datamodel.Observation
        List of all reference observations.
    start : datetime
        The beginning of the period to request data for.
    end : datetime
        The end of the period to request data for.

    Raises
    ------
    KeyError
        If EIA_API_KEY environmental variable is not set.
    """
    eia_api_key = os.getenv("EIA_API_KEY")
    if eia_api_key is None:
        raise KeyError('"EIA_API_KEY" environment variable must be '
                       'set to update EIA observation data.')
    eia_sites = common.filter_by_networks(sites, ['EIA'])
    for site in eia_sites:
        common.update_site_observations(
            api, partial(fetch, eia_api_key=eia_api_key), site,
            observations, start, end)
the-stack_0_4223
import math

file = open('day-5.input')

result = 0

# F, L ==> lower half ==> [min, math.floor((max + min) / 2)]
# B, R ==> upper half ==> [math.ceil((max + min) / 2), max]


def get_row(expression):
    min = 0
    max = 127
    for i in range(7):
        selector = expression[i]
        if selector == 'F':
            max = math.floor((max + min) / 2)
        elif selector == 'B':
            min = math.ceil((max + min) / 2)
    return max


def get_column(expression):
    min = 0
    max = 7
    for i in range(3):
        selector = expression[i]
        if selector == 'L':
            max = math.floor((max + min) / 2)
        elif selector == 'R':
            min = math.ceil((max + min) / 2)
    return max


def get_seat_id(seat_row, seat_column):
    return 8 * seat_row + seat_column


all_seats = []
min_found_seat_row = 127
max_found_seat_row = 0

for row in range(1, 126):  # exclude very front and very back rows
    for column in range(8):
        all_seats.append((row, column))

for line in file:
    line = line.strip()
    seat_row = get_row(line[0:7])
    if seat_row < min_found_seat_row:
        min_found_seat_row = seat_row
    if seat_row > max_found_seat_row:
        max_found_seat_row = seat_row
    seat_column = get_column(line[-3:])
    all_seats.remove((seat_row, seat_column))

# drop candidate seats in the first and last occupied rows
for (row, column) in all_seats[:]:
    if row < min_found_seat_row + 1:
        all_seats.remove((row, column))
    elif row > max_found_seat_row - 1:
        all_seats.remove((row, column))

print(min_found_seat_row)
print(max_found_seat_row)

found_seat = all_seats[0]
result = get_seat_id(found_seat[0], found_seat[1])
print(result)
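The F/B and L/R selectors are just the bits of a binary number, so a boarding pass can also be decoded without the halving loop; a small cross-check using the well-known example pass FBFBBFFRLR (row 44, column 5, seat ID 357):

code = "FBFBBFFRLR"
row = int(code[:7].translate(str.maketrans("FB", "01")), 2)
column = int(code[7:].translate(str.maketrans("LR", "01")), 2)
assert (row, column) == (44, 5)
assert 8 * row + column == 357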
the-stack_0_4224
# coding: utf-8 """ NiFi Rest Api The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service. OpenAPI spec version: 1.11.1-SNAPSHOT Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class PositionDTO(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'x': 'float', 'y': 'float' } attribute_map = { 'x': 'x', 'y': 'y' } def __init__(self, x=None, y=None): """ PositionDTO - a model defined in Swagger """ self._x = None self._y = None if x is not None: self.x = x if y is not None: self.y = y @property def x(self): """ Gets the x of this PositionDTO. The x coordinate. :return: The x of this PositionDTO. :rtype: float """ return self._x @x.setter def x(self, x): """ Sets the x of this PositionDTO. The x coordinate. :param x: The x of this PositionDTO. :type: float """ self._x = x @property def y(self): """ Gets the y of this PositionDTO. The y coordinate. :return: The y of this PositionDTO. :rtype: float """ return self._y @y.setter def y(self, y): """ Sets the y of this PositionDTO. The y coordinate. :param y: The y of this PositionDTO. :type: float """ self._y = y def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, PositionDTO): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
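A minimal usage sketch of the generated model above; no NiFi connection is involved, only the class as defined here.

origin = PositionDTO(x=0.0, y=0.0)
moved = PositionDTO(x=480.0, y=120.0)

print(moved.to_dict())   # {'x': 480.0, 'y': 120.0}
print(origin == moved)   # False
moved.y = 0.0            # goes through the generated setter
print(moved.to_str())    # pretty-printed via pformat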
the-stack_0_4225
#!/usr/bin/env python3 #------------------------------------------------------------------------------- # ============LICENSE_START======================================================= # Copyright (C) 2018 Sven van der Meer. All rights reserved. # ================================================================================ # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # SPDX-License-Identifier: Apache-2.0 # ============LICENSE_END========================================================= #------------------------------------------------------------------------------- ## ## acronyms-val - validates YAML files of SKB acronyms ## ## @author Sven van der Meer <[email protected]> ## @version v0.0.0 ## ## ## Includes, all we need ## import yaml ## parsing YAML files import os ## operating system, e.g. file handling from os import walk ## for walking directories import functools ## some tools for functions import sys, getopt ## system for exit, getopt for CLI parsing import glob ## gobal globbing to get YAML files recursively import pathlib ## mkdirs in Python import datetime ## to get date/time for ADOC files ## ## Global variables ## task_level = "warn" ## warning level yaml_dir = '' ## YAML directory acronyms = {} ## dictionary of acronyms ## ## DO NOT CHANGE CODE BELOW, unless you know what you are doing ## ## ## function: print help, for empty or wrong command line ## def help(): print("") print("acronyms-val - validates YAML files of SKB acronyms\n") print(" Usage: acronyms-val [options]\n") print(" Options") print(" [-h | --help] - this help screen") print(" [-T | --task-level] <level> - task log level: error, warn, warn-strict, info, debug, trace") print(" [-y | --yaml-directory] <dir> - top YAML directory") print("\n") ## ## function: parse command line ## def cli(argv): global yaml_dir global task_level try: opts, args = getopt.getopt(argv,"hT:y:",["yaml-directory=","help","task-level="]) except getopt.GetoptError: help() sys.exit(70) for opt, arg in opts: if opt in ("-h", "--help"): help() sys.exit(0) elif opt in ("-T", "--task-level"): task_level = arg elif opt in ("-y", "--yaml-directory"): yaml_dir = arg ## ## function: validates a single YAML file ## def validate_file(file, entries, key): ## check for required keys found_keys = True expected_keys = ( 'short' , 'short-target', 'long', 'long-target', 'description', 'notes', 'urls') errors = "" if not 'short' in entries: errors += " --> did not find key 'short'\n" found_keys = False else: if len(entries['short']) == 0: errors += " --> key 'short' with no entry\n" found_keys = False if not 'long' in entries: errors += " --> did not find key 'long'\n" found_keys = False else: if len(entries['long']) == 0: errors += " --> key 'long' with no entry\n" found_keys = False if 'long-target' in entries and len(entries['long-target']) == 0: errors += " --> key 'long-target' with no entry\n" found_keys = False if 'description' in entries and len(entries['description']) == 0: errors += " --> key 'description' with no entry\n" found_keys = False if 'notes' 
in entries and len(entries['notes']) == 0: errors += " --> key 'notes' with no entry\n" found_keys = False if 'urls' in entries and len(entries['urls']) == 0: errors += " --> key 'urls' with no entry\n" found_keys = False if not all(elem in expected_keys for elem in entries): errors += " --> unknown key\n" found_keys = False file_short = file[len(yaml_dir)+1:] dir_short = file_short.rsplit('/', 1)[0] key_short = key.rsplit('/', 1)[0] if not key_short == dir_short: errors += " --> something wrong in key path (" + key_short + ") and directory (" + dir_short + ")\n" found_keys = False if found_keys == False: print(" -> validation failed") print("%s" % errors) sys.exit(80) ## ## function: process a single YAML file ## def process_file(file): file_exists = os.path.isfile(file) if file_exists == True: stream = open(file,'r') data = yaml.load(stream) stream.close() entries = data[list(data.keys())[0]] ## dictionary with all entries key = list(data.keys())[0] ## key name of the YAML spec validate_file(file, entries, key) if not key in acronyms: entries['src-file'] = file acronyms[key] = entries else: print(" -> key %s already in dictionary, defined in %s" % (key, acronyms[key]['src-file'])) sys.exit(80) else: print("error: could not open file: %s" % file) sys.exit(72) ## ## function: main function ## def main(argv): cli(argv) print(" > YAML directory: %s" % yaml_dir) dir_exists = os.path.isdir(yaml_dir) if dir_exists == True: files = glob.glob(yaml_dir + '/**/*.yaml', recursive=True) for file in files: print("\n > processing: .../%s" % file[len(yaml_dir)+1:]) process_file(file) print("\n > processed %d YAML files, found %d acronyms" % (len(files), len(acronyms))) else: print("error: could not open YAML directory: %s" % yaml_dir) sys.exit(71) ## ## Call main ## if __name__ == "__main__": main(sys.argv[1:]) print(" > done")
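For reference, a hypothetical acronym entry that would pass validate_file(): the top-level key has to mirror the file's directory path below the YAML root (here the made-up file <yaml-root>/networking/dns.yaml), and non-empty 'short' and 'long' keys must be present.

import yaml

sample = {
    "networking/DNS": {
        "short": "DNS",
        "long": "Domain Name System",
        "description": "Hierarchical naming system for hosts on a network.",
    }
}
# expected content of <yaml-root>/networking/dns.yaml
print(yaml.safe_dump(sample, sort_keys=False))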
the-stack_0_4227
""" The Sponge Roll Problem with Columnwise Column Generation for the PuLP Modeller Authors: Antony Phillips, Dr Stuart Mitchell 2008 """ # Import Column Generation functions from CGcolumnwise import * # The Master Problem is created prob, obj, constraints = createMaster() # A list of starting patterns is created newPatterns = [[1,0,0],[0,1,0],[0,0,1]] # New patterns will be added until newPatterns is an empty list while newPatterns: # The new patterns are added to the problem addPatterns(obj,constraints,newPatterns) # The master problem is solved, and the dual variables are returned duals = masterSolve(prob) # The sub problem is solved and a new pattern will be returned if there is one # which can reduce the master objective function newPatterns = subSolve(duals) # The master problem is solved with Integer Constraints not relaxed solution, varsdict = masterSolve(prob,relax = False) # Display Solution for i,j in list(varsdict.items()): print(i, "=", j) print("objective = ", solution)
the-stack_0_4230
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2019-Present Datadog, Inc. import re # noqa: F401 import sys # noqa: F401 from datadog_api_client.v2.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, ) class LogsListRequestPage(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = {} validations = { ("limit",): { "inclusive_maximum": 1000, }, } additional_properties_type = None _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ return { "cursor": (str,), # noqa: E501 "limit": (int,), # noqa: E501 } @cached_property def discriminator(): return None attribute_map = { "cursor": "cursor", # noqa: E501 "limit": "limit", # noqa: E501 } _composed_schemas = {} required_properties = set( [ "_data_store", "_check_type", "_spec_property_naming", "_path_to_item", "_configuration", "_visited_composed_classes", ] ) @convert_js_args_to_python_args def __init__(self, *args, **kwargs): # noqa: E501 """LogsListRequestPage - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. 
For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) cursor (str): List following results with a cursor provided in the previous query.. [optional] # noqa: E501 limit (int): Maximum number of logs in the response.. [optional] if omitted the server will use the default value of 10 # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) _spec_property_naming = kwargs.pop("_spec_property_naming", False) _path_to_item = kwargs.pop("_path_to_item", ()) _configuration = kwargs.pop("_configuration", None) _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map and self._configuration is not None and self._configuration.discard_unknown_keys and self.additional_properties_type is None ): # discard variable. continue setattr(self, var_name, var_value)
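A construction sketch for the model above; the import path follows the usual layout of the generated datadog-api-client package and may differ between releases.

from datadog_api_client.v2.model.logs_list_request_page import LogsListRequestPage

page = LogsListRequestPage(limit=50, cursor="next-page-cursor")
print(page.limit, page.cursor)  # 50 next-page-cursor
# the `validations` table above caps `limit` at an inclusive maximum of 1000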
the-stack_0_4231
# Copyright 2021, The TensorFlow Federated Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pytype: skip-file # This modules disables the Pytype analyzer, see # https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more # information. """A set of utility methods for serializing Value protos using pybind11 bindings.""" import collections import os import os.path import tempfile from typing import Any, Collection, List, Mapping, Optional, Sequence, Tuple, Union import warnings import zipfile import numpy as np import tensorflow as tf from tensorflow_federated.proto.v0 import computation_pb2 from tensorflow_federated.proto.v0 import executor_pb2 from tensorflow_federated.python.common_libs import py_typecheck from tensorflow_federated.python.common_libs import structure from tensorflow_federated.python.common_libs import tracing from tensorflow_federated.python.core.impl.computation import computation_impl from tensorflow_federated.python.core.impl.executors import executor_bindings from tensorflow_federated.python.core.impl.executors import executor_utils from tensorflow_federated.python.core.impl.types import computation_types from tensorflow_federated.python.core.impl.types import placements from tensorflow_federated.python.core.impl.types import type_analysis from tensorflow_federated.python.core.impl.types import type_conversions from tensorflow_federated.python.core.impl.types import type_serialization from tensorflow_federated.python.core.impl.types import type_transformations from tensorflow_federated.python.core.impl.utils import tensorflow_utils _SerializeReturnType = Tuple[executor_pb2.Value, computation_types.Type] _DeserializeReturnType = Tuple[Any, computation_types.Type] # The maximum size allowed for serialized sequence values. Sequence that # serialize to values larger than this will result in errors being raised. This # likely occurs when the sequence is dependent on, and thus pulling in, many of # variables from the graph. _DEFAULT_MAX_SERIALIZED_SEQUENCE_SIZE_BYTES = 20 * (1024**2) # 20 MB class DatasetSerializationError(Exception): """Error raised during Dataset serialization or deserialization.""" pass @tracing.trace def _serialize_computation( comp: computation_pb2.Computation, type_spec: Optional[computation_types.Type]) -> _SerializeReturnType: """Serializes a TFF computation.""" type_spec = executor_utils.reconcile_value_type_with_type_spec( type_serialization.deserialize_type(comp.type), type_spec) return executor_pb2.Value(computation=comp), type_spec @tracing.trace def _serialize_tensor_value( value: Any, type_spec: computation_types.TensorType ) -> Tuple[executor_pb2.Value, computation_types.TensorType]: """Serializes a tensor value into `executor_pb2.Value`. Args: value: A Numpy array or other object understood by `tf.make_tensor_proto`. type_spec: A `tff.TensorType`. 
Returns: A tuple `(value_proto, ret_type_spec)` in which `value_proto` is an instance of `executor_pb2.Value` with the serialized content of `value`, and `ret_type_spec` is the type of the serialized value. The `ret_type_spec` is the same as the argument `type_spec` if that argument was not `None`. If the argument was `None`, `ret_type_spec` is a type determined from `value`. Raises: TypeError: If the arguments are of the wrong types. ValueError: If the value is malformed. """ original_value = value if tf.is_tensor(value): if isinstance(value, tf.Variable): value = value.read_value() if tf.executing_eagerly(): value = value.numpy() else: # Attempt to extract the value using the current graph context. with tf.compat.v1.Session() as sess: value = sess.run(value) # If we got a string or bytes scalar, wrap it in numpy so it has a dtype and # shape. if isinstance(value, bytes): value = np.bytes_(value) elif isinstance(value, str): value = np.str_(value) else: value = np.asarray(value) if not tf.TensorShape(value.shape).is_compatible_with(type_spec.shape): raise TypeError(f'Cannot serialize tensor with shape {value.shape} to ' f'shape {type_spec.shape}.') if value.dtype != type_spec.dtype.as_numpy_dtype: try: value = value.astype(type_spec.dtype.as_numpy_dtype, casting='same_kind') except TypeError as te: value_type_string = py_typecheck.type_string(type(original_value)) raise TypeError( f'Failed to serialize value of Python type {value_type_string} to ' f'a tensor of type {type_spec}.\nValue: {original_value}') from te return executor_bindings.serialize_tensor_value(value), type_spec def _serialize_dataset( dataset, max_serialized_size_bytes=_DEFAULT_MAX_SERIALIZED_SEQUENCE_SIZE_BYTES): """Serializes a `tf.data.Dataset` value into a `bytes` object. Args: dataset: A `tf.data.Dataset`. max_serialized_size_bytes: An `int` size in bytes designating the threshold on when to raise an error if the resulting serialization is too big. Returns: A `bytes` object that can be sent to `tensorflow_serialization.deserialize_dataset` to recover the original `tf.data.Dataset`. Raises: SerializationError: if there was an error in TensorFlow during serialization. """ py_typecheck.check_type(dataset, type_conversions.TF_DATASET_REPRESENTATION_TYPES) dataset_graph = tf.raw_ops.DatasetToGraphV2( input_dataset=tf.data.experimental.to_variant(dataset)) if tf.executing_eagerly(): dataset_graph_def_bytes = dataset_graph.numpy() else: dataset_graph_def_bytes = tf.compat.v1.Session().run(dataset_graph) if len(dataset_graph_def_bytes) > max_serialized_size_bytes: raise ValueError('Serialized size of Dataset ({:d} bytes) exceeds maximum ' 'allowed ({:d} bytes)'.format( len(dataset_graph_def_bytes), max_serialized_size_bytes)) return dataset_graph_def_bytes def _check_container_compat_with_tf_nest(type_spec: computation_types.Type): """Asserts that all `StructTypes` with names have OrderedDict containers.""" def _names_are_in_sorted_order(name_sequence: Sequence[str]) -> bool: return sorted(name_sequence) == name_sequence def _check_ordereddict_container_for_struct(type_to_check): if not type_to_check.is_struct(): return type_to_check, False # We can't use `dir` here, since it sorts the names before returning. We # also must filter to names which are actually present. 
names_in_sequence_order = structure.name_list(type_to_check) names_are_sorted = _names_are_in_sorted_order(names_in_sequence_order) has_no_names = not bool(names_in_sequence_order) if has_no_names or (names_in_sequence_order and names_are_sorted): # If alphabetical order matches sequence order, TFF's deserialization will # traverse the structure correctly; there is no ambiguity here. On the # other hand, if there are no names, sequence order is the only method of # traversal, so there is no ambiguity here either. return type_to_check, False elif not type_to_check.is_struct_with_python(): raise ValueError('Attempting to serialize a named struct type with ' 'ambiguous traversal order (sequence order distinct ' 'from alphabetical order) without a Python container; ' 'this is an unsafe operation, as TFF cannot determine ' 'the intended traversal order after deserializing the ' 'proto due to inconsistent behavior of tf.nest.') container_type = computation_types.StructWithPythonType.get_container_type( type_to_check) if (not names_are_sorted) and container_type is not collections.OrderedDict: raise ValueError('Attempted to serialize a dataset yielding named ' 'elements in non-sorted sequence order with ' f'non-OrderedDict container (type {container_type}). ' 'This is an ambiguous operation; `tf.nest` behaves in ' 'a manner which depends on the Python type of this ' 'container, so coercing the dataset reconstructed ' 'from the resulting Value proto depends on assuming a ' 'single Python type here. Please prefer to use ' '`collections.OrderedDict` containers for the elements ' 'your dataset yields.') return type_to_check, False type_transformations.transform_type_postorder( type_spec, _check_ordereddict_container_for_struct) @tracing.trace def _serialize_sequence_value( value: Union[Union[type_conversions.TF_DATASET_REPRESENTATION_TYPES], List[Any]], type_spec: computation_types.SequenceType ) -> computation_types.SequenceType: """Serializes a `tf.data.Dataset` value into `executor_pb2.Value`. Args: value: A `tf.data.Dataset`, or equivalent list of values convertible to (potentially structures of) tensors. type_spec: A `computation_types.Type` specifying the TFF sequence type of `value.` Returns: A tuple `(value_proto, type_spec)` in which `value_proto` is an instance of `executor_pb2.Value` with the serialized content of `value`, and `type_spec` is the type of the serialized value. """ if isinstance(value, list): value = tensorflow_utils.make_data_set_from_elements( None, value, type_spec.element) if not isinstance(value, type_conversions.TF_DATASET_REPRESENTATION_TYPES): raise TypeError( 'Cannot serialize Python type {!s} as TFF type {!s}.'.format( py_typecheck.type_string(type(value)), type_spec if type_spec is not None else 'unknown')) element_type = computation_types.to_type(value.element_spec) _check_container_compat_with_tf_nest(element_type) value_type = computation_types.SequenceType(element_type) if not type_spec.is_assignable_from(value_type): raise TypeError( 'Cannot serialize dataset with elements of type {!s} as TFF type {!s}.' .format(value_type, type_spec if type_spec is not None else 'unknown')) value_proto = executor_pb2.Value() # TFF must store the type spec here because TF will lose the ordering of the # names for `tf.data.Dataset` that return elements of # `collections.abc.Mapping` type. This allows TFF to preserve and restore the # key ordering upon deserialization. 
value_proto.sequence.serialized_graph_def = _serialize_dataset(value) value_proto.sequence.element_type.CopyFrom( type_serialization.serialize_type(element_type)) return value_proto, type_spec @tracing.trace def _serialize_struct_type( struct_typed_value: Any, type_spec: computation_types.StructType, ) -> computation_types.StructType: """Serializes a value of tuple type.""" value_structure = structure.from_container(struct_typed_value) if len(value_structure) != len(type_spec): raise TypeError('Cannot serialize a struct value of ' f'{len(value_structure)} elements to a struct type ' f'requiring {len(type_spec)} elements. Trying to serialize' f'\n{struct_typed_value!r}\nto\n{type_spec}.') type_elem_iter = structure.iter_elements(type_spec) val_elem_iter = structure.iter_elements(value_structure) elements = [] for (e_name, e_type), (_, e_val) in zip(type_elem_iter, val_elem_iter): e_value, _ = serialize_value(e_val, e_type) if e_name: element = executor_pb2.Value.Struct.Element(name=e_name, value=e_value) else: element = executor_pb2.Value.Struct.Element(value=e_value) elements.append(element) value_proto = executor_pb2.Value( struct=executor_pb2.Value.Struct(element=elements)) return value_proto, type_spec @tracing.trace def _serialize_federated_value( federated_value: Any, type_spec: computation_types.FederatedType ) -> computation_types.FederatedType: """Serializes a value of federated type.""" if type_spec.all_equal: value = [federated_value] else: value = federated_value py_typecheck.check_type(value, list) value_proto = executor_pb2.Value() for v in value: federated_value_proto, it_type = serialize_value(v, type_spec.member) type_spec.member.check_assignable_from(it_type) value_proto.federated.value.append(federated_value_proto) value_proto.federated.type.CopyFrom( type_serialization.serialize_type(type_spec).federated) return value_proto, type_spec @tracing.trace def serialize_value( value: Any, type_spec: Optional[computation_types.Type] = None, ) -> _SerializeReturnType: """Serializes a value into `executor_pb2.Value`. We use a switch/function pattern in the body here (and in `deserialize_value` below in order to persist more information in traces and profiling. Args: value: A value to be serialized. type_spec: Optional type spec, a `tff.Type` or something convertible to it. Returns: A 2-tuple of serialized value and `tff.Type` that represents the TFF type of the serialized value. Raises: TypeError: If the arguments are of the wrong types. ValueError: If the value is malformed. """ type_spec = computation_types.to_type(type_spec) if isinstance(value, computation_pb2.Computation): return _serialize_computation(value, type_spec) elif isinstance(value, computation_impl.ConcreteComputation): return _serialize_computation( computation_impl.ConcreteComputation.get_proto(value), executor_utils.reconcile_value_with_type_spec(value, type_spec)) elif type_spec is None: raise TypeError('A type hint is required when serializing a value which ' 'is not a TFF computation. 
Asked to serialized value {v} ' ' of type {t} with None type spec.'.format( v=value, t=type(value))) elif type_spec.is_tensor(): return _serialize_tensor_value(value, type_spec) elif type_spec.is_sequence(): return _serialize_sequence_value(value, type_spec) elif type_spec.is_struct(): return _serialize_struct_type(value, type_spec) elif type_spec.is_federated(): return _serialize_federated_value(value, type_spec) else: raise ValueError( 'Unable to serialize value with Python type {} and {} TFF type.'.format( str(py_typecheck.type_string(type(value))), str(type_spec) if type_spec is not None else 'unknown')) @tracing.trace def _deserialize_computation( value_proto: executor_pb2.Value) -> _DeserializeReturnType: """Deserializes a TFF computation.""" return (value_proto.computation, type_serialization.deserialize_type(value_proto.computation.type)) @tracing.trace def _deserialize_tensor_value( value_proto: executor_pb2.Value) -> _DeserializeReturnType: """Deserializes a tensor value from `.Value`. Args: value_proto: An instance of `executor_pb2.Value`. Returns: A tuple `(value, type_spec)`, where `value` is a Numpy array that represents the deserialized value, and `type_spec` is an instance of `tff.TensorType` that represents its type. Raises: TypeError: If the arguments are of the wrong types. ValueError: If the value is malformed. """ value = executor_bindings.deserialize_tensor_value(value_proto) value_type = computation_types.TensorType( dtype=value.dtype, shape=value.shape) if not value.shape: # Unwrap the scalar array as just a primitive numeric. value = value.dtype.type(value) return value, value_type def _deserialize_dataset_from_zipped_saved_model(serialized_bytes): """Deserializes a zipped SavedModel `bytes` object to a `tf.data.Dataset`. DEPRECATED: this method is deprecated and replaced by `_deserialize_dataset_from_graph_def`. Args: serialized_bytes: `bytes` object produced by older versions of `tensorflow_serialization.serialize_dataset` that produced zipped SavedModel `bytes` strings. Returns: A `tf.data.Dataset` instance. Raises: SerializationError: if there was an error in TensorFlow during serialization. """ py_typecheck.check_type(serialized_bytes, bytes) temp_dir = tempfile.mkdtemp('dataset') fd, temp_zip = tempfile.mkstemp('zip') os.close(fd) try: with open(temp_zip, 'wb') as f: f.write(serialized_bytes) with zipfile.ZipFile(temp_zip, 'r') as z: z.extractall(path=temp_dir) loaded = tf.saved_model.load(temp_dir) # TODO(b/156302055): Follow up here when bug is resolved, either remove # if this function call stops failing by default, or leave if this is # working as intended. with tf.device('cpu'): ds = loaded.dataset_fn() except Exception as e: # pylint: disable=broad-except raise DatasetSerializationError( 'Error deserializing tff.Sequence value. Inner error: {!s}'.format( e)) from e finally: tf.io.gfile.rmtree(temp_dir) tf.io.gfile.remove(temp_zip) return ds def _deserialize_dataset_from_graph_def(serialized_graph_def: bytes, element_type: computation_types.Type): """Deserializes a serialized `tf.compat.v1.GraphDef` to a `tf.data.Dataset`. Args: serialized_graph_def: `bytes` object produced by `tensorflow_serialization.serialize_dataset` element_type: a `tff.Type` object representing the type structure of the elements yielded from the dataset. Returns: A `tf.data.Dataset` instance. 
""" py_typecheck.check_type(element_type, computation_types.Type) type_analysis.check_tensorflow_compatible_type(element_type) def transform_to_tff_known_type( type_spec: computation_types.Type) -> Tuple[computation_types.Type, bool]: """Transforms `StructType` to `StructWithPythonType`.""" if type_spec.is_struct() and not type_spec.is_struct_with_python(): field_is_named = tuple( name is not None for name, _ in structure.iter_elements(type_spec)) has_names = any(field_is_named) is_all_named = all(field_is_named) if is_all_named: return computation_types.StructWithPythonType( elements=structure.iter_elements(type_spec), container_type=collections.OrderedDict), True elif not has_names: return computation_types.StructWithPythonType( elements=structure.iter_elements(type_spec), container_type=tuple), True else: raise TypeError('Cannot represent TFF type in TF because it contains ' f'partially named structures. Type: {type_spec}') return type_spec, False if element_type.is_struct(): # TF doesn't support `structure.Struct` types, so we must transform the # `StructType` into a `StructWithPythonType` for use as the # `tf.data.Dataset.element_spec` later. tf_compatible_type, _ = type_transformations.transform_type_postorder( element_type, transform_to_tff_known_type) else: # We've checked this is only a struct or tensors, so we know this is a # `TensorType` here and will use as-is. tf_compatible_type = element_type def type_to_tensorspec(t: computation_types.TensorType) -> tf.TensorSpec: return tf.TensorSpec(shape=t.shape, dtype=t.dtype) element_spec = type_conversions.structure_from_tensor_type_tree( type_to_tensorspec, tf_compatible_type) ds = tf.data.experimental.from_variant( tf.raw_ops.DatasetFromGraph(graph_def=serialized_graph_def), structure=element_spec) # If a serialized dataset had elements of nested structes of tensors (e.g. # `dict`, `OrderedDict`), the deserialized dataset will return `dict`, # `tuple`, or `namedtuple` (loses `collections.OrderedDict` in a conversion). # # Since the dataset will only be used inside TFF, we wrap the dictionary # coming from TF in an `OrderedDict` when necessary (a type that both TF and # TFF understand), using the field order stored in the TFF type stored during # serialization. return tensorflow_utils.coerce_dataset_elements_to_tff_type_spec( ds, tf_compatible_type) @tracing.trace def _deserialize_sequence_value( sequence_value_proto: executor_pb2.Value.Sequence, type_hint: Optional[computation_types.Type] = None ) -> _DeserializeReturnType: """Deserializes a `tf.data.Dataset`. Args: sequence_value_proto: `Sequence` protocol buffer message. type_hint: A `computation_types.Type` that hints at what the value type should be for executors that only return values. If the `sequence_value_proto.element_type` field was not set, the `type_hint` is used instead. Returns: A tuple of `(tf.data.Dataset, tff.Type)`. """ if sequence_value_proto.HasField('element_type'): element_type = type_serialization.deserialize_type( sequence_value_proto.element_type) elif type_hint is not None: element_type = type_hint.element else: raise ValueError( 'Cannot deserialize a sequence Value proto that without one of ' '`element_type` proto field or `element_type_hint`') which_value = sequence_value_proto.WhichOneof('value') if which_value == 'zipped_saved_model': warnings.warn( 'Deserializng a sequence value that was encoded as a zipped SavedModel.' 
' This is a deprecated path, please update the binary that is ' 'serializing the sequences.', DeprecationWarning) ds = _deserialize_dataset_from_zipped_saved_model( sequence_value_proto.zipped_saved_model) ds = tensorflow_utils.coerce_dataset_elements_to_tff_type_spec( ds, element_type) elif which_value == 'serialized_graph_def': ds = _deserialize_dataset_from_graph_def( sequence_value_proto.serialized_graph_def, element_type) else: raise NotImplementedError( 'Deserializing Sequences enocded as {!s} has not been implemented' .format(which_value)) return ds, computation_types.SequenceType(element=element_type) @tracing.trace def _deserialize_struct_value( value_proto: executor_pb2.Value, type_hint: Optional[computation_types.Type] = None ) -> _DeserializeReturnType: """Deserializes a value of struct type.""" val_elems = [] type_elems = [] if type_hint is not None: element_types = tuple(type_hint) else: element_types = [None] * len(value_proto.struct.element) for e, e_type in zip(value_proto.struct.element, element_types): name = e.name if e.name else None e_val, e_type = deserialize_value(e.value, e_type) val_elems.append((name, e_val)) type_elems.append((name, e_type) if name else e_type) return (structure.Struct(val_elems), computation_types.StructType(type_elems)) def _ensure_deserialized_types_compatible( previous_type: Optional[computation_types.Type], next_type: computation_types.Type) -> computation_types.Type: """Ensures one of `previous_type` or `next_type` is assignable to the other. Returns the type which is assignable from the other. Args: previous_type: Instance of `computation_types.Type` or `None`. next_type: Instance of `computation_types.Type`. Returns: The supertype of `previous_type` and `next_type`. Raises: TypeError if neither type is assignable from the other. """ if previous_type is None: return next_type else: if next_type.is_assignable_from(previous_type): return next_type elif previous_type.is_assignable_from(next_type): return previous_type raise TypeError('Type mismatch checking member assignability under a ' 'federated value. Deserialized type {} is incompatible ' 'with previously deserialized {}.'.format( next_type, previous_type)) @tracing.trace def _deserialize_federated_value( value_proto: executor_pb2.Value, type_hint: Optional[computation_types.Type] = None ) -> _DeserializeReturnType: """Deserializes a value of federated type.""" if not value_proto.federated.value: raise ValueError('Attempting to deserialize federated value with no data.') # The C++ runtime doesn't use the `all_equal` boolean (and doesn't report it # in returned values), however the type_hint on the computation may contain # it. if type_hint is not None: all_equal = type_hint.all_equal else: all_equal = value_proto.federated.type.all_equal placement_uri = value_proto.federated.type.placement.value.uri # item_type will represent a supertype of all deserialized member types in the # federated value. This will be the hint used for deserialize member values. if type_hint is not None: item_type_hint = type_hint.member else: item_type_hint = None item_type = None if all_equal: # As an optimization, we only deserialize the first value of an # `all_equal=True` federated value. 
items = [value_proto.federated.value[0]] else: items = value_proto.federated.value value = [] for item in items: item_value, next_item_type = deserialize_value(item, item_type_hint) item_type = _ensure_deserialized_types_compatible(item_type, next_item_type) value.append(item_value) type_spec = computation_types.FederatedType( item_type, placement=placements.uri_to_placement_literal(placement_uri), all_equal=all_equal) if all_equal: value = value[0] return value, type_spec @tracing.trace def deserialize_value( value_proto: executor_pb2.Value, type_hint: Optional[computation_types.Type] = None ) -> _DeserializeReturnType: """Deserializes a value (of any type) from `executor_pb2.Value`. Args: value_proto: An instance of `executor_pb2.Value`. type_hint: A `comptuations_types.Type` that hints at what the value type should be for executors that only return values. Returns: A tuple `(value, type_spec)`, where `value` is a deserialized representation of the transmitted value (e.g., Numpy array, or a `pb.Computation` instance), and `type_spec` is an instance of `tff.TensorType` that represents its type. Raises: TypeError: If the arguments are of the wrong types. ValueError: If the value is malformed. """ if not hasattr(value_proto, 'WhichOneof'): raise TypeError('`value_proto` must be a protocol buffer message with a ' '`value` oneof field.') which_value = value_proto.WhichOneof('value') if which_value == 'tensor': return _deserialize_tensor_value(value_proto) elif which_value == 'computation': return _deserialize_computation(value_proto) elif which_value == 'sequence': return _deserialize_sequence_value(value_proto.sequence, type_hint) elif which_value == 'struct': return _deserialize_struct_value(value_proto, type_hint) elif which_value == 'federated': return _deserialize_federated_value(value_proto, type_hint) else: raise ValueError( 'Unable to deserialize a value of type {}.'.format(which_value)) CardinalitiesType = Mapping[placements.PlacementLiteral, int] def serialize_cardinalities( cardinalities: CardinalitiesType) -> List[executor_pb2.Cardinality]: serialized_cardinalities = [] for placement, cardinality in cardinalities.items(): cardinality_message = executor_pb2.Cardinality( placement=computation_pb2.Placement(uri=placement.uri), cardinality=cardinality) serialized_cardinalities.append(cardinality_message) return serialized_cardinalities def deserialize_cardinalities( serialized_cardinalities: Collection[executor_pb2.Cardinality] ) -> CardinalitiesType: cardinalities_dict = {} for cardinality_spec in serialized_cardinalities: literal = placements.uri_to_placement_literal( cardinality_spec.placement.uri) cardinalities_dict[literal] = cardinality_spec.cardinality return cardinalities_dict
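A round-trip sketch using the two helpers defined above (serialize_value / deserialize_value); calling them directly like this assumes the module is importable in the current TFF build, so treat it as illustrative rather than a supported entry point.

import numpy as np
import tensorflow as tf

from tensorflow_federated.python.core.impl.types import computation_types

proto, value_type = serialize_value(
    np.int32(7), computation_types.TensorType(tf.int32))
roundtrip, roundtrip_type = deserialize_value(proto, value_type)
assert roundtrip == 7
assert roundtrip_type.is_equivalent_to(value_type)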
the-stack_0_4232
#!/usr/bin/env python3 """ Utility functions for testing. """ import copy import numpy as np SEED = 42 def round_dict(d, precision=3): """Round all numerical values in a dictionary recursively.""" d = copy.deepcopy(d) if isinstance(d, dict): for k, v in d.items(): try: d[k] = round(v, precision) except TypeError: d[k] = round_dict(v) return d elif isinstance(d, list): return [round_dict(v) for v in d] elif isinstance(d, tuple): return tuple([round_dict(v) for v in d]) return d def random_real_series(x, add_null=False, limit_from=0, limit_to=5, seed=SEED): np.random.seed(seed) s = np.random.normal(x['mean'], x['std'], size=limit_to) s = np.minimum(np.maximum(s, x['minValue']), x['maxValue']) if add_null and len(s): s[np.random.choice(limit_to)] = None return list(s)[limit_from:] def random_integer_series(x, **kwargs): s = random_real_series(x, **kwargs) return [int(e) if e is not None else None for e in s] def random_nominal_series(x, add_null=False, limit_from=0, limit_to=5, seed=SEED): np.random.seed(seed) s = np.random.choice(x['type']['enumeration'], size=limit_to) if add_null and len(s): s[np.random.choice(limit_to)] = None return list(s)[limit_from:] def independent(include_real=True, include_integer=True, include_nominal=False, **kwargs): if 'add_independent_null' in kwargs: kwargs['add_null'] = kwargs.pop('add_independent_null') ret = [] if include_real: x = { 'name': 'subjectage', 'type': { 'name': 'real' }, 'series': [], 'mean': 70.4, 'std': 8.3, 'minValue': 30., 'maxValue': 90., 'label': 'Exact age' } x['series'] = random_real_series(x, seed=1, **kwargs) ret.append(x) if include_integer: x = { 'name': 'minimentalstate', 'type': { 'name': 'integer' }, 'series': [], 'mean': 24.4, 'std': 5.2, 'minValue': 0, 'maxValue': 30, 'label': 'MMSE Total scores' } x['series'] = random_integer_series(x, seed=2, **kwargs) ret.append(x) if include_nominal: x = { 'name': 'agegroup', 'type': { 'name': 'polynominal', 'enumeration': ['-50y', '50-59y'] }, 'label': 'Age Group', 'series': [] } x['series'] = random_nominal_series(x, seed=3, **kwargs) ret.append(x) return ret def inputs_regression(add_null=False, limit_from=0, limit_to=5, **kwargs): x = { 'name': 'lefthippocampus', 'label': 'Left Hippocampus', 'type': { 'name': 'real' }, 'series': [], 'mean': 3., 'std': 0.39, 'minValue': 1., 'maxValue': 5., } x['series'] = random_real_series(x, seed=4, add_null=add_null, limit_from=limit_from, limit_to=limit_to) return { 'data': { 'dependent': [x], 'independent': independent(limit_from=limit_from, limit_to=limit_to, **kwargs) }, 'parameters': [] } def inputs_classification(add_null=False, limit_from=0, limit_to=5, **kwargs): x = { 'name': 'adnicategory', 'label': 'ADNI category', 'type': { 'name': 'polynominal', 'enumeration': ['AD', 'CN', 'Other'], 'enumeration_labels': ['Alzheimers disease', 'Cognitively Normal', 'Other'] }, 'series': [] } x['series'] = random_nominal_series(x, seed=5, add_null=add_null, limit_from=limit_from, limit_to=limit_to) return { 'data': { 'dependent': [x], 'independent': independent(limit_from=limit_from, limit_to=limit_to, **kwargs) }, 'parameters': [] }
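A quick look at what the generators above produce (seeded, so the output is repeatable):

payload = inputs_classification(include_nominal=True, limit_to=3)
print(round_dict(payload, precision=2))

dependent = payload['data']['dependent'][0]
print(dependent['name'], list(dependent['series']))  # adnicategory plus 3 sampled labels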
the-stack_0_4233
import random import os.path import sys import logging import gtk import gs import gs.ui.rtgraph as rtgraph import gs.config as config LOG = logging.getLogger("graph") class FieldChannel(rtgraph.Channel): def __init__(self, msg, field): rtgraph.Channel.__init__(self) i = 0 for f in msg.fields: if f.name == field.name: self._fidx = i i += 1 self._val = 0 def getValue(self): return self._val def update_msg_value(self, vals): self._val = vals[self._fidx] class RandomChannel(FieldChannel): def getValue(self): return random.random() class Graph(rtgraph.HScrollLineGraph): def __init__(self, source, msg, field, double_buffer, ymin=0.0, ymax=1.0, width=150, height=50, rate=30): rtgraph.HScrollLineGraph.__init__(self, scrollRate=rate, size=(width,height), range=(ymin,ymax), autoScale=True, axisLabel=True, channels=[FieldChannel(msg, field)], doubleBuffer=double_buffer ) self._source = source self._source.register_interest(self._on_msg, 0, msg.name) def _on_msg(self, msg, header, payload): vals = msg.unpack_values(payload) for f in self.channels: f.update_msg_value(vals) def get_scroll_rate_widget(self): return self.getTweakControls()[0] def delete(self): self._source.unregister_interest(self._on_msg) class _GraphRange(gtk.VBox): def __init__(self, graph): gtk.VBox.__init__(self) graph.connect("range-changed", self._on_range_changed) mal = gtk.Label("Max:") self.maxadj = gtk.Adjustment() self._update_adjustment(self.maxadj) masb = gtk.SpinButton(self.maxadj) masb.props.digits = 1 self.maxadj.connect("value-changed", self._on_adj_changed, graph, 1) mil = gtk.Label("Min:") self.minadj = gtk.Adjustment() self._update_adjustment(self.minadj) misb = gtk.SpinButton(self.minadj) misb.props.digits = 1 self.minadj.connect("value-changed", self._on_adj_changed, graph, 0) self.pack_start(mal, False) self.pack_start(masb, False) self.pack_start(mil, False) self.pack_start(misb, False) def _update_adjustment(self, adj, value=0.0, lower=0.0, upper=0.0): adj.lower = lower adj.page_increment = 1.0 adj.step_increment = 0.1 adj.upper = upper adj.value = value def _on_range_changed(self, graph, min_, max_): self._update_adjustment(self.maxadj, value=max_, lower=min_, upper=(max_*1.5)) self._update_adjustment(self.minadj, value=min_, lower=(min_*1.5), upper=max_) def _on_adj_changed(self, adj, graph, idx): graph.handler_block_by_func(self._on_range_changed) graph.rescale(adj.get_value(), idx) graph.handler_unblock_by_func(self._on_range_changed) class GraphHolder(gtk.HBox): """ Composite widget holding a rtgraph and controls graph is a hbox: frame | [\___ ] | vertical buttons (pause, remove, etc) [ \ ] | range widgets """ def __init__(self, g, name, adjustable, on_pause, on_print, on_remove, on_fullscreen, on_log_data): gtk.HBox.__init__(self, spacing=5) self.graph = g frame = gtk.Frame(name) vb = gtk.VBox() vb.pack_start(g, True, True) tweak = None if adjustable: tweak = g.get_scroll_rate_widget() vb.pack_start(tweak.widget, False, False) frame.add(vb) self.pack_start(frame) vb = gtk.VBox() bbox = gtk.VButtonBox() bbox.set_layout(gtk.BUTTONBOX_END) vb.pack_start(bbox, True, True) if on_pause: b = gs.ui.get_button(stock=gtk.STOCK_MEDIA_PAUSE, xalign=0) b.connect("clicked", on_pause, tweak) bbox.pack_start(b, False, False) if on_print: b = gs.ui.get_button(stock=gtk.STOCK_PRINT, xalign=0) b.connect("clicked", on_print, g, name) bbox.pack_start(b, False, False) if on_remove: b = gs.ui.get_button(stock=gtk.STOCK_REMOVE, xalign=0) b.connect("clicked", on_remove, name) bbox.pack_start(b, False, False) if on_fullscreen: b = 
gs.ui.get_button(stock=gtk.STOCK_FULLSCREEN, xalign=0) b.connect("clicked", on_fullscreen, name) bbox.pack_start(b, False, False) if on_log_data: b = gs.ui.get_button("Log Message",image_stock=gtk.STOCK_FILE, xalign=0) b.connect("clicked", on_log_data, name) bbox.pack_start(b, False, False) if adjustable: r = _GraphRange(g) vb.pack_start(r, False, False) self.pack_start(vb, False, False) self.show_all() class GraphManager(config.ConfigurableIface): CONFIG_SECTION = "GRAPHMANAGER" def __init__(self, conf, source, messages, box, main_window): config.ConfigurableIface.__init__(self, conf) self._source = source self._messages = messages self._box = box self._main_window = main_window self._graphs = {} def _on_log_data(self, sender, name): self._source.register_csv_logger(None, name.split(':')[0]) def _on_pause(self, sender, tweakScrollRate): if tweakScrollRate: tweakScrollRate.setValue(0) tweakScrollRate.refresh() def _on_remove(self, sender, name): gh = self._graphs[name] gh.graph.delete() self._box.remove(gh) del(self._graphs[name]) def _on_print(self, sender, graph, name): def on_print_page(operation, context, page_nr): cr = context.get_cairo_context() graph.drawIntoCairoContext(cr, name=name) print_op = gtk.PrintOperation() print_op.set_n_pages(1) print_op.connect("draw_page", on_print_page) res = print_op.run(gtk.PRINT_OPERATION_ACTION_PRINT_DIALOG, None) def _on_fs_window_closed(self, widget, event, name, btn): gh = self._graphs[name] gh.hide() gh.reparent(self._box) gh.show_all() btn.set_sensitive(True) def _on_fullscreen(self, btn, name): gh = self._graphs[name] w = gtk.Window() w.connect("delete-event", self._on_fs_window_closed, name, btn) w.set_title(name) gh.hide() gh.reparent(w) w.show_all() btn.set_sensitive(False) def update_state_from_config(self): num = self.config_get("num_graphs", 0) if num: LOG.info("Restoring %s graphs" % num) for i in range(0, int(num)): name = self.config_get("graph_%d" % i, ":") try: msg_name, field_name = name.split(":") if msg_name and field_name: msg = self._messages.get_message_by_name(msg_name) field = msg.get_field_by_name(field_name) if msg and field: self.add_graph(msg, field) except Exception: LOG.warn("Error adding graph", exc_info=True) def update_config_from_state(self): self.config_delete_keys_in_section() num = 0 for name in self._graphs: self.config_set("graph_%d" % num, name) num += 1 LOG.info("Saved %s graphs" % num) self.config_set("num_graphs", num) def add_graph(self, msg, field, adjustable=True, double_buffer=False): name = "%s:%s" % (msg.name, field.name) if name not in self._graphs: LOG.info("Adding graph: %s" % name) gh = GraphHolder( Graph(self._source, msg, field, double_buffer), name, adjustable, self._on_pause, self._on_print, self._on_remove, self._on_fullscreen, self._on_log_data) self._box.pack_start(gh) self._graphs[name] = gh
the-stack_0_4234
#!/usr/bin/env python3 # coding: utf8 """ Description: Using fasta files (scaffold/chromosme/contig file, protein file), gff file, annotation tsv file and the species name this script writes a genbank file. The annotation tsv file contains association between gene and annotation (EC number, GO term, Interpro) to add information to the genbank. The species name needs to be compatible with the taxonomy of the EBI. Informations need a good formating: gene ID should be correctly written (like XXX_001 and no XXX_1 if you got more thant 100 genes). Currently when there is multiple GO terms/InterPro/EC the script split them when they are separated by ";" or by "," like GO:0006979;GO:0020037;GO:0004601, if you use another separator add to the re.split(',|;'). For the gff file ensure that the element start position is at least 1. If it's 0 gffutils will return an error (source : https://github.com/daler/gffutils/issues/104). Other informations can be added by adding a dictionary with gene ID as key and the information as value and adapt the condition used for the others annotations (EC, Interpro, Go term). Usage: gbk_creator_from_gff.py -fg <Genome fasta file> -fp <Protein Fasta file> -a <Annotation TSV file> -g <GFF file> -s <Species name> -o <GBK Output file name> """ import argparse import datetime import gffutils import numpy as np import os import pandas as pa import pronto import re import requests import shutil from Bio import SeqFeature as sf from Bio import SeqIO from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from collections import OrderedDict try: from Bio.Alphabet import IUPAC except ImportError: IUPAC = None def merging_mini_gff(gff_folder): """ Merge multiple gff files into one. Return the path to the merged file. """ mini_gff_path = os.path.dirname(os.path.realpath(os.listdir(gff_folder)[0])) + "/" + gff_folder + "/" gff_merged_path = mini_gff_path + 'merged_gff.gff' with open(gff_merged_path, 'w') as gff_file_merged: gff_files = os.listdir(gff_folder) gff_files.remove('merged_gff.gff') for mini_gff in gff_files: with open(mini_gff_path + mini_gff, 'rb') as mini_gff_file: shutil.copyfileobj(mini_gff_file, gff_file_merged) return gff_merged_path def create_GO_dataframes(): """ Use pronto to query the Gene Ontology and to create the Ontology. Create a dataframe which contains for all GO terms their GO namespaces (molecular_function, ..). Create a second dataframe containing alternative ID for some GO terms (deprecated ones). """ go_ontology = pronto.Ontology('http://purl.obolibrary.org/obo/go/go-basic.obo') # For each GO terms look to the namespaces associated with them. go_namespaces = {} for go_term in go_ontology: if 'GO:' in go_term: go_namespaces[go_term] = go_ontology[go_term].namespace df_go_namespace = pa.DataFrame.from_dict(go_namespaces, orient='index') df_go_namespace.reset_index(inplace=True) df_go_namespace.columns = ['GO', 'namespace'] # For each GO terms look if there is an alternative ID fo them. go_alt_ids = {} for go_term in go_ontology: if go_ontology[go_term].alternate_ids != frozenset(): for go_alt in go_ontology[go_term].alternate_ids: go_alt_ids[go_alt] = go_term df_go_alternative = pa.DataFrame.from_dict(go_alt_ids, orient='index') df_go_alternative.reset_index(inplace=True) df_go_alternative.columns = ['GO', 'alternative_GO'] return df_go_namespace, df_go_alternative def create_taxonomic_data(species_name): """ Query the EBI with the species name to create a dictionary containing taxon id, taxonomy and some other informations. 
""" species_informations = {} species_name_url = species_name.replace(' ', '%20') url = 'https://www.ebi.ac.uk/ena/data/taxonomy/v1/taxon/scientific-name/' + species_name_url response = requests.get(url) temp_species_informations = response.json()[0] for temp_species_information in temp_species_informations: if temp_species_information == 'lineage': species_informations['taxonomy'] = temp_species_informations[temp_species_information].split('; ')[:-1] elif temp_species_information == 'division': species_informations['data_file_division'] = temp_species_informations[temp_species_information] elif temp_species_information == 'taxId': species_informations['db_xref'] = 'taxon:' + str(temp_species_informations[temp_species_information]) else: species_informations[temp_species_information] = temp_species_informations[temp_species_information] compatible_species_name = species_name.replace('/', '_') species_informations['description'] = compatible_species_name + ' genome' species_informations['organism'] = compatible_species_name species_informations['keywords'] = [compatible_species_name] return species_informations def find_column_of_interest(df): ''' Gene column is supposed to be the first one. Detect columns containing GO number, EC number and Interpro ID. To do this, regular expression are used, for each types of data. The occurrence of each regular expression is counted. Then the column containing the maximum of occurrence for a type of data is associated with it by returning it's name. ''' columns = df.columns.tolist() gene_column = columns[0] go_number_expression = r"[FPC]?:?GO[:_][\d]{7}" ec_expression = r"[Ee]?[Cc]?:?[\d]{1}[\.]{1}[\d]{,2}[\.]{,1}[\d]{,2}[\.]{,1}[\d]{,3}" ipr_expression = r"IPR[\d]{6}" go_number_columns = {} ec_columns = {} ipr_columns = {} for column in columns: df[column] = df[column].astype(str) go_number_columns[column] = len(df[df[column].str.match(go_number_expression)]) ec_columns[column] = len(df[df[column].str.match(ec_expression)]) ipr_columns[column] = len(df[df[column].str.match(ipr_expression)]) if go_number_columns: go_number_column = max(go_number_columns, key=go_number_columns.get) go_column = go_number_column if ec_columns != []: ec_column = max(ec_columns, key=ec_columns.get) else: ec_column = np.nan if ipr_columns != []: ipr_column = max(ipr_columns, key=ipr_columns.get) else: ipr_column = np.nan return gene_column, go_column, ec_column, ipr_column def contig_info(contig_id, contig_seq, species_informations): """ Create contig information from species_informations dictionary and contig id and contig seq. """ record = SeqRecord(contig_seq, id=contig_id, name=contig_id, description=species_informations['description'], annotations={"molecule_type": "DNA"}) if IUPAC: record.seq.alphabet = IUPAC.ambiguous_dna if 'data_file_division' in species_informations: record.annotations['data_file_division'] = species_informations['data_file_division'] record.annotations['date'] = datetime.date.today().strftime('%d-%b-%Y').upper() if 'topology' in species_informations: record.annotations['topology'] = species_informations['topology'] record.annotations['accessions'] = contig_id if 'organism' in species_informations: record.annotations['organism'] = species_informations['organism'] # Use of literal_eval for taxonomy and keywords to retrieve list. 
if 'taxonomy' in species_informations: record.annotations['taxonomy'] = species_informations['taxonomy'] if 'keywords' in species_informations: record.annotations['keywords'] = species_informations['keywords'] if 'source' in species_informations: record.annotations['source'] = species_informations['source'] new_feature_source = sf.SeqFeature(sf.FeatureLocation(1-1, len(contig_seq)), type="source") new_feature_source.qualifiers['scaffold'] = contig_id if 'isolate' in species_informations: new_feature_source.qualifiers['isolate'] = species_informations['isolate'] # db_xref corresponds to the taxon NCBI ID. # Important if you want to use Pathway Tools after. if 'db_xref' in species_informations: new_feature_source.qualifiers['db_xref'] = species_informations['db_xref'] if 'cell_type' in species_informations: new_feature_source.qualifiers['cell_type'] = species_informations['cell_type'] if 'dev_stage' in species_informations: new_feature_source.qualifiers['dev_stage'] = species_informations['dev_stage'] if 'mol_type' in species_informations: new_feature_source.qualifiers['mol_type'] = species_informations['mol_type'] record.features.append(new_feature_source) return record def strand_change(input_strand): """ The input is strand in str ('-', '+') modify it to be a strand in int (-1, +1) to be compatible with SeqIO strand reading. """ if isinstance(input_strand, str): if input_strand == '-': new_strand = -1 elif input_strand == '+': new_strand = +1 if input_strand == '.': new_strand = None elif input_strand == '?': new_strand = 0 elif isinstance(input_strand, int): if input_strand == -1: new_strand = input_strand elif input_strand == +1: new_strand = input_strand return new_strand def search_and_add_RNA(gff_database, gene_informations, record, type_RNA): """ Search in the gff_database if the gene have RNA of the (type_RNA). For the RNA it will add a feature to the contig record of the genbank. Then it returns the contig record. gene_informations contain: [0] -> gene feature [1] -> gene ID cleaned [2] -> gene start position [3] -> gene end postion [4] -> gene strand modified (str -> int) """ for rna in gff_database.children(gene_informations[0], featuretype=type_RNA, order_by='start'): new_feature_RNA = sf.SeqFeature(sf.FeatureLocation(gene_informations[2], gene_informations[3], gene_informations[4]), type=type_RNA) new_feature_RNA.qualifiers['locus_tag'] = gene_informations[1] record.features.append(new_feature_RNA) return record def search_and_add_pseudogene(gff_database, gene, record, df_exons, gene_protein_seq): """ Search in the gff_database if the gene is a pseudogene. Add it to the record. """ location_exons = [] for pseudogene in gff_database.children(gene, featuretype="pseudogene", order_by='start'): # Select exon corresponding to the gene. # Then iterate for each exon and extract information. 
df_temp = df_exons[df_exons['gene_id'] == pseudogene.id] for _, row in df_temp.iterrows(): new_feature_location_exons = sf.FeatureLocation(row['start'], row['end'], row['strand']) location_exons.append(new_feature_location_exons) if location_exons and len(location_exons)>=2: exon_compound_locations = sf.CompoundLocation(location_exons, operator='join') new_feature_cds = sf.SeqFeature(exon_compound_locations, type='CDS') else: start_position = gene.start -1 end_position = gene.end strand = strand_change(gene.strand) new_feature_cds = sf.SeqFeature(sf.FeatureLocation(start_position, end_position, strand), type="CDS") new_feature_cds.qualifiers['translation'] = gene_protein_seq[pseudogene.id] new_feature_cds.qualifiers['locus_tag'] = gene.id new_feature_cds.qualifiers['pseudo'] = None record.features.append(new_feature_cds) return record def gff_to_gbk(genome_fasta, prot_fasta, annot_table, gff_file, species_name, gbk_out): """ From a genome fasta (containing each contigs of the genome), a protein fasta (containing each protein sequence), an annotation table (containing gene name associated with GO terms, InterPro and EC), a gff file (containing gene, exon, mRNA, ncRNA, tRNA), a contig information table (containing species name, taxon ID, ..) create a genbank file. """ print('Creating GFF database (gffutils)') # Create the gff database file. # gffutils use sqlite3 file-based database to access data inside GFF. # ':memory:' ask gffutils to keep database in memory instead of writting in a file. gff_database = gffutils.create_db(gff_file, ':memory:', force=True, keep_order=True, merge_strategy='merge', sort_attribute_values=True) # Length of your gene ID. # Catch it in the GFF database. # It's pretty dumb as we go into a loop for one information. # But I don't find another way to catch the length of gene_id. length_gene_id = 0 for gene in gff_database.features_of_type('gene'): length_gene_id = len(gene.id.replace('gene:', '')) break # Get the longest contig ID to check if all contig IDs have the # same length, if not add 0 (at the supposed position of the number). longest_contig_id = "" for contig_for_length_id in gff_database.features_of_type('sequence_assembly'): if len(longest_contig_id) < len(contig_for_length_id.id): longest_contig_id = contig_for_length_id.id print('Formatting fasta and annotation file') # Dictionary with scaffold/chromosome id as key and sequence as value. contig_seqs = OrderedDict() for record in SeqIO.parse(genome_fasta, "fasta"): id_contig = record.id contig_seqs[id_contig] = record.seq # Dictionary with gene id as key and protein sequence as value. gene_protein_seq = {} for record in SeqIO.parse(prot_fasta, "fasta"): gene_protein_seq[record.id] = record.seq # Create a taxonomy dictionary querying the EBI. species_informations = create_taxonomic_data(species_name) # Read a tsv file containing GO terms, Interpro and EC associated with gene name. mapping_data = pa.read_csv(annot_table, sep='\t') mapping_data.replace(np.nan, '', inplace=True) gene_column, go_column, ec_column, ipr_column = find_column_of_interest(mapping_data) mapping_data.set_index(gene_column, inplace=True) # Dictionary with gene id as key and GO terms/Interpro/EC as value. annot_GOs = mapping_data[go_column].to_dict() annot_IPRs = mapping_data[ipr_column].to_dict() annot_ECs = mapping_data[ec_column].to_dict() # Query Gene Ontology to extract namespaces and alternative IDs. df_go_namespace, df_go_alternative = create_GO_dataframes() # Dictionary GO id as term and GO namespace as value. 
df_go_namespace.set_index('GO', inplace=True) go_namespaces = df_go_namespace['namespace'].to_dict() # Dictionary GO id as term and GO alternatives id as value. df_go_alternative.set_index('GO', inplace=True) go_alternatives = df_go_alternative['alternative_GO'].to_dict() # Create a dataframe containing each exon with informations (gene, start, end and strand) df_exons = pa.DataFrame(columns=['exon_id', 'gene_id', 'start', 'end', 'strand']) print('Searching for exons') temporary_datas = [] # Search for all exons in gff database and extract start position (have to minus one to get the right position) # the end position, the strand (have to change from str to int) and the gene ID. # Then add it to a list of dictionary that will be added to the dataframe. for exon in gff_database.features_of_type('exon'): start_position = exon.start - 1 end_position = exon.end strand = strand_change(exon.strand) gene_id = exon.id.replace('exon:', '')[:-2] temporary_datas.append({'exon_id': exon.id, 'gene_id': gene_id, 'start': start_position, 'end':end_position, 'strand': strand}) df_exons = df_exons.append(temporary_datas) # All SeqRecord objects will be stored in a list and then give to the SeqIO writer to create the genbank. seq_objects = [] print('Assembling Genbank informations') # Iterate through each contig. # Then iterate through gene and throug RNA linked with the gene. # Then look if protein informations are available. for contig_id in sorted(contig_seqs): # Data for each contig. record = contig_info(contig_id, contig_seqs[contig_id], species_informations) for gene in gff_database.features_of_type('gene'): gene_contig = gene.chrom if gene_contig == contig_id: id_gene = gene.id start_position = gene.start -1 end_position = gene.end strand = strand_change(gene.strand) new_feature_gene = sf.SeqFeature(sf.FeatureLocation(start_position, end_position, strand), type="gene") new_feature_gene.qualifiers['locus_tag'] = id_gene # Add gene information to contig record. record.features.append(new_feature_gene) # Search and add RNAs. gene_informations = [gene, id_gene, start_position, end_position, strand] record = search_and_add_RNA(gff_database, gene_informations, record, 'mRNA') record = search_and_add_RNA(gff_database, gene_informations, record,'tRNA') record = search_and_add_RNA(gff_database, gene_informations, record, 'ncRNA') record = search_and_add_RNA(gff_database, gene_informations, record, 'lncRNA') # Search for pseudogene and add them. record = search_and_add_pseudogene(gff_database, gene, record, df_exons, gene_protein_seq) # Create CDS using exons, if no exon use gene information location_exons = [] # Use parent mRNA in gff to find CDS. # With this we take the isoform of gene. for mrna in gff_database.children(gene, featuretype="mRNA", order_by='start'): mrna_id = mrna.id # Select exon corresponding to the gene. # Then iterate for each exon and extract information. 
df_temp = df_exons[df_exons['gene_id'] == mrna_id] for _, row in df_temp.iterrows(): new_feature_location_exons = sf.FeatureLocation(row['start'], row['end'], row['strand']) location_exons.append(new_feature_location_exons) if location_exons and len(location_exons)>=2: exon_compound_locations = sf.CompoundLocation(location_exons, operator='join') new_feature_cds = sf.SeqFeature(exon_compound_locations, type='CDS') else: new_feature_cds = sf.SeqFeature(sf.FeatureLocation(start_position, end_position, strand), type="CDS") new_feature_cds.qualifiers['translation'] = gene_protein_seq[mrna_id] new_feature_cds.qualifiers['locus_tag'] = id_gene # Add GO annotation according to the namespace. if mrna_id in annot_GOs: gene_gos = re.split(';|,', annot_GOs[mrna_id]) if gene_gos != [""]: go_components = [] go_functions = [] go_process = [] for go in gene_gos: # Check if GO term is not a deprecated one. # If yes take the corresponding one in alternative GO. if go not in go_namespaces: go_test = go_alternatives[go] else: go_test = go if go_namespaces[go_test] == 'cellular_component': go_components.append(go) if go_namespaces[go_test] == 'molecular_function': go_functions.append(go) if go_namespaces[go_test] == 'biological_process': go_process.append(go) new_feature_cds.qualifiers['go_component'] = go_components new_feature_cds.qualifiers['go_function'] = go_functions new_feature_cds.qualifiers['go_process'] = go_process # Add InterPro annotation. if mrna_id in annot_IPRs: gene_iprs = re.split(';|,', annot_IPRs[mrna_id]) if gene_iprs != [""]: new_feature_cds.qualifiers['db_xref'] = ["InterPro:"+interpro for interpro in gene_iprs] # Add EC annotation. if mrna_id in annot_ECs: gene_ecs = re.split(';|,', annot_ECs[mrna_id]) if gene_ecs != [""]: new_feature_cds.qualifiers['EC_number'] = [ec.replace('ec:', '') for ec in gene_ecs] # Add CDS information to contig record record.features.append(new_feature_cds) seq_objects.append(record) # Create Genbank with the list of SeqRecord. SeqIO.write(seq_objects, gbk_out, 'genbank') def main(genome_fasta, prot_fasta, annot_table, gff_file_folder, species_name, gbk_out): # Check if gff is a file or is multiple files in a folder. # If it's multiple files, it wil merge them in one. 
    if os.path.isfile(gff_file_folder):
        gff_file = gff_file_folder
    else:
        gff_file = merging_mini_gff(gff_file_folder)

    gff_to_gbk(genome_fasta, prot_fasta, annot_table, gff_file, species_name, gbk_out)


def run():
    parser = argparse.ArgumentParser(prog="gbk_creator_from_gff.py")
    parser.add_argument("-fg", "--fgen", dest="genome_fasta", metavar="FILE", help="contig fasta file", required=True)
    parser.add_argument("-fp", "--fprot", dest="prot_fasta", metavar="FILE", help="protein fasta file", required=True)
    parser.add_argument("-a", "--annot", dest="annot_table", metavar="FILE", help="annotation tsv file", required=True)
    parser.add_argument("-g", "--gff", dest="gff_file_folder", metavar="FILE or FOLDER", help="gff file or folder containing multiple gff", required=True)
    parser.add_argument("-s", "--speciesname", dest="species_name", metavar="STRING", help="species scientific name", required=True)
    parser.add_argument("-o", "--output", dest="gbk_out", metavar="FILE", help="output file", default="mygbk.gbk")
    args = parser.parse_args()

    main(genome_fasta=args.genome_fasta, prot_fasta=args.prot_fasta, annot_table=args.annot_table,
         gff_file_folder=args.gff_file_folder, species_name=args.species_name, gbk_out=args.gbk_out)


if __name__ == '__main__':
    run()
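# Example invocation (illustrative only; the file names below are placeholders,
# while the options are the ones defined in run() above):
#
#   python gbk_creator_from_gff.py -fg genome.fasta -fp proteins.fasta \
#       -a annotation.tsv -g annotations.gff -s "Escherichia coli" -o result.gbk
#
# The same conversion can also be run programmatically by calling main() with the
# corresponding keyword arguments.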
the-stack_0_4235
#! /usr/bin/env python # -*- coding: utf-8 -*- from django.conf.urls import url, include from config import views urlpatterns = [ url(r'^$', views.index, name='config'), url(r'^config_save/$', views.config_save, name='config_save'), url(r'^token/', views.get_token, name='token'), ]
the-stack_0_4236
from functools import partial import pandas as pd from cellphonedb.src.core.core_logger import core_logger from cellphonedb.src.core.exceptions.AllCountsFilteredException import AllCountsFilteredException from cellphonedb.src.core.exceptions.NoInteractionsFound import NoInteractionsFound from cellphonedb.src.core.methods import cpdb_statistical_analysis_helper def call(meta: pd.DataFrame, counts: pd.DataFrame, counts_data: str, interactions: pd.DataFrame, genes: pd.DataFrame, complexes: pd.DataFrame, complex_compositions: pd.DataFrame, pvalue: float, separator: str, iterations: int = 1000, threshold: float = 0.1, threads: int = 4, debug_seed: int = -1, result_precision: int = 3, ) -> (pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame): core_logger.info( '[Cluster Statistical Analysis] ' 'Threshold:{} Iterations:{} Debug-seed:{} Threads:{} Precision:{}'.format(threshold, iterations, debug_seed, threads, result_precision)) if debug_seed >= 0: pd.np.random.seed(debug_seed) core_logger.warning('Debug random seed enabled. Setted to {}'.format(debug_seed)) cells_names = sorted(counts.columns) interactions.set_index('id_interaction', drop=True, inplace=True) interactions_reduced = interactions[['multidata_1_id', 'multidata_2_id']].drop_duplicates() complex_compositions.set_index('id_complex_composition', inplace=True, drop=True) # Add id multidata to counts input counts: pd.DataFrame = counts.merge(genes[['id_multidata', 'ensembl', 'gene_name', 'hgnc_symbol']], left_index=True, right_on=counts_data) counts_relations = counts[['id_multidata', 'ensembl', 'gene_name', 'hgnc_symbol']].copy() counts.set_index('id_multidata', inplace=True, drop=True) counts = counts[cells_names] counts = counts.astype('float32') counts = counts.groupby(counts.index).mean() if counts.empty: raise AllCountsFilteredException(hint='Are you using human data?') # End add id multidata interactions_filtered, counts_filtered, complex_composition_filtered = \ cpdb_statistical_analysis_helper.prefilters(interactions_reduced, counts, complexes, complex_compositions) if interactions_filtered.empty: raise NoInteractionsFound() clusters = cpdb_statistical_analysis_helper.build_clusters(meta, counts_filtered, complex_composition_filtered) core_logger.info('Running Real Analysis') cluster_interactions = cpdb_statistical_analysis_helper.get_cluster_combinations(clusters['names']) base_result = cpdb_statistical_analysis_helper.build_result_matrix(interactions_filtered, cluster_interactions, separator) real_mean_analysis = cpdb_statistical_analysis_helper.mean_analysis(interactions_filtered, clusters, cluster_interactions, base_result, separator) real_percents_analysis = cpdb_statistical_analysis_helper.percent_analysis(clusters, threshold, interactions_filtered, cluster_interactions, base_result, separator) core_logger.info('Running Statistical Analysis') statistical_mean_analysis = cpdb_statistical_analysis_helper.shuffled_analysis(iterations, meta, counts_filtered, interactions_filtered, cluster_interactions, complex_composition_filtered, base_result, threads, separator) result_percent = cpdb_statistical_analysis_helper.build_percent_result(real_mean_analysis, real_percents_analysis, statistical_mean_analysis, interactions_filtered, cluster_interactions, base_result, separator) pvalues_result, means_result, significant_means, deconvoluted_result = build_results( interactions_filtered, interactions, counts_relations, real_mean_analysis, result_percent, clusters['means'], complex_composition_filtered, counts, genes, 
result_precision, pvalue, counts_data ) return pvalues_result, means_result, significant_means, deconvoluted_result def build_results(interactions: pd.DataFrame, interactions_original: pd.DataFrame, counts_relations: pd.DataFrame, real_mean_analysis: pd.DataFrame, result_percent: pd.DataFrame, clusters_means: pd.DataFrame, complex_compositions: pd.DataFrame, counts: pd.DataFrame, genes: pd.DataFrame, result_precision: int, pvalue: float, counts_data: str ) -> (pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame): """ Sets the results data structure from method generated data. Results documents are defined by specs. """ core_logger.info('Building results') interactions: pd.DataFrame = interactions_original.loc[interactions.index] interactions['interaction_index'] = interactions.index interactions = interactions.merge(counts_relations, how='left', left_on='multidata_1_id', right_on='id_multidata', ) interactions = interactions.merge(counts_relations, how='left', left_on='multidata_2_id', right_on='id_multidata', suffixes=('_1', '_2')) interactions.set_index('interaction_index', inplace=True, drop=True) interacting_pair = cpdb_statistical_analysis_helper.interacting_pair_build(interactions) def simple_complex_indicator(interaction: pd.Series, suffix: str) -> str: """ Add simple/complex prefixes to interaction components """ if interaction['is_complex{}'.format(suffix)]: return 'complex:{}'.format(interaction['name{}'.format(suffix)]) return 'simple:{}'.format(interaction['name{}'.format(suffix)]) interactions['partner_a'] = interactions.apply(lambda interaction: simple_complex_indicator(interaction, '_1'), axis=1) interactions['partner_b'] = interactions.apply(lambda interaction: simple_complex_indicator(interaction, '_2'), axis=1) significant_mean_rank, significant_means = cpdb_statistical_analysis_helper.build_significant_means( real_mean_analysis, result_percent, pvalue) significant_means = significant_means.round(result_precision) gene_columns = ['{}_{}'.format(counts_data, suffix) for suffix in ('1', '2')] gene_renames = {column: 'gene_{}'.format(suffix) for column, suffix in zip(gene_columns, ['a', 'b'])} # Remove useless columns interactions_data_result = pd.DataFrame( interactions[['id_cp_interaction', 'partner_a', 'partner_b', 'receptor_1', 'receptor_2', *gene_columns, 'annotation_strategy']].copy()) interactions_data_result = pd.concat([interacting_pair, interactions_data_result], axis=1, sort=False) interactions_data_result['secreted'] = (interactions['secreted_1'] | interactions['secreted_2']) interactions_data_result['is_integrin'] = (interactions['integrin_1'] | interactions['integrin_2']) interactions_data_result.rename( columns={**gene_renames, 'receptor_1': 'receptor_a', 'receptor_2': 'receptor_b'}, inplace=True) # Dedupe rows and filter only desired columns interactions_data_result.drop_duplicates(inplace=True) means_columns = ['id_cp_interaction', 'interacting_pair', 'partner_a', 'partner_b', 'gene_a', 'gene_b', 'secreted', 'receptor_a', 'receptor_b', 'annotation_strategy', 'is_integrin'] interactions_data_result = interactions_data_result[means_columns] real_mean_analysis = real_mean_analysis.round(result_precision) significant_means = significant_means.round(result_precision) # Round result decimals for key, cluster_means in clusters_means.items(): clusters_means[key] = cluster_means.round(result_precision) # Document 1 pvalues_result = pd.concat([interactions_data_result, result_percent], axis=1, join='inner', sort=False) # Document 2 means_result = 
pd.concat([interactions_data_result, real_mean_analysis], axis=1, join='inner', sort=False) # Document 3 significant_means_result = pd.concat([interactions_data_result, significant_mean_rank, significant_means], axis=1, join='inner', sort=False) # Document 5 deconvoluted_result = deconvoluted_complex_result_build(clusters_means, interactions, complex_compositions, counts, genes, counts_data) return pvalues_result, means_result, significant_means_result, deconvoluted_result def deconvoluted_complex_result_build(clusters_means: pd.DataFrame, interactions: pd.DataFrame, complex_compositions: pd.DataFrame, counts: pd.DataFrame, genes: pd.DataFrame, counts_data: str) -> pd.DataFrame: genes_counts = list(counts.index) genes_filtered = genes[genes['id_multidata'].apply(lambda gene: gene in genes_counts)] deconvoluted_complex_result_1 = deconvolute_complex_interaction_component(complex_compositions, genes_filtered, interactions, '_1', counts_data) deconvoluted_simple_result_1 = deconvolute_interaction_component(interactions, '_1', counts_data) deconvoluted_complex_result_2 = deconvolute_complex_interaction_component(complex_compositions, genes_filtered, interactions, '_2', counts_data) deconvoluted_simple_result_2 = deconvolute_interaction_component(interactions, '_2', counts_data) deconvoluted_result = deconvoluted_complex_result_1.append( [deconvoluted_simple_result_1, deconvoluted_complex_result_2, deconvoluted_simple_result_2], sort=False) deconvoluted_result.set_index('multidata_id', inplace=True, drop=True) deconvoluted_columns = ['gene_name', 'name', 'is_complex', 'protein_name', 'complex_name', 'id_cp_interaction', 'gene'] deconvoluted_result = deconvoluted_result[deconvoluted_columns] deconvoluted_result.rename({'name': 'uniprot'}, axis=1, inplace=True) deconvoluted_result = pd.concat([deconvoluted_result, clusters_means], axis=1, join='inner', sort=False) deconvoluted_result.set_index('gene', inplace=True, drop=True) deconvoluted_result.drop_duplicates(inplace=True) return deconvoluted_result def deconvolute_interaction_component(interactions, suffix, counts_data): interactions = interactions[~interactions['is_complex{}'.format(suffix)]] deconvoluted_result = pd.DataFrame() deconvoluted_result['gene'] = interactions['{}{}'.format(counts_data, suffix)] deconvoluted_result[ ['multidata_id', 'protein_name', 'gene_name', 'name', 'is_complex', 'id_cp_interaction', 'receptor']] = \ interactions[ ['multidata{}_id'.format(suffix), 'protein_name{}'.format(suffix), 'gene_name{}'.format(suffix), 'name{}'.format(suffix), 'is_complex{}'.format(suffix), 'id_cp_interaction', 'receptor{}'.format(suffix)]] deconvoluted_result['complex_name'] = pd.np.nan return deconvoluted_result def deconvolute_complex_interaction_component(complex_compositions, genes_filtered, interactions, suffix, counts_data): return_properties = [counts_data, 'protein_name', 'gene_name', 'name', 'is_complex', 'id_cp_interaction', 'receptor', 'complex_name'] if complex_compositions.empty: return pd.DataFrame( columns=return_properties) deconvoluted_result = pd.DataFrame() component = pd.DataFrame() component[counts_data] = interactions['{}{}'.format(counts_data, suffix)] component[[counts_data, 'protein_name', 'gene_name', 'name', 'is_complex', 'id_cp_interaction', 'id_multidata', 'receptor']] = \ interactions[['{}{}'.format(counts_data, suffix), 'protein_name{}'.format(suffix), 'gene_name{}'.format(suffix), 'name{}'.format(suffix), 'is_complex{}'.format(suffix), 'id_cp_interaction', 'multidata{}_id'.format(suffix), 
'receptor{}'.format(suffix)]] deconvolution_complex = pd.merge(complex_compositions, component, left_on='complex_multidata_id', right_on='id_multidata') deconvolution_complex = pd.merge(deconvolution_complex, genes_filtered, left_on='protein_multidata_id', right_on='protein_multidata_id', suffixes=['_complex', '_simple']) deconvoluted_result['gene'] = deconvolution_complex['{}_simple'.format(counts_data)] deconvoluted_result[ ['multidata_id', 'protein_name', 'gene_name', 'name', 'is_complex', 'id_cp_interaction', 'receptor', 'complex_name']] = \ deconvolution_complex[ ['complex_multidata_id', 'protein_name_simple', 'gene_name_simple', 'name_simple', 'is_complex_complex', 'id_cp_interaction', 'receptor_simple', 'name_complex']] return deconvoluted_result
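# Sketch of how this analysis is typically driven (illustrative only: the input
# dataframes are built elsewhere in CellPhoneDB from the user's meta/counts files
# and the internal database, so only the shape of the call is shown and the
# parameter values here are assumptions):
#
#   pvalues, means, significant_means, deconvoluted = call(
#       meta, counts, counts_data='ensembl',
#       interactions=interactions, genes=genes, complexes=complexes,
#       complex_compositions=complex_compositions,
#       pvalue=0.05, separator='|')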
the-stack_0_4237
#!/usr/bin/python
# -*- coding:utf-8 -*-

"""
CNN/Convnets/Convolutional neural networks with Keras and TensorFlow
"""

from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers import Dense
from keras.constraints import maxnorm
from keras.layers import Dropout
from keras.layers import Flatten
from keras.utils import np_utils
from keras.optimizers import SGD


def train():
    """
    Train the model.
    """
    epochs = 10
    lrate = 0.01
    decay = lrate/epochs
    sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
    model = create_model()
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    print(model.summary())
    model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=epochs, batch_size=32)
    return model


def create_model():
    """
    Build the model.
    """
    model = Sequential()
    # 52.59% accuracy with input shape (3, 32, 32)
    #model.add(Conv2D(32, (3, 3), activation="relu", input_shape=(3, 32, 32), padding = "same", kernel_constraint=maxnorm(3)))
    #model.add(Conv2D(32, (3, 3), activation="relu", input_shape=(3, 32, 32), padding="same", kernel_constraint=maxnorm(3)))
    # 68.28% accuracy with input shape (32, 32, 3)
    model.add(Conv2D(32, (3, 3), activation="relu", input_shape=(32, 32, 3), padding = "same", kernel_constraint=maxnorm(3)))
    model.add(Conv2D(32, (3, 3), activation="relu", input_shape=(32, 32, 3), padding="same", kernel_constraint=maxnorm(3)))
    # Dropout to reduce overfitting
    # model.add(Dropout(0.2))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
    # model.add(Dropout(0.2))
    model.add(Dense(10, activation='softmax'))
    return model


if __name__ == '__main__':
    # Load the CIFAR-10 data: 32*32*3 images
    # 10 classes
    # 50000 training samples
    # 10000 test samples
    (X_train,Y_train),(X_test,Y_test)=cifar10.load_data()
    print(X_train.shape)
    print(X_test.shape)

    # Convert the pixel values to floats in the range 0-1
    X_train=X_train/255.0
    X_test=X_test/255.0

    # One-hot encode the labels:
    # the column of the true class is 1, all other columns are 0
    Y_train=np_utils.to_categorical(Y_train)
    Y_test=np_utils.to_categorical(Y_test)

    # reshape for tf
    #X_train = X_train.reshape(X_train.shape[0], 3, 32, 32)
    #X_test = X_test.reshape(-1, 3, 32, 32)

    # Train
    model = train()

    # Evaluate the model accuracy
    # 52.59% accuracy with input shape (3, 32, 32)
    # 68.28% accuracy with input shape (32, 32, 3)
    scores = model.evaluate(X_test, Y_test, verbose=0)
    print("Final Accuracy: %.2f%%" % (scores[1]*100))

    # Save the model architecture and the trained weights
    jsonFile=model.to_json()
    with open('output/cifar10.json','w') as file:
        file.write(jsonFile)
    model.save_weights('output/cifar10.h5')
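# Reloading sketch (not part of the original script): the architecture and weights
# saved above could later be restored with Keras' standard API, assuming the
# output/ files exist:
#
#   from keras.models import model_from_json
#   with open('output/cifar10.json') as f:
#       restored = model_from_json(f.read())
#   restored.load_weights('output/cifar10.h5')
#   restored.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])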
the-stack_0_4238
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (c) 2014, Vispy Development Team. All Rights Reserved. # Distributed under the (new) BSD License. See LICENSE.txt for more info. # ----------------------------------------------------------------------------- # Author: Nicolas P .Rougier # Date: 04/03/2014 # ----------------------------------------------------------------------------- from vispy import gloo, app from vispy.gloo import Program vertex = """ uniform float theta; attribute vec4 color; attribute vec2 position; varying vec4 v_color; void main() { float ct = cos(theta); float st = sin(theta); float x = 0.75* (position.x*ct - position.y*st); float y = 0.75* (position.x*st + position.y*ct); gl_Position = vec4(x, y, 0.0, 1.0); v_color = color; } """ fragment = """ varying vec4 v_color; void main() { gl_FragColor = v_color; } """ class Canvas(app.Canvas): def __init__(self): app.Canvas.__init__(self, size=(512, 512), title='Rotating quad', close_keys='escape') self.timer = app.Timer(1./60., self.on_timer) def on_initialize(self, event): # Build program & data self.program = Program(vertex, fragment, count=4) self.program['color'] = [(1, 0, 0, 1), (0, 1, 0, 1), (0, 0, 1, 1), (1, 1, 0, 1)] self.program['position'] = [(-1, -1), (-1, +1), (+1, -1), (+1, +1)] self.clock = 0 self.timer.start() def on_draw(self, event): gloo.set_clear_color('white') gloo.clear(color=True) self.program.draw('triangle_strip') def on_resize(self, event): gloo.set_viewport(0, 0, *event.size) def on_timer(self, event): self.clock += 0.001 * 1000.0 / 60. self.program['theta'] = self.clock self.update() if __name__ == '__main__': c = Canvas() c.show() app.run()
the-stack_0_4240
from icalendar import vCalAddress from app.config import ICAL_VERSION, PRODUCT_ID from app.routers.export import ( create_ical_calendar, create_ical_event, event_to_ical ) class TestExport: def test_create_ical_calendar(self): cal = create_ical_calendar() assert cal.get('version') == ICAL_VERSION assert cal.get('prodid') == PRODUCT_ID def test_create_ical_event(self, event): ical_event = create_ical_event(event) assert event.owner.email in ical_event.get('organizer') assert ical_event.get('summary') == event.title def test_add_attendees(self, event, user): ical_event = create_ical_event(event) ical_event.add( 'attendee', vCalAddress(f'MAILTO:{user.email}'), encode=0 ) attendee = vCalAddress(f'MAILTO:{user.email}') assert attendee == ical_event.get('attendee') def test_event_to_ical(self, user, event): ical_event = event_to_ical(event, [user.email]) def does_contain(item: str) -> bool: """Returns if calendar contains item.""" return bytes(item, encoding='utf8') in bytes(ical_event) assert does_contain(ICAL_VERSION) assert does_contain(PRODUCT_ID) assert does_contain(event.owner.email) assert does_contain(event.title)
the-stack_0_4242
# step 1. imports from sqlalchemy import (create_engine, MetaData, Table, Column, Integer, String, ForeignKey, Float, DateTime) from sqlalchemy.orm import sessionmaker, mapper, relationship from sqlalchemy.ext.horizontal_shard import ShardedSession from sqlalchemy.sql import operators, visitors import datetime # step 2. databases echo = True db1 = create_engine('sqlite://', echo=echo) db2 = create_engine('sqlite://', echo=echo) db3 = create_engine('sqlite://', echo=echo) db4 = create_engine('sqlite://', echo=echo) # step 3. create session function. this binds the shard ids # to databases within a ShardedSession and returns it. create_session = sessionmaker(class_=ShardedSession) create_session.configure(shards={ 'north_america':db1, 'asia':db2, 'europe':db3, 'south_america':db4 }) # step 4. table setup. meta = MetaData() # we need a way to create identifiers which are unique across all # databases. one easy way would be to just use a composite primary key, where one # value is the shard id. but here, we'll show something more "generic", an # id generation function. we'll use a simplistic "id table" stored in database # #1. Any other method will do just as well; UUID, hilo, application-specific, etc. ids = Table('ids', meta, Column('nextid', Integer, nullable=False)) def id_generator(ctx): # in reality, might want to use a separate transaction for this. c = db1.connect() nextid = c.execute(ids.select(for_update=True)).scalar() c.execute(ids.update(values={ids.c.nextid : ids.c.nextid + 1})) return nextid # table setup. we'll store a lead table of continents/cities, # and a secondary table storing locations. # a particular row will be placed in the database whose shard id corresponds to the # 'continent'. in this setup, secondary rows in 'weather_reports' will # be placed in the same DB as that of the parent, but this can be changed # if you're willing to write more complex sharding functions. weather_locations = Table("weather_locations", meta, Column('id', Integer, primary_key=True, default=id_generator), Column('continent', String(30), nullable=False), Column('city', String(50), nullable=False) ) weather_reports = Table("weather_reports", meta, Column('id', Integer, primary_key=True), Column('location_id', Integer, ForeignKey('weather_locations.id')), Column('temperature', Float), Column('report_time', DateTime, default=datetime.datetime.now), ) # create tables for db in (db1, db2, db3, db4): meta.drop_all(db) meta.create_all(db) # establish initial "id" in db1 db1.execute(ids.insert(), nextid=1) # step 5. define sharding functions. # we'll use a straight mapping of a particular set of "country" # attributes to shard id. shard_lookup = { 'North America':'north_america', 'Asia':'asia', 'Europe':'europe', 'South America':'south_america' } def shard_chooser(mapper, instance, clause=None): """shard chooser. looks at the given instance and returns a shard id note that we need to define conditions for the WeatherLocation class, as well as our secondary Report class which will point back to its WeatherLocation via its 'location' attribute. """ if isinstance(instance, WeatherLocation): return shard_lookup[instance.continent] else: return shard_chooser(mapper, instance.location) def id_chooser(query, ident): """id chooser. given a primary key, returns a list of shards to search. here, we don't have any particular information from a pk so we just return all shard ids. often, youd want to do some kind of round-robin strategy here so that requests are evenly distributed among DBs. 
""" return ['north_america', 'asia', 'europe', 'south_america'] def query_chooser(query): """query chooser. this also returns a list of shard ids, which can just be all of them. but here we'll search into the Query in order to try to narrow down the list of shards to query. """ ids = [] # we'll grab continent names as we find them # and convert to shard ids for column, operator, value in _get_query_comparisons(query): # "shares_lineage()" returns True if both columns refer to the same # statement column, adjusting for any annotations present. # (an annotation is an internal clone of a Column object # and occur when using ORM-mapped attributes like # "WeatherLocation.continent"). A simpler comparison, though less accurate, # would be "column.key == 'continent'". if column.shares_lineage(weather_locations.c.continent): if operator == operators.eq: ids.append(shard_lookup[value]) elif operator == operators.in_op: ids.extend(shard_lookup[v] for v in value) if len(ids) == 0: return ['north_america', 'asia', 'europe', 'south_america'] else: return ids def _get_query_comparisons(query): """Search an orm.Query object for binary expressions. Returns expressions which match a Column against one or more literal values as a list of tuples of the form (column, operator, values). "values" is a single value or tuple of values depending on the operator. """ binds = {} clauses = set() comparisons = [] def visit_bindparam(bind): # visit a bind parameter. Below we ensure # that we get the value whether it was specified # as part of query.params(), or is directly embedded # in the bind's "value" attribute. value = query._params.get(bind.key, bind.value) # some ORM functions place the bind's value as a # callable for deferred evaulation. Get that # actual value here. if callable(value): value = value() binds[bind] = value def visit_column(column): clauses.add(column) def visit_binary(binary): # special handling for "col IN (params)" if binary.left in clauses and \ binary.operator == operators.in_op and \ hasattr(binary.right, 'clauses'): comparisons.append( (binary.left, binary.operator, tuple(binds[bind] for bind in binary.right.clauses) ) ) elif binary.left in clauses and binary.right in binds: comparisons.append( (binary.left, binary.operator,binds[binary.right]) ) elif binary.left in binds and binary.right in clauses: comparisons.append( (binary.right, binary.operator,binds[binary.left]) ) # here we will traverse through the query's criterion, searching # for SQL constructs. We will place simple column comparisons # into a list. if query._criterion is not None: visitors.traverse_depthfirst(query._criterion, {}, {'bindparam':visit_bindparam, 'binary':visit_binary, 'column':visit_column } ) return comparisons # further configure create_session to use these functions create_session.configure( shard_chooser=shard_chooser, id_chooser=id_chooser, query_chooser=query_chooser ) # step 6. mapped classes. class WeatherLocation(object): def __init__(self, continent, city): self.continent = continent self.city = city class Report(object): def __init__(self, temperature): self.temperature = temperature # step 7. mappers mapper(WeatherLocation, weather_locations, properties={ 'reports':relationship(Report, backref='location') }) mapper(Report, weather_reports) # save and load objects! 
tokyo = WeatherLocation('Asia', 'Tokyo') newyork = WeatherLocation('North America', 'New York') toronto = WeatherLocation('North America', 'Toronto') london = WeatherLocation('Europe', 'London') dublin = WeatherLocation('Europe', 'Dublin') brasilia = WeatherLocation('South America', 'Brasila') quito = WeatherLocation('South America', 'Quito') tokyo.reports.append(Report(80.0)) newyork.reports.append(Report(75)) quito.reports.append(Report(85)) sess = create_session() for c in [tokyo, newyork, toronto, london, dublin, brasilia, quito]: sess.add(c) sess.flush() sess.expunge_all() t = sess.query(WeatherLocation).get(tokyo.id) assert t.city == tokyo.city assert t.reports[0].temperature == 80.0 north_american_cities = sess.query(WeatherLocation).filter(WeatherLocation.continent == 'North America') assert [c.city for c in north_american_cities] == ['New York', 'Toronto'] asia_and_europe = sess.query(WeatherLocation).filter(WeatherLocation.continent.in_(['Europe', 'Asia'])) assert set([c.city for c in asia_and_europe]) == set(['Tokyo', 'London', 'Dublin'])
the-stack_0_4243
from setuptools import setup import os VERSION = "2.8.3" def get_long_description(): with open( os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md"), encoding="utf8", ) as fp: return fp.read() setup( name="github-to-sqlite", description="Save data from GitHub to a SQLite database", long_description=get_long_description(), long_description_content_type="text/markdown", author="Simon Willison", url="https://github.com/dogsheep/github-to-sqlite", license="Apache License, Version 2.0", version=VERSION, packages=["github_to_sqlite"], entry_points=""" [console_scripts] github-to-sqlite=github_to_sqlite.cli:cli """, install_requires=["sqlite-utils>=2.7.2", "requests", "PyYAML"], extras_require={"test": ["pytest", "requests-mock", "bs4"]}, tests_require=["github-to-sqlite[test]"], )
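# Illustrative usage once the package is installed (the console script name comes
# from the entry_points declaration above; its subcommands live in
# github_to_sqlite/cli.py and are not shown here):
#
#   pip install github-to-sqlite
#   github-to-sqlite --help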
the-stack_0_4244
class Team:
    """A team, identified by NO, whose fighters are sent out in a caller-defined order."""

    def __init__(self, NO):
        self.NO = NO
        self.fighter_list = None
        self.order = None
        # Index in self.order of the next fighter to send out (incremented after each fight).
        self.fight_cnt = 0

    @property
    def fighter_list(self):
        return self._fighter_list

    @fighter_list.setter
    def fighter_list(self, fighter_list):
        self._fighter_list = fighter_list

    def set_order(self, order):
        """Set the fighting order from an iterable of fighter numbers and reset the counter."""
        self.order = []
        for a_order in order:
            self.order.append(int(a_order))
        self.fight_cnt = 0

    def get_next_fighter(self):
        """Return the next fighter according to the order, or None when the order is exhausted."""
        if self.fight_cnt >= len(self.order):
            return None
        prev_fighter_idx = self.order[self.fight_cnt]
        fighter = None
        for _fighter in self.fighter_list:
            if _fighter.properties["NO"] == prev_fighter_idx:
                fighter = _fighter
                break
        self.fight_cnt += 1
        return fighter
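# Minimal usage sketch (illustrative only; fighter_a and fighter_b stand for
# hypothetical objects that, as get_next_fighter() assumes, expose a `properties`
# dict with a "NO" key):
#
#   team = Team(NO=1)
#   team.fighter_list = [fighter_a, fighter_b]   # fighters with NO 1 and 2
#   team.set_order("21")                         # send fighter 2 first, then fighter 1
#   nxt = team.get_next_fighter()                # -> the fighter whose properties["NO"] == 2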
the-stack_0_4245
import asyncio import logging import signal import sys from functools import partial from typing import Union, List, Callable, Tuple import serial from bleak import BleakClient from serial_asyncio import open_serial_connection from genki_wave.callbacks import WaveCallback from genki_wave.constants import API_CHAR_UUID, BAUDRATE from genki_wave.data.writing import get_start_api_package from genki_wave.protocols import ProtocolAsyncio, ProtocolThread, CommunicateCancel from genki_wave.utils import get_serial_port, get_or_create_event_loop logging.basicConfig(format="%(levelname).4s:%(asctime)s [%(filename)s:%(lineno)d] - %(message)s ") logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def prepare_protocol_as_bleak_callback_asyncio(protocol: ProtocolAsyncio) -> Callable: async def _inner(sender: str, data: bytearray) -> None: # NOTE: `bleak` expects a function with this signature await protocol.data_received(data) return _inner def prepare_protocol_as_bleak_callback(protocol: ProtocolThread) -> Callable: def _inner(sender: str, data: bytearray) -> None: # NOTE: `bleak` expects a function with this signature protocol.data_received(data) return _inner def bleak_callback(protocol: ProtocolAsyncio) -> Callable: """Wraps our protocol as a callback with the correct signature bleak expects NOTE: 1) Bleak checks if a function is a co-routine so we need to wrap the class method into an `async` function and 2) we need to take care that `asyncio.Queue` is correctly handled so we have 2 different wrappers, one for a regular `queue.Queue` and one for `asyncio.Queue`. """ if isinstance(protocol, ProtocolAsyncio): callback = prepare_protocol_as_bleak_callback_asyncio(protocol) elif isinstance(protocol, ProtocolThread): callback = prepare_protocol_as_bleak_callback(protocol) else: raise ValueError(f"Unknown protocol type {type(protocol)}") return callback def make_disconnect_callback(comm: CommunicateCancel): def cb(client): if not comm.cancel: print(f"Client {client.address} disconnected unexpectedly, exiting") sys.exit(1) return cb async def producer_bluetooth( protocol: Union[ProtocolAsyncio, ProtocolThread], comm: CommunicateCancel, ble_address: str, ) -> None: """Receives data from a serially connected wave ring and passes it to the `protocol` Args: protocol: An object that knows how to process the raw data sent from the Wave ring into a structured format and passes it along between `producer` and `consumer`. comm: An object that allows `producer` and `consumer` to communicate when to cancel the process ble_address: Address of the bluetooth device to connect to. E.g. 'D5:73:DB:85:B4:A1' Note: The producer doesn't return a value, but the data gets added to the `protocol` that can be accessed from other parts of the program i.e. some `consumer` """ print(f"Connecting to wave at address {ble_address}") callback = bleak_callback(protocol) async with BleakClient(ble_address, disconnected_callback=make_disconnect_callback(comm)) as client: await client.start_notify(API_CHAR_UUID, callback) await client.write_gatt_char(API_CHAR_UUID, get_start_api_package(), False) print("Connected to Wave") while True: # This `while` loop and `asyncio.sleep` statement is some magic that is required to continually fetch # the data from the bluetooth device. 
await asyncio.sleep(0.1) if comm.cancel: print("Recieved a cancel signal, stopping ble client") break await client.stop_notify(API_CHAR_UUID) async def producer_serial(protocol: ProtocolAsyncio, comm: CommunicateCancel, serial_port: str): """Receives data from a serially connected wave ring and passes it to the `protocol` Args: protocol: An object that knows how to process the raw data sent from the Wave ring into a structured format and passes it along between `producer` and `consumer`. comm: An object that allows `producer` and `consumer` to communicate when to cancel the process serial_port: The serial port to read from Note: The producer doesn't return a value, but the data gets added to the `protocol` that can be accessed from other parts of the program i.e. some `consumer` """ reader, writer = await open_serial_connection(url=serial_port, baudrate=BAUDRATE, parity=serial.PARITY_EVEN) writer.write(get_start_api_package()) while True: # The number of bytes read here is an arbitrary power of 2 on the order of a size of a single package packet = await reader.read(n=128) await protocol.data_received(packet) if comm.cancel: print("Recieved a cancel signal, stopping serial connection") break async def consumer( protocol: ProtocolAsyncio, comm: CommunicateCancel, callbacks: Union[List[WaveCallback], Tuple[WaveCallback]], ) -> None: """Consumes the data from a producer via a protocol Args: protocol: An object that knows how to process the raw data sent from the Wave ring into a structured format and passes it along between `producer` and `consumer`. comm: An object that allows `producer` and `consumer` to communicate when to cancel the process callbacks: A list/tuple of callbacks that handle the data passed from the wave ring when available """ while True: package = await protocol.queue.get() if comm.is_cancel(package) or comm.cancel: print("Got a cancel message. Exiting consumer loop...") comm.cancel = True break for callback in callbacks: callback(package) def make_sigint_handler(comm: CommunicateCancel): """Create a signal handler to cancel an asyncio loop using signals.""" def handler(*args): comm.cancel = True return handler def _run_asyncio( callbacks: List[WaveCallback], producer: Union[producer_bluetooth, producer_serial], protocol: ProtocolAsyncio ) -> None: """Runs a producer and a consumer, hooking into the data using the supplied callbacks Args: callbacks: See docs for `consumer` producer: A callable that takes 2 arguments, a protocol and a communication object protocol: An object that knows how to process the raw data sent from the Wave ring into a structured format and passes it along between `producer` and `consumer`. """ # A singleton that sends messages about whether the data transfer has been canceled. comm = CommunicateCancel() loop = get_or_create_event_loop() loop.add_signal_handler(signal.SIGINT, make_sigint_handler(comm)) # Note: The consumer and the producer send the data via the instance of `protocol` tasks = asyncio.gather(producer(protocol, comm), consumer(protocol, comm, callbacks)) loop.run_until_complete(tasks) def run_asyncio_bluetooth(callbacks: List[WaveCallback], ble_address) -> None: """Runs an async `consumer-producer` loop using user supplied callbacks for a bluetooth device Args: callbacks: A list/tuple of callbacks that handle the data passed from the wave ring ble_address: Address of the bluetooth device to connect to. E.g. 
'D5:73:DB:85:B4:A1' """ _run_asyncio(callbacks, partial(producer_bluetooth, ble_address=ble_address), ProtocolAsyncio()) def run_asyncio_serial(callbacks: List[WaveCallback], serial_port: str = None) -> None: """Runs an async `consumer-producer` loop using user supplied callbacks for a serial device Args: callbacks: A list/tuple of callbacks that handle the data passed from the wave ring serial_port: The serial port to read from. If `None` will try to determine it automatically based on the operating system the script is running on """ serial_port = get_serial_port() if serial_port is None else serial_port _run_asyncio(callbacks, partial(producer_serial, serial_port=serial_port), ProtocolAsyncio())
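# Illustrative usage (my_callback stands for any object implementing the
# WaveCallback interface imported above; the Bluetooth address is the example
# address used in the docstrings):
#
#   run_asyncio_bluetooth([my_callback], "D5:73:DB:85:B4:A1")
#   # or over a serial connection, letting the port be auto-detected:
#   run_asyncio_serial([my_callback])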
the-stack_0_4246
# Copyright 2018-2020 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module contains the mixin interface class for creating differentiable quantum tapes with TensorFlow. """ # pylint: disable=protected-access, attribute-defined-outside-init import numpy as np import tensorflow as tf try: from tensorflow.python.eager.tape import should_record_backprop except ImportError: from tensorflow.python.eager.tape import should_record as should_record_backprop from pennylane.tape.queuing import AnnotatedQueue class TFInterface(AnnotatedQueue): """Mixin class for applying an TensorFlow interface to a :class:`~.JacobianTape`. TensorFlow-compatible quantum tape classes can be created via subclassing: .. code-block:: python class MyTFQuantumTape(TFInterface, JacobianTape): Alternatively, the TensorFlow interface can be dynamically applied to existing quantum tapes via the :meth:`~.apply` class method. This modifies the tape **in place**. Once created, the TensorFlow interface can be used to perform quantum-classical differentiable programming. .. note:: If using a device that supports native TensorFlow computation and backpropagation, such as :class:`~.DefaultQubitTF`, the TensorFlow interface **does not need to be applied**. It is only applied to tapes executed on non-TensorFlow compatible devices. **Example** Once a TensorFlow quantum tape has been created, it can be differentiated using the gradient tape: .. code-block:: python dev = qml.device("default.qubit", wires=1) p = tf.Variable([0.1, 0.2, 0.3], dtype=tf.float64) with tf.GradientTape() as tape: with TFInterface.apply(JacobianTape()) as qtape: qml.Rot(p[0], p[1] ** 2 + p[0] * p[2], p[1] * tf.sin(p[2]), wires=0) expval(qml.PauliX(0)) result = qtape.execute(dev) >>> print(result) tf.Tensor([0.06982072], shape=(1,), dtype=float64) >>> grad = tape.gradient(result, p) >>> print(grad) tf.Tensor([0.29874274 0.39710271 0.09958091], shape=(3,), dtype=float64) The TensorFlow interface defaults to ``tf.float64`` output. This can be modified by providing the ``dtype`` argument when applying the interface: >>> p = tf.Variable([0.1, 0.2, 0.3], dtype=tf.float32) >>> with tf.GradientTape() as tape: ... TFInterface.apply(qtape, dtype=tf.float32) # reusing the previous qtape ... result = qtape.execute(dev) >>> print(result) tf.Tensor([0.06982072], shape=(1,), dtype=float32) >>> grad = tape.gradient(result, p) >>> print(grad) tf.Tensor([0.2895088 0.38464668 0.09645163], shape=(3,), dtype=float32) """ dtype = tf.float64 @property def interface(self): # pylint: disable=missing-function-docstring return "tf" def _update_trainable_params(self): params = self.get_parameters(trainable_only=False) trainable_params = set() for idx, p in enumerate(params): # Determine which input tensors/Variables are being recorded for backpropagation. 
# The function should_record_backprop, documented here: # https://github.com/tensorflow/tensorflow/tree/master/tensorflow/python/eager/tape.py#L167 # accepts lists of *Tensors* (not Variables), returning True if all are being watched by one or more # existing gradient tapes, False if not. if isinstance(p, (tf.Variable, tf.Tensor)) and should_record_backprop( # we need to convert any Variable objects to Tensors here, otherwise # should_record_backprop will raise an error [tf.convert_to_tensor(p)] ): trainable_params.add(idx) self.trainable_params = trainable_params @staticmethod def convert_to_numpy(tensors): """Converts any TensorFlow tensors in a sequence to NumPy arrays. Args: tensors (Sequence[Any, tf.Variable, tf.Tensor]): input sequence Returns: list[Any, array]: list with all tensors converted to NumPy arrays """ return [i.numpy() if isinstance(i, (tf.Variable, tf.Tensor)) else i for i in tensors] @tf.custom_gradient def _execute(self, params, **input_kwargs): # unwrap free parameters args = self.convert_to_numpy(params) # unwrap constant parameters all_params = self.get_parameters(trainable_only=False) all_params_unwrapped = self.convert_to_numpy(all_params) self.set_parameters(all_params_unwrapped, trainable_only=False) res = self.execute_device(args, input_kwargs["device"]) self.set_parameters(all_params, trainable_only=False) def grad(grad_output, **tfkwargs): variables = tfkwargs.get("variables", None) self.set_parameters(all_params_unwrapped, trainable_only=False) jacobian = self.jacobian(input_kwargs["device"], params=args, **self.jacobian_options) self.set_parameters(all_params, trainable_only=False) jacobian = tf.constant(jacobian, dtype=self.dtype) # Reshape gradient output array as a 2D row-vector. grad_output_row = tf.reshape(grad_output, [1, -1]) # Calculate the vector-Jacobian matrix product, and unstack the output. grad_input = tf.matmul(grad_output_row, jacobian) grad_input = tf.unstack(tf.reshape(grad_input, [-1])) if variables is not None: return grad_input, variables return grad_input if res.dtype == np.dtype("object"): res = np.hstack(res) return tf.convert_to_tensor(res, dtype=self.dtype), grad @classmethod def apply(cls, tape, dtype=tf.float64): """Apply the TensorFlow interface to an existing tape in-place. Args: tape (.JacobianTape): a quantum tape to apply the TF interface to dtype (tf.dtype): the dtype that the returned quantum tape should output **Example** >>> with JacobianTape() as tape: ... qml.RX(0.5, wires=0) ... expval(qml.PauliZ(0)) >>> TFInterface.apply(tape) >>> tape <TFQuantumTape: wires=<Wires = [0]>, params=1> """ tape_class = getattr(tape, "__bare__", tape.__class__) tape.__bare__ = tape_class tape.__class__ = type("TFQuantumTape", (cls, tape_class), {"dtype": dtype}) tape._update_trainable_params() return tape
the-stack_0_4248
# Copyright (c) Microsoft Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import pytest from playwright.async_api import Error, Page from tests.server import Server async def test_evaluate_handle(page, server): await page.goto(server.EMPTY_PAGE) main_frame = page.main_frame assert main_frame.page == page window_handle = await main_frame.evaluate_handle("window") assert window_handle async def test_frame_element(page, server, utils): await page.goto(server.EMPTY_PAGE) frame1 = await utils.attach_frame(page, "frame1", server.EMPTY_PAGE) await utils.attach_frame(page, "frame2", server.EMPTY_PAGE) frame3 = await utils.attach_frame(page, "frame3", server.EMPTY_PAGE) frame1handle1 = await page.query_selector("#frame1") frame1handle2 = await frame1.frame_element() frame3handle1 = await page.query_selector("#frame3") frame3handle2 = await frame3.frame_element() assert await frame1handle1.evaluate("(a, b) => a === b", frame1handle2) assert await frame3handle1.evaluate("(a, b) => a === b", frame3handle2) assert await frame1handle1.evaluate("(a, b) => a === b", frame3handle1) is False async def test_frame_element_with_content_frame(page, server, utils): await page.goto(server.EMPTY_PAGE) frame = await utils.attach_frame(page, "frame1", server.EMPTY_PAGE) handle = await frame.frame_element() content_frame = await handle.content_frame() assert content_frame == frame async def test_frame_element_throw_when_detached(page, server, utils): await page.goto(server.EMPTY_PAGE) frame1 = await utils.attach_frame(page, "frame1", server.EMPTY_PAGE) await page.eval_on_selector("#frame1", "e => e.remove()") error = None try: await frame1.frame_element() except Error as e: error = e assert error.message == "Frame has been detached." 
async def test_evaluate_throw_for_detached_frames(page, server, utils): frame1 = await utils.attach_frame(page, "frame1", server.EMPTY_PAGE) await utils.detach_frame(page, "frame1") error = None try: await frame1.evaluate("7 * 8") except Error as e: error = e assert "Execution Context is not available in detached frame" in error.message async def test_evaluate_isolated_between_frames(page, server, utils): await page.goto(server.EMPTY_PAGE) await utils.attach_frame(page, "frame1", server.EMPTY_PAGE) assert len(page.frames) == 2 [frame1, frame2] = page.frames assert frame1 != frame2 await asyncio.gather( frame1.evaluate("window.a = 1"), frame2.evaluate("window.a = 2") ) [a1, a2] = await asyncio.gather( frame1.evaluate("window.a"), frame2.evaluate("window.a") ) assert a1 == 1 assert a2 == 2 async def test_should_handle_nested_frames(page, server, utils): await page.goto(server.PREFIX + "/frames/nested-frames.html") assert utils.dump_frames(page.main_frame) == [ "http://localhost:<PORT>/frames/nested-frames.html", " http://localhost:<PORT>/frames/frame.html (aframe)", " http://localhost:<PORT>/frames/two-frames.html (2frames)", " http://localhost:<PORT>/frames/frame.html (dos)", " http://localhost:<PORT>/frames/frame.html (uno)", ] async def test_should_send_events_when_frames_are_manipulated_dynamically( page, server, utils ): await page.goto(server.EMPTY_PAGE) # validate frameattached events attached_frames = [] page.on("frameattached", lambda frame: attached_frames.append(frame)) await utils.attach_frame(page, "frame1", "./assets/frame.html") assert len(attached_frames) == 1 assert "/assets/frame.html" in attached_frames[0].url # validate framenavigated events navigated_frames = [] page.on("framenavigated", lambda frame: navigated_frames.append(frame)) await page.evaluate( """() => { frame = document.getElementById('frame1') frame.src = './empty.html' return new Promise(x => frame.onload = x) }""" ) assert len(navigated_frames) == 1 assert navigated_frames[0].url == server.EMPTY_PAGE # validate framedetached events detached_frames = [] page.on("framedetached", lambda frame: detached_frames.append(frame)) await utils.detach_frame(page, "frame1") assert len(detached_frames) == 1 assert detached_frames[0].is_detached() async def test_framenavigated_when_navigating_on_anchor_urls(page, server): await page.goto(server.EMPTY_PAGE) async with page.expect_event("framenavigated"): await page.goto(server.EMPTY_PAGE + "#foo") assert page.url == server.EMPTY_PAGE + "#foo" async def test_persist_main_frame_on_cross_process_navigation(page, server): await page.goto(server.EMPTY_PAGE) main_frame = page.main_frame await page.goto(server.CROSS_PROCESS_PREFIX + "/empty.html") assert page.main_frame == main_frame async def test_should_not_send_attach_detach_events_for_main_frame(page, server): has_events = [] page.on("frameattached", lambda frame: has_events.append(True)) page.on("framedetached", lambda frame: has_events.append(True)) await page.goto(server.EMPTY_PAGE) assert has_events == [] async def test_detach_child_frames_on_navigation(page, server): attached_frames = [] detached_frames = [] navigated_frames = [] page.on("frameattached", lambda frame: attached_frames.append(frame)) page.on("framedetached", lambda frame: detached_frames.append(frame)) page.on("framenavigated", lambda frame: navigated_frames.append(frame)) await page.goto(server.PREFIX + "/frames/nested-frames.html") assert len(attached_frames) == 4 assert len(detached_frames) == 0 assert len(navigated_frames) == 5 attached_frames = [] 
detached_frames = [] navigated_frames = [] await page.goto(server.EMPTY_PAGE) assert len(attached_frames) == 0 assert len(detached_frames) == 4 assert len(navigated_frames) == 1 async def test_framesets(page, server): attached_frames = [] detached_frames = [] navigated_frames = [] page.on("frameattached", lambda frame: attached_frames.append(frame)) page.on("framedetached", lambda frame: detached_frames.append(frame)) page.on("framenavigated", lambda frame: navigated_frames.append(frame)) await page.goto(server.PREFIX + "/frames/frameset.html") assert len(attached_frames) == 4 assert len(detached_frames) == 0 assert len(navigated_frames) == 5 attached_frames = [] detached_frames = [] navigated_frames = [] await page.goto(server.EMPTY_PAGE) assert len(attached_frames) == 0 assert len(detached_frames) == 4 assert len(navigated_frames) == 1 async def test_frame_from_inside_shadow_dom(page, server): await page.goto(server.PREFIX + "/shadow.html") await page.evaluate( """async url => { frame = document.createElement('iframe'); frame.src = url; document.body.shadowRoot.appendChild(frame); await new Promise(x => frame.onload = x); }""", server.EMPTY_PAGE, ) assert len(page.frames) == 2 assert page.frames[1].url == server.EMPTY_PAGE async def test_frame_name(page, server, utils): await utils.attach_frame(page, "theFrameId", server.EMPTY_PAGE) await page.evaluate( """url => { frame = document.createElement('iframe'); frame.name = 'theFrameName'; frame.src = url; document.body.appendChild(frame); return new Promise(x => frame.onload = x); }""", server.EMPTY_PAGE, ) assert page.frames[0].name == "" assert page.frames[1].name == "theFrameId" assert page.frames[2].name == "theFrameName" async def test_frame_parent(page, server, utils): await utils.attach_frame(page, "frame1", server.EMPTY_PAGE) await utils.attach_frame(page, "frame2", server.EMPTY_PAGE) assert page.frames[0].parent_frame is None assert page.frames[1].parent_frame == page.main_frame assert page.frames[2].parent_frame == page.main_frame async def test_should_report_different_frame_instance_when_frame_re_attaches( page, server, utils ): frame1 = await utils.attach_frame(page, "frame1", server.EMPTY_PAGE) await page.evaluate( """() => { window.frame = document.querySelector('#frame1') window.frame.remove() }""" ) assert frame1.is_detached() async with page.expect_event("frameattached") as frame2_info: await page.evaluate("() => document.body.appendChild(window.frame)") frame2 = await frame2_info.value assert frame2.is_detached() is False assert frame1 != frame2 async def test_strict_mode(page: Page, server: Server): await page.goto(server.EMPTY_PAGE) await page.set_content( """ <button>Hello</button> <button>Hello</button> """ ) with pytest.raises(Error): await page.text_content("button", strict=True) with pytest.raises(Error): await page.query_selector("button", strict=True)
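
# Note: the tests above rely on a "utils" fixture (attach_frame, detach_frame,
# dump_frames) defined elsewhere in the test suite. As a rough sketch of what
# the two most frequently used helpers could look like (an assumption inferred
# from the call sites, not the project's actual conftest):
async def attach_frame(page: Page, frame_id: str, url: str):
    handle = await page.evaluate_handle(
        """async ({ frame_id, url }) => {
            const frame = document.createElement('iframe');
            frame.src = url;
            frame.id = frame_id;
            document.body.appendChild(frame);
            await new Promise(x => frame.onload = x);
            return frame;
        }""",
        {"frame_id": frame_id, "url": url},
    )
    return await handle.as_element().content_frame()


async def detach_frame(page: Page, frame_id: str):
    await page.evaluate(
        "frame_id => document.getElementById(frame_id).remove()", frame_id
    )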
the-stack_0_4249
# -*- coding: utf-8 -*- """ werkzeug ~~~~~~~~ Werkzeug is the Swiss Army knife of Python web development. It provides useful classes and functions for any WSGI application to make the life of a python web developer much easier. All of the provided classes are independent from each other so you can mix it with any other library. :copyright: 2007 Pallets :license: BSD-3-Clause """ import sys from types import ModuleType __version__ = "0.15.4" # This import magic raises concerns quite often which is why the implementation # and motivation is explained here in detail now. # # The majority of the functions and classes provided by Werkzeug work on the # HTTP and WSGI layer. There is no useful grouping for those which is why # they are all importable from "werkzeug" instead of the modules where they are # implemented. The downside of that is, that now everything would be loaded at # once, even if unused. # # The implementation of a lazy-loading module in this file replaces the # werkzeug package when imported from within. Attribute access to the werkzeug # module will then lazily import from the modules that implement the objects. # import mapping to objects in other modules all_by_module = { "werkzeug.debug": ["DebuggedApplication"], "werkzeug.local": [ "Local", "LocalManager", "LocalProxy", "LocalStack", "release_local", ], "werkzeug.serving": ["run_simple"], "werkzeug.test": ["Client", "EnvironBuilder", "create_environ", "run_wsgi_app"], "werkzeug.testapp": ["test_app"], "werkzeug.exceptions": ["abort", "Aborter"], "werkzeug.urls": [ "url_decode", "url_encode", "url_quote", "url_quote_plus", "url_unquote", "url_unquote_plus", "url_fix", "Href", "iri_to_uri", "uri_to_iri", ], "werkzeug.formparser": ["parse_form_data"], "werkzeug.utils": [ "escape", "environ_property", "append_slash_redirect", "redirect", "cached_property", "import_string", "dump_cookie", "parse_cookie", "unescape", "format_string", "find_modules", "header_property", "html", "xhtml", "HTMLBuilder", "validate_arguments", "ArgumentValidationError", "bind_arguments", "secure_filename", ], "werkzeug.wsgi": [ "get_current_url", "get_host", "pop_path_info", "peek_path_info", "ClosingIterator", "FileWrapper", "make_line_iter", "LimitedStream", "responder", "wrap_file", "extract_path_info", ], "werkzeug.datastructures": [ "MultiDict", "CombinedMultiDict", "Headers", "EnvironHeaders", "ImmutableList", "ImmutableDict", "ImmutableMultiDict", "TypeConversionDict", "ImmutableTypeConversionDict", "Accept", "MIMEAccept", "CharsetAccept", "LanguageAccept", "RequestCacheControl", "ResponseCacheControl", "ETags", "HeaderSet", "WWWAuthenticate", "Authorization", "FileMultiDict", "CallbackDict", "FileStorage", "OrderedMultiDict", "ImmutableOrderedMultiDict", ], "werkzeug.useragents": ["UserAgent"], "werkzeug.http": [ "parse_etags", "parse_date", "http_date", "cookie_date", "parse_cache_control_header", "is_resource_modified", "parse_accept_header", "parse_set_header", "quote_etag", "unquote_etag", "generate_etag", "dump_header", "parse_list_header", "parse_dict_header", "parse_authorization_header", "parse_www_authenticate_header", "remove_entity_headers", "is_entity_header", "remove_hop_by_hop_headers", "parse_options_header", "dump_options_header", "is_hop_by_hop_header", "unquote_header_value", "quote_header_value", "HTTP_STATUS_CODES", ], "werkzeug.wrappers": [ "BaseResponse", "BaseRequest", "Request", "Response", "AcceptMixin", "ETagRequestMixin", "ETagResponseMixin", "ResponseStreamMixin", "CommonResponseDescriptorsMixin", "UserAgentMixin", 
"AuthorizationMixin", "WWWAuthenticateMixin", "CommonRequestDescriptorsMixin", ], "werkzeug.middleware.dispatcher": ["DispatcherMiddleware"], "werkzeug.middleware.shared_data": ["SharedDataMiddleware"], "werkzeug.security": ["generate_password_hash", "check_password_hash"], # the undocumented easteregg ;-) "werkzeug._internal": ["_easteregg"], } # modules that should be imported when accessed as attributes of werkzeug attribute_modules = frozenset(["exceptions", "routing"]) object_origins = {} for module, items in all_by_module.items(): for item in items: object_origins[item] = module class module(ModuleType): """Automatically import objects from the modules.""" def __getattr__(self, name): if name in object_origins: module = __import__(object_origins[name], None, None, [name]) for extra_name in all_by_module[module.__name__]: setattr(self, extra_name, getattr(module, extra_name)) return getattr(module, name) elif name in attribute_modules: __import__("werkzeug." + name) return ModuleType.__getattribute__(self, name) def __dir__(self): """Just show what we want to show.""" result = list(new_module.__all__) result.extend( ( "__file__", "__doc__", "__all__", "__docformat__", "__name__", "__path__", "__package__", "__version__", ) ) return result # keep a reference to this module so that it's not garbage collected old_module = sys.modules["werkzeug"] # setup the new module and patch it into the dict of loaded modules new_module = sys.modules["werkzeug"] = module("werkzeug") new_module.__dict__.update( { "__file__": __file__, "__package__": "werkzeug", "__path__": __path__, "__doc__": __doc__, "__version__": __version__, "__all__": tuple(object_origins) + tuple(attribute_modules), "__docformat__": "restructuredtext en", } ) # Due to bootstrapping issues we need to import exceptions here. # Don't ask :-( __import__("werkzeug.exceptions")
the-stack_0_4255
from django.contrib.auth.models import AnonymousUser from core.models.group import get_user_group from core.models.project import Project from rest_framework import serializers class ProjectsField(serializers.Field): def to_representation(self, project_mgr): request_user = self.parent.request_user if isinstance(request_user, AnonymousUser): return None try: group = get_user_group(request_user.username) projects = project_mgr.filter(owner=group) # Modifications to how 'project' should be displayed here: return [p.uuid for p in projects] except Project.DoesNotExist: return None def to_internal_value(self, data, files, field_name, into): value = data.get(field_name) if value is None: return related_obj = self.parent.instance user = self.parent.request_user group = get_user_group(user.username) # Retrieve the New Project(s) if isinstance(value, list): project_id = value[0] else: project_id = value new_project = Project.objects.get(id=project_id, owner=group) related_obj.project = new_project related_obj.save() # Modifications to how 'project' should be displayed here: into[field_name] = project_id
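
# Hypothetical wiring for ProjectsField (the serializer and attribute names
# below are illustrative, not taken from the original code): the parent
# serializer must expose a `request_user` attribute, and the field should be
# bound to the instance's related manager of Project objects.
from rest_framework import serializers


class InstanceSerializer(serializers.Serializer):
    projects = ProjectsField(required=False)

    def __init__(self, *args, **kwargs):
        # the view is assumed to pass the authenticated user in explicitly
        self.request_user = kwargs.pop("request_user", None)
        super(InstanceSerializer, self).__init__(*args, **kwargs)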
the-stack_0_4258
from __future__ import absolute_import import urlparse import boto3 class S3DirectoryGenerator(object): def __init__(self, s3_url): parsed_s3_url = urlparse.urlparse(s3_url) if parsed_s3_url.scheme != 's3': raise SyntaxError('Invalid S3 scheme') self.bucket_name = parsed_s3_url.netloc self.bucket_path = parsed_s3_url.path[1:] if parsed_s3_url.path.startswith('/') else parsed_s3_url.path bucket_path_split = self.bucket_path.split('/') try: client = boto3.client('s3') region = client.get_bucket_location(Bucket=self.bucket_name)['LocationConstraint'] except: region=None s3_connection = boto3.resource('s3', region_name=region) self.bucket = s3_connection.Bucket(self.bucket_name) if bucket_path_split[-1] == '': # directory listing self.strip_length = len(self.bucket_path) else: # prefix listing self.strip_length = len('/'.join(bucket_path_split[:-1])) def __iter__(self): return self.generator() def generator(self): for o in self.bucket.objects.filter(Prefix=self.bucket_path): key = o.key[self.strip_length:] # S3 doesn't really have a concept of dirs. The convention is '/' is path separator, we do the same path = key.split('/') if path[0] == '' and not self.bucket_path.endswith('/'): # we assume the S3 prefix is a directory that wasn't terminated with a '/' path.pop(0) yield (path, o.size)
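
# A small usage sketch for S3DirectoryGenerator, assuming boto3 credentials are
# configured and that the bucket and prefix below exist (both are placeholders,
# not part of the original module).
if __name__ == '__main__':
    for path, size in S3DirectoryGenerator('s3://example-bucket/some/prefix/'):
        # "path" is the key split on '/', relative to the requested prefix
        print('/'.join(path), size)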
the-stack_0_4259
# qubit number=4 # total number=40 import cirq import qiskit from qiskit import IBMQ from qiskit.providers.ibmq import least_busy from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister from qiskit import BasicAer, execute, transpile from pprint import pprint from qiskit.test.mock import FakeVigo from math import log2 import numpy as np import networkx as nx def bitwise_xor(s: str, t: str) -> str: length = len(s) res = [] for i in range(length): res.append(str(int(s[i]) ^ int(t[i]))) return ''.join(res[::-1]) def bitwise_dot(s: str, t: str) -> str: length = len(s) res = 0 for i in range(length): res += int(s[i]) * int(t[i]) return str(res % 2) def build_oracle(n: int, f) -> QuantumCircuit: # implement the oracle O_f # NOTE: use multi_control_toffoli_gate ('noancilla' mode) # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate controls = QuantumRegister(n, "ofc") target = QuantumRegister(1, "oft") oracle = QuantumCircuit(controls, target, name="Of") for i in range(2 ** n): rep = np.binary_repr(i, n) if f(rep) == "1": for j in range(n): if rep[j] == "0": oracle.x(controls[j]) oracle.mct(controls, target[0], None, mode='noancilla') for j in range(n): if rep[j] == "0": oracle.x(controls[j]) # oracle.barrier() return oracle def make_circuit(n:int,f) -> QuantumCircuit: # circuit begin input_qubit = QuantumRegister(n,"qc") classical = ClassicalRegister(n, "qm") prog = QuantumCircuit(input_qubit, classical) prog.h(input_qubit[3]) # number=19 prog.cz(input_qubit[0],input_qubit[3]) # number=20 prog.h(input_qubit[3]) # number=21 prog.cx(input_qubit[0],input_qubit[3]) # number=23 prog.x(input_qubit[3]) # number=24 prog.cx(input_qubit[0],input_qubit[3]) # number=25 prog.cx(input_qubit[0],input_qubit[3]) # number=17 prog.rx(-0.48380526865282825,input_qubit[3]) # number=26 prog.h(input_qubit[1]) # number=2 prog.y(input_qubit[3]) # number=18 prog.h(input_qubit[2]) # number=3 prog.h(input_qubit[3]) # number=4 prog.y(input_qubit[3]) # number=12 prog.h(input_qubit[0]) # number=5 oracle = build_oracle(n-1, f) prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]]) prog.h(input_qubit[1]) # number=6 prog.h(input_qubit[2]) # number=7 prog.h(input_qubit[1]) # number=34 prog.cz(input_qubit[0],input_qubit[1]) # number=35 prog.h(input_qubit[1]) # number=36 prog.cx(input_qubit[0],input_qubit[1]) # number=31 prog.cx(input_qubit[0],input_qubit[1]) # number=37 prog.x(input_qubit[1]) # number=38 prog.cx(input_qubit[0],input_qubit[1]) # number=39 prog.cx(input_qubit[0],input_qubit[1]) # number=33 prog.cx(input_qubit[0],input_qubit[1]) # number=30 prog.h(input_qubit[3]) # number=8 prog.h(input_qubit[0]) # number=9 prog.y(input_qubit[2]) # number=10 prog.x(input_qubit[2]) # number=22 prog.y(input_qubit[2]) # number=11 prog.x(input_qubit[0]) # number=13 prog.x(input_qubit[0]) # number=14 # circuit end for i in range(n): prog.measure(input_qubit[i], classical[i]) return prog if __name__ == '__main__': a = "111" b = "0" f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b) prog = make_circuit(4,f) IBMQ.load_account() provider = IBMQ.get_provider(hub='ibm-q') provider.backends() backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator 
and x.status().operational == True)) sample_shot =8000 info = execute(prog, backend=backend, shots=sample_shot).result().get_counts() backend = FakeVigo() circuit1 = transpile(prog,backend,optimization_level=2) writefile = open("../data/startQiskit_QC2762.csv","w") print(info,file=writefile) print("results end", file=writefile) print(circuit1.__len__(),file=writefile) print(circuit1,file=writefile) writefile.close()
the-stack_0_4260
import json
import random
import sys

from allennlp_reasoning_explainqa.common.constants import CORRECT_OPTION_TAG
from allennlp_reasoning_explainqa.training.metrics.confusion_matrix import (
    F1MeasureCustomRetrievalEval,
)
from allennlp_reasoning_explainqa.training.metrics.explanation_eval import (
    ExplanationEval,
)

# Sets random seed to a nothing-up-my-sleeve number so that we have
# deterministic evaluation scores.
random.seed(12345)


def evaluate(prediction_filename, label_filename):
    chainid_to_label = json.load(open(label_filename, "r"))
    chain_count = len(chainid_to_label)
    predictions_lines = open(prediction_filename, "r").readlines()
    predictions = [json.loads(row) for row in predictions_lines]
    prediction_count = len(predictions)
    if chain_count != prediction_count:
        print(
            f"Label file {label_filename} has {chain_count} chains, but prediction file {prediction_filename} has {prediction_count} predictions. These must be equal."
        )
        sys.exit(1)

    f1eval = F1MeasureCustomRetrievalEval(pos_label=1)
    explanation_eval = ExplanationEval()
    chain_ids_covered = []
    cnt = 0
    for row in predictions:
        assert "score" in row, "Prediction should contain field score"
        assert "chain_id" in row, "Prediction should contain field chain_id"
        score = row["score"]
        chain_id = row["chain_id"]
        qid = chain_id.strip().split("_")[0]
        print("qid,chain_id,score = ", qid, chain_id, score)
        gtlabel = chainid_to_label[chain_id]
        f1eval(int(gtlabel), score)
        explanation_eval(qid, CORRECT_OPTION_TAG, int(gtlabel), score)
        chain_ids_covered.append(chain_id)
        cnt += 1

    assert len(chain_ids_covered) == len(
        chainid_to_label
    ), "Found {} chains but expected {} chains".format(
        len(chain_ids_covered), len(chainid_to_label)
    )

    binclf_performance = f1eval.get_metric(reset=True)
    print("f1.get_metric() = ", binclf_performance)
    explanation_performance = explanation_eval.get_metric(reset=True)
    print("explanation_eval.get_metric() = ", explanation_performance)
    final_metrics = {
        "auc_roc": binclf_performance["auc_roc"],
        "explainP1": explanation_performance["explainP1"],
        "explainNDCG": explanation_performance["explainNDCG"],
    }
    print("=" * 32)
    print(": auc_roc = ", binclf_performance["auc_roc"])
    print(": P1 = ", explanation_performance["explainP1"])
    print(": explainNDCG = ", explanation_performance["explainNDCG"])
    print("=" * 32)
    return final_metrics


if __name__ == "__main__":
    prediction_filename = sys.argv[1]
    label_filename = sys.argv[2]
    metrics_filename = sys.argv[3]
    print(
        f"Evaluating prediction file {prediction_filename} with label file {label_filename}"
    )
    metrics = evaluate(prediction_filename, label_filename)
    print(f"Writing final metrics to file: {metrics_filename}")
    json.dump(metrics, open(metrics_filename, "w"))
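
# A minimal sketch of the two inputs evaluate() expects, inferred from the code
# above (the file names and chain ids are made up): the label file is a JSON
# object mapping chain_id -> 0/1 gold label, and the prediction file is
# JSON-lines with one {"chain_id": ..., "score": ...} object per row. The qid
# is recovered from the prefix of the chain_id before the first underscore.
def _write_example_inputs():
    labels = {"q1_chain0": 1, "q1_chain1": 0}
    with open("labels.json", "w") as f:
        json.dump(labels, f)
    with open("predictions.jsonl", "w") as f:
        for chain_id, score in [("q1_chain0", 0.91), ("q1_chain1", 0.12)]:
            f.write(json.dumps({"chain_id": chain_id, "score": score}) + "\n")
    # evaluate("predictions.jsonl", "labels.json") would then produce the
    # auc_roc / explainP1 / explainNDCG metrics printed above.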
the-stack_0_4261
import matplotlib.pyplot as plt import pandas as pd from rich import pretty, print from rich.progress import BarColumn, Progress from sklearn.metrics import ( accuracy_score, auc, classification_report, f1_score, plot_confusion_matrix, roc_auc_score, roc_curve, ) from sklearn.neural_network import MLPClassifier from sklearn.preprocessing import LabelBinarizer, LabelEncoder import utils def draw_roc(y_test, y_pred): lb = LabelBinarizer() lb.fit(y_test) lb.classes_.tolist() fpr = dict() tpr = dict() roc_auc = dict() by_test = lb.transform(y_test) by_pred = lb.transform(y_pred) for i in range(4): fpr[i], tpr[i], _ = roc_curve(by_test[:, i], by_pred[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) roc_auc = roc_auc_score(by_test, by_pred, average=None) plt.figure(figsize=(8, 5)) for i in range(4): plt.plot( fpr[i], tpr[i], label="%s ROC curve (area = %0.2f)" % (lb.classes_.tolist()[i], roc_auc[i]), ) plt.plot([0, 1], [0, 1], "k--") plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.title("Single Hidden Layer Neural Network Roc-Curve") plt.xlabel("False Positive Rate", fontsize=10) plt.ylabel("True Positive Rate", fontsize=10) plt.tick_params(axis="both", which="major", labelsize=12) plt.legend(loc="lower right", fontsize=7, frameon=False) plt.show() def draw_confusion_matrix(Clf, X, y): titles_options = [ ("Confusion matrix, without normalization", None), ("Neural network confusion matrix", "true"), ] # colors: Wistia too yellow for title, normalize in titles_options: disp = plot_confusion_matrix(Clf, X, y, cmap="PuBuGn", normalize=normalize) disp.ax_.set_title(title) plt.show() def execute_and_report(learn_rate, acti, current_params): clf = MLPClassifier( activation=acti, learning_rate_init=learn_rate, random_state=5213890, hidden_layer_sizes=current_params, ) clf.fit(train_x, train_y) # Apply on the training set print("Training set:") y_pred = clf.predict(train_x) print(classification_report(train_y, y_pred)) # Apply on the test set and evaluate the performance y_pred = clf.predict(test_x) print("Test set:") print(classification_report(test_y, y_pred)) acc = accuracy_score(test_y, y_pred) * 100 f1 = f1_score(test_y, y_pred, average="weighted") * 100 # draw draw draw_confusion_matrix(clf, test_x, test_y) draw_roc(test_y, y_pred) # plt.plot(clf.loss_curve_) # plt.show() # report return { "Params": f"{acti}, {learn_rate}, {current_params}", "accuracy %": round(acc, 2), "F1 weighted %": round(f1, 2), } pretty.install() pd.set_option("display.max_rows", None) # DATASET train_x, train_y, test_x, test_y = utils.load_tracks_xyz( buckets="discrete", extractclass=("album", "type"), splits=2 ).values() # feature to reshape label_encoders = dict() column2encode = [ ("track", "language_code"), ("album", "listens"), ("track", "license"), ("album", "comments"), ("album", "date_created"), ("album", "favorites"), ("artist", "comments"), ("artist", "date_created"), ("artist", "favorites"), ("track", "comments"), ("track", "date_created"), ("track", "duration"), ("track", "favorites"), ("track", "interest"), ("track", "listens"), ] for col in column2encode: le = LabelEncoder() le.fit(test_x[col]) train_x[col] = le.fit_transform(train_x[col]) test_x[col] = le.fit_transform(test_x[col]) label_encoders[col] = le le = LabelEncoder() le.fit(train_y) test_y = le.fit_transform(test_y) train_y = le.fit_transform(train_y) class_name = ("album", "type") # Preparation count = 0 reports = pd.DataFrame(columns=["Params", "accuracy %", "F1 weighted %"]) params = [ { "activations": "identity", "learning_rate_inits": 0.001, 
"hidden_layer_sizes": (40, 40), }, { "activations": "identity", "learning_rate_inits": 0.001, "hidden_layer_sizes": (40, 20, 8), # old single layer "learning_rate_inits": 0.02, # old single layer "hidden_layer_sizes": (40,), }, ] testing_params = [params[-1]] activations = ["identity", "logistic", "tanh", "relu"] learning_rate_inits = [0.01, 0.001, 0.02] # progress reporting init progress = Progress( "[progress.description]{task.description}", BarColumn(), "[progress.percentage]{task.percentage:>3.0f}%", "{task.completed} of {task.total}", ) with progress: # adjust len if needed task_layers = progress.add_task("[red]Building…", total=len(params) * 2) for best_params in params: learn_rate = best_params["learning_rate_inits"] acti = best_params["activations"] hidd = best_params["hidden_layer_sizes"] row = execute_and_report(learn_rate, acti, hidd) reports = reports.append(row, ignore_index=True) count += 1 progress.advance(task_layers) # ------- switch up datasets: put in the 10-feature dataframe train_x, train_y, test_x, test_y = utils.load_tracks_xyz( buckets="discrete", extractclass=("album", "type"), splits=2, small=True ).values() # feature to reshape label_encoders = dict() column2encode = [ ("track", "duration"), ("track", "interest"), ("track", "listens"), ] for col in column2encode: le = LabelEncoder() le.fit(test_x[col]) train_x[col] = le.fit_transform(train_x[col]) test_x[col] = le.fit_transform(test_x[col]) label_encoders[col] = le le = LabelEncoder() le.fit(train_y) test_y = le.fit_transform(test_y) train_y = le.fit_transform(train_y) class_name = ("album", "type") # rerun neural networks for best_params in params: learn_rate = best_params["learning_rate_inits"] acti = best_params["activations"] hidd = best_params["hidden_layer_sizes"] row = execute_and_report(learn_rate, acti, hidd) reports = reports.append(row, ignore_index=True) count += 1 progress.advance(task_layers) # end switching up datasets ------- # results print(reports.sort_values(by=["accuracy %", "F1 weighted %"], ascending=False)) print(f"I have built {count} neural networks")
the-stack_0_4262
import secrets

from app import app
from .rvp import pvr
from .algo import final
from flask import render_template, request, redirect, flash


@app.route("/", methods=["GET", "POST"])
def index():
    secret_key = secrets.token_hex(16)
    app.config["SECRET_KEY"] = secret_key

    if request.method == "POST":
        req = request.form
        percentile = req["percentile"]
        rank = req["rank"]
        state = req["state"]
        pwd = req["pwd"]
        gender = req["gender"]
        category = req["category"]
        sortby = str(req["sortby"])

        if percentile == "" and rank == "":
            flash("Please enter either your Rank or your Percentile", 'error')
            return redirect(request.url)

        if rank == "":
            ranks = pvr(float(percentile), pwd, category)
            ranks = int(ranks)
            if ranks <= 0:
                ranks = 2
            result = final(ranks, float(percentile), category, state, gender, pwd, sortby)
        if rank:
            result = final(int(rank), percentile, category, state, gender, pwd, sortby)
            ranks = rank

        return render_template("public/result.html", ranks=ranks, category=category,
                               tables=[result.to_html(classes='data')],
                               titles=result.columns.values)

    return render_template("public/index.html")
the-stack_0_4263
from __future__ import print_function from builtins import object from pyethapp.eth_protocol import ETHProtocol, TransientBlockBody from devp2p.service import WiredService from devp2p.protocol import BaseProtocol from devp2p.app import BaseApp from ethereum.tools import tester import rlp class PeerMock(object): packets = [] config = dict() def send_packet(self, packet): self.packets.append(packet) def setup(): peer = PeerMock() proto = ETHProtocol(peer, WiredService(BaseApp())) proto.service.app.config['eth'] = dict(network_id=1337) chain = tester.Chain() cb_data = [] def cb(proto, **data): cb_data.append((proto, data)) return peer, proto, chain, cb_data, cb def test_basics(): peer, proto, chain, cb_data, cb = setup() assert isinstance(proto, BaseProtocol) d = dict() d[proto] = 1 assert proto in d assert d[proto] == 1 assert not proto proto.start() assert proto def test_status(): peer, proto, chain, cb_data, cb = setup() genesis = head = chain.chain.get_descendants(chain.chain.get_block_by_number(0))[-1] # test status proto.send_status( chain_difficulty=chain.chain.get_score(head), chain_head_hash=head.hash, genesis_hash=genesis.hash ) packet = peer.packets.pop() proto.receive_status_callbacks.append(cb) proto._receive_status(packet) _p, _d = cb_data.pop() assert _p == proto assert isinstance(_d, dict) assert _d['chain_difficulty'] == chain.chain.get_score(head) print(_d) assert _d['chain_head_hash'] == head.hash assert _d['genesis_hash'] == genesis.hash assert 'eth_version' in _d assert 'network_id' in _d def test_blocks(): peer, proto, chain, cb_data, cb = setup() # test blocks chain.mine(number_of_blocks=2) assert chain.block.number == 3 # monkey patch to make "blocks" attribute available chain.blocks = chain.chain.get_descendants(chain.chain.get_block_by_number(0)) proto.send_blockbodies(*chain.blocks) packet = peer.packets.pop() assert len(rlp.decode(packet.payload)) == 3 def list_cb(proto, blocks): # different cb, as we expect a list of blocks cb_data.append((proto, blocks)) proto.receive_blockbodies_callbacks.append(list_cb) proto._receive_blockbodies(packet) _p, blocks = cb_data.pop() assert isinstance(blocks, tuple) for block in blocks: assert isinstance(block, TransientBlockBody) assert isinstance(block.transactions, tuple) assert isinstance(block.uncles, tuple) # assert that transactions and uncles have not been decoded assert len(block.transactions) == 0 assert len(block.uncles) == 0 # newblock approximate_difficulty = chain.blocks[-1].difficulty * 3 proto.send_newblock(block=chain.blocks[-1], chain_difficulty=approximate_difficulty) packet = peer.packets.pop() proto.receive_newblock_callbacks.append(cb) proto._receive_newblock(packet) _p, _d = cb_data.pop() assert 'block' in _d assert 'chain_difficulty' in _d assert _d['chain_difficulty'] == approximate_difficulty assert _d['block'].header == chain.blocks[-1].header assert isinstance(_d['block'].transactions, tuple) assert isinstance(_d['block'].uncles, tuple) # assert that transactions and uncles have not been decoded assert len(_d['block'].transactions) == 0 assert len(_d['block'].uncles) == 0
the-stack_0_4264
"""Provide the Message class.""" from typing import TYPE_CHECKING, Any, Dict from ...const import API_PATH from .base import RedditBase from .mixins import FullnameMixin, InboxableMixin, ReplyableMixin from .redditor import Redditor from .subreddit import Subreddit if TYPE_CHECKING: # pragma: no cover from ... import Reddit class Message(InboxableMixin, ReplyableMixin, FullnameMixin, RedditBase): """A class for private messages. **Typical Attributes** This table describes attributes that typically belong to objects of this class. Since attributes are dynamically provided (see :ref:`determine-available-attributes-of-an-object`), there is not a guarantee that these attributes will always be present, nor is this list necessarily complete. ======================= ============================================================ Attribute Description ======================= ============================================================ ``author`` Provides an instance of :class:`.Redditor`. ``body`` The body of the message, as Markdown. ``body_html`` The body of the message, as HTML. ``created_utc`` Time the message was created, represented in `Unix Time`_. ``dest`` Provides an instance of :class:`.Redditor`. The recipient of the message. ``id`` The ID of the message. ``name`` The full ID of the message, prefixed with ``t4_``. ``subject`` The subject of the message. ``was_comment`` Whether or not the message was a comment reply. ======================= ============================================================ .. _Unix Time: https://en.wikipedia.org/wiki/Unix_time """ STR_FIELD = "id" @classmethod def parse(cls, data: Dict[str, Any], reddit: "Reddit"): """Return an instance of Message or SubredditMessage from ``data``. :param data: The structured data. :param reddit: An instance of :class:`.Reddit`. """ if data["author"]: data["author"] = Redditor(reddit, data["author"]) if data["dest"].startswith("#"): data["dest"] = Subreddit(reddit, data["dest"][1:]) else: data["dest"] = Redditor(reddit, data["dest"]) if data["replies"]: replies = data["replies"] data["replies"] = reddit._objector.objectify(replies["data"]["children"]) else: data["replies"] = [] if data["subreddit"]: data["subreddit"] = Subreddit(reddit, data["subreddit"]) return SubredditMessage(reddit, _data=data) return cls(reddit, _data=data) @property def _kind(self) -> str: """Return the class's kind.""" return self._reddit.config.kinds["message"] def __init__(self, reddit: "Reddit", _data: Dict[str, Any]): """Construct an instance of the Message object.""" super().__init__(reddit, _data=_data) self._fetched = True def delete(self): """Delete the message. .. note:: Reddit does not return an indication of whether or not the message was successfully deleted. For example, to delete the most recent message in your inbox: .. code-block:: python next(reddit.inbox.all()).delete() """ self._reddit.post(API_PATH["delete_message"], data={"id": self.fullname}) class SubredditMessage(Message): """A class for messages to a subreddit. **Typical Attributes** This table describes attributes that typically belong to objects of this class. Since attributes are dynamically provided (see :ref:`determine-available-attributes-of-an-object`), there is not a guarantee that these attributes will always be present, nor is this list necessarily complete. 
======================= ============================================================ Attribute Description ======================= ============================================================ ``author`` Provides an instance of :class:`.Redditor`. ``body`` The body of the message, as Markdown. ``body_html`` The body of the message, as HTML. ``created_utc`` Time the message was created, represented in `Unix Time`_. ``dest`` Provides an instance of :class:`.Redditor`. The recipient of the message. ``id`` The ID of the message. ``name`` The full ID of the message, prefixed with ``t4_``. ``subject`` The subject of the message. ``subreddit`` If the message was sent from a subreddit, provides an instance of :class:`.Subreddit`. ``was_comment`` Whether or not the message was a comment reply. ======================= ============================================================ .. _Unix Time: https://en.wikipedia.org/wiki/Unix_time """ def mute(self): """Mute the sender of this SubredditMessage. For example, to mute the sender of the first SubredditMessage in the authenticated users' inbox: .. code-block:: python from praw.models import SubredditMessage msg = next( message for message in reddit.inbox.all() if isinstance(message, SubredditMessage) ) msg.mute() """ self._reddit.post(API_PATH["mute_sender"], data={"id": self.fullname}) def unmute(self): """Unmute the sender of this SubredditMessage. For example, to unmute the sender of the first SubredditMessage in the authenticated users' inbox: .. code-block:: python from praw.models import SubredditMessage msg = next( message for message in reddit.inbox.all() if isinstance(message, SubredditMessage) ) msg.unmute() """ self._reddit.post(API_PATH["unmute_sender"], data={"id": self.fullname})
the-stack_0_4265
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Cmor(AutotoolsPackage): """Climate Model Output Rewriter is used to produce CF-compliant netCDF files. The structure of the files created by the library and the metadata they contain fulfill the requirements of many of the climate community's standard model experiments.""" homepage = "http://cmor.llnl.gov" url = "https://github.com/PCMDI/cmor/archive/3.1.2.tar.gz" version('3.3.0', 'cfdeeddab1aedb823e26ec38723bd67e') version('3.2.0', 'b48105105d4261012c19cd65e89ff7a6') version('3.1.2', '72f7227159c901e4bcf80d2c73a8ce77') variant('fortran', default=True, description='Enable Fortran API') variant('python', default=False, description='Enable PYTHON support') depends_on('uuid') depends_on('netcdf') depends_on('udunits2') depends_on('hdf5@:1.8.19') extends('python', when='+python') depends_on('python@:2.8', when='+python') depends_on('py-numpy', type=('build', 'run'), when='+python') @run_before('configure') def validate(self): if '+fortran' in self.spec and not self.compiler.fc: msg = 'cannot build a fortran variant without a fortran compiler' raise RuntimeError(msg) def configure_args(self): extra_args = ['--disable-debug'] if '+fortran' in self.spec: extra_args.append('--enable-fortran') else: extra_args.append('--disable-fortran') return extra_args def install(self, spec, prefix): make('install') if '+python' in spec: setup_py('install', '--prefix=' + prefix)
the-stack_0_4266
'''
https://blog.csdn.net/xuzhexing/article/details/90729390
https://blog.csdn.net/weixin_44580210/article/details/90314878
Particle filter localization can be more accurate than using the raw observations alone.
Steps:
1. Initialization: model the motion state with a large number of particles,
   uniformly distributed over the whole workspace.
2. Prediction: push every particle through the state transition (motion) equation
   to obtain the predicted particles; this covers the particle's velocity, yaw rate
   and x/y position, i.e. a prediction over the full state.
3. Correction: score each predicted particle, here by the distance between the next
   (noisy) observation and the predicted particle: the shorter the distance, the
   larger the particle's weight, with a Gaussian mapping distance to weight.
4. Resampling: normalize all particle weights and select particles, keeping the
   high-weight particles but also a small share of low-weight ones. Concrete methods:
   1. multinomial resampling  2. residual resampling  3. stratified resampling
   4. systematic resampling
   Resampling introduces a new problem: particles with large weights get many
   offspring while low-weight particles get few or none, so the diversity of the
   resampled particle set shrinks and it no longer approximates the posterior
   density well. There are several remedies; the simplest is to add enough
   particles, which usually makes the computation blow up. Other methods can be
   found in the literature and are not covered here.
5. Update: replace the old particles with the resampled ones and use the mean of
   the particle positions as the particle-filter result, then repeat from step 2.
'''
import numpy as np
import math
import matplotlib.pyplot as plt  # fixed: was imported as "plgt" but used as "plt" below

'''
Assumptions:
measurement standard deviation of the velocity: 0.5
measurement standard deviation of the yaw rate: 5 degrees, i.e. 0.087 rad
sensor error when measuring an RF landmark: 0.5 m
'''


def guassian_noise(sigma):  # sigma is the standard deviation here
    y = np.random.randn() * sigma
    return y


# note: these are drawn once at import time, so they act as constant biases
v_error = guassian_noise(0.5)
w_error = guassian_noise(0.087)
dist_error = guassian_noise(0.5)

RANGE_DITECT = 10  # maximum detection range
NP = 200
NTh = NP / 2.0  # Number of effective particles below which re-sampling is triggered
T_max = 100.0
dt = 0.1
L = 2.5  # vehicle length


# Simplest possible motion model: from the current state [x y yaw]' and the input
# [v, yaw_rate], compute the state at the next time step.
def motion_model(x, u):
    B = np.array([[dt * math.cos(x[2, 0]), 0],
                  [dt * math.sin(x[2, 0]), 0],
                  [0.0, dt]])
    x = x + B @ u
    return x


def dead_reckoning(x_true, u):
    u[0] += v_error  # add Gaussian noise to the velocity and yaw rate
    u[1] += w_error
    return motion_model(x_true, u)


def gauss_likelihood(x, sigma):
    p = 1.0 / math.sqrt(2.0 * math.pi * sigma ** 2) * \
        math.exp(-x ** 2 / (2 * sigma ** 2))
    return p


# From the previous true state and the commanded velocity/yaw rate, produce the
# next true state, the observations of the RF landmarks, and the noisy input
# measurement.
def observation(x_true, u, rf_id):
    x_true_real = motion_model(x_true, u)  # next true state from the motion model
    z = np.zeros((0, 3))  # 0x3 array so observations can be stacked below
    for i in range(len(rf_id[:, 0])):
        dx = x_true_real[0, 0] - rf_id[i, 0]  # measure from the updated true pose
        dy = x_true_real[1, 0] - rf_id[i, 1]
        dist = math.hypot(dx, dy)
        if dist < RANGE_DITECT:
            dist += dist_error  # add noise to model the sensor error
            zi = np.array([[dist, rf_id[i, 0], rf_id[i, 1]]])
            z = np.vstack((z, zi))  # fixed: np.vstack expects a tuple
    ud = np.zeros((2, 1))  # fixed: float array (an int array truncated the noisy values)
    ud[0, 0] = u[0, 0] + v_error
    ud[1, 0] = u[1, 0] + w_error
    return x_true_real, z, ud


def re_sampling(px, pw):
    N_eff = 1.0 / (pw.dot(pw.T))[0, 0]  # effective particle number: 1 / (sum of squared weights)
    if N_eff < NTh:  # resample only when the effective particle number gets too small
        w_cum = np.cumsum(pw)  # cumulative weights, e.g. [1, 3, 5] -> [1, 4, 9]: roulette-wheel style
        base = np.arange(0.0, 1.0, 1 / NP)  # values from 0.0 to 1.0 with step 1/NP
        re_sample_id = base + np.random.uniform(0, 1 / NP)  # add a random offset -> systematic resampling
        indexes = []
        ind = 0
        for ip in range(NP):
            while re_sample_id[ip] > w_cum[ind]:
                ind += 1
            indexes.append(ind)  # store the resampled indices
        px = px[:, indexes]
        pw = np.zeros((1, NP)) + 1.0 / NP  # re-initialize the weights
    return px, pw


# Inputs: particle set, weights, RF landmark observations, and the noisy
# velocity/yaw-rate measurement.
def pf_localization(px, pw, z, ud):
    # Prediction: push every particle through the motion model.
    for i in range(NP):
        x_pf_tmp = px[:, i:i + 1]  # fixed: keep a (3, 1) column (x y yaw) so motion_model can index [2, 0]
        w_tmp = pw[0, i]  # weight of this particle
        x_pf_tmp = motion_model(x_pf_tmp, ud)  # predict the particle's next state from the measured input
        # Update the particle weight from the difference between the predicted
        # landmark distance and the measured landmark distance.
        for j in range(len(z[:, 0])):  # fixed: iterate over observations (rows), not columns
            dx = x_pf_tmp[0, 0] - z[j, 1]
            dy = x_pf_tmp[1, 0] - z[j, 2]
            pre_dist = math.hypot(dx, dy)
            dz = pre_dist - z[j, 0]
            w_tmp *= gauss_likelihood(dz, math.sqrt(0.2 * 0.2))  # Gaussian of predicted vs. measured distance
        px[:, i] = x_pf_tmp[:, 0]  # update the particle state
        pw[0, i] = w_tmp  # update the particle weight
    # normalize the weights
    pw = pw / pw.sum()
    px, pw = re_sampling(px, pw)  # resample
    x_pf = px @ pw.T  # fixed: this line had been mangled into an e-mail-like token; the estimate is the weighted particle mean
    # original author's note: resampling is done before computing the estimate
    # here, which differs from the reference implementation
    return px, pw, x_pf


def main():
    print(__file__ + " start!!")
    time = 0.0
    # RF_ID positions [x, y], standing in for landmarks with known positions
    rf_id = np.array([[10.0, 0.0],
                      [10.0, 10.0],
                      [0.0, 15.0],
                      [-5.0, 20.0]])
    # State Vector [x y yaw]'
    x_pf = np.zeros((3, 1))
    x_true = np.zeros((3, 1))  # 3x1

    # initialize the particles and their weights
    px = np.zeros((3, NP))  # particle store: one row of particles for each of x, y, yaw
    pw = np.zeros((1, NP)) + 1.0 / NP  # particle weights, uniform at the start
    # x_dr = x_true  # Dead reckoning

    # history
    h_true = np.array([[x_true[0, 0], x_true[1, 0]]])  # stores the true x, y
    # h_dead_reckoning = np.array([[x_true[0, 0], x_true[1, 0]]])  # stores the dead-reckoning estimate
    h_pf = np.array([[x_pf[0, 0], x_pf[1, 0]]])  # stores the particle-filter estimate
    v = 1.0  # m/s
    yaw_rate = 0.1  # rad/s
    while time < T_max:
        time += dt
        u = np.array([[v, yaw_rate]]).T  # input as a (2, 1) column vector
        x_true, z, ud = observation(x_true, u, rf_id)  # fixed: keep advancing the true state
        # x_dr = dead_reckoning(x_true, ud)  # dead reckoning from the previous true value (unused here)
        px, pw, x_pf = pf_localization(px, pw, z, ud)
        h_true = np.vstack((h_true, np.array([[x_true[0, 0], x_true[1, 0]]])))  # store the true state
        h_pf = np.vstack((h_pf, np.array([[x_pf[0, 0], x_pf[1, 0]]])))

        plt.cla()
        # for stopping simulation with the esc key.
        plt.gcf().canvas.mpl_connect(
            'key_release_event',
            lambda event: [exit(0) if event.key == 'escape' else None])
        plt.plot(h_true[:, 0], h_true[:, 1], '-g')
        plt.plot(h_pf[:, 0], h_pf[:, 1], '-b')
        plt.plot(rf_id[:, 0], rf_id[:, 1], '*r')
        for i in range(len(z[:, 0])):
            plt.plot([x_true[0, 0], z[i, 1]], [x_true[1, 0], z[i, 2]], "-k")
        plt.axis("equal")
        plt.grid(True)
        plt.pause(0.001)


if __name__ == '__main__':
    main()
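
# The header comment lists several resampling schemes; re_sampling() above
# implements systematic (low-variance) resampling. For comparison, a minimal
# multinomial resampling sketch (not used by the code above) draws the particle
# indices independently according to the normalized weights:
def multinomial_resampling(px, pw):
    n = px.shape[1]
    indexes = np.random.choice(n, size=n, p=pw.flatten())  # pw must sum to 1
    px = px[:, indexes]
    pw = np.zeros((1, n)) + 1.0 / n  # reset to uniform weights
    return px, pw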
the-stack_0_4270
#!/usr/bin/env python """Package: mininet Test creation and pings for topologies with link and/or CPU options.""" import unittest import sys from functools import partial from mininet.net import Mininet from mininet.node import OVSSwitch, UserSwitch, IVSSwitch from mininet.node import CPULimitedHost from mininet.link import TCLink from mininet.topo import Topo from mininet.log import setLogLevel from mininet.util import quietRun from mininet.clean import cleanup # Number of hosts for each test N = 2 class SingleSwitchOptionsTopo(Topo): "Single switch connected to n hosts." def __init__(self, n=2, hopts=None, lopts=None): if not hopts: hopts = {} if not lopts: lopts = {} Topo.__init__(self, hopts=hopts, lopts=lopts) switch = self.addSwitch('s1') for h in range(n): host = self.addHost('h%s' % (h + 1)) self.addLink(host, switch) # Tell pylint not to complain about calls to other class # pylint: disable=E1101 class testOptionsTopoCommon( object ): """Verify ability to create networks with host and link options (common code).""" switchClass = None # overridden in subclasses @staticmethod def tearDown(): "Clean up if necessary" if sys.exc_info != ( None, None, None ): cleanup() def runOptionsTopoTest( self, n, msg, hopts=None, lopts=None ): "Generic topology-with-options test runner." mn = Mininet( topo=SingleSwitchOptionsTopo( n=n, hopts=hopts, lopts=lopts ), host=CPULimitedHost, link=TCLink, switch=self.switchClass, waitConnected=True ) dropped = mn.run( mn.ping ) hoptsStr = ', '.join( '%s: %s' % ( opt, value ) for opt, value in hopts.items() ) loptsStr = ', '.join( '%s: %s' % ( opt, value ) for opt, value in lopts.items() ) msg += ( '%s%% of pings were dropped during mininet.ping().\n' 'Topo = SingleSwitchTopo, %s hosts\n' 'hopts = %s\n' 'lopts = %s\n' 'host = CPULimitedHost\n' 'link = TCLink\n' 'Switch = %s\n' % ( dropped, n, hoptsStr, loptsStr, self.switchClass ) ) self.assertEqual( dropped, 0, msg=msg ) def assertWithinTolerance( self, measured, expected, tolerance_frac, msg ): """Check that a given value is within a tolerance of expected tolerance_frac: less-than-1.0 value; 0.8 would yield 20% tolerance. """ upperBound = ( float( expected ) + ( 1 - tolerance_frac ) * float( expected ) ) lowerBound = float( expected ) * tolerance_frac info = ( 'measured value is out of bounds\n' 'expected value: %s\n' 'measured value: %s\n' 'failure tolerance: %s\n' 'upper bound: %s\n' 'lower bound: %s\n' % ( expected, measured, tolerance_frac, upperBound, lowerBound ) ) msg += info self.assertGreaterEqual( float( measured ), lowerBound, msg=msg ) self.assertLessEqual( float( measured ), upperBound, msg=msg ) def testCPULimits( self ): "Verify topology creation with CPU limits set for both schedulers." 
CPU_FRACTION = 0.1 CPU_TOLERANCE = 0.8 # CPU fraction below which test should fail hopts = { 'cpu': CPU_FRACTION } #self.runOptionsTopoTest( N, hopts=hopts ) mn = Mininet( SingleSwitchOptionsTopo( n=N, hopts=hopts ), host=CPULimitedHost, switch=self.switchClass, waitConnected=True ) mn.start() results = mn.runCpuLimitTest( cpu=CPU_FRACTION ) mn.stop() hostUsage = '\n'.join( 'h%s: %s' % ( n + 1, results[ (n - 1) * 5 : (n * 5) - 1 ] ) for n in range( N ) ) hoptsStr = ', '.join( '%s: %s' % ( opt, value ) for opt, value in hopts.items() ) msg = ( '\nTesting cpu limited to %d%% of cpu per host\n' 'cpu usage percent per host:\n%s\n' 'Topo = SingleSwitchTopo, %s hosts\n' 'hopts = %s\n' 'host = CPULimitedHost\n' 'Switch = %s\n' % ( CPU_FRACTION * 100, hostUsage, N, hoptsStr, self.switchClass ) ) for pct in results: #divide cpu by 100 to convert from percentage to fraction self.assertWithinTolerance( pct/100, CPU_FRACTION, CPU_TOLERANCE, msg ) def testLinkBandwidth( self ): "Verify that link bandwidths are accurate within a bound." if self.switchClass is UserSwitch: self.skipTest( 'UserSwitch has very poor performance -' ' skipping for now' ) BW = 5 # Mbps BW_TOLERANCE = 0.8 # BW fraction below which test should fail # Verify ability to create limited-link topo first; lopts = { 'bw': BW, 'use_htb': True } # Also verify correctness of limit limitng within a bound. mn = Mininet( SingleSwitchOptionsTopo( n=N, lopts=lopts ), link=TCLink, switch=self.switchClass, waitConnected=True ) bw_strs = mn.run( mn.iperf, fmt='m' ) loptsStr = ', '.join( '%s: %s' % ( opt, value ) for opt, value in lopts.items() ) msg = ( '\nTesting link bandwidth limited to %d Mbps per link\n' 'iperf results[ client, server ]: %s\n' 'Topo = SingleSwitchTopo, %s hosts\n' 'Link = TCLink\n' 'lopts = %s\n' 'host = default\n' 'switch = %s\n' % ( BW, bw_strs, N, loptsStr, self.switchClass ) ) # On the client side, iperf doesn't wait for ACKs - it simply # reports how long it took to fill up the TCP send buffer. # As long as the kernel doesn't wait a long time before # delivering bytes to the iperf server, its reported data rate # should be close to the actual receive rate. serverRate, _clientRate = bw_strs bw = float( serverRate.split(' ')[0] ) self.assertWithinTolerance( bw, BW, BW_TOLERANCE, msg ) def testLinkDelay( self ): "Verify that link delays are accurate within a bound." 
DELAY_MS = 15 DELAY_TOLERANCE = 0.8 # Delay fraction below which test should fail REPS = 3 lopts = { 'delay': '%sms' % DELAY_MS, 'use_htb': True } mn = Mininet( SingleSwitchOptionsTopo( n=N, lopts=lopts ), link=TCLink, switch=self.switchClass, autoStaticArp=True, waitConnected=True ) mn.start() for _ in range( REPS ): ping_delays = mn.pingFull() mn.stop() test_outputs = ping_delays[0] # Ignore unused variables below # pylint: disable=W0612 node, dest, ping_outputs = test_outputs sent, received, rttmin, rttavg, rttmax, rttdev = ping_outputs pingFailMsg = 'sent %s pings, only received %s' % ( sent, received ) self.assertEqual( sent, received, msg=pingFailMsg ) # pylint: enable=W0612 loptsStr = ', '.join( '%s: %s' % ( opt, value ) for opt, value in lopts.items() ) msg = ( '\nTesting Link Delay of %s ms\n' 'ping results across 4 links:\n' '(Sent, Received, rttmin, rttavg, rttmax, rttdev)\n' '%s\n' 'Topo = SingleSwitchTopo, %s hosts\n' 'Link = TCLink\n' 'lopts = %s\n' 'host = default' 'switch = %s\n' % ( DELAY_MS, ping_outputs, N, loptsStr, self.switchClass ) ) for rttval in [rttmin, rttavg, rttmax]: # Multiply delay by 4 to cover there & back on two links self.assertWithinTolerance( rttval, DELAY_MS * 4.0, DELAY_TOLERANCE, msg ) def testLinkLoss( self ): "Verify that we see packet drops with a high configured loss rate." LOSS_PERCENT = 99 REPS = 1 lopts = { 'loss': LOSS_PERCENT, 'use_htb': True } mn = Mininet( topo=SingleSwitchOptionsTopo( n=N, lopts=lopts ), host=CPULimitedHost, link=TCLink, switch=self.switchClass, waitConnected=True ) # Drops are probabilistic, but the chance of no dropped packets is # 1 in 100 million with 4 hops for a link w/99% loss. dropped_total = 0 mn.start() for _ in range(REPS): dropped_total += mn.ping(timeout='1') mn.stop() loptsStr = ', '.join( '%s: %s' % ( opt, value ) for opt, value in lopts.items() ) msg = ( '\nTesting packet loss with %d%% loss rate\n' 'number of dropped pings during mininet.ping(): %s\n' 'expected number of dropped packets: 1\n' 'Topo = SingleSwitchTopo, %s hosts\n' 'Link = TCLink\n' 'lopts = %s\n' 'host = default\n' 'switch = %s\n' % ( LOSS_PERCENT, dropped_total, N, loptsStr, self.switchClass ) ) self.assertGreater( dropped_total, 0, msg ) def testMostOptions( self ): "Verify topology creation with most link options and CPU limits." lopts = { 'bw': 10, 'delay': '5ms', 'use_htb': True } hopts = { 'cpu': 0.5 / N } msg = '\nTesting many cpu and link options\n' self.runOptionsTopoTest( N, msg, hopts=hopts, lopts=lopts ) # pylint: enable=E1101 class testOptionsTopoOVSKernel( testOptionsTopoCommon, unittest.TestCase ): """Verify ability to create networks with host and link options (OVS kernel switch).""" longMessage = True switchClass = OVSSwitch @unittest.skip( 'Skipping OVS user switch test for now' ) class testOptionsTopoOVSUser( testOptionsTopoCommon, unittest.TestCase ): """Verify ability to create networks with host and link options (OVS user switch).""" longMessage = True switchClass = partial( OVSSwitch, datapath='user' ) @unittest.skipUnless( quietRun( 'which ivs-ctl' ), 'IVS is not installed' ) class testOptionsTopoIVS( testOptionsTopoCommon, unittest.TestCase ): "Verify ability to create networks with host and link options (IVS)." 
longMessage = True switchClass = IVSSwitch @unittest.skipUnless( quietRun( 'which ofprotocol' ), 'Reference user switch is not installed' ) class testOptionsTopoUserspace( testOptionsTopoCommon, unittest.TestCase ): """Verify ability to create networks with host and link options (UserSwitch).""" longMessage = True switchClass = UserSwitch if __name__ == '__main__': setLogLevel( 'warning' ) unittest.main()
the-stack_0_4271
import datetime import posixpath from django import forms from django.core import checks from django.core.files.base import File from django.core.files.images import ImageFile from django.core.files.storage import Storage, default_storage from django.core.files.utils import validate_file_name from django.db.models import signals from django.db.models.fields import Field from django.db.models.query_utils import DeferredAttribute from django.utils.translation import gettext_lazy as _ class FieldFile(File): def __init__(self, instance, field, name): super().__init__(None, name) self.instance = instance self.field = field self.storage = field.storage self._committed = True def __eq__(self, other): # Older code may be expecting FileField values to be simple strings. # By overriding the == operator, it can remain backwards compatibility. if hasattr(other, "name"): return self.name == other.name return self.name == other def __hash__(self): return hash(self.name) # The standard File contains most of the necessary properties, but # FieldFiles can be instantiated without a name, so that needs to # be checked for here. def _require_file(self): if not self: raise ValueError( "The '%s' attribute has no file associated with it." % self.field.name ) def _get_file(self): self._require_file() if getattr(self, "_file", None) is None: self._file = self.storage.open(self.name, "rb") return self._file def _set_file(self, file): self._file = file def _del_file(self): del self._file file = property(_get_file, _set_file, _del_file) @property def path(self): self._require_file() return self.storage.path(self.name) @property def url(self): self._require_file() return self.storage.url(self.name) @property def size(self): self._require_file() if not self._committed: return self.file.size return self.storage.size(self.name) def open(self, mode="rb"): self._require_file() if getattr(self, "_file", None) is None: self.file = self.storage.open(self.name, mode) else: self.file.open(mode) return self # open() doesn't alter the file's contents, but it does reset the pointer open.alters_data = True # In addition to the standard File API, FieldFiles have extra methods # to further manipulate the underlying file, as well as update the # associated model instance. def save(self, name, content, save=True): name = self.field.generate_filename(self.instance, name) self.name = self.storage.save(name, content, max_length=self.field.max_length) setattr(self.instance, self.field.attname, self.name) self._committed = True # Save the object because it has changed, unless save is False if save: self.instance.save() save.alters_data = True def delete(self, save=True): if not self: return # Only close the file if it's already open, which we know by the # presence of self._file if hasattr(self, "_file"): self.close() del self.file self.storage.delete(self.name) self.name = None setattr(self.instance, self.field.attname, self.name) self._committed = False if save: self.instance.save() delete.alters_data = True @property def closed(self): file = getattr(self, "_file", None) return file is None or file.closed def close(self): file = getattr(self, "_file", None) if file is not None: file.close() def __getstate__(self): # FieldFile needs access to its associated model field, an instance and # the file's name. Everything else will be restored later, by # FileDescriptor below. 
return { "name": self.name, "closed": False, "_committed": True, "_file": None, "instance": self.instance, "field": self.field, } def __setstate__(self, state): self.__dict__.update(state) self.storage = self.field.storage class FileDescriptor(DeferredAttribute): """ The descriptor for the file attribute on the model instance. Return a FieldFile when accessed so you can write code like:: >>> from myapp.models import MyModel >>> instance = MyModel.objects.get(pk=1) >>> instance.file.size Assign a file object on assignment so you can do:: >>> with open('/path/to/hello.world') as f: ... instance.file = File(f) """ def __get__(self, instance, cls=None): if instance is None: return self # This is slightly complicated, so worth an explanation. # instance.file`needs to ultimately return some instance of `File`, # probably a subclass. Additionally, this returned object needs to have # the FieldFile API so that users can easily do things like # instance.file.path and have that delegated to the file storage engine. # Easy enough if we're strict about assignment in __set__, but if you # peek below you can see that we're not. So depending on the current # value of the field we have to dynamically construct some sort of # "thing" to return. # The instance dict contains whatever was originally assigned # in __set__. file = super().__get__(instance, cls) # If this value is a string (instance.file = "path/to/file") or None # then we simply wrap it with the appropriate attribute class according # to the file field. [This is FieldFile for FileFields and # ImageFieldFile for ImageFields; it's also conceivable that user # subclasses might also want to subclass the attribute class]. This # object understands how to convert a path to a file, and also how to # handle None. if isinstance(file, str) or file is None: attr = self.field.attr_class(instance, self.field, file) instance.__dict__[self.field.attname] = attr # Other types of files may be assigned as well, but they need to have # the FieldFile interface added to them. Thus, we wrap any other type of # File inside a FieldFile (well, the field's attr_class, which is # usually FieldFile). elif isinstance(file, File) and not isinstance(file, FieldFile): file_copy = self.field.attr_class(instance, self.field, file.name) file_copy.file = file file_copy._committed = False instance.__dict__[self.field.attname] = file_copy # Finally, because of the (some would say boneheaded) way pickle works, # the underlying FieldFile might not actually itself have an associated # file. So we need to reset the details of the FieldFile in those cases. elif isinstance(file, FieldFile) and not hasattr(file, "field"): file.instance = instance file.field = self.field file.storage = self.field.storage # Make sure that the instance is correct. elif isinstance(file, FieldFile) and instance is not file.instance: file.instance = instance # That was fun, wasn't it? return instance.__dict__[self.field.attname] def __set__(self, instance, value): instance.__dict__[self.field.attname] = value class FileField(Field): # The class to wrap instance attributes in. Accessing the file object off # the instance will always return an instance of attr_class. attr_class = FieldFile # The descriptor to use for accessing the attribute off of the class. 
descriptor_class = FileDescriptor description = _("File") def __init__( self, verbose_name=None, name=None, upload_to="", storage=None, **kwargs ): self._primary_key_set_explicitly = "primary_key" in kwargs self.storage = storage or default_storage if callable(self.storage): # Hold a reference to the callable for deconstruct(). self._storage_callable = self.storage self.storage = self.storage() if not isinstance(self.storage, Storage): raise TypeError( "%s.storage must be a subclass/instance of %s.%s" % ( self.__class__.__qualname__, Storage.__module__, Storage.__qualname__, ) ) self.upload_to = upload_to kwargs.setdefault("max_length", 100) super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_primary_key(), *self._check_upload_to(), ] def _check_primary_key(self): if self._primary_key_set_explicitly: return [ checks.Error( "'primary_key' is not a valid argument for a %s." % self.__class__.__name__, obj=self, id="fields.E201", ) ] else: return [] def _check_upload_to(self): if isinstance(self.upload_to, str) and self.upload_to.startswith("/"): return [ checks.Error( "%s's 'upload_to' argument must be a relative path, not an " "absolute path." % self.__class__.__name__, obj=self, id="fields.E202", hint="Remove the leading slash.", ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 100: del kwargs["max_length"] kwargs["upload_to"] = self.upload_to if self.storage is not default_storage: kwargs["storage"] = getattr(self, "_storage_callable", self.storage) return name, path, args, kwargs def get_internal_type(self): return "FileField" def get_prep_value(self, value): value = super().get_prep_value(value) # Need to convert File objects provided via a form to string for # database insertion. if value is None: return None return str(value) def pre_save(self, model_instance, add): file = super().pre_save(model_instance, add) if file and not file._committed: # Commit the file to storage prior to saving the model file.save(file.name, file.file, save=False) return file def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) setattr(cls, self.attname, self.descriptor_class(self)) def generate_filename(self, instance, filename): """ Apply (if callable) or prepend (if a string) upload_to to the filename, then delegate further processing of the name to the storage backend. Until the storage layer, all file paths are expected to be Unix style (with forward slashes). """ if callable(self.upload_to): filename = self.upload_to(instance, filename) else: dirname = datetime.datetime.now().strftime(str(self.upload_to)) filename = posixpath.join(dirname, filename) filename = validate_file_name(filename, allow_relative_path=True) return self.storage.generate_filename(filename) def save_form_data(self, instance, data): # Important: None means "no change", other false value means "clear" # This subtle distinction (rather than a more explicit marker) is # needed because we need to consume values that are also sane for a # regular (non Model-) Form to find in its cleaned_data dictionary. if data is not None: # This value will be converted to str and stored in the # database, so leaving False as-is is not acceptable. 
setattr(instance, self.name, data or "") def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.FileField, "max_length": self.max_length, **kwargs, } ) class ImageFileDescriptor(FileDescriptor): """ Just like the FileDescriptor, but for ImageFields. The only difference is assigning the width/height to the width_field/height_field, if appropriate. """ def __set__(self, instance, value): previous_file = instance.__dict__.get(self.field.attname) super().__set__(instance, value) # To prevent recalculating image dimensions when we are instantiating # an object from the database (bug #11084), only update dimensions if # the field had a value before this assignment. Since the default # value for FileField subclasses is an instance of field.attr_class, # previous_file will only be None when we are called from # Model.__init__(). The ImageField.update_dimension_fields method # hooked up to the post_init signal handles the Model.__init__() cases. # Assignment happening outside of Model.__init__() will trigger the # update right here. if previous_file is not None: self.field.update_dimension_fields(instance, force=True) class ImageFieldFile(ImageFile, FieldFile): def delete(self, save=True): # Clear the image dimensions cache if hasattr(self, "_dimensions_cache"): del self._dimensions_cache super().delete(save) class ImageField(FileField): attr_class = ImageFieldFile descriptor_class = ImageFileDescriptor description = _("Image") def __init__( self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs, ): self.width_field, self.height_field = width_field, height_field super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_image_library_installed(), ] def _check_image_library_installed(self): try: from PIL import Image # NOQA except ImportError: return [ checks.Error( "Cannot use ImageField because Pillow is not installed.", hint=( "Get Pillow at https://pypi.org/project/Pillow/ " 'or run command "python -m pip install Pillow".' ), obj=self, id="fields.E210", ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.width_field: kwargs["width_field"] = self.width_field if self.height_field: kwargs["height_field"] = self.height_field return name, path, args, kwargs def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) # Attach update_dimension_fields so that dimension fields declared # after their corresponding image field don't stay cleared by # Model.__init__, see bug #11196. # Only run post-initialization dimension update on non-abstract models if not cls._meta.abstract: signals.post_init.connect(self.update_dimension_fields, sender=cls) def update_dimension_fields(self, instance, force=False, *args, **kwargs): """ Update field's width and height fields, if defined. This method is hooked up to model's post_init signal to update dimensions after instantiating a model instance. However, dimensions won't be updated if the dimensions fields are already populated. This avoids unnecessary recalculation when loading an object from the database. Dimensions can be forced to update with force=True, which is how ImageFileDescriptor.__set__ calls this method. """ # Nothing to update if the field doesn't have dimension fields or if # the field is deferred. 
has_dimension_fields = self.width_field or self.height_field if not has_dimension_fields or self.attname not in instance.__dict__: return # getattr will call the ImageFileDescriptor's __get__ method, which # coerces the assigned value into an instance of self.attr_class # (ImageFieldFile in this case). file = getattr(instance, self.attname) # Nothing to update if we have no file and not being forced to update. if not file and not force: return dimension_fields_filled = not ( (self.width_field and not getattr(instance, self.width_field)) or (self.height_field and not getattr(instance, self.height_field)) ) # When both dimension fields have values, we are most likely loading # data from the database or updating an image field that already had # an image stored. In the first case, we don't want to update the # dimension fields because we are already getting their values from the # database. In the second case, we do want to update the dimensions # fields and will skip this return because force will be True since we # were called from ImageFileDescriptor.__set__. if dimension_fields_filled and not force: return # file should be an instance of ImageFieldFile or should be None. if file: width = file.width height = file.height else: # No file, so clear dimensions fields. width = None height = None # Update the width and height fields. if self.width_field: setattr(instance, self.width_field, width) if self.height_field: setattr(instance, self.height_field, height) def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.ImageField, **kwargs, } )
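
# Usage sketch (illustrative only -- the model and field names below are
# assumptions for the example, not part of this module). It shows how the
# descriptor machinery above surfaces on model instances:
#
#   class Document(models.Model):
#       attachment = models.FileField(upload_to="attachments/%Y/%m/")
#       photo = models.ImageField(
#           upload_to="photos/",
#           width_field="photo_width",
#           height_field="photo_height",
#       )
#       photo_width = models.PositiveIntegerField(null=True, blank=True)
#       photo_height = models.PositiveIntegerField(null=True, blank=True)
#
#   doc = Document.objects.get(pk=1)
#   doc.attachment.url    # FieldFile delegates to the storage backend
#   doc.photo_width       # kept in sync by ImageField.update_dimension_fields()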
the-stack_0_4273
#!/usr/bin/env python3 import argparse import ctypes import os import readline import socket import subprocess import sys import threading readline.get_history_length() # throw this away because we import readline for prompt stuff parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('-l', '--listen', type=str, dest='listen', default='0.0.0.0', help='address to bind and sniff packets') parser.description = """\ This is a Python program to scan a network for live hosts by spraying UDP traffic and inspecting responses. """ args = parser.parse_args() def main(): # make this work on windows too if os.name == 'nt': socket_protocol = socket.IPPROTO_IP else: socket_protocol = socket.IPPROTO_ICMP # set up raw socket and bind sniffer = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket_protocol) sniffer.bind((args.listen,0)) # we want to include headers sniffer.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1) # if windows explicitly set promiscuous mode if os.name == 'nt': sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON) # read a single packet data = sniffer.recv(65536) print(data) hexdump(data) # if windows explicitly set promiscuous mode if os.name == 'nt': sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF) def hexdump(src, length=16, sep='.'): """ https://gist.github.com/1mm0rt41PC/c340564823f283fe530b """ result = [] for i in range(0, len(src), length): subSrc = src[i:i + length] hexa = '' for h in range(0, len(subSrc)): if h == length / 2: hexa += ' ' h = subSrc[h] if not isinstance(h, int): h = ord(h) h = hex(h).replace('0x', '') if len(h) == 1: h = '0' + h hexa += h + ' ' hexa = hexa.strip(' ') text = '' for c in subSrc: if not isinstance(c, int): c = ord(c) if 0x20 <= c < 0x7F: text += chr(c) else: text += sep result.append(('%08X: %-' + str(length * (2 + 1) + 1) + 's |%s|') % (i, hexa, text)) print('\n'.join(result)) if __name__ == '__main__': main()
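
# Quick illustration of the hexdump() helper above (a sketch -- the byte
# string is arbitrary sample data, not captured traffic, and the hex column
# of the real output is padded to a fixed width):
#
#   >>> hexdump(b'ABCD0123')
#   00000000: 41 42 43 44 30 31 32 33                           |ABCD0123|
#
# Each output line shows the offset, the hex values of up to 16 bytes and a
# printable-ASCII rendering with non-printable bytes replaced by '.'.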
the-stack_0_4274
""" Single-subject data (two sessions) in native space ================================================== The example shows the analysis of an SPM dataset studying face perception. The analysis is performed in native space. Realignment parameters are provided with the input images, but those have not been resampled to a common space. The experimental paradigm is simple, with two conditions; viewing a face image or a scrambled face image, supposedly with the same low-level statistical properties, to find face-specific responses. For details on the data, please see: Henson, R.N., Goshen-Gottstein, Y., Ganel, T., Otten, L.J., Quayle, A., Rugg, M.D. Electrophysiological and haemodynamic correlates of face perception, recognition and priming. Cereb Cortex. 2003 Jul;13(7):793-805. http://www.dx.doi.org/10.1093/cercor/13.7.793 This example takes a lot of time because the input are lists of 3D images sampled in different positions (encoded by different affine functions). """ print(__doc__) ######################################################################### # Fetch the SPM multimodal_faces data. from nilearn.datasets import fetch_spm_multimodal_fmri subject_data = fetch_spm_multimodal_fmri() ######################################################################### # Specfiy timing and design matrix parameters. tr = 2. # repetition time, in seconds slice_time_ref = 0. # Sample at the beginning of each acquisition. drift_model = 'Cosine' # We use a discrete cosine transform to model signal drifts. high_pass = .01 # The cutoff for the drift model is 0.01 Hz. hrf_model = 'spm + derivative' # The hemodynamic response function is the SPM canonical one. ######################################################################### # Resample the images. # # This is achieved by the concat_imgs function of Nilearn. from nilearn.image import concat_imgs, resample_img, mean_img fmri_img = [concat_imgs(subject_data.func1, auto_resample=True), concat_imgs(subject_data.func2, auto_resample=True)] affine, shape = fmri_img[0].affine, fmri_img[0].shape print('Resampling the second image (this takes time)...') fmri_img[1] = resample_img(fmri_img[1], affine, shape[:3]) ######################################################################### # Let's create mean image for display purposes. mean_image = mean_img(fmri_img) ######################################################################### # Make the design matrices. import numpy as np import pandas as pd from nilearn.glm.first_level import make_first_level_design_matrix design_matrices = [] ######################################################################### # Loop over the two sessions. for idx, img in enumerate(fmri_img, start=1): # Build experimental paradigm n_scans = img.shape[-1] events = pd.read_table(subject_data['events{}'.format(idx)]) # Define the sampling times for the design matrix frame_times = np.arange(n_scans) * tr # Build design matrix with the reviously defined parameters design_matrix = make_first_level_design_matrix( frame_times, events, hrf_model=hrf_model, drift_model=drift_model, high_pass=high_pass, ) # put the design matrices in a list design_matrices.append(design_matrix) ######################################################################### # We can specify basic contrasts (to get beta maps). # We start by specifying canonical contrast that isolate design matrix columns. 
contrast_matrix = np.eye(design_matrix.shape[1]) basic_contrasts = dict([(column, contrast_matrix[i]) for i, column in enumerate(design_matrix.columns)]) ######################################################################### # We actually want more interesting contrasts. The simplest contrast # just makes the difference between the two main conditions. We # define the two opposite versions to run one-tailed t-tests. We also # define the effects of interest contrast, a 2-dimensional contrasts # spanning the two conditions. contrasts = { 'faces-scrambled': basic_contrasts['faces'] - basic_contrasts['scrambled'], 'scrambled-faces': -basic_contrasts['faces'] + basic_contrasts['scrambled'], 'effects_of_interest': np.vstack((basic_contrasts['faces'], basic_contrasts['scrambled'])) } ######################################################################### # Fit the GLM for the 2 sessions by specifying a FirstLevelModel and then # fitting it. from nilearn.glm.first_level import FirstLevelModel print('Fitting a GLM') fmri_glm = FirstLevelModel() fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices) ######################################################################### # Now we can compute contrast-related statistical maps (in z-scale), and plot # them. print('Computing contrasts') from nilearn import plotting # Iterate on contrasts for contrast_id, contrast_val in contrasts.items(): print("\tcontrast id: %s" % contrast_id) # compute the contrasts z_map = fmri_glm.compute_contrast( contrast_val, output_type='z_score') # plot the contrasts as soon as they're generated # the display is overlaid on the mean fMRI image # a threshold of 3.0 is used, more sophisticated choices are possible plotting.plot_stat_map( z_map, bg_img=mean_image, threshold=3.0, display_mode='z', cut_coords=3, black_bg=True, title=contrast_id) plotting.show() ######################################################################### # Based on the resulting maps we observe that the analysis results in # wide activity for the 'effects of interest' contrast, showing the # implications of large portions of the visual cortex in the # conditions. By contrast, the differential effect between "faces" and # "scrambled" involves sparser, more anterior and lateral regions. It # also displays some responses in the frontal lobe.
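
#########################################################################
# Optionally, the z-maps can be written to disk for later inspection. This is
# only a sketch: the output folder and file names below are arbitrary choices,
# not part of the original analysis.
import os

output_dir = 'results'
os.makedirs(output_dir, exist_ok=True)
for contrast_id, contrast_val in contrasts.items():
    z_map = fmri_glm.compute_contrast(contrast_val, output_type='z_score')
    z_map.to_filename(os.path.join(output_dir, '%s_z_map.nii.gz' % contrast_id))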
the-stack_0_4276
""" Exercício Python 113: Reescreva a função leiaInt() que fizemos no desafio 104, incluindo agora a possibilidade da digitação de um número de tipo inválido. Aproveite e crie também uma função leiaFloat() com a mesma funcionalidade. """ def leiaInt(mensagem): value = 0 while True: try: value = int(input(mensagem)) except KeyboardInterrupt: print('O usuario decidiu nao informar o numero') break except: print('Ocorreu um erro, certifique-se de que digitou um numero inteiro...') else: break return value def leiaFloat(mensagem): value = 0 while True: try: value = float(input(mensagem)) except KeyboardInterrupt: print('\n\nO usuario decidiu nao informar o numero') break except: print('Ocorreu um erro, certifique-se de que digitou um numero inteiro...') else: break return value # MainProgram try: number1 = leiaInt('insira um numero inteiro: ') number2 = leiaFloat('insira um numero real:') except: print('Erros localizados, da proxima insira os dados corretamente') finally: print(f'o numero int {number1} e o real {number2}')
the-stack_0_4277
#!/usr/bin/env python2.5 # # Copyright 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper classes which help converting a url to a list of SB expressions.""" import array import logging import re import string import urllib import urlparse import util class UrlParseError(Exception): pass def GenerateSafeChars(): """ Return a string containing all 'safe' characters that shouldn't be escaped for url encoding. This includes all printable characters except '#%' and whitespace characters. """ unfiltered_chars = string.digits + string.ascii_letters + string.punctuation filtered_list = [c for c in unfiltered_chars if c not in '%#'] return array.array('c', filtered_list).tostring() class ExpressionGenerator(object): """Class does the conversion url -> list of SafeBrowsing expressions. This class converts a given url into the list of all SafeBrowsing host-suffix, path-prefix expressions for that url. These are expressions that are on the SafeBrowsing lists. """ HEX = re.compile(r'^0x([a-fA-F0-9]+)$') OCT = re.compile(r'^0([0-7]+)$') DEC = re.compile(r'^(\d+)$') IP_WITH_TRAILING_SPACE = re.compile(r'^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) ') POSSIBLE_IP = re.compile(r'^(?i)((?:0x[0-9a-f]+|[0-9\\.])+)$') FIND_BAD_OCTAL_REGEXP = re.compile(r'(^|\.)0\d*[89]') # This regular expression parses the host and port from a hostname. Note: any # user and password are removed from the hostname. HOST_PORT_REGEXP = re.compile(r'^(?:.*@)?(?P<host>[^:]*)(:(?P<port>\d+))?$') SAFE_CHARS = GenerateSafeChars() # Dict that maps supported schemes to their default port number. DEFAULT_PORTS = {'http': '80', 'https': '443', 'ftp': '21'} def __init__(self, url): parse_exception = UrlParseError('failed to parse URL "%s"' % (url,)) canonical_url = ExpressionGenerator.CanonicalizeUrl(url) if not canonical_url: raise parse_exception # Each element is a list of host components used to build expressions. self._host_lists = [] # A list of paths used to build expressions. self._path_exprs = [] url_split = urlparse.urlsplit(canonical_url) canonical_host, canonical_path = url_split[1], url_split[2] self._MakeHostLists(canonical_host, parse_exception) if url_split[3]: # Include canonicalized path with query arguments self._path_exprs.append(canonical_path + '?' + url_split[3]) self._path_exprs.append(canonical_path) # Get the first three directory path components and create the 4 path # expressions starting at the root (/) and successively appending directory # path components, including the trailing slash. E.g.: # /a/b/c/d.html -> [/, /a/, /a/b/, /a/b/c/] path_parts = canonical_path.rstrip('/').lstrip('/').split('/')[:3] if canonical_path.count('/') < 4: # If the last component in not a directory we remove it. path_parts.pop() while path_parts: self._path_exprs.append('/' + '/'.join(path_parts) + '/') path_parts.pop() if canonical_path != '/': self._path_exprs.append('/') @staticmethod def CanonicalizeUrl(url): """Canonicalize the given URL for the SafeBrowsing protocol. Args: url: URL to canonicalize. 
Returns: A canonical URL or None if the URL could not be canonicalized. """ # Start by stripping off the fragment identifier. tmp_pos = url.find('#') if tmp_pos >= 0: url = url[0:tmp_pos] # Stripping off leading and trailing white spaces. url = url.lstrip().rstrip() # Remove any embedded tabs and CR/LF characters which aren't escaped. url = url.replace('\t', '').replace('\r', '').replace('\n', '') # Un-escape and re-escpae the URL just in case there are some encoded # characters in the url scheme for example. url = ExpressionGenerator._Escape(url) url_split = urlparse.urlsplit(url) if not url_split[0]: # URL had no scheme. In this case we assume it is http://. url = 'http://' + url url_split = urlparse.urlsplit(url) url_scheme = url_split[0].lower() if url_scheme not in ExpressionGenerator.DEFAULT_PORTS: return None # Unsupported scheme. # Note: applying HOST_PORT_REGEXP also removes any user and password. m = ExpressionGenerator.HOST_PORT_REGEXP.match(url_split[1]) if not m: return None host, port = m.group('host'), m.group('port') canonical_host = ExpressionGenerator.CanonicalizeHost(host) if not canonical_host: return None # Now that the host is canonicalized we add the port back if it's not the # default port for that url scheme. if port and port != ExpressionGenerator.DEFAULT_PORTS[url_scheme]: canonical_host += ':' + port canonical_path = ExpressionGenerator.CanonicalizePath(url_split[2]) # If the URL ends with ? we want to keep the ?. canonical_url = url_split[0] + '://' + canonical_host + canonical_path if url_split[3] != '' or url.endswith('?'): canonical_url += '?' + url_split[3] return canonical_url @staticmethod def CanonicalizePath(path): """Canonicalize the given path.""" if not path: return '/' # There are some cases where the path will not start with '/'. Example: # "ftp://host.com?q" -- the hostname is 'host.com' and the path '%3Fq'. # Browsers typically do prepend a leading slash to the path in this case, # we'll do the same. if path[0] != '/': path = '/' + path path = ExpressionGenerator._Escape(path) path_components = [] for path_component in path.split('/'): # If the path component is '..' we skip it and remove the preceding path # component if there are any. if path_component == '..': if len(path_components) > 0: path_components.pop() # We skip empty path components to remove successive slashes (i.e., # // -> /). Note: this means that the leading and trailing slash will # also be removed and need to be re-added afterwards. # # If the path component is '.' we also skip it (i.e., /./ -> /). elif path_component != '.' and path_component != '': path_components.append(path_component) # Put the path components back together and re-add the leading slash which # got stipped by removing empty path components. canonical_path = '/' + '/'.join(path_components) # If necessary we also re-add the trailing slash. if path.endswith('/') and not canonical_path.endswith('/'): canonical_path += '/' return canonical_path @staticmethod def CanonicalizeHost(host): """Canonicalize the given host. Returns None in case of an error.""" if not host: return None host = ExpressionGenerator._Escape(host.lower()) ip = ExpressionGenerator.CanonicalizeIp(host) if ip: # Host is an IP address. host = ip else: # Host is a normal hostname. # Skip trailing, leading and consecutive dots. 
host_split = [part for part in host.split('.') if part] if len(host_split) < 2: return None host = '.'.join(host_split) return host @staticmethod def CanonicalizeIp(host): """ Return a canonicalized IP if host can represent an IP and None otherwise. """ if len(host) <= 15: # The Windows resolver allows a 4-part dotted decimal IP address to have a # space followed by any old rubbish, so long as the total length of the # string doesn't get above 15 characters. So, "10.192.95.89 xy" is # resolved to 10.192.95.89. # If the string length is greater than 15 characters, # e.g. "10.192.95.89 xy.wildcard.example.com", it will be resolved through # DNS. m = ExpressionGenerator.IP_WITH_TRAILING_SPACE.match(host) if m: host = m.group(1) if not ExpressionGenerator.POSSIBLE_IP.match(host): return None # Basically we should parse octal if we can, but if there are illegal octal # numbers, i.e. 08 or 09, then we should just look at decimal and hex. allow_octal = not ExpressionGenerator.FIND_BAD_OCTAL_REGEXP.search(host) # Skip trailing, leading and consecutive dots. host_split = [part for part in host.split('.') if part] if len(host_split) > 4: return None ip = [] for i in xrange(len(host_split)): m = ExpressionGenerator.HEX.match(host_split[i]) if m: base = 16 else: m = ExpressionGenerator.OCT.match(host_split[i]) if m and allow_octal: base = 8 else: m = ExpressionGenerator.DEC.match(host_split[i]) if m: base = 10 else: return None n = long(m.group(1), base) if n > 255: if i < len(host_split) - 1: n &= 0xff ip.append(n) else: bytes = [] shift = 0 while n > 0 and len(bytes) < 4: bytes.append(n & 0xff) n >>= 8 if len(ip) + len(bytes) > 4: return None bytes.reverse() ip.extend(bytes) else: ip.append(n) while len(ip) < 4: ip.append(0) return '%u.%u.%u.%u' % tuple(ip) def Expressions(self): """ A generator of the possible expressions. """ for host_parts in self._host_lists: host = '.'.join(host_parts) for p in self._path_exprs: yield Expression(host, p) @staticmethod def _Escape(unescaped_str): """Fully unescape the given string, then re-escape once. Args: unescaped_str: string that should be escaped. Returns: Escaped string according to the SafeBrowsing protocol. """ unquoted = urllib.unquote(unescaped_str) while unquoted != unescaped_str: unescaped_str = unquoted unquoted = urllib.unquote(unquoted) return urllib.quote(unquoted, ExpressionGenerator.SAFE_CHARS) def _MakeHostLists(self, host, parse_exception): """ Canonicalize host and build self._host_lists. """ ip = ExpressionGenerator.CanonicalizeIp(host) if ip is not None: # Is an IP. self._host_lists.append([ip]) return # Is a hostname. # Skip trailing, leading and consecutive dots. host_split = [part for part in host.split('.') if part] if len(host_split) < 2: raise parse_exception start = len(host_split) - 5 stop = len(host_split) - 1 if start <= 0: start = 1 self._host_lists.append(host_split) for i in xrange(start, stop): self._host_lists.append(host_split[i:]) class Expression(object): """Class which represents a host-suffix, path-prefix expression.""" def __init__(self, host, path): self._host = host self._path = path self._value = host + path self._hash_value = util.GetHash256(self._value) def __str__(self): return self.Value() def __repr__(self): """ Not really a good repr. This is for debugging. """ return self.Value() def Value(self): return self._value def HashValue(self): return self._hash_value
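

if __name__ == '__main__':
  # Small self-contained demonstration of the URL -> expression conversion
  # implemented above. The URL is an arbitrary example, not taken from any
  # blacklist; the printed values are the host-suffix/path-prefix expressions
  # that would be looked up against the SafeBrowsing lists.
  demo_url = 'http://a.b.c.d.e.f.g/1.html?param=1'
  generator = ExpressionGenerator(demo_url)
  for expression in generator.Expressions():
    print(expression.Value())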
the-stack_0_4279
from .model import Model
from radar.models.geofence import Geofence
from radar.models.region import Region
from radar.models.place import Place


class RadarContext(Model):
    """Location context

    Parameters:
        live (bool)
        geofences (`list` of :class:`~radar.models.geofence.Geofence`)
        place (:class:`~radar.models.place.Place`, optional)
        country (:class:`~radar.models.region.Region`, optional)
        state (:class:`~radar.models.region.Region`, optional)
        dma (:class:`~radar.models.region.Region`, optional)
        postalCode (:class:`~radar.models.region.Region`, optional)
        fraud (FraudObject, optional)
    """

    OBJECT_NAME = "Context"
    _DISPLAY_ATTRIBUTES = (
        "live",
        "geofences",
        "place",
        "country",
        "state",
        "dma",
        "postalCode",
    )

    def __init__(self, radar, data={}):
        """Initialize a Radar Model instance

        Args:
            radar (:class:`~radar.RadarClient`): RadarClient for instance CRUD actions
            data (dict): raw data to initialize the model with
        """
        self._radar = radar
        self.raw_json = data

        for attribute, value in data.items():
            if attribute == "geofences":
                geofences = [Geofence(radar, geofence) for geofence in data[attribute]]
                setattr(self, attribute, geofences)
            elif attribute == "place":
                place = Place(radar, data[attribute])
                setattr(self, attribute, place)
            elif attribute in ["country", "state", "dma", "postalCode"]:
                region = Region(radar, data[attribute])
                setattr(self, attribute, region)
            else:
                setattr(self, attribute, value)
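
# Hypothetical construction sketch -- the payload shape and the `radar_client`
# object below are illustrative assumptions based on the constructor above,
# not taken from the Radar SDK documentation:
#
#   raw = {
#       "live": True,
#       "geofences": [{"description": "store"}],
#       "country": {"code": "US", "name": "United States"},
#   }
#   context = RadarContext(radar_client, raw)
#   context.live          # True
#   context.geofences[0]  # wrapped in a Geofence instance
#   context.country       # wrapped in a Region instance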
the-stack_0_4281
from pprint import pprint import yaml from tabulate import tabulate from funcy import project from wws.commands import utils class Rm: def __init__(self): super().__init__() def process(self, args): """ edits the warp database """ if args['debug']: pprint(args) with open(args['workspace_warp_database'],'r+') as f: data = yaml.load(f, Loader=yaml.FullLoader) if not data: data = [] remove_entries = dict() keep_entries = dict() remove_entries = [ d for d in data if any( [ a for a in args['alias'] if a.upper() in d['alias'].upper() ] ) ] keep_entries = [ d for d in data if not any( [ a for a in args['alias'] if a.upper() in d['alias'].upper() ] ) ] if not args['verbose']: data = [ project(d,['alias', 'local', 'remote' ]) for d in remove_entries] else: data = remove_entries if not data: print("Nothing to remove.") exit() print("Entries to remove:") print(tabulate(data, headers="keys", tablefmt = "psql")) rm = utils._confirm("Are you sure to remove these entries?") if rm: f.seek(0) f.truncate() yaml.dump(keep_entries, f, default_flow_style=False) print("Aliases were removed but data remain untouched. Please remove the listed source directories.")
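
# Sketch of the YAML database rows this command operates on. The field names
# (alias/local/remote) come from the code above; the example values and the
# exact CLI invocation are assumptions:
#
#   - alias: project-x
#     local: ~/code/project-x
#     remote: user@host:/srv/project-x
#
# Removing the alias 'project-x' would list the matching entry, ask for
# confirmation, and rewrite the file with only the remaining entries.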
the-stack_0_4283
#!/usr/bin/env python3 import asyncio import time from psnawp_api import psnawp from pypresence import Presence from asset_updater import add_game_icon from playstationpresence.lib.files import load_config, load_game_data, load_game_icons from playstationpresence.lib.notifiable import Notifiable from playstationpresence.lib.rpc_retry import rpc_retry from requests.exceptions import * from threading import Event class PlaystationPresence: def __init__(self): self.notifier = None self.rpc = None self.exit_event = Event() self.old_info: dict = {'onlineStatus': None, 'titleId': None} self.config: dict = load_config() self.supported_games: set[str] = load_game_data() self.game_icons: set[str] = load_game_icons() self.psapi = psnawp.PSNAWP(self.config['npsso']) self.psnid = self.config['PSNID'] self.initRpc() def initRpc(self): self.rpc = Presence(self.config['discordClientId'], pipe=0, loop=asyncio.new_event_loop()) self.rpc.connect() def quit(self): self.exit_event.set() if self.notifier is not None: self.notifier.visible = False self.notifier.stop() def notify(self, message): print(message) if self.notifier is not None: self.notifier.title = message self.notifier.notify(message, "playstationpresence") @rpc_retry def clearStatus(self): self.rpc.clear() self.notify(f"Status changed to Offline") @rpc_retry def updateStatus(self, show_time: bool, state: str, large_image: str, details: str): if show_time: start_time = int(time.time()) self.rpc.update(state=state, start=start_time, small_image="ps5_main", small_text=self.psnid, large_image=large_image, large_text=state, details=details) else: self.rpc.update(state=state, small_image="ps5_main", small_text=self.psnid, large_image=large_image, large_text=state, details=details) self.notify(f"Status changed to {state}") def processPresenceInfo(self, mainpresence: dict): if mainpresence is None: return # Read PSN API data onlineStatus: str = mainpresence['primaryPlatformInfo']['onlineStatus'] onlinePlatform: str = mainpresence['primaryPlatformInfo']['platform'] game_info: list[dict] = mainpresence.get('gameTitleInfoList', None) # Check online status if onlineStatus == "offline": if self.old_info['onlineStatus'] != onlineStatus: self.clearStatus() self.old_info = {'onlineStatus': onlineStatus, 'titleId': None} elif game_info == None: # Set home menu state if self.old_info['onlineStatus'] != "online" or self.old_info['titleId'] != None: self.updateStatus(False, "Home Menu", "ps5_main", f"Online on {onlinePlatform}") self.old_info = {'onlineStatus': onlineStatus, 'titleId': None} elif self.old_info['titleId'] != game_info[0]['npTitleId']: # New title id is different -> update # Read game data game: dict[str, str] = game_info[0] # large_icon logic if game['npTitleId'] in self.supported_games: large_icon = game['npTitleId'].lower() else: # Game not known self.notify("Game not in library, checking for icon") # Check if icon exists if game['npTitleId'] in self.game_icons: self.notify("Game icon found\CONSIDER PUSHING NEW DISCORD ASSETS") else: # Get icon add_game_icon(game['npTitleId'], game['npTitleIconUrl']) self.notify("Reloading icons") self.game_icons = load_game_icons() large_icon = "ps5_main" # Update status self.updateStatus(True, game['titleName'], large_icon, f"Playing on {game['launchPlatform']}") self.old_info = {'onlineStatus': onlineStatus, 'titleId': game['npTitleId']} def mainloop(self, notifier: Notifiable): if notifier is not None: self.notifier = notifier self.notifier.visible = True while not self.exit_event.is_set(): mainpresence: dict = 
None user_online_id = None try: user_online_id = self.psapi.user(online_id=self.psnid) mainpresence = user_online_id.get_presence() # Uncomment for debug info about currently running game #print(mainpresence) except (ConnectionError, HTTPError) as e: print("Error when trying to read presence") print(e) self.processPresenceInfo(mainpresence) # Adjust this to be higher if you get ratelimited self.exit_event.wait(30) self.clearStatus() self.rpc.close()
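
# Minimal usage sketch, assuming a valid config providing 'npsso', 'PSNID' and
# 'discordClientId'. Passing notifier=None runs the loop without a tray icon,
# which mainloop() and notify() both tolerate:
#
#   presence = PlaystationPresence()
#   try:
#       presence.mainloop(None)
#   except KeyboardInterrupt:
#       presence.quit()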
the-stack_0_4284
#! /usr/bin/env python """ based on this quickstart: from https://developers.google.com/google-apps/calendar/quickstart/python Don't forget to put CLIENT_SECRET_FILE in ~/.credentials Note: the above URL redirects to https://developers.google.com/calendar/quickstart/python which has a different sequence for get_credentials(). The one in this file still seems to work... TODO: test that it really does work and perhaps update to the newer version. Usage: The google calendar is the database. Calendar events are added as they are made known to the calendar owner (me). Members are encouraged to add this calendar to their calendar viewing apps so they can see who else will be in the cabin on any given night. The first word in the event 'summary' (the thing that shows up in your calendar view) shold be the member name. Append something in Camel-case to avoid name collisions e.g. 'BobB' and 'BobS'. Guests are indicated by a +N (separated by whitespace. N is the guest count). Around Thursday of each week, I assign rooms by inserting the room name into the 'description' in the calendar event. Then I run this script and, if the output looks OK, paste it into the communication to the members (Slack, email, whatever). Often, members have a room preference which I keep in an event in my personal calendar. I try to honor their preferences and follow other social norms such as not booking un-related men and women in the same bed/room but on popular nights, that might be unavoidable. As members pay their guest fees, I add a '$' (w/ whitespace) to the 'summary'. The '$' moves them from the 'deadbeat' list to the 'sponsor' list in the weekly communications. Customization: Obviously, you need to use your own google calendar. Replace ROOMS with the appropriate selection for your situation. DAYS_PEAK, GUEST_FEES_MID, and GUEST_FEES_PEAK may also need your attention. Member names are extracted from the calendar, so no need to do anything in this file, but you should probably examine fix_spelling() and add_guest_fees() since they implement rules that are specific to my cabin. I don't use f-strings because the raspberry pi that I sometimes run this on only has python 3.4 and I'm too lazy to install 3.7 """ import datetime import os import json USE_STR = """ --Show room usage in Lone Clone Ski Cabin-- Note: Enter guests as 'member +N' and, when paid, 'member $ +N' Usage: rooms [--counts] [--debug] [--nights] [--offline] [--peak] [--raw] [--shift=<S>] [--whosup] [--year=<Y>] rooms -h | --help rooms -v | --version Options: -h --help Show this screen. -c --counts show how many times each member has used each room -d --debug show stuff -n --nights show who slept where, each night -o --offline don't get the live calendar. Use a test data set -p --peak Show peak nights for this season, exlcuding Fri and Sat -r --raw show the raw calendar events -s --shift <S> move 'today' by integer number of days -v --version show the version -w --whosup show who's up in the next week -y --year <Y> year season starts [default: 2019] """ try: import httplib2 from googleapiclient import discovery from oauth2client import client from oauth2client import tools from oauth2client.file import Storage import docopt except ImportError: IMP_ERR_STR = '** Failed import! 
Type "workon rooms" and try again, Bob **' print('\n%s\n'%('*'*len(IMP_ERR_STR)), IMP_ERR_STR, '\n%s\n'%('*'*len(IMP_ERR_STR))) # If modifying these scopes, delete your previously saved credentials # at ~/.credentials/calendar-python-quickstart.json SCOPES = 'https://www.googleapis.com/auth/calendar.readonly' APPLICATION_NAME = 'Google Calendar API Python Quickstart' # why do I have 2 different client secret files? TODO CLIENT_SECRET_FILE = 'calendar-python-quickstart.json' CLIENT_SECRET_FILE_ANOTHER = 'client_secret.json' ROOMS = ('in-law', 'master', 'middle', 'bunk', 'loft',) # assignable rooms in the cabin """ DAYS_PEAK is a list of days-of-the-week or dates that guest fee is higher than not. The dates are specific to the Julian calendar of each season. The year index is the season *start* year. Note: Fri and Sat should always be the first 2 entries """ NIGHTS_PEAK = { '2016': ['Fri', 'Sat']+['12/%2d'%x for x in range(18, 32)]+['01/01', '01/02', '02/19',], #pylint: disable=C0326 '2017': ['Fri', 'Sat']+['12/%2d'%x for x in range(17, 32)]+['01/01', '02/18',], #pylint: disable=C0326 '2018': ['Fri', 'Sat']+['12/%2d'%x for x in range(16, 32)]+['01/01', '02/17',], #pylint: disable=C0326 '2019': ['Fri', 'Sat']+['12/%2d'%x for x in range(15, 32)]+['01/01', '02/16',], #pylint: disable=C0326 '2020': ['Fri', 'Sat']+['12/%2d'%x for x in range(20, 32)]+['01/01', '02/14',], #pylint: disable=C0326 } # "mid week" and "weekend/holiday" guest fee in dollars GUEST_FEE_MID = 30 GUEST_FEE_PEAK = 35 def get_credentials(opts): """Gets valid user credentials from storage. If nothing has been stored, or if the stored credentials are invalid, the OAuth2 flow is completed to obtain the new credentials. Returns: Credentials, the obtained credential. """ home_dir = os.path.expanduser('~') credential_dir = os.path.join(home_dir, '.credentials') if not os.path.exists(credential_dir): os.makedirs(credential_dir) credential_path = os.path.join(credential_dir, CLIENT_SECRET_FILE) if opts['--debug']: print('** using credentials at '+credential_path) with open(credential_path) as cred_file: cred_text = cred_file.read() print('\n'.join(cred_text.split(','))) store = Storage(credential_path) credentials = store.get() if not credentials or credentials.invalid: flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE_ANOTHER, SCOPES) flow.user_agent = APPLICATION_NAME # if flags: credentials = tools.run_flow(flow, store) #, flags) # else: # Needed only for compatibility with Python 2.6 # credentials = tools.run(flow, store) print('Storing credentials to ' + credential_path) # except, I'm not storing them? return credentials def get_events(cred, **kwargs): """ Wraps the service.events() call """ http = cred.authorize(httplib2.Http()) service = discovery.build('calendar', 'v3', http=http) # throws Warning and ImportError # print(f'service.events={dir(service.events)}') """ #pylint: disable=E1101 pylint thinks there is no events()... but there is """ #pylint: disable=W0105 cal_events = service.events().list(**kwargs).execute() #pylint: disable=E1101 return cal_events.get('items', []) def get_events_raw(credentials, opts): """ Grab the entire calendar for the season from Nov 29 to May 1 ctor the dicts with night, leave, summary, description keys. 
nightShort is added later by more_dates() """ day0 = datetime.datetime(*opts['season_start']).isoformat()+'Z' day1 = datetime.datetime(*opts['season_end']).isoformat()+'Z' events = get_events( credentials, timeMin=day0, timeMax=day1, singleEvents=True, orderBy='startTime', calendarId="primary") return events def events_to_raw_dates(events, opts): #pylint: disable=W0613 """ make a new list: dates_raw that has only the fields I care about: night, leave, member, and room """ dates_raw = [] for event in events: day_dict = {} day_dict['night'] = event['start'].get('dateTime', event['start'].get('date'))[:10] day_dict['leave'] = event['end'].get('dateTime', event['end'].get('date'))[:10] # summary is the member name, description has room assignment for k in (('summary', 'member',), ('description', 'where', ),): try: day_dict[k[1]] = event[k[0]].strip() except KeyError: day_dict[k[1]] = '' dates_raw += [day_dict] # dates_raw[] is a list of # {'night':'2016-12-15', 'summary':'Logan', 'where':'master', 'leave':'2016-12-16',} return dates_raw def expand_multi_nights(dates_raw): """ expand multi-night stays into individual nights """ dates_multi_night = [] for one_date in dates_raw: # add day of week one_date['date'] = datetime.datetime.strptime(one_date['night'], '%Y-%m-%d') nights = (datetime.datetime.strptime(one_date['leave'], '%Y-%m-%d').date() - one_date['date'].date()).days - 1 for i in range(nights): new_date = one_date.copy() new_date['date'] = datetime.datetime.strptime(one_date['night'], '%Y-%m-%d') \ + datetime.timedelta(days=i+1) dates_multi_night += [new_date] dates_raw += dates_multi_night def add_day_of_week(dates_raw): """ Use 'date of "2016-12-23" to make night_abrev of "Fri 12/23" """ for one_date in dates_raw: one_date['night_abrev'] = one_date['date'].strftime('%a %m/%d') dates_raw = dates_raw.sort(key=lambda x: x['date']) def fix_spelling(dates_raw): """ Common data entry errors: fix the dict and flag it for me to fix the google calendar """ for date in dates_raw: for field, wrong, right in [ ('where', 'inlaw', 'in-law',), ('member', 'Sarah', 'Sara',), ]: if wrong in date[field]: print('** spellcheck:', date) date[field] = date[field].replace(wrong, right) # in-law, not inlaw, e.g. 
if 'Glen ' in date['member']: # special treatment for missing n in Glenn print('** spellcheck:', date) date['member'] = date['summary'].replace('Glen', 'Glenn') # two n in Glenn return dates_raw def select_dates(dates_raw, opts, day0=None, day1=None): """ return a subset of the events from today+day0 to today+day1 None in day0 means begining of current ski season None in day1 means end of current ski season """ dt_today = datetime.datetime.utcnow() if opts['--shift']: dt_today += datetime.timedelta(days=int(opts['--shift'])) season_start = datetime.datetime(*opts['season_start']) season_end = datetime.datetime(*opts['season_end']) date0 = season_start if day0 is None else dt_today + datetime.timedelta(days=day0) date1 = season_end if day1 is None else dt_today + datetime.timedelta(days=day1) if opts['--debug']: print('select', date0.strftime('%a %m/%d'), date1.strftime('%a %m/%d')) return [e for e in dates_raw if bool(date0 <= e['date'] <= date1)] def debug_print_raw(dates_raw): """ Debugging aid formatted to copy into code """ print('** dates_raw') print('{'+ '},\n{'.join([', '.join( ["'%s':'%s'"%(n, e[n]) for n in ('night', 'leave', 'member', 'where')] ) for e in dates_raw]) +'}') def show_raw(dates_raw): """ Debugging aid formatted for humans """ print('') print('%10s %20s %-20s'%('', '', 'Raw Calendar',)+' '.join(['%10s'%r for r in ROOMS])) for date in dates_raw: print('%10s %-20s %-20s'%(date['night'], date['member'], date['where'].strip()) + ' '.join(['%10s'%date[room] for room in ROOMS])) def put_members_in_rooms(dates_raw): """ add ['middle']='Logan', ['bunk']='' etc so that all dates have all rooms as keys, w/ or w/o a member """ for date in dates_raw: for room in ROOMS: if room in date['where'].lower(): date[room] = gevent_to_member_name(date) # just the first name else: date[room] = '' def add_guest_fee(event, opts): """ add 'guest_fee' key to a dates_raw event 0 means no guest, negative means fee is OWED, positive means paid a '+ indicatees guests but not Z+1 (Sam is not charged). Enter "Z +1" to indicate not Sam (chargable) """ if '+' in event['member'] and 'Z+1' not in event['member']: event['guest_fee'] = GUEST_FEE_PEAK if any([x in event['night_abrev'] \ for x in NIGHTS_PEAK[opts['--year']]]) else GUEST_FEE_MID # remove the 'paid' indicator ('$') str_guest_count = event['member'].replace('$','') # look for the guest count after the '+' # we don't get here if 'Z+1' in the event so OK to split on '+' str_guest_count = str_guest_count.split('+')[-1].strip() try: guest_count = int(str_guest_count) except ValueError: print('** FAILED to convert guest count', event['member'], 'on', event['night_abrev']) guest_count = 1 event['guest_fee'] = guest_count * event['guest_fee'] # look for 'paid' indicator to see who's been naughty and who's been nice if '$' not in event['member']: event['guest_fee'] = -event['guest_fee'] # OWED else: event['guest_fee'] = 0 return event def get_deadbeat_sponsors(dates_past): """ return dicts of members and their guest fee accounts. deadbeats owe guest fees sponsors have paid their guest fees. A member may appear in both. 
""" # init the member dicts with {name: []} deadbeats = {gevent_to_member_name(event): [] for event in dates_past} sponsors = {gevent_to_member_name(event): [] for event in dates_past} for event in dates_past: if event['guest_fee'] < 0: deadbeats[gevent_to_member_name(event)] += [(event['night_abrev'], -event['guest_fee'])] if event['guest_fee'] > 0: sponsors[gevent_to_member_name(event)] += [(event['night_abrev'], event['guest_fee'])] return deadbeats, sponsors def show_guest_fees(members): """ members is a dict created by get_deadbeat_sponsors(): member: [(night, fee), (night, fee), (night, fee), ...] for each member, prints $sum, member, dates or ' none' if there are no guest fees. """ out_lst = [] total = 0 for member in members: mem_total = sum([x[1] for x in members[member]]) dates = [x[0].split()[1] for x in members[member]] if mem_total: out_lst += ['$%4d %10s: %s'%(mem_total, member, ", ".join(dates))] total += mem_total if out_lst: print('\n'.join(out_lst)) print('$%4d %10s'%(total, 'total')) else: print(' none') def get_whos_up(dates_selected): """ return members_dict['Bob'] = [0, 'Bob', ('middle','Mon 12/24'), ('middle','Tue 12/25'), ] for use by show_whos_up() """ members_dict = {} p_ord = 0 for event in dates_selected: member = event['member'] try: members_dict[member] += [(event['where'], event['night_abrev']),] except KeyError: members_dict[member] = [p_ord, member, (event['where'], event['night_abrev']),] p_ord += 1 return members_dict def show_whos_up(whos_up_dict): """ This output gets pasted into my periodic emails who room: day date, date, date [, room: date, date] I generate a dict, keyed on the member, with values of a list: [order#, member, (rooms,day),(rooms,day),...)] I repeat the rooms for each day because it can change during a stay. """ # whos_up_dict['Bob'] = [0, 'Bob', ('middle','Mon 12/24'), ('middle','Tue 12/25'), ] # sort by the begining night of stay (the p_ord value, above) # for member_ass in sorted(list(whos_up_dict.items()), key=lambda k_v: k_v[1][0]): for member_ass in list(whos_up_dict.items()): # member_ass = ('Bob', [0, 'Bob', ('middle','Mon 12/24'), ('middle','Tue 12/25'), ]) day_tup = member_ass[1][2:] # [('middle','Mon 12/24'), ('middle','Tue 12/25'),] room = day_tup[0][0] # save the room so we only print it when it changes print('%20s %7s: %s,'%(member_ass[0], day_tup[0][0], day_tup[0][1]), end=' ') for a_day in day_tup[1:]: if a_day[0] == room: print(a_day[1].split()[1]+',', end=' ') else: print('%7s: %s,'%(a_day[0], a_day[1].split()[1]), end=' ') room = a_day[0] # save the room again print('') def show_missing_rooms(dates_raw, opts): """ Flag the data entry error condition: all members in the cabin on a given night must be in a room. Otherwise, the count will be wrong and the priority system breaks down. """ dates_raw = select_dates(dates_raw, opts, None, 0) missing_rooms_str = [] for date in dates_raw: if not date['where']: # catch members in cabin but not assigned to any room missing_rooms_str += \ ['** On %s, where did "%s" sleep?'%(date['night_abrev'], date['member'])] if missing_rooms_str: print('** Missing rooms ! 
**') print('\n'.join(missing_rooms_str)) def show_nights(dates_past, opts): #pylint: disable=W0613 """ colapse the raw calendar to show each night on one line date, inlaw, master, middle, bunk, loft who, who, who, who, who """ if dates_past: dates_combo = [dates_past[0].copy()] for date in dates_past[1:]: if dates_combo[-1]['night_abrev'] not in date['night_abrev']: # new date dates_combo += [date.copy()] else: for room in ROOMS: sep = ',' if date[room] and dates_combo[-1][room] else '' dates_combo[-1][room] = dates_combo[-1][room]+sep+date[room] # dates_combo[] is {'night':'2016-12-15', 'member':'Logan', 'where':'master', # 'master':'Logan', 'in-law':'Bob', 'middle':'Mark', ...} print('\n%10s '%('Nights')+' '.join(['%16s'%room for room in ROOMS])) for date in dates_combo: print('%10s '%(date['night_abrev'])+' '.join(['%16s'%date[room] for room in ROOMS])) else: print('\n** no events found by show_dates()') def count_members_in_rooms(dates_raw, opts): #pylint: disable=W0613 """ Construct the memberCount dict { 'Bob': {'inlaw': count, 'master' count, ...}...} for season up to today. """ # init the member_counts with the first {name: {rooms}} member_counts = {gevent_to_member_name(event): \ {room:0 for room in ROOMS+('total',)} for event in dates_raw} # add ['middle']='Logan' or blank for all rooms for event in dates_raw: # print '*****',gevent_to_member_name(event), # '+++', event['member'], '====', event['where'], '*****' member_counts[gevent_to_member_name(event)]['total'] = \ member_counts[gevent_to_member_name(event)]['total']+1 for room in ROOMS: if room in event['where'].lower(): try: member_counts[event[room]][room] = member_counts[event[room]][room]+1 except KeyError as why: msg = getattr(why, 'message', repr(why)) print("FAILED room=%s\nevent=%r\n%s\n"%(room, event, msg)) print("member_counts=%r\n"%member_counts) return member_counts def show_room_counts(member_counts): """ Room priority is based on which member has used the room the least. display: date, who, where inlaw, master, middle, bunk, loft total who, count, count, count, count, count """ # show how many times each member has slept in each room print('\n%4s%10s'%('', 'Counts')+' '.join(['%8s'%room for room in ROOMS])) for member in member_counts: print('%4d%10s'%(member_counts[member]['total'], member)+ ' '.join(['%8s'%('%d'%member_counts[member][room] if member_counts[member][room] else '') for room in ROOMS])) def gevent_to_member_name(event): """ Each calendar event has only one member name as the first word in the summary. extract the member name ignoring whatever else is in the summary. Should be run *after* fix_spelling() """ member = event['member'].split()[0].replace(',', '') return member def opts_add_season(opts): """ The Lone CLone cabin runs for the first weekend in Dec to the last in April. Sometimes, that includes the end of November ;-) """ opts['season_start'] = (int(opts['--year']), 11, 29,) opts['season_end'] = (int(opts['--year'])+1, 5, 1,) def read_test_dates_raw(file_name): """Read test data from a json encoded file. """ with open(file_name,'r') as fp: dates_raw_test = json.load(fp) return dates_raw_test def write_test_dates_raw(file_name, test_data): """Write test data to a json encoded file. 
""" with open(file_name,'w') as fp: json.dump(test_data, fp) def create_test_dates_raw(): """Todo: make a list of dicts as expected from google calendar """ return [] # yes, lots of branches and statements #pylint: disable=R0912 def main(opts): #pylint: disable=R0915 """ the program """ # ignore line-to-long #pylint: disable=C0301 if opts['--offline']: dates_raw = read_test_dates_raw('test.json') # start in the middle of the test data test_shift = datetime.datetime.strptime(dates_raw[len(dates_raw)//2]['night'], '%Y-%m-%d') opts['--year'] = str(datetime.datetime.strptime(dates_raw[0]['night'], '%Y-%m-%d').year) opts_add_season(opts) test_shift -= datetime.datetime.utcnow() test_shift = test_shift.days if opts['--shift']: opts['--shift'] = str(int(opts['--shift']) + test_shift) else: opts['--shift'] = str(test_shift) else: opts_add_season(opts) credentials = get_credentials(opts) events_raw = get_events_raw(credentials, opts) # print('events', ',\n'.join([repr(x) for x in events_raw])) # translate 'start' and 'end' to 'night' and 'leave' # translate 'summary' and 'description' to 'member' and 'where' dates_raw = events_to_raw_dates(events_raw, opts) # print ',\n'.join([repr(x) for x in dates_raw]) #pylint: enable=C0301 if opts['--debug']: print('opts:\n', '\n'.join(['%s: %r'%(k, opts[k]) for k in opts if '--' in k])) debug_print_raw(dates_raw) # dates_raw is a list of dicts. The dates_raw dicts need a few more fields... expand_multi_nights(dates_raw) # add more date dicts to fill in between night and leaving add_day_of_week(dates_raw) # add 'night_abrev' field to the date dicts dates_raw = fix_spelling(dates_raw) # catch data entry errors put_members_in_rooms(dates_raw) # to each date, add entries for each room if opts['--shift']: dt_today = datetime.datetime.now() + datetime.timedelta(days=int(opts['--shift'])) print('Shifted to ', ('%s'%dt_today)[:16]) # dates_raw[] is now a list of {'night':'2016-12-15', 'member':'Peter', # 'where':'master', 'master':'Peter', 'in-law':'', 'middle':'', ...} # always flag any members I failed to assign to a room show_missing_rooms(select_dates(dates_raw, opts, None, 0), opts) if opts['--whosup']: print("Here's who I've heard from:") dates_coming_up = select_dates(dates_raw, opts, -2, 7) whos_up_dict = get_whos_up(dates_coming_up) if whos_up_dict: show_whos_up(whos_up_dict) else: print(' no one!\n') if opts['--raw']: show_raw(dates_raw) # always show the guest fee accounts # give members 2 days before mentioning guest fees dates_guests = [add_guest_fee(event, opts) for event in select_dates(dates_raw, opts, None, -2)] # dates_guests[] includes a 'guest_fee' key (+ paid, - owed) deadbeats, sponsors = get_deadbeat_sponsors(dates_guests) print('\nMembers who owe guest fees:') show_guest_fees(deadbeats) print('\nMembers who have paid their guest fees: (Yay!)') show_guest_fees(sponsors) dates_past = select_dates(dates_raw, opts, None, 0) if opts['--nights']: show_nights(dates_past, opts) if opts['--counts']: member_counts = count_members_in_rooms(dates_past, opts) # member_counts{} = {'Bob':{'in-law':1, 'master':0, 'middle':0, # 'bunk':1, 'loft':0}, 'Mark:{'master':1,...},...} show_room_counts(member_counts) if opts['--peak']: nights_extra = NIGHTS_PEAK[opts['--year']][2:] # ignore Fri, Sat entries print('\nPeak nights starting %s, excluding Fri & Sat nights:'%opts['--year'], end='') str_peak = ', '.join(['%s%s'%('' if i%8 != 0 else '\n ', x) for i, x in enumerate(nights_extra)]) print(str_peak) if __name__ == '__main__': OPTS = docopt.docopt(USE_STR, 
version='0.9.0') main(OPTS)
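
# Worked example of the guest-fee bookkeeping in add_guest_fee(). The member
# name and date are hypothetical, used only to illustrate the arithmetic:
#
#   event = {'member': 'Bob +2', 'night_abrev': 'Fri 12/20'}
#   add_guest_fee(event, {'--year': '2019'})
#   event['guest_fee']   # -70: Friday is a peak night, 2 guests x $35, unpaid
#
# With a '$' in the summary ('Bob $ +2') the fee stays positive (+70), which
# lists the member under 'sponsors' instead of 'deadbeats' in the weekly note.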
the-stack_0_4285
# coding: utf8 """ Implementation of finite DPP MCMC samplers: - `add_exchange_delete_sampler` - `add_delete_sampler` - `basis_exchange_sampler` - `zonotope_sampler` .. seealso: `Documentation on ReadTheDocs <https://dppy.readthedocs.io/en/latest/finite_dpps/mcmc_sampling.html>`_ """ import time import numpy as np import scipy.linalg as la # For zonotope sampler from cvxopt import matrix, spmatrix, solvers solvers.options['show_progress'] = False solvers.options['glpk'] = {'msg_lev': 'GLP_MSG_OFF'} from dppy.utils import det_ST, check_random_state ############################################ # Approximate samplers for projection DPPs # ############################################ def dpp_sampler_mcmc(kernel, mode='AED', **params): """ Interface function with initializations and samplers for MCMC schemes. .. seealso:: - :ref:`finite_dpps_mcmc_sampling_add_exchange_delete` - :func:`add_exchange_delete_sampler <add_exchange_delete_sampler>` - :func:`initialize_AED_sampler <initialize_AED_sampler>` - :func:`add_delete_sampler <add_delete_sampler>` - :func:`basis_exchange_sampler <basis_exchange_sampler>` - :func:`initialize_AD_and_E_sampler <initialize_AD_and_E_sampler>` """ rng = check_random_state(params.get('random_state', None)) s_init = params.get('s_init', None) nb_iter = params.get('nb_iter', 10) T_max = params.get('T_max', None) size = params.get('size', None) # = Tr(K) for projection correlation K if mode == 'AED': # Add-Exchange-Delete S'=S+t, S-t+u, S-t if s_init is None: s_init = initialize_AED_sampler(kernel, random_state=rng) sampl = add_exchange_delete_sampler(kernel, s_init, nb_iter, T_max, random_state=rng) elif mode == 'AD': # Add-Delete S'=S+t, S-t if s_init is None: s_init = initialize_AD_and_E_sampler(kernel, random_state=rng) sampl = add_delete_sampler(kernel, s_init, nb_iter, T_max, random_state=rng) elif mode == 'E': # Exchange S'=S-t+u if s_init is None: s_init = initialize_AD_and_E_sampler(kernel, size, random_state=rng) sampl = basis_exchange_sampler(kernel, s_init, nb_iter, T_max, random_state=rng) return sampl def initialize_AED_sampler(kernel, random_state=None): """ .. seealso:: - :func:`add_delete_sampler <add_delete_sampler>` - :func:`basis_exchange_sampler <basis_exchange_sampler>` - :func:`initialize_AED_sampler <initialize_AED_sampler>` - :func:`add_exchange_delete_sampler <add_exchange_delete_sampler>` """ rng = check_random_state(random_state) N = kernel.shape[0] ground_set = np.arange(N) S0, det_S0 = [], 0.0 nb_iter = 100 tol = 1e-9 for _ in range(nb_iter): if det_S0 > tol: break else: T = rng.choice(2 * N, size=N, replace=False) S0 = np.intersect1d(T, ground_set, assume_unique=True) det_S0 = det_ST(kernel, S0) else: raise ValueError('Initialization problem, you may be using a size `k` > rank of the kernel') return S0.tolist() def initialize_AD_and_E_sampler(kernel, size=None, random_state=None): """ .. 
seealso:: - :func:`add_delete_sampler <add_delete_sampler>` - :func:`basis_exchange_sampler <basis_exchange_sampler>` - :func:`initialize_AED_sampler <initialize_AED_sampler>` - :func:`add_exchange_delete_sampler <add_exchange_delete_sampler>` """ rng = check_random_state(random_state) N = kernel.shape[0] S0, det_S0 = [], 0.0 it_max = 100 tol = 1e-9 for _ in range(it_max): if det_S0 > tol: break else: S0 = rng.choice(N, size=size if size else rng.randint(1, N + 1), replace=False) det_S0 = det_ST(kernel, S0) else: raise ValueError('Initialization problem, you may be using a size `k` > rank of the kernel') return S0.tolist() def add_exchange_delete_sampler(kernel, s_init=None, nb_iter=10, T_max=None, random_state=None): """ MCMC sampler for generic DPPs, it is a mix of add/delete and basis exchange MCMC samplers. :param kernel: Kernel martrix :type kernel: array_like :param s_init: Initial sample. :type s_init: list :param nb_iter: Maximum number of iterations performed by the the algorithm. Default is 10. :type nb_iter: int :param T_max: Maximum running time of the algorithm (in seconds). :type T_max: float :param random_state: :type random_state: None, np.random, int, np.random.RandomState :return: list of `nb_iter` approximate sample of DPP(kernel) :rtype: array_like .. seealso:: Algorithm 3 in :cite:`LiJeSr16c` """ rng = check_random_state(random_state) # Initialization N = kernel.shape[0] ground_set = np.arange(N) S0, det_S0 = s_init, det_ST(kernel, s_init) size_S0 = len(S0) # Size of the current sample chain = [S0] # Initialize the collection (list) of sample # Evaluate running time... t_start = time.time() if T_max else 0 for _ in range(1, nb_iter): S1 = S0.copy() # S1 = S0 # Pick one element s in S_0 by index uniformly at random s_ind = rng.choice(size_S0 if size_S0 else N) # , size=1)[0] # Unif t in [N]-S0 t = rng.choice(np.delete(ground_set, S0)) U = rng.rand() ratio = size_S0 / N # Proportion of items in current sample # Add: S1 = S0 + t if U < 0.5 * (1 - ratio)**2: S1.append(t) # S1 = S0 + t # Accept_reject the move det_S1 = det_ST(kernel, S1) # det K_S1 if rng.rand() < det_S1 / det_S0 * (size_S0 + 1) / (N - size_S0): S0, det_S0 = S1, det_S1 chain.append(S1) size_S0 += 1 else: chain.append(S0) # Exchange: S1 = S0 - s + t elif (0.5 * (1 - ratio)**2 <= U) & (U < 0.5 * (1 - ratio)): del S1[s_ind] # S1 = S0 - s S1.append(t) # S1 = S1 + t = S0 - s + t # Accept_reject the move det_S1 = det_ST(kernel, S1) # det K_S1 if rng.rand() < (det_S1 / det_S0): S0, det_S0 = S1, det_S1 chain.append(S1) # size_S0 stays the same else: chain.append(S0) # Delete: S1 = S0 - s elif (0.5 * (1 - ratio) <= U) & (U < 0.5 * (ratio**2 + (1 - ratio))): del S1[s_ind] # S0 - s # Accept_reject the move det_S1 = det_ST(kernel, S1) # det K_S1 if rng.rand() < det_S1 / det_S0 * size_S0 / (N - (size_S0 - 1)): S0, det_S0 = S1, det_S1 chain.append(S1) size_S0 -= 1 else: chain.append(S0) else: chain.append(S0) if T_max: if time.time() - t_start < T_max: break return chain def add_delete_sampler(kernel, s_init, nb_iter=10, T_max=None, random_state=None): """ MCMC sampler for generic DPP(kernel), it performs local moves by removing/adding one element at a time. :param kernel: Kernel martrix :type kernel: array_like :param s_init: Initial sample. :type s_init: list :param nb_iter: Maximum number of iterations performed by the the algorithm. Default is 10. :type nb_iter: int :param T_max: Maximum running time of the algorithm (in seconds). Default is None. 
:type T_max: float :param random_state: :type random_state: None, np.random, int, np.random.RandomState :return: list of `nb_iter` approximate sample of DPP(kernel) :rtype: array_like .. seealso:: Algorithm 1 in :cite:`LiJeSr16c` """ rng = check_random_state(random_state) # Initialization N = kernel.shape[0] # Number of elements # Initialization S0, det_S0 = s_init, det_ST(kernel, s_init) chain = [S0] # Initialize the collection (list) of sample # Evaluate running time... t_start = time.time() if T_max else 0 for _ in range(1, nb_iter): # With proba 1/2 try to add/delete an element if rng.rand() < 0.5: # Perform the potential add/delete move S1 = S0 +/- s S1 = S0.copy() # S1 = S0 s = rng.choice(N) # Uniform item in [N] if s in S1: S1.remove(s) # S1 = S0 - s else: S1.append(s) # S1 = SO + s # Accept_reject the move det_S1 = det_ST(kernel, S1) # det K_S1 if rng.rand() < det_S1 / det_S0: S0, det_S0 = S1, det_S1 chain.append(S1) else: chain.append(S0) else: chain.append(S0) if T_max: if time.time() - t_start < T_max: break return chain def basis_exchange_sampler(kernel, s_init, nb_iter=10, T_max=None, random_state=None): """ MCMC sampler for projection DPPs, based on the basis exchange property. :param kernel: Feature vector matrix, feature vectors are stacked columnwise. It is assumed to be full row rank. :type kernel: array_like :param s_init: Initial sample. :type s_init: list :param nb_iter: Maximum number of iterations performed by the the algorithm. Default is 10. :type nb_iter: int :param T_max: Maximum running time of the algorithm (in seconds). Default is None. :type T_max: float :param random_state: :type random_state: None, np.random, int, np.random.RandomState :return: MCMC chain of approximate sample (stacked row_wise i.e. nb_iter rows). :rtype: array_like .. seealso:: Algorithm 2 in :cite:`LiJeSr16c` """ rng = check_random_state(random_state) # Initialization N = kernel.shape[0] # Number of elements ground_set = np.arange(N) # Ground set size = len(s_init) # Size of the sample (cardinality is fixed) # Initialization S0, det_S0 = s_init, det_ST(kernel, s_init) chain = np.zeros((nb_iter, size), dtype=int) chain[0] = S0 # Evaluate running time... t_start = time.time() if T_max else 0 for it in range(1, nb_iter): # With proba 1/2 try to swap 2 elements if rng.rand() < 0.5: # Perform the potential exchange move S1 = S0 - s + t S1 = S0.copy() # S1 = S0 # Pick one element s in S0 by index uniformly at random s_ind = rng.choice(size) # Pick one element t in [N]\S0 uniformly at random t = rng.choice(np.delete(ground_set, S0)) S1[s_ind] = t # S_1 = S0 - S0[s_ind] + t det_S1 = det_ST(kernel, S1) # det K_S1 # Accept_reject the move w. proba if rng.rand() < det_S1 / det_S0: S0, det_S0 = S1, det_S1 chain[it] = S1 else: # if reject, stay in the same state chain[it] = S0 else: chain[it] = S0 if T_max: if time.time() - t_start < T_max: break return chain.tolist() ############ # ZONOTOPE # ############ def extract_basis(y_sol, eps=1e-5): """ Subroutine of zono_sampling to extract the tile of the zonotope in which a point lies. It extracts the indices of entries of the solution of LP :eq:`eq:Px` that are in (0,1). :param y_sol: Optimal solution of LP :eq:`eq:Px` :type y_sol: list :param eps: Tolerance :math:`y_i^* \\in (\\epsilon, 1-\\epsilon), \\quad \\epsilon \\geq 0` :eps type: float :return: Indices of the feature vectors spanning the tile in which the point is lies. :math:`B_{x} = \\left\\{ i \\, ; \\, y_i^* \\in (0,1) \\right\\}` :rtype: list .. 
seealso:: Algorithm 3 in :cite:`GaBaVa17` - :func:`zono_sampling <zono_sampling>` """ basis = np.where((eps < y_sol) & (y_sol < 1 - eps))[0] return basis def zonotope_sampler(A_zono, **params): """ MCMC based sampler for projection DPPs. The similarity matrix is the orthogonal projection matrix onto the row span of the feature vector matrix. Samples are of size equal to the ransampl_size of the projection matrix also equal to the rank of the feature matrix (assumed to be full row rank). :param A_zono: Feature vector matrix, feature vectors are stacked columnwise. It is assumed to be full row rank. :type A_zono: array_like :param params: Dictionary containing the parameters - ``'lin_obj'`` (list): Linear objective (:math:`c`) of the linear program used to identify the tile in which a point lies. Default is a random Gaussian vector. - ``'x_0'` (list): Initial point. - ``'nb_iter'`` (int): Number of iterations of the MCMC chain. Default is 10. - ``'T_max'`` (float): Maximum running time of the algorithm (in seconds). Default is None. - ``'random_state`` (default None) :type params: dict :return: MCMC chain of approximate samples (stacked row_wise i.e. nb_iter rows). :rtype: array_like .. seealso:: Algorithm 5 in :cite:`GaBaVa17` - :func:`extract_basis <extract_basis>` - :func:`basis_exchange_sampler <basis_exchange_sampler>` """ rng = check_random_state(params.get('random_state', None)) r, N = A_zono.shape # Sizes of r=samples=rank(A_zono), N=ground set # Linear objective c = matrix(params.get('lin_obj', rng.randn(N))) # Initial point x0 = A*u, u~U[0,1]^n x0 = matrix(params.get('x_0', A_zono.dot(rng.rand(N)))) nb_iter = params.get('nb_iter', 10) T_max = params.get('T_max', None) ################### # Linear problems # ################### # Canonical form # min c.T*x min c.T*x # s.t. G*x <= h <=> s.t. G*x + s = h # A*x = b A*x = b # s >= 0 # CVXOPT # =====> solvers.lp(c, G, h, A, b, solver='glpk') ################################################# # To access the tile Z(B_x) # Solve P_x(A,c) ###################################################### # y^* = # argmin c.T*y argmin c.T*y # s.t. A*y = x <=> s.t. A *y = x # 0 <= y <= 1 [ I_n] *y <= [1^n] # [-I_n] [0^n] ###################################################### # Then B_x = \{ i ; y_i^* \in ]0,1[ \} A = spmatrix(0.0, [], [], (r, N)) A[:, :] = A_zono G = spmatrix(0.0, [], [], (2 * N, N)) G[:N, :] = spmatrix(1.0, range(N), range(N)) G[N:, :] = spmatrix(-1.0, range(N), range(N)) # Endpoints of segment # D_x \cap Z(A) = [x+alpha_m*d, x-alpha_M*d] ########################################################################### # alpha_m/_M = argmin +/-alpha argmin [+/-1 0^N].T * [alpha,lambda] # s.t. x + alpha d = A lambda <=> s.t. [-d A] *[alpha, lambda] = x # 0 <= lambda <= 1 [0^N I_N] *[alpha, lambda] <= [1^N] # [0^N -I_N] [0^N] ########################################################################## c_mM = matrix(0.0, (N + 1, 1)) c_mM[0] = 1.0 A_mM = spmatrix(0.0, [], [], (r, N + 1)) A_mM[:, 1:] = A G_mM = spmatrix(0.0, [], [], (2 * N, N + 1)) G_mM[:, 1:] = G # Common h to both kind of LP # cf. 
0 <= y <= 1 and 0 <= lambda <= 1 h = matrix(0.0, (2 * N, 1)) h[:N, :] = 1.0 ################## # Initialization # ################## B_x0 = [] while len(B_x0) != r: # Initial tile B_x0 # Solve P_x0(A,c) y_star = solvers.lp(c, G, h, A, x0, solver='glpk')['x'] # Get the tile B_x0 = extract_basis(np.asarray(y_star)) # Initialize sequence of sample chain = np.zeros((nb_iter, r), dtype=int) chain[0] = B_x0 # Compute the det of the tile (Vol(B)=abs(det(B))) det_B_x0 = la.det(A_zono[:, B_x0]) t_start = time.time() if T_max else 0 for it in range(1, nb_iter): # Take uniform direction d defining D_x0 d = matrix(rng.randn(r, 1)) # Define D_x0 \cap Z(A) = [x0 + alpha_m*d, x0 - alpha_M*d] # Update the constraint [-d A] * [alpha,lambda] = x A_mM[:, 0] = -d # Find alpha_m/M alpha_m = solvers.lp(c_mM, G_mM, h, A_mM, x0, solver='glpk')['x'][0] alpha_M = solvers.lp(-c_mM, G_mM, h, A_mM, x0, solver='glpk')['x'][0] # Propose x1 ~ U_{[x0+alpha_m*d, x0-alpha_M*d]} x1 = x0 + (alpha_m + (alpha_M - alpha_m) * rng.rand()) * d # Proposed tile B_x1 # Solve P_x1(A,c) y_star = solvers.lp(c, G, h, A, x1, solver='glpk')['x'] # Get the tile B_x1 = extract_basis(np.asarray(y_star)) # Accept/Reject the move with proba Vol(B1)/Vol(B0) if len(B_x1) != r: # if extract_basis returned smtg ill conditioned chain[it] = B_x0 else: det_B_x1 = la.det(A_zono[:, B_x1]) if rng.rand() < abs(det_B_x1 / det_B_x0): x0, B_x0, det_B_x0 = x1, B_x1, det_B_x1 chain[it] = B_x1 else: chain[it] = B_x0 if T_max: if time.time() - t_start < T_max: break return chain
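A minimal usage sketch for the samplers defined above: build a rank-k orthogonal projection kernel and run the add/delete chain. The module name in the import is an assumption (these functions live in the file above); everything else uses only the signatures shown there.

import numpy as np

# Assumption: the file above is importable as `mcmc_sampling`; adjust the
# module name to wherever it sits in your tree.
from mcmc_sampling import initialize_AD_and_E_sampler, add_delete_sampler

rng = np.random.RandomState(0)

# Rank-k orthogonal projection kernel K = U U^T, a valid projection DPP kernel.
N, k = 20, 5
U, _ = np.linalg.qr(rng.randn(N, k))   # N x k with orthonormal columns
K = U.dot(U.T)

# Initialize with a subset whose principal minor is non-negligible,
# then run the add/delete chain for a few hundred steps.
s_init = initialize_AD_and_E_sampler(K, size=k, random_state=rng)
chain = add_delete_sampler(K, s_init, nb_iter=200, random_state=rng)

print(chain[-1])  # last visited subset, an approximate sample of DPP(K)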
the-stack_0_4286
def checkPangram(s):
    # Create a list of 26 flags, one per letter, all initially False.
    seen = []
    for i in range(26):
        seen.append(False)

    # Convert the sentence to lowercase and mark every letter that occurs.
    # Only index the flag list for actual letters, so punctuation and digits
    # cannot produce an out-of-range index.
    for c in s.lower():
        if 'a' <= c <= 'z':
            seen[ord(c) - ord('a')] = True

    # If any letter is missing, the sentence is not a pangram.
    for ch in seen:
        if not ch:
            return False
    return True


# Driver program to test the function above
sentence = input()
if checkPangram(sentence):
    print("Yes")
else:
    print("No")
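For comparison, a set-based check does the same job in one expression (equivalent behavior for ASCII letters; the function name below is illustrative):

import string

def check_pangram_set(s: str) -> bool:
    # True iff the lowercase letters of s cover the whole alphabet.
    return set(string.ascii_lowercase) <= set(s.lower())

print("Yes" if check_pangram_set("The quick brown fox jumps over the lazy dog") else "No")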
the-stack_0_4289
import logging import json import os import shutil import subprocess from .base import BaseExporter logger = logging.getLogger(__name__) __all__ = ["JSONExporter"] class JSONExporter(BaseExporter): short_name = "json_file" TESTS_DIR_NAME = "tests" SOLUTION_DIR_NAME = "solutions" VALIDATOR_DIR_NAME = "validators" SUBTASKS_DIR_NAME = "subtasks" CHECKER_DIR_NAME = "checker" GRADER_DIR_NAME = "graders" OTHER_FILES_DIR_NAME = "others" def __init__(self, revision): super().__init__(revision) def _do_export(self): def export_resources_to_path(prefix): for resource in self.revision.resource_set.all(): self.extract_from_storage_to_path( resource.file, os.path.join( prefix, resource.name ) ) def generate_clean_name(name): return name.replace(' ', '_').lower() # Exporting problem global data problem_data = self.revision.problem_data problem_data_dict = { "code": problem_data.code_name, "name": problem_data.title, "time_limit": problem_data.time_limit, "memory_limit": problem_data.memory_limit, "score_precision": problem_data.score_precision, } if problem_data.task_type: problem_data_dict.update({ "task_type": problem_data.task_type, "task_type_params": problem_data.task_type_parameters, }) self.write_to_file( "problem.json".format(problem_code=problem_data.code_name), json.dumps(problem_data_dict) ) self.write_to_file( "statement.md", self.revision.statement_set.get().content ) # Exporting problem files self.create_directory(self.OTHER_FILES_DIR_NAME) for file in self.revision.problem.files.all(): self.extract_from_storage_to_path( file, os.path.join( self.OTHER_FILES_DIR_NAME, file.name ) ) # Exporting testcases self.create_directory(self.TESTS_DIR_NAME) ignored_testcases = [] for testcase in self.revision.testcase_set.all(): if not testcase.input_file_generated() or not testcase.output_file_generated(): ignored_testcases.append(testcase) logger.warning("Testcase {} couldn't be generated. Skipping".format(testcase.name)) continue self.extract_from_storage_to_path( testcase.input_file, os.path.join( self.TESTS_DIR_NAME, "{testcase_name}.in".format(testcase_name=generate_clean_name(testcase.name)) ), ) self.extract_from_storage_to_path( testcase.output_file, os.path.join( "tests", "{testcase_name}.out".format(testcase_name=generate_clean_name(testcase.name)) ) ) # Exporting graders self.create_directory(self.GRADER_DIR_NAME) for grader in self.revision.grader_set.all(): self.extract_from_storage_to_path( grader.code, os.path.join( self.GRADER_DIR_NAME, grader.name, ) ) # Exporting subtasks self.create_directory(self.SUBTASKS_DIR_NAME) for subtask in self.revision.subtasks.all(): self.write_to_file( os.path.join( self.SUBTASKS_DIR_NAME, "{subtask_index:02}-{subtask_name}.json".format( subtask_index=subtask.index, subtask_name=subtask.name, )), json.dumps( { "score": subtask.score, "testcases": [ generate_clean_name(t.name) for t in subtask.testcases.all() ] } ) ) # Exporting solutions self.create_directory(self.SOLUTION_DIR_NAME) for solution in self.revision.solution_set.all(): if solution.verdict: solution_dir = os.path.join(self.SOLUTION_DIR_NAME, generate_clean_name(solution.verdict.name)) else: solution_dir = os.path.join(self.SOLUTION_DIR_NAME, "unknown_verdict") self.create_directory(solution_dir) self.extract_from_storage_to_path(solution.code, os.path.join(solution_dir, solution.name)) # We don't export generators. 
Tests are already generated so there is no use for them # Exporting checker( We only extract main checker) self.create_directory(self.CHECKER_DIR_NAME) for resource in self.revision.checker_set.all(): self.extract_from_storage_to_path( resource.file, os.path.join(self.CHECKER_DIR_NAME, resource.name) ) checker = problem_data.checker if checker is not None: self.extract_from_storage_to_path( checker.file, os.path.join(self.CHECKER_DIR_NAME, "checker{ext}".format( ext=os.path.splitext(checker.name)[1] )) ) export_resources_to_path("checker") # Exporting validators self.create_directory(self.VALIDATOR_DIR_NAME) for validator in self.revision.validator_set.all(): dirs = [] for subtask in validator.subtasks: dirs.append(subtask.name) for dir in dirs: full_dir = os.path.join(self.VALIDATOR_DIR_NAME, dir) self.create_directory(full_dir) self.extract_from_storage_to_path( validator.file, os.path.join( full_dir, validator.name ) ) export_resources_to_path("validators") # Exporting public self.create_directory("repo") os.system('git --git-dir="{repo_dir}" worktree add {work_dir} {commit_id}'.format( repo_dir=self.revision.repository_path, work_dir=self.get_absolute_path("repo"), commit_id=self.revision.commit_id )) tests_dir_in_repo = os.path.join('repo', 'tests') self.create_directory(tests_dir_in_repo) for testcase in self.revision.testcase_set.all(): if not testcase.input_file_generated() or not testcase.output_file_generated(): ignored_testcases.append(testcase) logger.warning("Testcase {} couldn't be generated. Skipping".format(testcase.name)) continue self.extract_from_storage_to_path( testcase.input_file, os.path.join( tests_dir_in_repo, "{testcase_name}.in".format(testcase_name=testcase.name) ), ) self.extract_from_storage_to_path( testcase.output_file, os.path.join( tests_dir_in_repo, "{testcase_name}.out".format(testcase_name=testcase.name) ) ) try: print(subprocess.check_output(['tps', 'make-public'], cwd=self.get_absolute_path("repo"), stderr=subprocess.STDOUT)) except subprocess.CalledProcessError as e: print(e.output) raise e self.create_directory("attachments") try: shutil.move(os.path.join(self.get_absolute_path("repo"), "{}.zip".format(problem_data.code_name)), self.get_absolute_path("attachments")) except OSError: try: shutil.move(os.path.join(self.get_absolute_path("repo"), "{}.zip".format(problem_data.code_name)), self.get_absolute_path("attachments")) except OSError as e: logger.error("Public archive not found") raise e shutil.rmtree(self.get_absolute_path("repo"))
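A hedged sketch of reading back the layout that `_do_export` writes: `problem.json`, `subtasks/*.json` and `tests/*.in|.out`, using the directory names from the constants above. The reader function name is illustrative and not part of the exporter.

import json
import os

def load_exported_problem(export_dir):
    # problem.json at the root, as written by _do_export.
    with open(os.path.join(export_dir, "problem.json")) as fh:
        problem = json.load(fh)

    # One JSON file per subtask: {"score": ..., "testcases": [...]}.
    subtasks = {}
    for name in sorted(os.listdir(os.path.join(export_dir, "subtasks"))):
        with open(os.path.join(export_dir, "subtasks", name)) as fh:
            subtasks[name] = json.load(fh)

    # Testcase names are the .in files in tests/, minus the extension.
    tests = sorted(f[:-3] for f in os.listdir(os.path.join(export_dir, "tests"))
                   if f.endswith(".in"))
    return problem, subtasks, tests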
the-stack_0_4290
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Callable, Dict, Optional, Sequence, Tuple from google.api_core import grpc_helpers # type: ignore from google.api_core import gapic_v1 # type: ignore import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from google.ads.googleads.v7.resources.types import ad_group_criterion_label from google.ads.googleads.v7.services.types import ad_group_criterion_label_service from .base import AdGroupCriterionLabelServiceTransport, DEFAULT_CLIENT_INFO class AdGroupCriterionLabelServiceGrpcTransport(AdGroupCriterionLabelServiceTransport): """gRPC backend transport for AdGroupCriterionLabelService. Service to manage labels on ad group criteria. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ def __init__(self, *, host: str = 'googleads.googleapis.com', credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. channel (Optional[grpc.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or application default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. 
quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ self._ssl_channel_credentials = ssl_channel_credentials if channel: # Sanity check: Ensure that channel and credentials are not both # provided. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning) host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443" if credentials is None: credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id) # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() ssl_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: ssl_credentials = SslCredentials().ssl_credentials # create a new channel. The provided one is ignored. self._grpc_channel = type(self).create_channel( host, credentials=credentials, credentials_file=credentials_file, ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" if credentials is None: credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES) # create a new channel. The provided one is ignored. self._grpc_channel = type(self).create_channel( host, credentials=credentials, ssl_credentials=ssl_channel_credentials, scopes=self.AUTH_SCOPES, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._stubs = {} # type: Dict[str, Callable] # Run the base constructor. super().__init__( host=host, credentials=credentials, client_info=client_info, ) @classmethod def create_channel(cls, host: str = 'googleads.googleapis.com', credentials: ga_credentials.Credentials = None, scopes: Optional[Sequence[str]] = None, **kwargs) -> grpc.Channel: """Create and return a gRPC channel object. Args: address (Optionsl[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: grpc.Channel: A gRPC channel object. """ return grpc_helpers.create_channel( host, credentials=credentials, scopes=scopes or cls.AUTH_SCOPES, **kwargs ) def close(self): self.grpc_channel.close() @property def grpc_channel(self) -> grpc.Channel: """Return the channel designed to connect to this service. 
""" return self._grpc_channel @property def get_ad_group_criterion_label(self) -> Callable[ [ad_group_criterion_label_service.GetAdGroupCriterionLabelRequest], ad_group_criterion_label.AdGroupCriterionLabel]: r"""Return a callable for the get ad group criterion label method over gRPC. Returns the requested ad group criterion label in full detail. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__ Returns: Callable[[~.GetAdGroupCriterionLabelRequest], ~.AdGroupCriterionLabel]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'get_ad_group_criterion_label' not in self._stubs: self._stubs['get_ad_group_criterion_label'] = self.grpc_channel.unary_unary( '/google.ads.googleads.v7.services.AdGroupCriterionLabelService/GetAdGroupCriterionLabel', request_serializer=ad_group_criterion_label_service.GetAdGroupCriterionLabelRequest.serialize, response_deserializer=ad_group_criterion_label.AdGroupCriterionLabel.deserialize, ) return self._stubs['get_ad_group_criterion_label'] @property def mutate_ad_group_criterion_labels(self) -> Callable[ [ad_group_criterion_label_service.MutateAdGroupCriterionLabelsRequest], ad_group_criterion_label_service.MutateAdGroupCriterionLabelsResponse]: r"""Return a callable for the mutate ad group criterion labels method over gRPC. Creates and removes ad group criterion labels. Operation statuses are returned. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `DatabaseError <>`__ `FieldError <>`__ `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__ Returns: Callable[[~.MutateAdGroupCriterionLabelsRequest], ~.MutateAdGroupCriterionLabelsResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'mutate_ad_group_criterion_labels' not in self._stubs: self._stubs['mutate_ad_group_criterion_labels'] = self.grpc_channel.unary_unary( '/google.ads.googleads.v7.services.AdGroupCriterionLabelService/MutateAdGroupCriterionLabels', request_serializer=ad_group_criterion_label_service.MutateAdGroupCriterionLabelsRequest.serialize, response_deserializer=ad_group_criterion_label_service.MutateAdGroupCriterionLabelsResponse.deserialize, ) return self._stubs['mutate_ad_group_criterion_labels'] __all__ = ( 'AdGroupCriterionLabelServiceGrpcTransport', )
the-stack_0_4292
# -*- coding: utf-8 -*- import copy import json from freezegun import freeze_time from mantarray_desktop_app import MICRO_TO_BASE_CONVERSION from mantarray_desktop_app import SERIAL_COMM_DEFAULT_DATA_CHANNEL from mantarray_desktop_app import START_MANAGED_ACQUISITION_COMMUNICATION from mantarray_desktop_app import STOP_MANAGED_ACQUISITION_COMMUNICATION import numpy as np from stdlib_utils import drain_queue from stdlib_utils import invoke_process_run_and_check_errors from stdlib_utils import put_object_into_queue_and_raise_error_if_eventually_still_empty from ..fixtures import QUEUE_CHECK_TIMEOUT_SECONDS from ..fixtures_data_analyzer import fixture_four_board_analyzer_process_beta_2_mode from ..fixtures_data_analyzer import set_magnetometer_config from ..fixtures_file_writer import GENERIC_BOARD_MAGNETOMETER_CONFIGURATION from ..helpers import confirm_queue_is_eventually_empty from ..helpers import confirm_queue_is_eventually_of_size from ..parsed_channel_data_packets import SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS __fixtures__ = [ fixture_four_board_analyzer_process_beta_2_mode, ] @freeze_time("2021-06-15 16:39:10.120589") def test_DataAnalyzerProcess__sends_outgoing_data_dict_to_main_as_soon_as_it_retrieves_a_data_packet_from_file_writer__and_sends_data_available_message_to_main( four_board_analyzer_process_beta_2_mode, mocker ): da_process = four_board_analyzer_process_beta_2_mode["da_process"] from_main_queue = four_board_analyzer_process_beta_2_mode["from_main_queue"] to_main_queue = four_board_analyzer_process_beta_2_mode["to_main_queue"] incoming_data_queue = four_board_analyzer_process_beta_2_mode["board_queues"][0][0] outgoing_data_queue = four_board_analyzer_process_beta_2_mode["board_queues"][0][1] # mock so that well metrics don't populate outgoing data queue mocker.patch.object(da_process, "_dump_outgoing_well_metrics", autospec=True) # mock so performance log messages don't populate queue to main mocker.patch.object(da_process, "_handle_performance_logging", autospec=True) da_process.init_streams() # set config arbitrary sampling period test_sampling_period = 1000 set_magnetometer_config( four_board_analyzer_process_beta_2_mode, { "magnetometer_config": GENERIC_BOARD_MAGNETOMETER_CONFIGURATION, "sampling_period": test_sampling_period, }, ) # start managed_acquisition put_object_into_queue_and_raise_error_if_eventually_still_empty( dict(START_MANAGED_ACQUISITION_COMMUNICATION), from_main_queue ) invoke_process_run_and_check_errors(da_process) confirm_queue_is_eventually_of_size(to_main_queue, 1) # remove message to main to_main_queue.get(timeout=QUEUE_CHECK_TIMEOUT_SECONDS) invoke_process_run_and_check_errors(da_process) confirm_queue_is_eventually_empty(outgoing_data_queue) confirm_queue_is_eventually_empty(to_main_queue) test_data_packet = copy.deepcopy(SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS) put_object_into_queue_and_raise_error_if_eventually_still_empty(test_data_packet, incoming_data_queue) invoke_process_run_and_check_errors(da_process) confirm_queue_is_eventually_of_size(outgoing_data_queue, 1) confirm_queue_is_eventually_of_size(to_main_queue, 1) # test data dump waveform_data_points = dict() for well_idx in range(24): default_channel_data = test_data_packet[well_idx][SERIAL_COMM_DEFAULT_DATA_CHANNEL] pipeline = da_process.get_pipeline_template().create_pipeline() pipeline.load_raw_gmr_data( np.array([test_data_packet["time_indices"], default_channel_data], np.int64), np.zeros((2, len(default_channel_data))), ) compressed_data = pipeline.get_force() 
waveform_data_points[well_idx] = { "x_data_points": compressed_data[0].tolist(), "y_data_points": (compressed_data[1] * MICRO_TO_BASE_CONVERSION).tolist(), } expected_outgoing_dict = { "waveform_data": {"basic_data": {"waveform_data_points": waveform_data_points}}, "earliest_timepoint": test_data_packet["time_indices"][0].item(), "latest_timepoint": test_data_packet["time_indices"][-1].item(), "num_data_points": len(test_data_packet["time_indices"]), } outgoing_msg = outgoing_data_queue.get(timeout=QUEUE_CHECK_TIMEOUT_SECONDS) assert outgoing_msg["data_type"] == "waveform_data" assert outgoing_msg["data_json"] == json.dumps(expected_outgoing_dict) # test message sent to main outgoing_msg = to_main_queue.get(timeout=QUEUE_CHECK_TIMEOUT_SECONDS) expected_msg = { "communication_type": "data_available", "timestamp": "2021-06-15 16:39:10.120589", "num_data_points": len(test_data_packet["time_indices"]), "earliest_timepoint": test_data_packet["time_indices"][0], "latest_timepoint": test_data_packet["time_indices"][-1], } assert outgoing_msg == expected_msg def test_DataAnalyzerProcess__does_not_process_data_packets_after_receiving_stop_managed_acquisition_command_until_receiving_first_packet_of_new_stream( four_board_analyzer_process_beta_2_mode, mocker ): da_process = four_board_analyzer_process_beta_2_mode["da_process"] from_main_queue = four_board_analyzer_process_beta_2_mode["from_main_queue"] to_main_queue = four_board_analyzer_process_beta_2_mode["to_main_queue"] incoming_data_queue = four_board_analyzer_process_beta_2_mode["board_queues"][0][0] # mock so these since not using real data mocked_process_data = mocker.patch.object( da_process, "_process_beta_2_data", autospec=True, return_value={} ) invoke_process_run_and_check_errors(da_process, perform_setup_before_loop=True) # set config arbitrary sampling period test_sampling_period = 10000 set_magnetometer_config( four_board_analyzer_process_beta_2_mode, { "magnetometer_config": GENERIC_BOARD_MAGNETOMETER_CONFIGURATION, "sampling_period": test_sampling_period, }, ) # start managed_acquisition put_object_into_queue_and_raise_error_if_eventually_still_empty( dict(START_MANAGED_ACQUISITION_COMMUNICATION), from_main_queue ) invoke_process_run_and_check_errors(da_process) # send first packet of first stream and make sure it is processed test_data_packet = copy.deepcopy(SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS) test_data_packet["is_first_packet_of_stream"] = True put_object_into_queue_and_raise_error_if_eventually_still_empty(test_data_packet, incoming_data_queue) invoke_process_run_and_check_errors(da_process) assert mocked_process_data.call_count == 1 # send another packet of first stream and make sure it is processed test_data_packet = copy.deepcopy(SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS) test_data_packet["is_first_packet_of_stream"] = False put_object_into_queue_and_raise_error_if_eventually_still_empty(test_data_packet, incoming_data_queue) invoke_process_run_and_check_errors(da_process) assert mocked_process_data.call_count == 2 # stop managed acquisition and make sure next data packet in the first stream is not processed put_object_into_queue_and_raise_error_if_eventually_still_empty( dict(STOP_MANAGED_ACQUISITION_COMMUNICATION), from_main_queue ) invoke_process_run_and_check_errors(da_process) test_data_packet = copy.deepcopy(SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS) test_data_packet["is_first_packet_of_stream"] = False put_object_into_queue_and_raise_error_if_eventually_still_empty(test_data_packet, incoming_data_queue) 
invoke_process_run_and_check_errors(da_process) assert mocked_process_data.call_count == 2 # start managed acquisition again and make sure next data packet in the first stream is not processed put_object_into_queue_and_raise_error_if_eventually_still_empty( dict(START_MANAGED_ACQUISITION_COMMUNICATION), from_main_queue ) invoke_process_run_and_check_errors(da_process) test_data_packet = copy.deepcopy(SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS) test_data_packet["is_first_packet_of_stream"] = False put_object_into_queue_and_raise_error_if_eventually_still_empty(test_data_packet, incoming_data_queue) invoke_process_run_and_check_errors(da_process) assert mocked_process_data.call_count == 2 # send first data packet from second stream and make sure it is processed test_data_packet = copy.deepcopy(SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS) test_data_packet["is_first_packet_of_stream"] = True put_object_into_queue_and_raise_error_if_eventually_still_empty(test_data_packet, incoming_data_queue) invoke_process_run_and_check_errors(da_process) assert mocked_process_data.call_count == 3 # prevent BrokenPipeErrors drain_queue(to_main_queue) def test_DataAnalyzerProcess__processes_incoming_stim_packet(four_board_analyzer_process_beta_2_mode, mocker): # TODO Tanner (10/20/21): add to this test when ready to add stim handling da_process = four_board_analyzer_process_beta_2_mode["da_process"] incoming_data_queue = four_board_analyzer_process_beta_2_mode["board_queues"][0][0] # can probably remove this spy and assertion once actual handling is implemented spied_process_stim_packet = mocker.spy(da_process, "_process_stim_packet") test_stim_packet = {"data_type": "stimulation"} put_object_into_queue_and_raise_error_if_eventually_still_empty(test_stim_packet, incoming_data_queue) invoke_process_run_and_check_errors(da_process) spied_process_stim_packet.assert_called_once_with(test_stim_packet)
the-stack_0_4293
import setuptools

with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="findmylibs",
    version="0.0.1",
    author="The Nomadic Coder",
    author_email="[email protected]",
    description="A package to probe installed libraries",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/atemysemicolon/findMyLibs",
    install_requires=['cmake'],
    packages=["findmylibs"],
    entry_points={
        'console_scripts': [
            'findmylibs = findmylibs.__main__:main',
        ]},
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX :: Linux",
    ],
)
the-stack_0_4295
import numpy as np

trials = 1_000_000
dice = int(input("Enter the number of dice: "))

for i in np.arange(dice, dice * 6 + 1):
    found = 0
    for _ in np.arange(trials):
        total = 0
        for _ in np.arange(dice):
            total += np.random.randint(1, 7)
        if total == i:
            found += 1
    print("Sum Value :", i, "probability", np.round((found / trials) * 100, 4), "%")
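The same experiment vectorized with NumPy: simulate all trials in one draw instead of three nested Python loops. The function name and seed below are illustrative.

import numpy as np

def dice_sum_probabilities(dice, trials=1_000_000, seed=0):
    rng = np.random.default_rng(seed)
    # trials x dice rolls at once; sum each row to get the totals.
    totals = rng.integers(1, 7, size=(trials, dice)).sum(axis=1)
    counts = np.bincount(totals, minlength=6 * dice + 1)
    return {s: counts[s] / trials for s in range(dice, 6 * dice + 1)}

for s, p in dice_sum_probabilities(dice=2).items():
    print("Sum Value :", s, "probability", round(p * 100, 4), "%")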
the-stack_0_4296
#!/usr/bin/env python3
############################################################################################
#                                                                                          #
#  Program purpose: Find all the common characters in lexicographical order from          #
#                   two given lower case strings. If there are no common letters          #
#                   print "No common characters".                                         #
#  Program Author : Happi Yvan <[email protected]>                                 #
#  Creation Date  : October 30, 2019                                                       #
#                                                                                          #
############################################################################################

from collections import Counter


def find_common_chars(str1: str, str2: str) -> dict:
    data = {'found': False, 'info': ''}
    d1 = Counter(str1)
    d2 = Counter(str2)
    common_dict = d1 & d2

    if len(common_dict) == 0:
        data['info'] = 'No common characters'
        return data

    data['found'] = True
    # sorted list of the common elements, with multiplicity
    common_chars = sorted(common_dict.elements())
    data['data'] = ''.join(common_chars)
    return data


if __name__ == "__main__":
    str1 = 'Python'
    str2 = 'PHP'
    data_info = find_common_chars(str1=str1, str2=str2)
    if data_info['found']:
        print(f"Two strings: '{str1}' and '{str2}': {data_info['data']}")
    else:
        print(f"Two strings: '{str1}' and '{str2}': {data_info['info']}")

    str1 = 'Java'
    str2 = 'PHP'
    data_info = find_common_chars(str1=str1, str2=str2)
    if data_info['found']:
        print(f"Two strings: '{str1}' and '{str2}': {data_info['data']}")
    else:
        print(f"Two strings: '{str1}' and '{str2}': {data_info['info']}")
the-stack_0_4297
""" Copyright 2021 Inmanta Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Contact: [email protected] """ from setuptools import setup, find_packages from os import path requires = [ 'inmanta-core', 'intervaltree' ] # read the contents of your README file this_directory = path.abspath(path.dirname(__file__)) with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name="inmantals", package_dir={"": "src"}, packages=find_packages("src"), install_requires=requires, version="1.2.0", description="Inmanta Language Server", long_description=long_description, long_description_content_type='text/markdown', author="Inmanta", author_email="[email protected]", license="Apache Software License", url="https://github.com/inmanta/vscode-inmanta", keywords=["ide", "language-server", "vscode", "inmanta"], classifiers=["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Telecommunications Industry", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Topic :: System :: Systems Administration", "Topic :: Utilities"], entry_points={ 'console_scripts': [ 'inmanta-language-server-tcp = inmantals.tcpserver:main', ], }, )
the-stack_0_4299
#!/usr/bin/python3 # -*- coding: utf-8 -*- import logging import sys import time import _ssl from sleekxmpp import ClientXMPP import config import events from common import VERSION class IdleBot(ClientXMPP): def __init__(self, jid, password, rooms, nick): ClientXMPP.__init__(self, jid, password) self.ssl_version = _ssl.PROTOCOL_TLSv1_2 self.rooms = rooms self.nick = nick self.add_event_handler('session_start', self.session_start) self.add_event_handler('groupchat_message', self.muc_message) self.add_event_handler('disconnected', self.disconnected) self.add_event_handler('presence_error', self.disconnected) self.add_event_handler('session_end', self.disconnected) self.priority = 0 self.status = None self.show = None self.logger = logging.getLogger(__name__) for room in self.rooms: self.add_event_handler('muc::%s::got_offline' % room, self.muc_offline) def talked_to_me(self, text): return text[:len(self.nick)].lower() == self.nick.lower() def disconnected(self, _): self.logger.warn("Disconnected! dbg: {}".format(str(_))) self.disconnect(wait=True) def session_start(self, _): self.get_roster() self.send_presence(ppriority=self.priority, pstatus=self.status, pshow=self.show) for room in self.rooms: self.logger.info('%s: joining' % room) ret = self.plugin['xep_0045'].joinMUC( room, self.nick, wait=True ) self.logger.info('%s: joined with code %s' % (room, ret)) def muc_message(self, msg_obj): """ Handle muc messages, return if irrelevant content or die by hangup. :param msg_obj: :return: """ # don't talk to yourself if msg_obj['mucnick'] == self.nick or 'groupchat' != msg_obj['type']: return False elif self.talked_to_me(msg_obj['body']) and 'hangup' in msg_obj['body']: self.logger.warn("got 'hangup' from '%s': '%s'" % ( msg_obj['mucnick'], msg_obj['body'] )) self.hangup() return False # elif msg_obj['mucnick'] in config.runtimeconf_get("other_bots", ()): # self.logger.debug("not talking to the other bot named {}".format( msg_obj['mucnick'])) # return False else: return True def muc_offline(self, msg_obj): if 'muc' in msg_obj.values: room = msg_obj.values['muc']['room'] user = msg_obj.values['muc']['nick'] if user == config.conf_get('bot_nickname'): self.logger.warn("Left my room, rejoin") self.plugin['xep_0045'].joinMUC( room, self.nick, wait=True ) def hangup(self): """ disconnect and exit """ self.disconnect(wait=True) def start(botclass, active=False): logging.basicConfig( level=config.conf_get('loglevel'), format=sys.argv[0] + ' %(asctime)s %(levelname).1s %(funcName)-15s %(message)s' ) logger = logging.getLogger(__name__) logger.info(VERSION) jid = config.conf_get('jid') if '/' not in jid: jid = '%s/%s' % (jid, botclass.__name__) bot = botclass( jid=jid, password=config.conf_get('password'), rooms=config.conf_get('rooms'), nick=config.conf_get('bot_nickname') ) bot.connect() bot.register_plugin('xep_0045') bot.register_plugin('xep_0199', {'keepalive': True}) bot.register_plugin('xep_0308') bot.process() config.runtimeconf_set('start_time', -time.time()) if active: pass events.event_loop.start() if '__main__' == __name__: start(IdleBot)
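The bot pulls its settings from a `config` module that is not shown here. A stand-in covering the keys the code actually reads might look like the sketch below; all values are placeholders, and the real project presumably loads them from a configuration file.

# config.py -- minimal stand-in for the module imported above (sketch only).
_CONF = {
    "jid": "[email protected]",
    "password": "change-me",
    "rooms": ["[email protected]"],
    "bot_nickname": "idlebot",
    "loglevel": "INFO",          # accepted by logging.basicConfig(level=...)
}
_RUNTIME = {}

def conf_get(key):
    return _CONF[key]

def runtimeconf_get(key, default=None):
    return _RUNTIME.get(key, default)

def runtimeconf_set(key, value):
    _RUNTIME[key] = value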
the-stack_0_4300
#!/usr/bin/python3 -i # # Copyright (c) 2015-2021 The Khronos Group Inc. # Copyright (c) 2015-2021 Valve Corporation # Copyright (c) 2015-2021 LunarG, Inc. # Copyright (c) 2015-2021 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Author: Dustin Graves <[email protected]> # Author: Mark Lobodzinski <[email protected]> # Author: Dave Houlton <[email protected]> import os,re,sys,string,json import xml.etree.ElementTree as etree from generator import * from collections import namedtuple from common_codegen import * # This is a workaround to use a Python 2.7 and 3.x compatible syntax. from io import open # ParameterValidationGeneratorOptions - subclass of GeneratorOptions. # # Adds options used by ParameterValidationOutputGenerator object during Parameter validation layer generation. # # Additional members # prefixText - list of strings to prefix generated header with # (usually a copyright statement + calling convention macros). # protectFile - True if multiple inclusion protection should be # generated (based on the filename) around the entire header. # protectFeature - True if #ifndef..#endif protection should be # generated around a feature interface in the header file. # genFuncPointers - True if function pointer typedefs should be # generated # protectProto - If conditional protection should be generated # around prototype declarations, set to either '#ifdef' # to require opt-in (#ifdef protectProtoStr) or '#ifndef' # to require opt-out (#ifndef protectProtoStr). Otherwise # set to None. # protectProtoStr - #ifdef/#ifndef symbol to use around prototype # declarations, if protectProto is set # apicall - string to use for the function declaration prefix, # such as APICALL on Windows. # apientry - string to use for the calling convention macro, # in typedefs, such as APIENTRY. # apientryp - string to use for the calling convention macro # in function pointer typedefs, such as APIENTRYP. 
# indentFuncProto - True if prototype declarations should put each # parameter on a separate line # indentFuncPointer - True if typedefed function pointers should put each # parameter on a separate line # alignFuncParam - if nonzero and parameters are being put on a # separate line, align parameter names at the specified column class ParameterValidationGeneratorOptions(GeneratorOptions): def __init__(self, conventions = None, filename = None, directory = '.', genpath = None, apiname = None, profile = None, versions = '.*', emitversions = '.*', defaultExtensions = None, addExtensions = None, removeExtensions = None, emitExtensions = None, emitSpirv = None, sortProcedure = regSortFeatures, prefixText = "", apicall = '', apientry = '', apientryp = '', indentFuncProto = True, indentFuncPointer = False, alignFuncParam = 0, expandEnumerants = True, valid_usage_path = ''): GeneratorOptions.__init__(self, conventions = conventions, filename = filename, directory = directory, genpath = genpath, apiname = apiname, profile = profile, versions = versions, emitversions = emitversions, defaultExtensions = defaultExtensions, addExtensions = addExtensions, removeExtensions = removeExtensions, emitExtensions = emitExtensions, emitSpirv = emitSpirv, sortProcedure = sortProcedure) self.prefixText = prefixText self.apicall = apicall self.apientry = apientry self.apientryp = apientryp self.indentFuncProto = indentFuncProto self.indentFuncPointer = indentFuncPointer self.alignFuncParam = alignFuncParam self.expandEnumerants = expandEnumerants self.valid_usage_path = valid_usage_path # ParameterValidationOutputGenerator - subclass of OutputGenerator. # Generates param checker layer code. # # ---- methods ---- # ParamCheckerOutputGenerator(errFile, warnFile, diagFile) - args as for # OutputGenerator. Defines additional internal state. # ---- methods overriding base class ---- # beginFile(genOpts) # endFile() # beginFeature(interface, emit) # endFeature() # genType(typeinfo,name) # genStruct(typeinfo,name) # genGroup(groupinfo,name) # genEnum(enuminfo, name) # genCmd(cmdinfo) class ParameterValidationOutputGenerator(OutputGenerator): """Generate Parameter Validation code based on XML element attributes""" # This is an ordered list of sections in the header file. ALL_SECTIONS = ['command'] def __init__(self, errFile = sys.stderr, warnFile = sys.stderr, diagFile = sys.stdout): OutputGenerator.__init__(self, errFile, warnFile, diagFile) self.INDENT_SPACES = 4 self.declarations = [] inline_custom_source_preamble = """ """ # These functions have additional, custom-written checks in the utils cpp file. CodeGen will automatically add a call # to those functions of the form 'bool manual_PreCallValidateAPIName', where the 'vk' is dropped. # see 'manual_PreCallValidateCreateGraphicsPipelines' as an example. 
self.functions_with_manual_checks = [ 'vkCreateInstance', 'vkCreateDevice', 'vkCreateQueryPool', 'vkCreateRenderPass', 'vkCreateRenderPass2', 'vkCreateRenderPass2KHR', 'vkCreateBuffer', 'vkCreateImage', 'vkCreatePipelineLayout', 'vkCreateGraphicsPipelines', 'vkCreateComputePipelines', 'vkCreateRayTracingPipelinesNV', 'vkCreateRayTracingPipelinesKHR', 'vkCreateSampler', 'vkCreateDescriptorSetLayout', 'vkFreeDescriptorSets', 'vkUpdateDescriptorSets', 'vkBeginCommandBuffer', 'vkCmdSetViewport', 'vkCmdSetScissor', 'vkCmdSetLineWidth', 'vkCmdDrawIndirect', 'vkCmdDrawIndexedIndirect', 'vkCmdDrawMultiEXT', 'vkCmdDrawMultiIndexedEXT', 'vkCmdClearAttachments', 'vkCmdBindIndexBuffer', 'vkCmdCopyBuffer', 'vkCmdUpdateBuffer', 'vkCmdFillBuffer', 'vkCreateSwapchainKHR', 'vkCreateSharedSwapchainsKHR', 'vkQueuePresentKHR', 'vkCreateDescriptorPool', 'vkCmdDispatch', 'vkCmdDispatchIndirect', 'vkCmdDispatchBaseKHR', 'vkCmdPushDescriptorSetKHR', 'vkCmdSetExclusiveScissorNV', 'vkCmdSetViewportShadingRatePaletteNV', 'vkCmdSetCoarseSampleOrderNV', 'vkCmdDrawMeshTasksNV', 'vkCmdDrawMeshTasksIndirectNV', 'vkCmdDrawMeshTasksIndirectCountNV', 'vkAllocateMemory', 'vkCreateAccelerationStructureNV', 'vkCreateAccelerationStructureKHR', 'vkGetAccelerationStructureHandleNV', 'vkGetPhysicalDeviceImageFormatProperties', 'vkGetPhysicalDeviceImageFormatProperties2', 'vkGetPhysicalDeviceImageFormatProperties2KHR', 'vkCmdBuildAccelerationStructureNV', 'vkCreateFramebuffer', 'vkCmdSetLineStippleEXT', 'vkSetDebugUtilsObjectNameEXT', 'vkSetDebugUtilsObjectTagEXT', 'vkCmdSetViewportWScalingNV', 'vkAcquireNextImageKHR', 'vkAcquireNextImage2KHR', 'vkCmdBindTransformFeedbackBuffersEXT', 'vkCmdBeginTransformFeedbackEXT', 'vkCmdEndTransformFeedbackEXT', 'vkCmdDrawIndirectByteCountEXT', 'vkCreateSamplerYcbcrConversion', 'vkCreateSamplerYcbcrConversionKHR', 'vkImportSemaphoreFdKHR', 'vkCmdBindVertexBuffers', 'vkCreateImageView', 'vkCopyAccelerationStructureToMemoryKHR', 'vkCmdCopyAccelerationStructureToMemoryKHR', 'vkCopyAccelerationStructureKHR', 'vkCmdCopyAccelerationStructureKHR', 'vkCopyMemoryToAccelerationStructureKHR', 'vkCmdCopyMemoryToAccelerationStructureKHR', 'vkCmdDrawIndirectCount', 'vkCmdDrawIndirectCountKHR', 'vkCmdDrawIndexedIndirectCount', 'vkCmdDrawIndexedIndirectCountKHR', 'vkCmdWriteAccelerationStructuresPropertiesKHR', 'vkWriteAccelerationStructuresPropertiesKHR', 'vkGetRayTracingCaptureReplayShaderGroupHandlesKHR', 'vkCmdTraceRaysKHR', 'vkCmdTraceRaysNV', 'vkCmdTraceRaysIndirectKHR', 'vkCmdBuildAccelerationStructureIndirectKHR', 'vkGetDeviceAccelerationStructureCompatibilityKHR', 'vkCmdSetViewportWithCountEXT', 'vkCmdSetScissorWithCountEXT', 'vkCmdBindVertexBuffers2EXT', 'vkCmdCopyBuffer2KHR', 'vkCmdBuildAccelerationStructuresKHR', 'vkCmdBuildAccelerationStructuresIndirectKHR', 'vkBuildAccelerationStructuresKHR', 'vkGetAccelerationStructureBuildSizesKHR', 'vkCmdWriteAccelerationStructuresPropertiesNV', 'vkCreateDisplayModeKHR', 'vkCreatePrivateDataSlotEXT', 'vkCmdSetVertexInputEXT', 'vkCmdPushConstants', 'vkMergePipelineCaches' ] # Commands to ignore self.blacklist = [ 'vkGetInstanceProcAddr', 'vkGetDeviceProcAddr', 'vkEnumerateInstanceVersion', 'vkEnumerateInstanceLayerProperties', 'vkEnumerateInstanceExtensionProperties', 'vkEnumerateDeviceLayerProperties', 'vkEnumerateDeviceExtensionProperties', 'vkGetDeviceGroupSurfacePresentModes2EXT' ] # Structure fields to ignore self.structMemberBlacklist = { 'VkWriteDescriptorSet' : ['dstSet'], 'VkAccelerationStructureGeometryKHR' :['geometry'] } # Validation conditions for 
some special case struct members that are conditionally validated self.structMemberValidationConditions = { 'VkPipelineColorBlendStateCreateInfo' : { 'logicOp' : '{}logicOpEnable == VK_TRUE' } } # Header version self.headerVersion = None # Internal state - accumulators for different inner block text self.validation = [] # Text comprising the main per-api parameter validation routines self.stypes = [] # Values from the VkStructureType enumeration self.structTypes = dict() # Map of Vulkan struct typename to required VkStructureType self.handleTypes = set() # Set of handle type names self.commands = [] # List of CommandData records for all Vulkan commands self.structMembers = [] # List of StructMemberData records for all Vulkan structs self.validatedStructs = dict() # Map of structs type names to generated validation code for that struct type self.enumRanges = set() # Set of enum names self.enum_values_definitions = dict() # [enum, string] containing enumerated type map definitions self.flag_values_definitions = dict() # [flag, string] containing flag type map definitions self.stype_version_dict = dict() # String containing structtype to version map data self.flags = set() # Map of flags typenames self.flagBits = dict() # Map of flag bits typename to list of values self.newFlags = set() # Map of flags typenames /defined in the current feature/ self.required_extensions = dict() # Dictionary of required extensions for each item in the current extension self.extension_type = '' # Type of active feature (extension), device or instance self.extension_names = dict() # Dictionary of extension names to extension name defines self.structextends_list = [] # List of extensions which extend another struct self.struct_feature_protect = dict() # Dictionary of structnames and FeatureExtraProtect strings self.valid_vuids = set() # Set of all valid VUIDs self.vuid_dict = dict() # VUID dictionary (from JSON) self.alias_dict = dict() # Dict of cmd|struct aliases self.header_file = False # Header file generation flag self.source_file = False # Source file generation flag self.instance_extension_list = '' # List of instance extension name defines self.device_extension_list = '' # List of device extension name defines self.returnedonly_structs = [] # List of structs with 'returnonly' attribute self.called_types = set() # Set of types called via function/struct - not in list == app never passes in to validate # Named tuples to store struct and command data self.CommandParam = namedtuple('CommandParam', ['type', 'name', 'ispointer', 'isstaticarray', 'isbool', 'israngedenum', 'isconst', 'isoptional', 'iscount', 'noautovalidity', 'len', 'extstructs', 'condition', 'cdecl']) self.CommandData = namedtuple('CommandData', ['name', 'params', 'cdecl', 'extension_type', 'result', 'promotion_info']) self.StructMemberData = namedtuple('StructMemberData', ['name', 'members']) # # Generate Copyright comment block for file def GenerateCopyright(self): copyright = '/* *** THIS FILE IS GENERATED - DO NOT EDIT! 
***\n' copyright += ' * See parameter_validation_generator.py for modifications\n' copyright += ' *\n' copyright += ' * Copyright (c) 2015-2021 The Khronos Group Inc.\n' copyright += ' * Copyright (c) 2015-2021 LunarG, Inc.\n' copyright += ' * Copyright (C) 2015-2021 Google Inc.\n' copyright += ' *\n' copyright += ' * Licensed under the Apache License, Version 2.0 (the "License");\n' copyright += ' * you may not use this file except in compliance with the License.\n' copyright += ' * Copyright (c) 2015-2017 Valve Corporation\n' copyright += ' * You may obtain a copy of the License at\n' copyright += ' *\n' copyright += ' * http://www.apache.org/licenses/LICENSE-2.0\n' copyright += ' *\n' copyright += ' * Unless required by applicable law or agreed to in writing, software\n' copyright += ' * distributed under the License is distributed on an "AS IS" BASIS,\n' copyright += ' * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n' copyright += ' * See the License for the specific language governing permissions and\n' copyright += ' * limitations under the License.\n' copyright += ' *\n' copyright += ' * Author: Mark Lobodzinski <[email protected]>\n' copyright += ' * Author: Dave Houlton <[email protected]>\n' copyright += ' */\n\n' return copyright # # Increases the global indent variable def incIndent(self, indent): inc = ' ' * self.INDENT_SPACES if indent: return indent + inc return inc # # Decreases the global indent variable def decIndent(self, indent): if indent and (len(indent) > self.INDENT_SPACES): return indent[:-self.INDENT_SPACES] return '' # # Walk the JSON-derived dict and find all "vuid" key values def ExtractVUIDs(self, d): if hasattr(d, 'items'): for k, v in d.items(): if k == "vuid": yield v elif isinstance(v, dict): for s in self.ExtractVUIDs(v): yield s elif isinstance (v, list): for l in v: for s in self.ExtractVUIDs(l): yield s # # Called at file creation time def beginFile(self, genOpts): OutputGenerator.beginFile(self, genOpts) self.header_file = (genOpts.filename == 'parameter_validation.h') self.source_file = (genOpts.filename == 'parameter_validation.cpp') if not self.header_file and not self.source_file: print("Error: Output Filenames have changed, update generator source.\n") sys.exit(1) if self.source_file or self.header_file: # Output Copyright text s = self.GenerateCopyright() write(s, file=self.outFile) if self.header_file: return stype_map = '' stype_version_dict = dict() # Create contents of Structs->API version unordered map root = self.registry.reg for node in root.findall('feature'): version_name = node.get('name') version_name = version_name.replace('VK_', 'VK_API_') for enum_item in node.iter('enum'): if enum_item.get('extends') == "VkStructureType": struct_type_id = enum_item.get('name') self.stype_version_dict[struct_type_id] = version_name for extensions in root.findall('extensions'): for extension in extensions.findall('extension'): for entry in extension.iterfind('require/enum[@extends="VkStructureType"]'): alias = entry.get('alias') if alias is not None and (entry.get('comment') is None or 'typo' not in entry.get('comment')): self.stype_version_dict[alias] = extension.get('name') # Build map of structure type names to VkStructureType enum values # Find all types of category "struct" for struct in self.registry.tree.iterfind('types/type[@category="struct"]'): # Check if struct has member named "sType" of type "VkStructureType" which has values defined stype = struct.find('member[name="sType"][type="VkStructureType"][@values]') 
if stype is not None: # Store VkStructureType value for this type self.structTypes[struct.get('name')] = stype.get('values') self.valid_usage_path = genOpts.valid_usage_path vu_json_filename = os.path.join(self.valid_usage_path + os.sep, 'validusage.json') if os.path.isfile(vu_json_filename): json_file = open(vu_json_filename, 'r', encoding='utf-8') self.vuid_dict = json.load(json_file) json_file.close() if len(self.vuid_dict) == 0: print("Error: Could not find, or error loading %s/validusage.json\n", vu_json_filename) sys.exit(1) # # Build a set of all vuid text strings found in validusage.json for json_vuid_string in self.ExtractVUIDs(self.vuid_dict): self.valid_vuids.add(json_vuid_string) # # Headers write('#include "chassis.h"', file=self.outFile) self.newline() write('#include "stateless_validation.h"', file=self.outFile) self.newline() # # Called at end-time for final content output def endFile(self): if self.source_file: # C-specific self.newline() # Don't need flag/enum lists if app can never call it to be validated # But need to save everything as not all information is known until endFile() for flag, string in self.flag_values_definitions.items(): if flag == 'VkGeometryInstanceFlagsKHR': # only called in VkAccelerationStructureInstanceKHR which is never called anywhere explicitly continue flagBits = flag.replace('Flags', 'FlagBits') if flag in self.called_types or flagBits in self.called_types: write(string, file=self.outFile) for enum, string in self.enum_values_definitions.items(): if enum in self.called_types: write(string, file=self.outFile) self.newline() self.newline() api_func = 'bool StatelessValidation::CheckPromotedApiAgainstVulkanVersion(VkInstance instance, const char *api_name, const uint32_t promoted_version) const {\n' api_func += ' bool skip = false;\n' api_func += ' if (api_version < promoted_version) {\n' api_func += ' skip = LogError(instance,\n' api_func += ' kVUID_PVError_ApiVersionViolation, "Attemped to call %s() with an effective API version of %s"\n' api_func += ' "but this API was not promoted until version %s.", api_name, StringAPIVersion(api_version).c_str(),\n' api_func += ' StringAPIVersion(promoted_version).c_str());\n' api_func += ' }\n' api_func += ' return skip;\n' api_func += '}\n\n' api_func += 'bool StatelessValidation::CheckPromotedApiAgainstVulkanVersion(VkPhysicalDevice pdev, const char *api_name, const uint32_t promoted_version) const {\n' api_func += ' bool skip = false;\n' api_func += ' const auto &target_pdev = physical_device_properties_map.find(pdev);\n' api_func += ' if (target_pdev != physical_device_properties_map.end()) {\n' api_func += ' auto effective_api_version = std::min(target_pdev->second->apiVersion, api_version);\n' api_func += ' if (effective_api_version < promoted_version) {\n' api_func += ' skip = LogError(instance,\n' api_func += ' kVUID_PVError_ApiVersionViolation, "Attemped to call %s() with an effective API version of %s, "\n' api_func += ' "which is the minimum of version requested in pApplicationInfo (%s) and supported by this physical device (%s), "\n' api_func += ' "but this API was not promoted until version %s.", api_name, StringAPIVersion(api_version).c_str(),\n' api_func += ' StringAPIVersion(target_pdev->second->apiVersion).c_str(), StringAPIVersion(effective_api_version).c_str(),\n' api_func += ' StringAPIVersion(promoted_version).c_str());\n' api_func += ' }\n' api_func += ' }\n' api_func += ' return skip;\n' api_func += '}\n' write(api_func, file=self.outFile) pnext_handler = 'bool 
StatelessValidation::ValidatePnextStructContents(const char *api_name, const ParameterName &parameter_name,\n' pnext_handler += ' const VkBaseOutStructure* header, const char *pnext_vuid) const {\n' pnext_handler += ' bool skip = false;\n' pnext_handler += ' switch(header->sType) {\n' # Do some processing here to extract data from validatedstructs... for item in self.structextends_list: postProcSpec = {} postProcSpec['ppp'] = '' if not item else '{postProcPrefix}' postProcSpec['pps'] = '' if not item else '{postProcSuffix}' postProcSpec['ppi'] = '' if not item else '{postProcInsert}' pnext_case = '\n' pnext_check = '' protect = '' # Guard struct cases with feature ifdefs, if necessary if item in self.struct_feature_protect.keys(): protect = self.struct_feature_protect[item] pnext_case += '#ifdef %s\n' % protect pnext_case += ' // Validation code for %s structure members\n' % item pnext_case += ' case %s: { // Covers VUID-%s-sType-sType\n' % (self.structTypes[item], item) # pNext version/extension-enabled checks ver_info = '' struct_type = self.structTypes[item] if struct_type in self.stype_version_dict.keys(): ver_info = self.stype_version_dict[struct_type] else: struct_type[:-4] if struct_type[:-4] in self.stype_version_dict.values(): ver_info = self.stype_version_dict[struct_type[:-4]] else: ver_info = None if ver_info is not None: if 'VK_API_VERSION_' in ver_info: api_version = ver_info; pnext_check += ' if (api_version < %s) {\n' % ver_info pnext_check += ' skip |= LogError(\n' pnext_check += ' instance, pnext_vuid,\n' pnext_check += ' "%%s: Includes a pNext pointer (%%s) to a VkStructureType (%s) which was added in %s but the "\n' % (struct_type, ver_info) pnext_check += ' "current effective API version is %s.",\n' pnext_check += ' api_name, parameter_name.get_name().c_str(), StringAPIVersion(api_version).c_str());\n' pnext_check += ' }\n' else: # Dependent on enabled extension ext_name = ver_info ext_name_define = self.extension_names[ver_info] table_type = '' if ext_name_define in self.instance_extension_list: table_type = 'instance' elif ext_name_define in self.device_extension_list: table_type = 'device' else: print("Error in parameter_validation_generator.py CodeGen.") norm_ext_name = ext_name_define[:-15].lower() if table_type == 'device': pnext_check += ' if ((!SupportedByPdev(physical_device, %s)) && !%s_extensions.%s) {\n' % (ext_name_define, table_type, norm_ext_name.lower()) else: pnext_check += ' if (!%s_extensions.%s) {\n' % (table_type, norm_ext_name.lower()) pnext_check += ' skip |= LogError(\n' pnext_check += ' instance, pnext_vuid,\n' pnext_check += ' "%%s: Includes a pNext pointer (%%s) to a VkStructureType (%s), but its parent extension "\n' % struct_type pnext_check += ' "%s has not been enabled.",\n' % ext_name pnext_check += ' api_name, parameter_name.get_name().c_str());\n' pnext_check += ' }\n' pnext_check += '\n' expr = self.expandStructCode(item, item, 'structure->', '', ' ', [], postProcSpec) struct_validation_source = self.ScrubStructCode(expr) if struct_validation_source != '': pnext_case += ' %s *structure = (%s *) header;\n' % (item, item) pnext_case += '%s%s' % (pnext_check, struct_validation_source) pnext_case += ' } break;\n' if protect: pnext_case += '#endif // %s\n' % protect # Skip functions containing no validation if struct_validation_source or pnext_check != '': pnext_handler += pnext_case; else: pnext_handler += '\n // No Validation code for %s structure members -- Covers VUID-%s-sType-sType\n' % (item, item) pnext_handler += ' default:\n' 
pnext_handler += ' skip = false;\n' pnext_handler += ' }\n' pnext_handler += ' return skip;\n' pnext_handler += '}\n' write(pnext_handler, file=self.outFile) self.newline() ext_template = 'bool StatelessValidation::OutputExtensionError(const std::string &api_name, const std::string &extension_name) const {\n' ext_template += ' return LogError(instance,\n' ext_template += ' kVUID_PVError_ExtensionNotEnabled, "Attemped to call %s() but its required extension %s has not been enabled\\n",\n' ext_template += ' api_name.c_str(), extension_name.c_str());\n' ext_template += '}\n' write(ext_template, file=self.outFile) self.newline() commands_text = '\n'.join(self.validation) write(commands_text, file=self.outFile) self.newline() if self.header_file: # Output declarations and record intercepted procedures write('\n'.join(self.declarations), file=self.outFile) # Finish processing in superclass OutputGenerator.endFile(self) # # Processing at beginning of each feature or extension def beginFeature(self, interface, emit): # Start processing in superclass OutputGenerator.beginFeature(self, interface, emit) # C-specific # Accumulate includes, defines, types, enums, function pointer typedefs, end function prototypes separately for this # feature. They're only printed in endFeature(). self.headerVersion = None self.stypes = [] self.commands = [] self.structMembers = [] self.newFlags = set() self.featureExtraProtect = GetFeatureProtect(interface) # Get base list of extension dependencies for all items in this extension base_required_extensions = [] if "VK_VERSION_1" not in self.featureName: nameElem = interface[0][1] name = nameElem.get('name') # Save Name Define to get correct enable name later self.extension_names[self.featureName] = name # This extension is the first dependency for this command base_required_extensions.append(self.featureName) # Add any defined extension dependencies to the base dependency list for this extension requires = interface.get('requires') if requires is not None: base_required_extensions.extend(requires.split(',')) # Build dictionary of extension dependencies for each item in this extension self.required_extensions = dict() for require_element in interface.findall('require'): # Copy base extension dependency list required_extensions = list(base_required_extensions) # Add any additional extension dependencies specified in this require block additional_extensions = require_element.get('extension') if additional_extensions: required_extensions.extend(additional_extensions.split(',')) # Save full extension list for all named items for element in require_element.findall('*[@name]'): self.required_extensions[element.get('name')] = required_extensions # And note if this is an Instance or Device extension self.extension_type = interface.get('type') if interface.tag == 'extension': name_elem = interface[0][1] name_definition = name_elem.get('name') if 'EXTENSION_NAME' not in name_definition: print("Error in vk.xml file -- extension name is not available") if interface.get('type') == 'instance': self.instance_extension_list += '%s, ' % name_definition else: self.device_extension_list += '%s, ' % name_definition # # Called at the end of each extension (feature) def endFeature(self): if self.header_file: return # C-specific # Actually write the interface to the output file. if (self.emit): # If type declarations are needed by other features based on this one, it may be necessary to suppress the ExtraProtect, # or move it below the 'for section...' loop. 
ifdef = '' if (self.featureExtraProtect is not None): ifdef = '#ifdef %s\n' % self.featureExtraProtect self.validation.append(ifdef) # Generate the struct member checking code from the captured data self.processStructMemberData() # Generate the command parameter checking code from the captured data self.processCmdData() # Write the declaration for the HeaderVersion if self.headerVersion: write('const uint32_t GeneratedVulkanHeaderVersion = {};'.format(self.headerVersion), file=self.outFile) # Write the declarations for the VkFlags values combining all flag bits for flag in sorted(self.newFlags): flagBits = flag.replace('Flags', 'FlagBits') if flagBits in self.flagBits: bits = self.flagBits[flagBits] decl = 'const {} All{} = {}'.format(flag, flagBits, bits[0]) for bit in bits[1:]: decl += '|' + bit decl += ';' self.flag_values_definitions[flag] = decl endif = '\n' if (self.featureExtraProtect is not None): endif = '#endif // %s\n' % self.featureExtraProtect self.validation.append(endif) # Finish processing in superclass OutputGenerator.endFeature(self) # # Type generation def genType(self, typeinfo, name, alias): # record the name/alias pair if alias is not None: self.alias_dict[name]=alias OutputGenerator.genType(self, typeinfo, name, alias) typeElem = typeinfo.elem # If the type is a struct type, traverse the embedded <member> tags generating a structure. Otherwise, emit the tag text. category = typeElem.get('category') if (category == 'struct' or category == 'union'): self.genStruct(typeinfo, name, alias) elif (category == 'handle'): self.handleTypes.add(name) elif (category == 'bitmask'): self.flags.add(name) self.newFlags.add(name) elif (category == 'define'): if name == 'VK_HEADER_VERSION': nameElem = typeElem.find('name') self.headerVersion = noneStr(nameElem.tail).strip() # # Struct parameter check generation. # This is a special case of the <type> tag where the contents are interpreted as a set of <member> tags instead of freeform C # type declarations. The <member> tags are just like <param> tags - they are a declaration of a struct or union member. # Only simple member declarations are supported (no nested structs etc.) def genStruct(self, typeinfo, typeName, alias): if not self.source_file: return # alias has already been recorded in genType, above OutputGenerator.genStruct(self, typeinfo, typeName, alias) conditions = self.structMemberValidationConditions[typeName] if typeName in self.structMemberValidationConditions else None members = typeinfo.elem.findall('.//member') if self.featureExtraProtect is not None: self.struct_feature_protect[typeName] = self.featureExtraProtect # # Iterate over members once to get length parameters for arrays lens = set() for member in members: len = self.getLen(member) if len: lens.add(len) # # Generate member info membersInfo = [] returned_only = typeinfo.elem.attrib.get('returnedonly') is not None for member in members: # Get the member's type and name info = self.getTypeNameTuple(member) type = info[0] name = info[1] stypeValue = '' cdecl = self.makeCParamDecl(member, 0) ispointer = self.paramIsPointer(member) isconst = True if 'const' in cdecl else False # Store pointer/array/string info -- Check for parameter name in lens set iscount = False if name in lens: iscount = True # The pNext members are not tagged as optional, but are treated as optional for parameter NULL checks. Static array # members are also treated as optional to skip NULL pointer validation, as they won't be NULL. 
isstaticarray = self.paramIsStaticArray(member) isoptional = False if self.paramIsOptional(member) or (name == 'pNext') or (isstaticarray): isoptional = True # Determine if value should be ignored by code generation. noautovalidity = False if (member.attrib.get('noautovalidity') is not None) or ((typeName in self.structMemberBlacklist) and (name in self.structMemberBlacklist[typeName])): noautovalidity = True # Some types are marked as noautovalidity, but stateless_validation.h will still want them for manual validation noautovalidity_type_exceptions = [ "VkQueryPipelineStatisticFlags", "VkBorderColor" ] # Store all types that are from incoming calls if auto validity # non-const pointers don't have auto gen code as used for return values if (noautovalidity == False) or (type in noautovalidity_type_exceptions): if not returned_only and (not ispointer or isconst): self.called_types.add(type) structextends = False membersInfo.append(self.CommandParam(type=type, name=name, ispointer=ispointer, isstaticarray=isstaticarray, isbool=True if type == 'VkBool32' else False, israngedenum=True if type in self.enumRanges else False, isconst=isconst, isoptional=isoptional, iscount=iscount, noautovalidity=noautovalidity, len=self.getLen(member), extstructs=self.registry.validextensionstructs[typeName] if name == 'pNext' else None, condition=conditions[name] if conditions and name in conditions else None, cdecl=cdecl)) # If this struct extends another, keep its name in list for further processing if typeinfo.elem.attrib.get('structextends') is not None: self.structextends_list.append(typeName) # Returnedonly structs should have most of their members ignored -- on entry, we only care about validating the sType and # pNext members. Everything else will be overwritten by the callee. if returned_only: self.returnedonly_structs.append(typeName) membersInfo = [m for m in membersInfo if m.name in ('sType', 'pNext')] self.structMembers.append(self.StructMemberData(name=typeName, members=membersInfo)) # # Capture group (e.g. C "enum" type) info to be used for param check code generation. # These are concatenated together with other types. 
def genGroup(self, groupinfo, groupName, alias): if not self.source_file: return # record the name/alias pair if alias is not None: self.alias_dict[groupName]=alias OutputGenerator.genGroup(self, groupinfo, groupName, alias) groupElem = groupinfo.elem # Store the sType values if groupName == 'VkStructureType': for elem in groupElem.findall('enum'): self.stypes.append(elem.get('name')) elif 'FlagBits' in groupName: bits = [] for elem in groupElem.findall('enum'): if elem.get('supported') != 'disabled': bits.append(elem.get('name')) if bits: self.flagBits[groupName] = bits else: # Determine if begin/end ranges are needed (we don't do this for VkStructureType, which has a more finely grained check) expandName = re.sub(r'([0-9a-z_])([A-Z0-9][^A-Z0-9]?)',r'\1_\2',groupName).upper() expandPrefix = expandName expandSuffix = '' expandSuffixMatch = re.search(r'[A-Z][A-Z]+$',groupName) if expandSuffixMatch: expandSuffix = '_' + expandSuffixMatch.group() # Strip off the suffix from the prefix expandPrefix = expandName.rsplit(expandSuffix, 1)[0] isEnum = ('FLAG_BITS' not in expandPrefix) if isEnum: self.enumRanges.add(groupName) # Create definition for a list containing valid enum values for this enumerated type if self.featureExtraProtect is not None: enum_entry = '#ifdef %s\n' % self.featureExtraProtect else: enum_entry = '' enum_entry += 'const std::vector<%s> All%sEnums = {' % (groupName, groupName) for enum in groupElem: name = enum.get('name') if name is not None and enum.get('supported') != 'disabled': enum_entry += '%s, ' % name enum_entry += '};' if self.featureExtraProtect is not None: enum_entry += '\n#endif // %s' % self.featureExtraProtect self.enum_values_definitions[groupName] = enum_entry # # Capture command parameter info to be used for param check code generation. 
def genCmd(self, cmdinfo, name, alias): # record the name/alias pair if alias is not None: self.alias_dict[name]=alias OutputGenerator.genCmd(self, cmdinfo, name, alias) decls = self.makeCDecls(cmdinfo.elem) typedef = decls[1] typedef = typedef.split(')',1)[1] if self.header_file: if name not in self.blacklist: if (self.featureExtraProtect is not None): self.declarations += [ '#ifdef %s' % self.featureExtraProtect ] # Strip off 'vk' from API name decl = '%s%s' % ('bool PreCallValidate', decls[0].split("VKAPI_CALL vk")[1]) decl_terminator = ' const override;' if 'ValidationCache' in name: decl_terminator = ' const;' decl = str(decl).replace(';', decl_terminator) self.declarations += [ decl ] if (self.featureExtraProtect is not None): self.declarations += [ '#endif' ] if self.source_file: if name not in self.blacklist: params = cmdinfo.elem.findall('param') # Get list of array lengths lens = set() for param in params: len = self.getLen(param) if len: lens.add(len) # Get param info paramsInfo = [] for param in params: paramInfo = self.getTypeNameTuple(param) cdecl = self.makeCParamDecl(param, 0) ispointer = self.paramIsPointer(param) isconst = True if 'const' in cdecl else False # non-const pointers don't have auto gen code as used for return values if not ispointer or isconst: self.called_types.add(paramInfo[0]) # Check for parameter name in lens set iscount = False if paramInfo[1] in lens: iscount = True paramsInfo.append(self.CommandParam(type=paramInfo[0], name=paramInfo[1], ispointer=ispointer, isstaticarray=self.paramIsStaticArray(param), isbool=True if paramInfo[0] == 'VkBool32' else False, israngedenum=True if paramInfo[0] in self.enumRanges else False, isconst=isconst, isoptional=self.paramIsOptional(param), iscount=iscount, noautovalidity=True if param.attrib.get('noautovalidity') is not None else False, len=self.getLen(param), extstructs=None, condition=None, cdecl=cdecl)) # Save return value information, if any result_type = '' promotion_info = '' resultinfo = cmdinfo.elem.find('proto/type') if (resultinfo is not None and resultinfo.text != 'void'): result_type = resultinfo.text if "VK_VERSION" in self.featureName and "VK_VERSION_1_0" != self.featureName: if ('VkInstance' == paramsInfo[0].type or 'VkPhysicalDevice' == paramsInfo[0].type): promotion_info = [paramsInfo[0].name, self.featureName] self.commands.append(self.CommandData(name=name, params=paramsInfo, cdecl=self.makeCDecls(cmdinfo.elem)[0], extension_type=self.extension_type, result=result_type, promotion_info=promotion_info)) # # Check if the parameter passed in is a pointer def paramIsPointer(self, param): ispointer = 0 paramtype = param.find('type') if (paramtype.tail is not None) and ('*' in paramtype.tail): ispointer = paramtype.tail.count('*') elif paramtype.text[:4] == 'PFN_': # Treat function pointer typedefs as a pointer to a single value ispointer = 1 return ispointer # # Check if the parameter passed in is a static array def paramIsStaticArray(self, param): isstaticarray = 0 paramname = param.find('name') if (paramname.tail is not None) and ('[' in paramname.tail): isstaticarray = paramname.tail.count('[') return isstaticarray # # Check if the parameter passed in is optional # Returns a list of Boolean values for comma separated len attributes (len='false,true') def paramIsOptional(self, param): # See if the handle is optional isoptional = False # Simple, if it's optional, return true optString = param.attrib.get('optional') if optString: if optString == 'true': isoptional = True elif ',' in optString: opts = 
[] for opt in optString.split(','): val = opt.strip() if val == 'true': opts.append(True) elif val == 'false': opts.append(False) else: print('Unrecognized len attribute value',val) isoptional = opts return isoptional # # Check if the handle passed in is optional # Uses the same logic as ValidityOutputGenerator.isHandleOptional def isHandleOptional(self, param, lenParam): # Simple, if it's optional, return true if param.isoptional: return True # If no validity is being generated, it usually means that validity is complex and not absolute, so let's say yes. if param.noautovalidity: return True # If the parameter is an array and we haven't already returned, find out if any of the len parameters are optional if lenParam and lenParam.isoptional: return True return False # # Retrieve the value of the len tag def getLen(self, param): result = None # Default to altlen when available to avoid LaTeX markup if 'altlen' in param.attrib: len = param.attrib.get('altlen') else: len = param.attrib.get('len') if len and len != 'null-terminated': # Only first level is supported for multidimensional arrays. Conveniently, this also strips the trailing # 'null-terminated' from arrays of strings len = len.split(',')[0] # Convert scope notation to pointer access result = str(len).replace('::', '->') elif self.paramIsStaticArray(param): # For static arrays get length from inside [] array_match = re.search(r'\[(\d+)\]', param.find('name').tail) if array_match: result = array_match.group(1) return result # # Retrieve the type and name for a parameter def getTypeNameTuple(self, param): type = '' name = '' for elem in param: if elem.tag == 'type': type = noneStr(elem.text) elif elem.tag == 'name': name = noneStr(elem.text) return (type, name) # # Find a named parameter in a parameter list def getParamByName(self, params, name): for param in params: if param.name == name: return param return None # # Get the length paramater record for the specified length expression def getLenParam(self, params, length): # First check if any element of params matches length exactly lenParam = self.getParamByName(params, length) if not lenParam: # Otherwise, look for any elements of params that appear within length len_candidates = [p for p in params if re.search(r'\b{}\b'.format(p.name), length)] # 0 or 1 matches are expected, >1 would require a special case and/or explicit validation if len(len_candidates) == 0: lenParam = None elif len(len_candidates) == 1: lenParam = len_candidates[0] else: raise Exception('Cannot determine length parameter for len attribute value {}'.format(length)) return lenParam # # Convert a vulkan.h command declaration into a parameter_validation.h definition def getCmdDef(self, cmd): # Strip the trailing ';' and split into individual lines lines = cmd.cdecl[:-1].split('\n') cmd_hdr = '\n'.join(lines) return cmd_hdr # # Generate the code to check for a NULL dereference before calling the # validation function def genCheckedLengthCall(self, name, exprs): count = name.count('->') if count: checkedExpr = [] localIndent = '' elements = name.split('->') # Open the if expression blocks for i in range(0, count): checkedExpr.append(localIndent + 'if ({} != NULL) {{\n'.format('->'.join(elements[0:i+1]))) localIndent = self.incIndent(localIndent) # Add the validation expression for expr in exprs: checkedExpr.append(localIndent + expr) # Close the if blocks for i in range(0, count): localIndent = self.decIndent(localIndent) checkedExpr.append(localIndent + '}\n') return [checkedExpr] # No if statements were required 
return exprs # # Generate code to check for a specific condition before executing validation code def genConditionalCall(self, prefix, condition, exprs): checkedExpr = [] localIndent = '' formattedCondition = condition.format(prefix) checkedExpr.append(localIndent + 'if ({})\n'.format(formattedCondition)) checkedExpr.append(localIndent + '{\n') localIndent = self.incIndent(localIndent) for expr in exprs: checkedExpr.append(localIndent + expr) localIndent = self.decIndent(localIndent) checkedExpr.append(localIndent + '}\n') return [checkedExpr] # # Get VUID identifier from implicit VUID tag def GetVuid(self, name, suffix): vuid_string = 'VUID-%s-%s' % (name, suffix) vuid = "kVUIDUndefined" if '->' in vuid_string: return vuid if vuid_string in self.valid_vuids: vuid = "\"%s\"" % vuid_string else: if name in self.alias_dict: alias_string = 'VUID-%s-%s' % (self.alias_dict[name], suffix) if alias_string in self.valid_vuids: vuid = "\"%s\"" % alias_string return vuid # # Generate the sType check string def makeStructTypeCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, lenPtrRequired, funcPrintName, lenPrintName, valuePrintName, postProcSpec, struct_type_name): checkExpr = [] stype = self.structTypes[value.type] vuid_name = struct_type_name if struct_type_name is not None else funcPrintName stype_vuid = self.GetVuid(value.type, "sType-sType") param_vuid = self.GetVuid(vuid_name, "%s-parameter" % value.name) if lenValue: count_required_vuid = self.GetVuid(vuid_name, "%s-arraylength" % value.len) # This is an array of struct pointers if value.ispointer == 2: checkExpr.append('skip |= validate_struct_pointer_type_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, "{sv}", {pf}{ln}, {pf}{vn}, {sv}, {}, {}, {}, {}, {});\n'.format( funcPrintName, lenValueRequired, valueRequired, stype_vuid, param_vuid, count_required_vuid, ln=lenValue.name, ldn=lenPrintName, dn=valuePrintName, vn=value.name, sv=stype, pf=prefix, **postProcSpec)) # This is an array with a pointer to a count value elif lenValue.ispointer: # When the length parameter is a pointer, there is an extra Boolean parameter in the function call to indicate if it is required checkExpr.append('skip |= validate_struct_type_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, "{sv}", {pf}{ln}, {pf}{vn}, {sv}, {}, {}, {}, {}, {}, {});\n'.format( funcPrintName, lenPtrRequired, lenValueRequired, valueRequired, stype_vuid, param_vuid, count_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, sv=stype, pf=prefix, **postProcSpec)) # This is an array with an integer count value else: checkExpr.append('skip |= validate_struct_type_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, "{sv}", {pf}{ln}, {pf}{vn}, {sv}, {}, {}, {}, {}, {});\n'.format( funcPrintName, lenValueRequired, valueRequired, stype_vuid, param_vuid, count_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, sv=stype, pf=prefix, **postProcSpec)) # This is an individual struct else: checkExpr.append('skip |= validate_struct_type("{}", {ppp}"{}"{pps}, "{sv}", {}{vn}, {sv}, {}, {}, {});\n'.format( funcPrintName, valuePrintName, prefix, valueRequired, param_vuid, stype_vuid, vn=value.name, sv=stype, vt=value.type, **postProcSpec)) return checkExpr # # Generate the handle check string def makeHandleCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, funcPrintName, lenPrintName, valuePrintName, postProcSpec): checkExpr = [] if lenValue: if lenValue.ispointer: # This is assumed to be an output array with a pointer 
to a count value raise('Unsupported parameter validation case: Output handle array elements are not NULL checked') else: count_required_vuid = self.GetVuid(funcPrintName, "%s-arraylength" % (value.len)) # This is an array with an integer count value checkExpr.append('skip |= validate_handle_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, {pf}{ln}, {pf}{vn}, {}, {}, {});\n'.format( funcPrintName, lenValueRequired, valueRequired, count_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix, **postProcSpec)) else: # This is assumed to be an output handle pointer raise('Unsupported parameter validation case: Output handles are not NULL checked') return checkExpr # # Generate check string for an array of VkFlags values def makeFlagsArrayCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, funcPrintName, lenPrintName, valuePrintName, postProcSpec): checkExpr = [] flagBitsName = value.type.replace('Flags', 'FlagBits') if not flagBitsName in self.flagBits: raise('Unsupported parameter validation case: array of reserved VkFlags') else: allFlags = 'All' + flagBitsName checkExpr.append('skip |= validate_flags_array("{}", {ppp}"{}"{pps}, {ppp}"{}"{pps}, "{}", {}, {pf}{}, {pf}{}, {}, {});\n'.format(funcPrintName, lenPrintName, valuePrintName, flagBitsName, allFlags, value.len, value.name, lenValueRequired, valueRequired, pf=prefix, **postProcSpec)) return checkExpr # # Generate pNext check string def makeStructNextCheck(self, prefix, value, funcPrintName, valuePrintName, postProcSpec, struct_type_name): checkExpr = [] # Generate an array of acceptable VkStructureType values for pNext extStructCount = 0 extStructVar = 'NULL' extStructNames = 'NULL' pNextVuid = self.GetVuid(struct_type_name, "pNext-pNext") sTypeVuid = self.GetVuid(struct_type_name, "sType-unique") if value.extstructs: extStructVar = 'allowed_structs_{}'.format(struct_type_name) extStructCount = 'ARRAY_SIZE({})'.format(extStructVar) extStructNames = '"' + ', '.join(value.extstructs) + '"' checkExpr.append('const VkStructureType {}[] = {{ {} }};\n'.format(extStructVar, ', '.join([self.structTypes[s] for s in value.extstructs]))) checkExpr.append('skip |= validate_struct_pnext("{}", {ppp}"{}"{pps}, {}, {}{}, {}, {}, GeneratedVulkanHeaderVersion, {}, {});\n'.format( funcPrintName, valuePrintName, extStructNames, prefix, value.name, extStructCount, extStructVar, pNextVuid, sTypeVuid, **postProcSpec)) return checkExpr # # Generate the pointer check string def makePointerCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, lenPtrRequired, funcPrintName, lenPrintName, valuePrintName, postProcSpec, struct_type_name): checkExpr = [] vuid_tag_name = struct_type_name if struct_type_name is not None else funcPrintName if lenValue: length_deref = '->' in value.len count_required_vuid = self.GetVuid(vuid_tag_name, "%s-arraylength" % (value.len)) array_required_vuid = self.GetVuid(vuid_tag_name, "%s-parameter" % (value.name)) # TODO: Remove workaround for missing optional tag in vk.xml if array_required_vuid == '"VUID-VkFramebufferCreateInfo-pAttachments-parameter"': return [] # This is an array with a pointer to a count value if lenValue.ispointer and not length_deref: # If count and array parameters are optional, there will be no validation if valueRequired == 'true' or lenPtrRequired == 'true' or lenValueRequired == 'true': # When the length parameter is a pointer, there is an extra Boolean parameter in the function call to indicate if it is required checkExpr.append('skip |= 
validate_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, {pf}{ln}, &{pf}{vn}, {}, {}, {}, {}, {});\n'.format( funcPrintName, lenPtrRequired, lenValueRequired, valueRequired, count_required_vuid, array_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix, **postProcSpec)) # This is an array with an integer count value else: # If count and array parameters are optional, there will be no validation if valueRequired == 'true' or lenValueRequired == 'true': if value.type != 'char': # A valid VU can't use '->' in the middle so the generated VUID from the spec uses '::' instead count_required_vuid = self.GetVuid(vuid_tag_name, "%s-arraylength" % (value.len.replace('->', '::'))) checkExpr.append('skip |= validate_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, {pf}{ln}, &{pf}{vn}, {}, {}, {}, {});\n'.format( funcPrintName, lenValueRequired, valueRequired, count_required_vuid, array_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix, **postProcSpec)) else: # Arrays of strings receive special processing checkExpr.append('skip |= validate_string_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, {pf}{ln}, {pf}{vn}, {}, {}, {}, {});\n'.format( funcPrintName, lenValueRequired, valueRequired, count_required_vuid, array_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix, **postProcSpec)) if checkExpr: if lenValue and length_deref: # Add checks to ensure the validation call does not dereference a NULL pointer to obtain the count checkExpr = self.genCheckedLengthCall(value.len, checkExpr) # This is an individual struct that is not allowed to be NULL elif not value.isoptional: # Function pointers need a reinterpret_cast to void* ptr_required_vuid = self.GetVuid(vuid_tag_name, "%s-parameter" % (value.name)) if value.type[:4] == 'PFN_': allocator_dict = {'pfnAllocation': '"VUID-VkAllocationCallbacks-pfnAllocation-00632"', 'pfnReallocation': '"VUID-VkAllocationCallbacks-pfnReallocation-00633"', 'pfnFree': '"VUID-VkAllocationCallbacks-pfnFree-00634"', } vuid = allocator_dict.get(value.name) if vuid is not None: ptr_required_vuid = vuid checkExpr.append('skip |= validate_required_pointer("{}", {ppp}"{}"{pps}, reinterpret_cast<const void*>({}{}), {});\n'.format(funcPrintName, valuePrintName, prefix, value.name, ptr_required_vuid, **postProcSpec)) else: checkExpr.append('skip |= validate_required_pointer("{}", {ppp}"{}"{pps}, {}{}, {});\n'.format(funcPrintName, valuePrintName, prefix, value.name, ptr_required_vuid, **postProcSpec)) else: # Special case for optional internal allocation function pointers. if (value.type, value.name) == ('PFN_vkInternalAllocationNotification', 'pfnInternalAllocation'): checkExpr.extend(self.internalAllocationCheck(funcPrintName, prefix, value.name, 'pfnInternalFree', postProcSpec)) elif (value.type, value.name) == ('PFN_vkInternalFreeNotification', 'pfnInternalFree'): checkExpr.extend(self.internalAllocationCheck(funcPrintName, prefix, value.name, 'pfnInternalAllocation', postProcSpec)) return checkExpr # # Generate internal allocation function pointer check. 
def internalAllocationCheck(self, funcPrintName, prefix, name, complementaryName, postProcSpec): checkExpr = [] vuid = '"VUID-VkAllocationCallbacks-pfnInternalAllocation-00635"' checkExpr.append('if ({}{} != NULL)'.format(prefix, name)) checkExpr.append('{') local_indent = self.incIndent('') # Function pointers need a reinterpret_cast to void* checkExpr.append(local_indent + 'skip |= validate_required_pointer("{}", {ppp}"{}{}"{pps}, reinterpret_cast<const void*>({}{}), {});\n'.format(funcPrintName, prefix, complementaryName, prefix, complementaryName, vuid, **postProcSpec)) checkExpr.append('}\n') return checkExpr # # Process struct member validation code, performing name substitution if required def processStructMemberCode(self, line, funcName, memberNamePrefix, memberDisplayNamePrefix, postProcSpec): # Build format specifier list kwargs = {} if '{postProcPrefix}' in line: # If we have a tuple that includes a format string and format parameters, need to use ParameterName class if type(memberDisplayNamePrefix) is tuple: kwargs['postProcPrefix'] = 'ParameterName(' else: kwargs['postProcPrefix'] = postProcSpec['ppp'] if '{postProcSuffix}' in line: # If we have a tuple that includes a format string and format parameters, need to use ParameterName class if type(memberDisplayNamePrefix) is tuple: kwargs['postProcSuffix'] = ', ParameterName::IndexVector{{ {}{} }})'.format(postProcSpec['ppi'], memberDisplayNamePrefix[1]) else: kwargs['postProcSuffix'] = postProcSpec['pps'] if '{postProcInsert}' in line: # If we have a tuple that includes a format string and format parameters, need to use ParameterName class if type(memberDisplayNamePrefix) is tuple: kwargs['postProcInsert'] = '{}{}, '.format(postProcSpec['ppi'], memberDisplayNamePrefix[1]) else: kwargs['postProcInsert'] = postProcSpec['ppi'] if '{funcName}' in line: kwargs['funcName'] = funcName if '{valuePrefix}' in line: kwargs['valuePrefix'] = memberNamePrefix if '{displayNamePrefix}' in line: # Check for a tuple that includes a format string and format parameters to be used with the ParameterName class if type(memberDisplayNamePrefix) is tuple: kwargs['displayNamePrefix'] = memberDisplayNamePrefix[0] else: kwargs['displayNamePrefix'] = memberDisplayNamePrefix if kwargs: # Need to escape the C++ curly braces if 'IndexVector' in line: line = line.replace('IndexVector{ ', 'IndexVector{{ ') line = line.replace(' }),', ' }}),') return line.format(**kwargs) return line # # Process struct member validation code, stripping metadata def ScrubStructCode(self, code): scrubbed_lines = '' for line in code: if 'validate_struct_pnext' in line: continue if 'allowed_structs' in line: continue if 'xml-driven validation' in line: continue line = line.replace('{postProcPrefix}', '') line = line.replace('{postProcSuffix}', '') line = line.replace('{postProcInsert}', '') line = line.replace('{funcName}', '') line = line.replace('{valuePrefix}', '') line = line.replace('{displayNamePrefix}', '') line = line.replace('{IndexVector}', '') line = line.replace('local_data->', '') scrubbed_lines += line return scrubbed_lines # # Process struct validation code for inclusion in function or parent struct validation code def expandStructCode(self, item_type, funcName, memberNamePrefix, memberDisplayNamePrefix, indent, output, postProcSpec): lines = self.validatedStructs[item_type] for line in lines: if output: output[-1] += '\n' if type(line) is list: for sub in line: output.append(self.processStructMemberCode(indent + sub, funcName, memberNamePrefix, 
memberDisplayNamePrefix, postProcSpec)) else: output.append(self.processStructMemberCode(indent + line, funcName, memberNamePrefix, memberDisplayNamePrefix, postProcSpec)) return output # # Process struct pointer/array validation code, performing name substitution if required def expandStructPointerCode(self, prefix, value, lenValue, funcName, valueDisplayName, postProcSpec): expr = [] expr.append('if ({}{} != NULL)\n'.format(prefix, value.name)) expr.append('{') indent = self.incIndent(None) if lenValue: # Need to process all elements in the array indexName = value.len.replace('Count', 'Index') expr[-1] += '\n' if lenValue.ispointer: # If the length value is a pointer, de-reference it for the count. expr.append(indent + 'for (uint32_t {iname} = 0; {iname} < *{}{}; ++{iname})\n'.format(prefix, value.len, iname=indexName)) else: expr.append(indent + 'for (uint32_t {iname} = 0; {iname} < {}{}; ++{iname})\n'.format(prefix, value.len, iname=indexName)) expr.append(indent + '{') indent = self.incIndent(indent) # Prefix for value name to display in error message if value.ispointer == 2: memberNamePrefix = '{}{}[{}]->'.format(prefix, value.name, indexName) memberDisplayNamePrefix = ('{}[%i]->'.format(valueDisplayName), indexName) else: memberNamePrefix = '{}{}[{}].'.format(prefix, value.name, indexName) memberDisplayNamePrefix = ('{}[%i].'.format(valueDisplayName), indexName) else: memberNamePrefix = '{}{}->'.format(prefix, value.name) memberDisplayNamePrefix = '{}->'.format(valueDisplayName) # Expand the struct validation lines expr = self.expandStructCode(value.type, funcName, memberNamePrefix, memberDisplayNamePrefix, indent, expr, postProcSpec) if lenValue: # Close if and for scopes indent = self.decIndent(indent) expr.append(indent + '}\n') expr.append('}\n') return expr # # Generate the parameter checking code def genFuncBody(self, funcName, values, valuePrefix, displayNamePrefix, structTypeName): lines = [] # Generated lines of code unused = [] # Unused variable names duplicateCountVuid = [] # prevent duplicate VUs being generated for value in values: usedLines = [] lenParam = None # # Prefix and suffix for post processing of parameter names for struct members. Arrays of structures need special processing to include the array index in the full parameter name. 
postProcSpec = {} postProcSpec['ppp'] = '' if not structTypeName else '{postProcPrefix}' postProcSpec['pps'] = '' if not structTypeName else '{postProcSuffix}' postProcSpec['ppi'] = '' if not structTypeName else '{postProcInsert}' # # Generate the full name of the value, which will be printed in the error message, by adding the variable prefix to the value name valueDisplayName = '{}{}'.format(displayNamePrefix, value.name) # # Check for NULL pointers, ignore the in-out count parameters that # will be validated with their associated array if (value.ispointer or value.isstaticarray) and not value.iscount: # Parameters for function argument generation req = 'true' # Parameter cannot be NULL cpReq = 'true' # Count pointer cannot be NULL cvReq = 'true' # Count value cannot be 0 lenDisplayName = None # Name of length parameter to print with validation messages; parameter name with prefix applied countRequiredVuid = None # If there is a count required VUID to check # Generate required/optional parameter strings for the pointer and count values if value.isoptional: req = 'false' if value.len: # The parameter is an array with an explicit count parameter lenParam = self.getLenParam(values, value.len) if lenParam: lenDisplayName = value.len.replace(lenParam.name, displayNamePrefix + lenParam.name) if lenParam.ispointer: # Count parameters that are pointers are inout if type(lenParam.isoptional) is list: if lenParam.isoptional[0]: cpReq = 'false' if lenParam.isoptional[1]: cvReq = 'false' else: if lenParam.isoptional: cpReq = 'false' else: if lenParam.isoptional: cvReq = 'false' elif value.noautovalidity: # Handle edge case where XML expresses a non-optional non-pointer value length with noautovalidity # ex: <param noautovalidity="true"len="commandBufferCount"> vuidNameTag = structTypeName if structTypeName is not None else funcName countRequiredVuid = self.GetVuid(vuidNameTag, "%s-arraylength" % (lenParam.name)) if countRequiredVuid in duplicateCountVuid: countRequiredVuid = None else: duplicateCountVuid.append(countRequiredVuid) else: # Do not generate length checks for constant sized arrays cpReq = 'false' cvReq = 'false' # # The parameter will not be processed when tagged as 'noautovalidity' # For the pointer to struct case, the struct pointer will not be validated, but any # members not tagged as 'noautovalidity' will be validated # We special-case the custom allocator checks, as they are explicit but can be auto-generated. 
AllocatorFunctions = ['PFN_vkAllocationFunction', 'PFN_vkReallocationFunction', 'PFN_vkFreeFunction', 'PFN_vkInternalAllocationNotification', 'PFN_vkInternalFreeNotification'] if value.noautovalidity and value.type not in AllocatorFunctions and not countRequiredVuid: # Log a diagnostic message when validation cannot be automatically generated and must be implemented manually self.logMsg('diag', 'ParameterValidation: No validation for {} {}'.format(structTypeName if structTypeName else funcName, value.name)) elif countRequiredVuid: usedLines.append('skip |= validate_array("{}", {ppp}"{ldn}"{pps}, "", {pf}{ln}, &{pf}{vn}, true, false, {}, kVUIDUndefined);\n'.format( funcName, countRequiredVuid, pf=valuePrefix, ldn=lenDisplayName, ln=value.len, vn=value.name, **postProcSpec)) else: if value.type in self.structTypes: # If this is a pointer to a struct with an sType field, verify the type usedLines += self.makeStructTypeCheck(valuePrefix, value, lenParam, req, cvReq, cpReq, funcName, lenDisplayName, valueDisplayName, postProcSpec, structTypeName) # If this is an input handle array that is not allowed to contain NULL handles, verify that none of the handles are VK_NULL_HANDLE elif value.type in self.handleTypes and value.isconst and not self.isHandleOptional(value, lenParam): usedLines += self.makeHandleCheck(valuePrefix, value, lenParam, req, cvReq, funcName, lenDisplayName, valueDisplayName, postProcSpec) elif value.type in self.flags and value.isconst: usedLines += self.makeFlagsArrayCheck(valuePrefix, value, lenParam, req, cvReq, funcName, lenDisplayName, valueDisplayName, postProcSpec) elif value.isbool and value.isconst: usedLines.append('skip |= validate_bool32_array("{}", {ppp}"{}"{pps}, {ppp}"{}"{pps}, {pf}{}, {pf}{}, {}, {});\n'.format(funcName, lenDisplayName, valueDisplayName, value.len, value.name, cvReq, req, pf=valuePrefix, **postProcSpec)) elif value.israngedenum and value.isconst: enum_value_list = 'All%sEnums' % value.type usedLines.append('skip |= validate_ranged_enum_array("{}", {ppp}"{}"{pps}, {ppp}"{}"{pps}, "{}", {}, {pf}{}, {pf}{}, {}, {});\n'.format(funcName, lenDisplayName, valueDisplayName, value.type, enum_value_list, value.len, value.name, cvReq, req, pf=valuePrefix, **postProcSpec)) elif value.name == 'pNext': usedLines += self.makeStructNextCheck(valuePrefix, value, funcName, valueDisplayName, postProcSpec, structTypeName) else: usedLines += self.makePointerCheck(valuePrefix, value, lenParam, req, cvReq, cpReq, funcName, lenDisplayName, valueDisplayName, postProcSpec, structTypeName) # If this is a pointer to a struct (input), see if it contains members that need to be checked if value.type in self.validatedStructs: if value.isconst: # or value.type in self.returnedonly_structs: usedLines.append(self.expandStructPointerCode(valuePrefix, value, lenParam, funcName, valueDisplayName, postProcSpec)) elif value.type in self.returnedonly_structs: usedLines.append(self.expandStructPointerCode(valuePrefix, value, lenParam, funcName, valueDisplayName, postProcSpec)) # Non-pointer types else: # The parameter will not be processes when tagged as 'noautovalidity' # For the struct case, the struct type will not be validated, but any # members not tagged as 'noautovalidity' will be validated if value.noautovalidity: # Log a diagnostic message when validation cannot be automatically generated and must be implemented manually self.logMsg('diag', 'ParameterValidation: No validation for {} {}'.format(structTypeName if structTypeName else funcName, value.name)) else: vuid_name_tag = 
structTypeName if structTypeName is not None else funcName if value.type in self.structTypes: stype = self.structTypes[value.type] vuid = self.GetVuid(value.type, "sType-sType") undefined_vuid = '"kVUIDUndefined"' usedLines.append('skip |= validate_struct_type("{}", {ppp}"{}"{pps}, "{sv}", &({}{vn}), {sv}, false, kVUIDUndefined, {});\n'.format( funcName, valueDisplayName, valuePrefix, vuid, vn=value.name, sv=stype, vt=value.type, **postProcSpec)) elif value.type in self.handleTypes: if not self.isHandleOptional(value, None): usedLines.append('skip |= validate_required_handle("{}", {ppp}"{}"{pps}, {}{});\n'.format(funcName, valueDisplayName, valuePrefix, value.name, **postProcSpec)) elif value.type in self.flags and value.type.replace('Flags', 'FlagBits') not in self.flagBits: vuid = self.GetVuid(vuid_name_tag, "%s-zerobitmask" % (value.name)) usedLines.append('skip |= validate_reserved_flags("{}", {ppp}"{}"{pps}, {pf}{}, {});\n'.format(funcName, valueDisplayName, value.name, vuid, pf=valuePrefix, **postProcSpec)) elif value.type in self.flags or value.type in self.flagBits: if value.type in self.flags: flagBitsName = value.type.replace('Flags', 'FlagBits') flagsType = 'kOptionalFlags' if value.isoptional else 'kRequiredFlags' invalidVuid = self.GetVuid(vuid_name_tag, "%s-parameter" % (value.name)) zeroVuid = self.GetVuid(vuid_name_tag, "%s-requiredbitmask" % (value.name)) elif value.type in self.flagBits: flagBitsName = value.type flagsType = 'kOptionalSingleBit' if value.isoptional else 'kRequiredSingleBit' invalidVuid = self.GetVuid(vuid_name_tag, "%s-parameter" % (value.name)) zeroVuid = invalidVuid allFlagsName = 'All' + flagBitsName invalid_vuid = self.GetVuid(vuid_name_tag, "%s-parameter" % (value.name)) allFlagsName = 'All' + flagBitsName zeroVuidArg = '' if value.isoptional else ', ' + zeroVuid usedLines.append('skip |= validate_flags("{}", {ppp}"{}"{pps}, "{}", {}, {pf}{}, {}, {}{});\n'.format(funcName, valueDisplayName, flagBitsName, allFlagsName, value.name, flagsType, invalidVuid, zeroVuidArg, pf=valuePrefix, **postProcSpec)) elif value.isbool: usedLines.append('skip |= validate_bool32("{}", {ppp}"{}"{pps}, {}{});\n'.format(funcName, valueDisplayName, valuePrefix, value.name, **postProcSpec)) elif value.israngedenum: vuid = self.GetVuid(vuid_name_tag, "%s-parameter" % (value.name)) enum_value_list = 'All%sEnums' % value.type usedLines.append('skip |= validate_ranged_enum("{}", {ppp}"{}"{pps}, "{}", {}, {}{}, {});\n'.format(funcName, valueDisplayName, value.type, enum_value_list, valuePrefix, value.name, vuid, **postProcSpec)) # If this is a struct, see if it contains members that need to be checked if value.type in self.validatedStructs: memberNamePrefix = '{}{}.'.format(valuePrefix, value.name) memberDisplayNamePrefix = '{}.'.format(valueDisplayName) usedLines.append(self.expandStructCode(value.type, funcName, memberNamePrefix, memberDisplayNamePrefix, '', [], postProcSpec)) # Append the parameter check to the function body for the current command if usedLines: # Apply special conditional checks if value.condition: usedLines = self.genConditionalCall(valuePrefix, value.condition, usedLines) lines += usedLines elif not value.iscount: # If no expression was generated for this value, it is unreferenced by the validation function, unless # it is an array count, which is indirectly referenced for array valiadation. 
unused.append(value.name) if not lines: lines.append('// No xml-driven validation\n') return lines, unused # # Generate the struct member check code from the captured data def processStructMemberData(self): indent = self.incIndent(None) for struct in self.structMembers: # # The string returned by genFuncBody will be nested in an if check for a NULL pointer, so needs its indent incremented lines, unused = self.genFuncBody('{funcName}', struct.members, '{valuePrefix}', '{displayNamePrefix}', struct.name) if lines: self.validatedStructs[struct.name] = lines # # Generate the command param check code from the captured data def processCmdData(self): indent = self.incIndent(None) for command in self.commands: # Skip first parameter if it is a dispatch handle (everything except vkCreateInstance) startIndex = 0 if command.name == 'vkCreateInstance' else 1 lines, unused = self.genFuncBody(command.name, command.params[startIndex:], '', '', None) # Cannot validate extension dependencies for device extension APIs having a physical device as their dispatchable object if (command.name in self.required_extensions) and (self.extension_type != 'device' or command.params[0].type != 'VkPhysicalDevice'): ext_test = '' if command.params[0].type in ["VkInstance", "VkPhysicalDevice"] or command.name == 'vkCreateInstance': ext_table_type = 'instance' else: ext_table_type = 'device' for ext in self.required_extensions[command.name]: ext_name_define = '' ext_enable_name = '' for extension in self.registry.extensions: if extension.attrib['name'] == ext: ext_name_define = extension[0][1].get('name') ext_enable_name = ext_name_define.lower() ext_enable_name = re.sub('_extension_name', '', ext_enable_name) break ext_test = 'if (!%s_extensions.%s) skip |= OutputExtensionError("%s", %s);\n' % (ext_table_type, ext_enable_name, command.name, ext_name_define) lines.insert(0, ext_test) if lines: func_sig = self.getCmdDef(command) + ' const {\n' func_sig = func_sig.split('VKAPI_CALL vk')[1] cmdDef = 'bool StatelessValidation::PreCallValidate' + func_sig cmdDef += '%sbool skip = false;\n' % indent if isinstance(command.promotion_info, list): version_flag = command.promotion_info[1] version_id = version_flag.replace('VK_VERSION', 'VK_API_VERSION') cmdDef += '%s if (CheckPromotedApiAgainstVulkanVersion(%s, "%s", %s)) return true;\n' % (indent, command.promotion_info[0], command.name, version_id) for line in lines: if type(line) is list: for sub in line: cmdDef += indent + sub else: cmdDef += indent + line # Insert call to custom-written function if present if command.name in self.functions_with_manual_checks: # Generate parameter list for manual fcn and down-chain calls params_text = '' for param in command.params: params_text += '%s, ' % param.name params_text = params_text[:-2] + ');\n' cmdDef += ' if (!skip) skip |= manual_PreCallValidate'+ command.name[2:] + '(' + params_text cmdDef += '%sreturn skip;\n' % indent cmdDef += '}\n' self.validation.append(cmdDef)
the-stack_0_4301
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# from trainer import Trainer
import pyximport
pyximport.install()
from cython_train.trainer_cython import Trainer
from ssd_v2 import SSD300v2
import keras
import argparse


def main():
    parser = argparse.ArgumentParser(description="Training ssd model with keras")
    parser.add_argument("-c", "--class_number", metavar="class_number", type=int,
                        default=21, dest="class_number",
                        help="set the classify number")
    parser.add_argument("-b", "--prior_boxes_ssd300", metavar="prior_boxes_ssd300", type=str,
                        default='prior_boxes_ssd300.pkl', dest="prior_boxes_ssd300",
                        help="set the prior boxes file")
    parser.add_argument("-t", "--train_file", metavar="train_file", type=str,
                        default='VOC2007.pkl', dest="train_file",
                        help="set the train file")
    parser.add_argument("-p", "--path_prefix", metavar="path_prefix", type=str,
                        default='./VOCdevkit/VOC2007/JPEGImages/', dest="path_prefix",
                        help="set the path prefix")
    parser.add_argument("-w", "--weight_file", metavar="weight_file", type=str,
                        default='weights_SSD300.hdf5', dest="weight_file",
                        help="set the weight file")
    parser.add_argument("-s", "--save_weight_file", metavar="save_weight_file", type=str,
                        default='./resource/checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                        dest="save_weight_file",
                        help="set the save weight file")
    parser.add_argument("-n", "--nb_epoch", metavar="nb_epoch", type=int,
                        default=100, dest="nb_epoch",
                        help="set the number of epoch")
    args = parser.parse_args()

    input_shape = (300, 300, 3)
    model = SSD300v2(input_shape, num_classes=args.class_number)
    base_lr = 3e-4

    trainer = Trainer(class_number=args.class_number,
                      input_shape=input_shape,
                      priors_file=args.prior_boxes_ssd300,
                      train_file=args.train_file,
                      path_prefix=args.path_prefix,
                      model=model,
                      weight_file=args.weight_file,
                      freeze=('input_1', 'conv1_1', 'conv1_2', 'pool1',
                              'conv2_1', 'conv2_2', 'pool2',
                              'conv3_1', 'conv3_2', 'conv3_3', 'pool3'),
                      save_weight_file=args.save_weight_file,
                      optim=keras.optimizers.Adam(lr=base_lr))
    trainer.train(nb_epoch=args.nb_epoch)


if __name__ == "__main__":
    main()
the-stack_0_4306
#!/usr/bin/env python

__copyright__ = 'Copyright 2013-2014, http://radical.rutgers.edu'
__license__ = 'MIT'

import os
import sys

verbose = os.environ.get('RADICAL_PILOT_VERBOSE', 'REPORT')
os.environ['RADICAL_PILOT_VERBOSE'] = verbose

import radical.pilot as rp
import radical.utils as ru


# ------------------------------------------------------------------------------
#
# READ the RADICAL-Pilot documentation: https://radicalpilot.readthedocs.io/
#
# ------------------------------------------------------------------------------


# -----------------------------------------------------------------------------
#
if __name__ == '__main__':

    # we use a reporter class for nicer output
    report = ru.Reporter(name='radical.pilot')
    report.title('Getting Started (RP version %s)' % rp.version)

    # use the resource specified as argument, fall back to localhost
    if len(sys.argv) > 2:
        report.exit('Usage:\t%s [resource]\n\n' % sys.argv[0])
    elif len(sys.argv) == 2:
        resource = sys.argv[1]
    else:
        resource = 'local.localhost'

    # Create a new session. No need to try/except this: if session creation
    # fails, there is not much we can do anyways...
    session = rp.Session()

    # all other pilot code is now tried/excepted. If an exception is caught, we
    # can rely on the session object to exist and be valid, and we can thus tear
    # the whole RP stack down via a 'session.close()' call in the 'finally'
    # clause...
    try:

        # read the config used for resource details
        report.info('read config')
        config = ru.read_json('%s/config.json' % os.path.dirname(os.path.abspath(__file__)))
        report.ok('>>ok\n')

        report.header('submit pilots')

        # Add a Pilot Manager. Pilot managers manage one or more ComputePilots.
        pmgr = rp.PilotManager(session=session)

        # Define an [n]-core local pilot that runs for [x] minutes
        # Here we use a dict to initialize the description object
        pd_init = {
            'resource'      : resource,
            'runtime'       : 15,  # pilot runtime (min)
            'exit_on_error' : True,
            'project'       : config[resource].get('project', None),
            'queue'         : config[resource].get('queue', None),
            'access_schema' : config[resource].get('schema', None),
            'cores'         : config[resource].get('cores', 1),
            'gpus'          : config[resource].get('gpus', 0),
        }
        pdesc = rp.ComputePilotDescription(pd_init)

        # Launch the pilot.
        pilot = pmgr.submit_pilots(pdesc)

        report.header('submit units')

        # Register the ComputePilot in a UnitManager object.
        umgr = rp.UnitManager(session=session)
        umgr.add_pilots(pilot)

        # Create a workload of ComputeUnits.
        # Each compute unit runs a specific `echo` command
        n = 128   # number of units to run
        report.info('create %d unit description(s)\n\t' % n)

        cuds = list()
        for i in range(0, n):

            # create a new CU description, and fill it.
            # Here we don't use dict initialization.
            cud = rp.ComputeUnitDescription()
            cud.environment = {'TEST': 'jabberwocky'}
            cud.executable = '/bin/echo'
            cud.arguments = ['$RP_UNIT_ID greets $TEST']
            cuds.append(cud)
            report.progress()
        report.ok('>>ok\n')

        # Submit the previously created ComputeUnit descriptions to the
        # PilotManager. This will trigger the selected scheduler to start
        # assigning ComputeUnits to the ComputePilots.
        units = umgr.submit_units(cuds)

        # Wait for all compute units to reach a final state (DONE, CANCELED or FAILED).
        report.header('gather results')
        umgr.wait_units()

        report.info('\n')
        for unit in units:
            report.plain(' * %s: %s, exit: %3s, out: %s\n'
                         % (unit.uid, unit.state[:4], unit.exit_code,
                            unit.stdout.strip()[:35]))

    except Exception as e:
        # Something unexpected happened in the pilot code above
        report.error('caught Exception: %s\n' % e)
        raise

    except (KeyboardInterrupt, SystemExit):
        # the callback called sys.exit(), and we can here catch the
        # corresponding KeyboardInterrupt exception for shutdown. We also catch
        # SystemExit (which gets raised if the main threads exits for some other
        # reason).
        report.warn('exit requested\n')

    finally:
        # always clean up the session, no matter if we caught an exception or
        # not. This will kill all remaining pilots.
        report.header('finalize')
        session.close()

    report.header()

# ------------------------------------------------------------------------------
the-stack_0_4307
""" File: 1514.py Title: Path with Maximum Probability Difficulty: Medium URL: https://leetcode.com/problems/path-with-maximum-probability/ """ import heapq import unittest from collections import defaultdict, deque from typing import List class Solution: def maxProbability(self, n: int, edges: List[List[int]], probs: List[float], start: int, end: int) -> float: adjacents = defaultdict(dict) for edge, prob in zip(edges, probs): a, b = edge adjacents[a][b] = prob adjacents[b][a] = prob heap = [(-1, start)] visited = [False] * n while heap: neg_prob, here = heapq.heappop(heap) if visited[here]: continue if here == end: return -neg_prob visited[here] = True for there in adjacents[here]: if not visited[there]: there_prob = neg_prob * adjacents[here][there] heapq.heappush(heap, (there_prob, there)) return 0.0 class SolutionTestCase(unittest.TestCase): def test_example1(self): # Input n = 3 edges = [[0, 1], [1, 2], [0, 2]] probs = [0.5, 0.5, 0.2] start = 0 end = 2 # Output output = 0.25000 solution = Solution() self.assertEqual(solution.maxProbability(n, edges, probs, start, end), output) def test_example2(self): # Input n = 3 edges = [[0, 1], [1, 2], [0, 2]] probs = [0.5, 0.5, 0.3] start = 0 end = 2 # Output output = 0.30000 solution = Solution() self.assertEqual(solution.maxProbability(n, edges, probs, start, end), output) def test_example3(self): # Input n = 3 edges = [[0, 1]] probs = [0.5] start = 0 end = 2 # Output output = 0.00000 solution = Solution() self.assertEqual(solution.maxProbability(n, edges, probs, start, end), output) if __name__ == "__main__": unittest.main()
the-stack_0_4308
"""arikefoods URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path, include from django.conf import settings from django.conf.urls.static import static urlpatterns = [ path('admin/', admin.site.urls), path('accounts/', include('allauth.urls')), path('', include('home.urls')), path('food_menu/', include('menu.urls')), path('', include('order.urls')), path('checkout/', include('checkout.urls')), path('', include('recipe_blog.urls')), path('feedback/', include('feedback.urls')), ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
the-stack_0_4312
#coding=utf8
import traceback

from extensions.database import db
from extensions.hueyext import hueyapp
from extensions.celeryext import celeryapp

from models.asyncmodel import Async
from models.warehouse import Warehouse, Area, Workarea, Location
from models.inv import Good, Category, Inv
from models.auth import Partner, Seq
from models.stockout import Stockout, StockoutLine

from blueprints.stockout.action import StockoutAction

from utils.upload import get_file_content
from utils.functions import clear_empty
from utils.base import DictNone


# #@hueyapp.task()
@celeryapp.task
def import_stockout(company_code, warehouse_code, owner_code, args, task_id, user_code=None, user_name=None):
    ret = import_stockout_sync(company_code, warehouse_code, owner_code, args, task_id,
                               user_code=user_code, user_name=user_name)
    db.session.close()
    return ret


def import_stockout_sync(company_code, warehouse_code, owner_code, args, task_id, user_code=None, user_name=None):
    task = Async.query.get(task_id)
    print('handle async task_id ==> ', task_id, task.async_id)
    task.get_file()

    content = get_file_content(task.link)
    success = True
    exc_info = ''
    try:
        order_dict = DictNone()
        task.code = 'stockout'
        for row in content:
            d = DictNone(clear_empty(row))
            if not d.erp_order_code:
                continue

            # Create the order
            if d.erp_order_code not in order_dict:
                if Stockout.query.filter_by(company_code=company_code, warehouse_code=warehouse_code,
                                            owner_code=owner_code,
                                            erp_order_code=d.erp_order_code).count() > 0:
                    continue

                order = Stockout(company_code=company_code, warehouse_code=warehouse_code, owner_code=owner_code,
                                 source='import', user_code=user_code, user_name=user_name)
                order_dict[d.erp_order_code] = order

                order.erp_order_code = d.erp_order_code
                order.order_code = Seq.make_order_code('C', company_code, warehouse_code, owner_code)
                order.xtype = d.xtype or 'B2B'
                order.order_type = d.order_type
                order.date_planned = d.date_planned
                order.source = 'custom'
                order.remark = d.remark or ''
                order.partner_code = d.partner_code or ''
                order.partner_name = d.partner_name or ''
                order.sender_info = {'name': d.sender, 'tel': d.sender_tel, 'address': d.sender_address}
                order.receiver_info = {'name': d.receiver, 'tel': d.receiver_tel, 'address': d.receiver_address}
                order.supplier_info = {'supplier_code': d.supplier_code}
                order.express_info = {'express_code': d.express_code}
                order.invoice_info = {'invoice': d.invoice}
                # order.JSON = {'custom1': d.custom1, 'custom2': d.custom2, 'custom3': d.custom3, 'custom4': d.custom4}

                db.session.add(order)
            else:
                order = order_dict[d.erp_order_code]

            if not d.sku or not d.qty:
                continue

            line = StockoutLine(company_code=company_code, warehouse_code=warehouse_code, owner_code=owner_code)
            line.erp_order_code = order.erp_order_code
            line.order_code = order.order_code
            line.sku = d.sku
            line.barcode = d.barcode or d.sku
            line.name = d.name or d.sku
            line.qty = int(d.qty)
            line.remark = d.remark or ''
            line.supplier_code = d.supplier_code or ''
            # line.supplier_code = d.supplier_code or ''
            # line.quality_type = d.quality_type or 'ZP'
            # line.product_date = d.product_date or None
            # line.expire_date = d.expire_date or None
            # line.batch_code = d.batch_code or ''
            # line.virtual_warehouse = d.virtual_warehouse or ''
            # line.spec = d.spec or ''
            line.style = d.style or ''
            line.color = d.color or ''
            line.size = d.size or ''
            line.unit = d.unit or ''
            # line.JSON = {'custom1': d.custom1, 'custom2': d.custom2, 'custom3': d.custom3, 'custom4': d.custom4}

            db.session.add(line)
            line.stockout = order

        db.session.flush()
        exc_info = 'save stockout: %s' % len(content)
    except Exception:
        exc_info = traceback.format_exc()
        success = False

    if success:
        db.session.commit()
        task.state = 'done'
        task.exc_info = 'SUCCESS'
    else:
        db.session.rollback()
        task.state = 'fail'
        task.exc_info = exc_info[-1500:]
        print(exc_info)

    db.session.commit()
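# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the Celery task
# defined above might be queued from application code. All literal values are
# placeholders; a real call additionally needs a configured Celery broker and
# an existing Async record whose id is passed as ``task_id``.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import_stockout.delay(
        'C0001',            # company_code (placeholder)
        'WH01',             # warehouse_code (placeholder)
        'OWN01',            # owner_code (placeholder)
        {},                 # args, not used by the import routine itself
        1,                  # task_id of an existing Async record (placeholder)
        user_code='u001',
        user_name='demo user',
    )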
the-stack_0_4313
# -*- coding:utf-8 -*- from __future__ import absolute_import """ 词向量测试 20K 词向量: - 规模: 19527 x 300D - 来源: [Chinese-Word-Vectors: sgns.sikuquanshu.word.bz2](https://github.com/Embedding/Chinese-Word-Vectors) 测试结果: - faiss: load index, 0.82s; search 100 times by word, 1.08s; search 100 times by vec, 1.06s - gensim: load index, 5.80s; search 100 times by word, 1.64s; search 100 times by vec, 1.62s """ import bz2 import logging import pickle import time import gensim import numpy as np import os from pyxtools import global_init_logger from pyxtools.faiss_tools import faiss class BasicBenchmark(object): """ Basic Class """ def __init__(self, similar_top_n: int = 20): """ init """ self.logger = logging.getLogger(self.__class__.__name__) self.similar_top_n = similar_top_n self.dimension = None self.result_dict = {} self.word_vec_model_file = "vec.model" self._word_vec_dict = {} def prepare(self): """ 准备工作 """ self._global_prepare() def _global_prepare(self): """ """ if not os.path.exists(self.word_vec_model_file): with open(self.word_vec_model_file, "wb") as fw: with bz2.BZ2File('./sgns.sikuquanshu.word.bz2', 'rb') as fr: fw.write(fr.read()) @staticmethod def get_word_list() -> [str]: """ 测试词 """ return ["计", "算", "机", "词", "向", "量", "囧"] def run(self): # prepare self.prepare() # init time_start = time.time() self.init() self.logger.info("Init: cost {} s!".format(time.time() - time_start)) # search similar words time_start = time.time() for i in range(100): self.search() for word in self.get_word_list(): result_list = self.result_dict[word] self.logger.info("{}>>\n{}".format( word, "\n".join([result for result in result_list]) )) self.logger.info("Search 100 times by word: cost {} s!".format(time.time() - time_start)) # search similar words by vec self.result_dict.clear() time_start = time.time() for i in range(100): self.vec_search() for word in self.get_word_list(): result_list = self.result_dict[word] self.logger.info("{}>>\n{}".format( word, "\n".join([result for result in result_list]) )) self.logger.info("Search 100 times by vec: cost {} s!".format(time.time() - time_start)) def init(self): raise NotImplementedError def search(self): raise NotImplementedError def vec_search(self, ): raise NotImplementedError def save_result_dict(self, word: str, result: str): if word not in self.result_dict: self.result_dict[word] = [result] else: result_list = self.result_dict[word] if result not in result_list: self.result_dict[word].append(result) def load_pre_trained_model(self, ): """ 返回预训练好的模型 """ gensim_model = gensim.models.KeyedVectors.load_word2vec_format(self.word_vec_model_file, binary=False) self.dimension = gensim_model.vector_size return gensim_model class GensimBenchmark(BasicBenchmark): """ Gensim """ def __init__(self): super(GensimBenchmark, self).__init__() self._model = None def init(self): self._model = self.load_pre_trained_model() for word in self.get_word_list(): self._word_vec_dict[word] = self._model.get_vector(word) def search(self): for word in self.get_word_list(): result = ", ".join([item[0] for item in self._model.similar_by_word(word, topn=self.similar_top_n)]) self.save_result_dict(word, result) def vec_search(self): """ 直接使用词向量搜索 """ for word in self.get_word_list(): word_vec = self._word_vec_dict[word] result = ", ".join( [item[0] for item in self._model.similar_by_word(word_vec, topn=self.similar_top_n + 1)[1:]] ) self.save_result_dict(word, result) class FaissBenchmark(BasicBenchmark): """ Faiss """ def __init__(self): super(FaissBenchmark, self).__init__() self._model 
= None self._word_detail_info = None self.faiss_index_file = "./faiss.index" self.faiss_index_detail_pkl = "./faiss.pkl" def prepare(self): """ 将Gensim 版本的模型转化为Faiss模型 """ super(FaissBenchmark, self).prepare() # turn model from gensim to faiss index if os.path.exists(self.faiss_index_file) and os.path.exists(self.faiss_index_detail_pkl): return # load model to dict self.logger.info("loading model...") time_start = time.time() gensim_model = self.load_pre_trained_model() model_size = len(gensim_model.vocab) self.dimension = gensim_model.vector_size feature = np.zeros(shape=(model_size, self.dimension), dtype=np.float32) word_list = [word for word in gensim_model.vocab] for i, word in enumerate(word_list): feature[i] = gensim_model.get_vector(word) # not normed self.logger.info("success to load index! Cost {} seconds!".format(time.time() - time_start)) # train faiss index index_factory = "Flat" normed_feature = feature / np.linalg.norm(feature, axis=1, keepdims=True) faiss_index = faiss.index_factory(self.dimension, index_factory) self.logger.info("training index...") time_start = time.time() faiss_index.train(normed_feature) # nb * d faiss_index.add(normed_feature) self.logger.info("success to train index! Cost {} seconds!".format(time.time() - time_start)) # save in file faiss.write_index(faiss_index, self.faiss_index_file) with open(self.faiss_index_detail_pkl, "wb") as f: pickle.dump((word_list, feature), f) def init(self): """ load model """ self._model = faiss.read_index(self.faiss_index_file) with open(self.faiss_index_detail_pkl, "rb") as f: self._word_detail_info = pickle.load(f) self.dimension = self._word_detail_info[1].shape[-1] for word in self.get_word_list(): self._word_vec_dict[word] = self._word_detail_info[1][self._word_detail_info[0].index(word)] def _search_by_vec(self, feature_list, ): """ 向量搜索 """ normed_feature_list = feature_list / np.linalg.norm(feature_list, axis=1, keepdims=True) length = normed_feature_list.shape[0] distance_list, indices = self._model.search(normed_feature_list, self.similar_top_n + 1) distance_list = distance_list.reshape((length, self.similar_top_n + 1)) indices = indices.reshape((length, self.similar_top_n + 1)) return distance_list, indices def search(self): """ search similar words """ # 获取查询词向量 word_list = self.get_word_list() word_feature_list = np.zeros(shape=(len(word_list), self.dimension), dtype=np.float32) for i, word in enumerate(word_list): word_feature_list[i] = self._word_detail_info[1][self._word_detail_info[0].index(word)] # search _, indices_arr = self._search_by_vec(word_feature_list) # show result for i, word in enumerate(word_list): result = ", ".join([self._word_detail_info[0][word_index] for word_index in indices_arr[i][1:]]) self.save_result_dict(word, result) def vec_search(self): """ 直接使用词向量搜索 """ # 获取查询词向量 word_list = self.get_word_list() word_feature_list = np.zeros(shape=(len(word_list), self.dimension), dtype=np.float32) for i, word in enumerate(word_list): word_feature_list[i] = self._word_vec_dict[word] # search _, indices_arr = self._search_by_vec(word_feature_list) # show result for i, word in enumerate(word_list): result = ", ".join([self._word_detail_info[0][word_index] for word_index in indices_arr[i][1:]]) self.save_result_dict(word, result) if __name__ == '__main__': # global logger global_init_logger() # benchmark for method_cls in [FaissBenchmark, GensimBenchmark, ]: method_cls().run()
the-stack_0_4314
# Imports from datetime import timedelta from typing import List, Tuple import hypothesis.strategies as st import numpy as np import numpy.testing as npt import pandas as pd import pyarrow as pa import pytest from hypothesis import given, settings from fletcher._algorithms import ( _extract_data_buffer_as_np_array, _merge_valid_bitmaps, max_op, min_op, np_ufunc_op, prod_op, sum_op, ) from fletcher.algorithms.utils.chunking import ( _calculate_chunk_offsets, _combined_in_chunk_offsets, _in_chunk_offsets, ) def _is_na(a): return (a is pa.NA) or (a is None) or (np.isnan(a)) def assert_allclose_na(a, b): """assert_allclose with a broader NA/nan/None definition.""" if _is_na(a) and _is_na(b): pass else: npt.assert_allclose(a, b) @pytest.mark.parametrize( "op, pandas_op", [(sum_op, pd.Series.sum), (prod_op, pd.Series.prod)] ) @settings(deadline=timedelta(milliseconds=1000)) @given( data=st.lists(st.one_of(st.floats(max_value=10.0, min_value=-10), st.none())), skipna=st.booleans(), ) def test_reduce_op(data, skipna, op, pandas_op): arrow = pa.array(data, type=pa.float64(), from_pandas=True) pandas = pd.Series(data, dtype=float) assert_allclose_na(op(arrow, skipna), pandas_op(pandas, skipna=skipna)) # Split in the middle and check whether this still works if len(data) > 2: arrow = pa.chunked_array( [ pa.array(data[: len(data) // 2], type=pa.float64(), from_pandas=True), pa.array(data[len(data) // 2 :], type=pa.float64(), from_pandas=True), ] ) assert_allclose_na(op(arrow, skipna), pandas_op(pandas, skipna=skipna)) @pytest.mark.parametrize( "op, pandas_op", [(min_op, pd.Series.min), (max_op, pd.Series.max)] ) @settings(deadline=timedelta(milliseconds=1000)) @given( data=st.lists(st.one_of(st.floats(max_value=10.0), st.none())), skipna=st.booleans() ) def test_reduce_op_no_identity(data, skipna, op, pandas_op): arrow = pa.array(data, type=pa.float64(), from_pandas=True) pandas = pd.Series(data, dtype=float) should_raise = arrow.null_count == len(arrow) and (skipna or len(arrow) == 0) if should_raise: with pytest.raises(ValueError): assert_allclose_na(op(arrow, skipna), pandas_op(pandas, skipna=skipna)) else: assert_allclose_na(op(arrow, skipna), pandas_op(pandas, skipna=skipna)) # Split in the middle and check whether this still works if len(data) > 2: arrow = pa.chunked_array( [ pa.array(data[: len(data) // 2], type=pa.float64(), from_pandas=True), pa.array(data[len(data) // 2 :], type=pa.float64(), from_pandas=True), ] ) if should_raise: with pytest.raises(ValueError): assert_allclose_na(op(arrow, skipna), pandas_op(pandas, skipna=skipna)) else: assert_allclose_na(op(arrow, skipna), pandas_op(pandas, skipna=skipna)) def test_calculate_chunk_offsets(): arr = pa.chunked_array([[1, 1, 1]]) npt.assert_array_equal(_calculate_chunk_offsets(arr), np.array([0])) arr = pa.chunked_array([[1], [1, 1]]) npt.assert_array_equal(_calculate_chunk_offsets(arr), np.array([0, 1])) arr = pa.chunked_array([[1, 1], [1]]) npt.assert_array_equal(_calculate_chunk_offsets(arr), np.array([0, 2])) def check_valid_in_offsets( arr: pa.ChunkedArray, in_offsets: List[Tuple[int, int, int]] ) -> None: if arr.num_chunks == 0: assert in_offsets == [] return # We always start at the beginning assert in_offsets[0][0] == 0 assert in_offsets[0][1] == 0 # Overall, the chunk offsets must have the same length as the array assert sum(x[2] for x in in_offsets) == len(arr) @given(data=st.lists(st.lists(st.integers(min_value=0, max_value=10)))) def test_in_chunk_offsets(data: List[List[int]]): arr = pa.chunked_array(data, type=pa.int64()) # 
Simple case: Passing in the actual chunk offsets should yield a valid selection offsets = list(_calculate_chunk_offsets(arr)) in_offsets = _in_chunk_offsets(arr, offsets) check_valid_in_offsets(arr, in_offsets) def test_combined_in_chunk_offsets(): a = pa.chunked_array([[]]) b = pa.chunked_array([[]]) in_a_offsets, in_b_offsets = _combined_in_chunk_offsets(a, b) assert in_a_offsets == [(0, 0, 0)] assert in_b_offsets == [(0, 0, 0)] a = pa.chunked_array([[1]]) b = pa.chunked_array([[2]]) in_a_offsets, in_b_offsets = _combined_in_chunk_offsets(a, b) assert in_a_offsets == [(0, 0, 1)] assert in_b_offsets == [(0, 0, 1)] a = pa.chunked_array([[1, 2], [3, 4, 5]]) b = pa.chunked_array([[1], [2, 3], [4, 5]]) in_a_offsets, in_b_offsets = _combined_in_chunk_offsets(a, b) assert in_a_offsets == [(0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 2)] assert in_b_offsets == [(0, 0, 1), (1, 0, 1), (1, 1, 1), (2, 0, 2)] @pytest.mark.parametrize("data", [[1, 2, 4, 5], [1.0, 0.5, 4.0, 5.0]]) def test_extract_data_buffer_as_np_array(data): arr = pa.array(data) result = _extract_data_buffer_as_np_array(arr) expected = np.array(data) npt.assert_array_equal(result, expected) result = _extract_data_buffer_as_np_array(arr[2:4]) expected = np.array(data[2:4]) npt.assert_array_equal(result, expected) def assert_content_equals_array(result, expected): """Assert that the result is an Arrow structure and the content matches an array.""" assert isinstance(result, (pa.Array, pa.ChunkedArray)) if isinstance(result, pa.ChunkedArray): result = pa.concat_arrays(result.iterchunks()) assert result.equals(expected) def check_np_ufunc(a, b, expected): result = np_ufunc_op(a, b, np.ndarray.__add__) assert_content_equals_array(result, expected) result = np_ufunc_op(b, a, np.ndarray.__add__) assert_content_equals_array(result, expected) def test_np_ufunc_op_chunked_chunked(): a = pa.chunked_array([[1, 2], [3, None, None]]) b = pa.chunked_array([[1], [2, 3], [4, None]]) expected = pa.array([2, 4, 6, None, None]) check_np_ufunc(a, b, expected) def test_np_ufunc_op_chunked_flat(): a = pa.chunked_array([[1, 2], [3, None, None]]) b = pa.array([1, 2, 3, 4, None]) expected = pa.array([2, 4, 6, None, None]) check_np_ufunc(a, b, expected) def test_np_ufunc_op_chunked_np_array(): a = pa.chunked_array([[1, 2], [3, None]]) b = np.array([1, 2, 3, 4]) expected = pa.array([2, 4, 6, None]) check_np_ufunc(a, b, expected) def test_np_ufunc_op_chunked_scalar(): a = pa.chunked_array([[1, 2], [3, None]]) b = 4 expected = pa.array([5, 6, 7, None]) check_np_ufunc(a, b, expected) def test_np_ufunc_op_flat_flat(): a = pa.array([1, 2, 3, None, None]) b = pa.array([1, 2, 3, 4, None]) expected = pa.array([2, 4, 6, None, None]) check_np_ufunc(a, b, expected) def test_np_ufunc_op_flat_np_array(): a = pa.array([1, 2, 3, None]) b = np.array([1, 2, 3, 4]) expected = pa.array([2, 4, 6, None]) check_np_ufunc(a, b, expected) def test_np_ufunc_op_flat_scalar(): a = pa.array([1, 2, 3, None]) b = 4 expected = pa.array([5, 6, 7, None]) check_np_ufunc(a, b, expected) def test_merge_valid_bitmaps(): a = pa.array([1, 1, 1, 1, 1, 1, 1, 1, 1]) b = pa.array([1, 1, 1, None, None, None, 1, 1, 1]) expected = np.array([0xFF, 0x1], dtype=np.uint8) result = _merge_valid_bitmaps(a, a) npt.assert_array_equal(result, expected) expected = np.array([0xC7, 0x1], dtype=np.uint8) result = _merge_valid_bitmaps(a, b) npt.assert_array_equal(result, expected) expected = np.array([0x1], dtype=np.uint8) result = _merge_valid_bitmaps(a.slice(8, 1), a.slice(8, 1)) npt.assert_array_equal(result, expected) 
expected = np.array([0xF], dtype=np.uint8) result = _merge_valid_bitmaps(a.slice(0, 4), a.slice(0, 4)) npt.assert_array_equal(result, expected) expected = np.array([0x7], dtype=np.uint8) result = _merge_valid_bitmaps(a.slice(0, 4), b.slice(0, 4)) npt.assert_array_equal(result, expected) expected = np.array([0xF], dtype=np.uint8) result = _merge_valid_bitmaps(a.slice(5, 4), a.slice(5, 4)) npt.assert_array_equal(result, expected) expected = np.array([0xE], dtype=np.uint8) result = _merge_valid_bitmaps(a.slice(5, 4), b.slice(5, 4)) npt.assert_array_equal(result, expected) expected = np.array([0x3], dtype=np.uint8) result = _merge_valid_bitmaps(a.slice(5, 2), a.slice(5, 2)) npt.assert_array_equal(result, expected) expected = np.array([0x2], dtype=np.uint8) result = _merge_valid_bitmaps(a.slice(5, 2), b.slice(5, 2)) npt.assert_array_equal(result, expected) expected = np.array([0x3], dtype=np.uint8) result = _merge_valid_bitmaps(a.slice(5, 2), a.slice(3, 2)) npt.assert_array_equal(result, expected) expected = np.array([0x0], dtype=np.uint8) result = _merge_valid_bitmaps(a.slice(5, 2), b.slice(3, 2)) npt.assert_array_equal(result, expected)
the-stack_0_4317
from __future__ import print_function import gdbremote_testcase from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil class TestGdbRemoteAuxvSupport(gdbremote_testcase.GdbRemoteTestCaseBase): mydir = TestBase.compute_mydir(__file__) AUXV_SUPPORT_FEATURE_NAME = "qXfer:auxv:read" @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet def has_auxv_support(self): inferior_args = ["message:main entered", "sleep:5"] procs = self.prep_debug_monitor_and_inferior( inferior_args=inferior_args) # Don't do anything until we match the launched inferior main entry output. # Then immediately interrupt the process. # This prevents auxv data being asked for before it's ready and leaves # us in a stopped state. self.test_sequence.add_log_lines([ # Start the inferior... "read packet: $c#63", # ... match output.... {"type": "output_match", "regex": self.maybe_strict_output_regex( r"message:main entered\r\n")}, ], True) # ... then interrupt. self.add_interrupt_packets() self.add_qSupported_packets() context = self.expect_gdbremote_sequence() self.assertIsNotNone(context) features = self.parse_qSupported_response(context) return self.AUXV_SUPPORT_FEATURE_NAME in features and features[ self.AUXV_SUPPORT_FEATURE_NAME] == "+" def get_raw_auxv_data(self): # Start up llgs and inferior, and check for auxv support. if not self.has_auxv_support(): self.skipTest("auxv data not supported") # Grab pointer size for target. We'll assume that is equivalent to an unsigned long on the target. # Auxv is specified in terms of pairs of unsigned longs. self.reset_test_sequence() self.add_process_info_collection_packets() context = self.expect_gdbremote_sequence() self.assertIsNotNone(context) proc_info = self.parse_process_info_response(context) self.assertIsNotNone(proc_info) self.assertTrue("ptrsize" in proc_info) word_size = int(proc_info["ptrsize"]) OFFSET = 0 LENGTH = 0x400 # Grab the auxv data. self.reset_test_sequence() self.test_sequence.add_log_lines( [ "read packet: $qXfer:auxv:read::{:x},{:x}:#00".format( OFFSET, LENGTH), { "direction": "send", "regex": re.compile( r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", re.MULTILINE | re.DOTALL), "capture": { 1: "response_type", 2: "content_raw"}}], True) context = self.expect_gdbremote_sequence() self.assertIsNotNone(context) # Ensure we end up with all auxv data in one packet. # FIXME don't assume it all comes back in one packet. self.assertEqual(context.get("response_type"), "l") # Decode binary data. content_raw = context.get("content_raw") self.assertIsNotNone(content_raw) return (word_size, self.decode_gdbremote_binary(content_raw)) def supports_auxv(self): # When non-auxv platforms support llgs, skip the test on platforms # that don't support auxv. self.assertTrue(self.has_auxv_support()) # # We skip the "supports_auxv" test on debugserver. The rest of the tests # appropriately skip the auxv tests if the support flag is not present # in the qSupported response, so the debugserver test bits are still there # in case debugserver code one day does have auxv support and thus those # tests don't get skipped. # @skipIfWindows # no auxv support. 
@llgs_test def test_supports_auxv_llgs(self): self.init_llgs_test() self.build() self.set_inferior_startup_launch() self.supports_auxv() def auxv_data_is_correct_size(self): (word_size, auxv_data) = self.get_raw_auxv_data() self.assertIsNotNone(auxv_data) # Ensure auxv data is a multiple of 2*word_size (there should be two # unsigned long fields per auxv entry). self.assertEqual(len(auxv_data) % (2 * word_size), 0) # print("auxv contains {} entries".format(len(auxv_data) / (2*word_size))) @debugserver_test def test_auxv_data_is_correct_size_debugserver(self): self.init_debugserver_test() self.build() self.set_inferior_startup_launch() self.auxv_data_is_correct_size() @skipIfWindows @expectedFailureNetBSD @llgs_test def test_auxv_data_is_correct_size_llgs(self): self.init_llgs_test() self.build() self.set_inferior_startup_launch() self.auxv_data_is_correct_size() def auxv_keys_look_valid(self): (word_size, auxv_data) = self.get_raw_auxv_data() self.assertIsNotNone(auxv_data) # Grab endian. self.reset_test_sequence() self.add_process_info_collection_packets() context = self.expect_gdbremote_sequence() self.assertIsNotNone(context) process_info = self.parse_process_info_response(context) self.assertIsNotNone(process_info) endian = process_info.get("endian") self.assertIsNotNone(endian) auxv_dict = self.build_auxv_dict(endian, word_size, auxv_data) self.assertIsNotNone(auxv_dict) # Verify keys look reasonable. for auxv_key in auxv_dict: self.assertTrue(auxv_key >= 1) self.assertTrue(auxv_key <= 1000) # print("auxv dict: {}".format(auxv_dict)) @debugserver_test def test_auxv_keys_look_valid_debugserver(self): self.init_debugserver_test() self.build() self.set_inferior_startup_launch() self.auxv_keys_look_valid() @skipIfWindows @expectedFailureNetBSD @llgs_test def test_auxv_keys_look_valid_llgs(self): self.init_llgs_test() self.build() self.set_inferior_startup_launch() self.auxv_keys_look_valid() def auxv_chunked_reads_work(self): # Verify that multiple smaller offset,length reads of auxv data # return the same data as a single larger read. # Grab the auxv data with a single large read here. (word_size, auxv_data) = self.get_raw_auxv_data() self.assertIsNotNone(auxv_data) # Grab endian. self.reset_test_sequence() self.add_process_info_collection_packets() context = self.expect_gdbremote_sequence() self.assertIsNotNone(context) process_info = self.parse_process_info_response(context) self.assertIsNotNone(process_info) endian = process_info.get("endian") self.assertIsNotNone(endian) auxv_dict = self.build_auxv_dict(endian, word_size, auxv_data) self.assertIsNotNone(auxv_dict) iterated_auxv_data = self.read_binary_data_in_chunks( "qXfer:auxv:read::", 2 * word_size) self.assertIsNotNone(iterated_auxv_data) auxv_dict_iterated = self.build_auxv_dict( endian, word_size, iterated_auxv_data) self.assertIsNotNone(auxv_dict_iterated) # Verify both types of data collection returned same content. self.assertEqual(auxv_dict_iterated, auxv_dict) @debugserver_test def test_auxv_chunked_reads_work_debugserver(self): self.init_debugserver_test() self.build() self.set_inferior_startup_launch() self.auxv_chunked_reads_work() @skipIfWindows @expectedFailureNetBSD @llgs_test def test_auxv_chunked_reads_work_llgs(self): self.init_llgs_test() self.build() self.set_inferior_startup_launch() self.auxv_chunked_reads_work()
the-stack_0_4322
# -*- coding: utf-8 -*- """This file contains a parser for the Google Drive snapshots. The Google Drive snapshots are stored in SQLite database files named snapshot.db. """ from __future__ import unicode_literals from dfdatetime import posix_time as dfdatetime_posix_time from plaso.containers import events from plaso.containers import time_events from plaso.lib import definitions from plaso.parsers import sqlite from plaso.parsers.sqlite_plugins import interface class GoogleDriveSnapshotCloudEntryEventData(events.EventData): """Google Drive snapshot cloud entry event data. Attributes: doc_type (int): document type. path (str): path of the file. shared (bool): True if the file is shared, False if the file is private. size (int): size of the file. url (str): URL of the file. """ DATA_TYPE = 'gdrive:snapshot:cloud_entry' def __init__(self): """Initializes event data.""" super(GoogleDriveSnapshotCloudEntryEventData, self).__init__( data_type=self.DATA_TYPE) self.document_type = None self.path = None self.shared = None self.size = None self.url = None class GoogleDriveSnapshotLocalEntryEventData(events.EventData): """Google Drive snapshot local entry event data. Attributes: path (str): path of the file. size (int): size of the file. """ DATA_TYPE = 'gdrive:snapshot:local_entry' def __init__(self): """Initializes event data.""" super(GoogleDriveSnapshotLocalEntryEventData, self).__init__( data_type=self.DATA_TYPE) self.path = None self.size = None class GoogleDrivePlugin(interface.SQLitePlugin): """SQLite plugin for Google Drive snapshot.db files.""" NAME = 'google_drive' DESCRIPTION = 'Parser for Google Drive SQLite database files.' # Define the needed queries. QUERIES = [ (('SELECT cloud_entry.resource_id, cloud_entry.filename, ' 'cloud_entry.modified, cloud_entry.created, cloud_entry.size, ' 'cloud_entry.doc_type, cloud_entry.shared, cloud_entry.checksum, ' 'cloud_entry.url, cloud_relations.parent_resource_id ' 'FROM cloud_entry, cloud_relations ' 'WHERE cloud_relations.child_resource_id = cloud_entry.resource_id ' 'AND cloud_entry.modified IS NOT NULL;'), 'ParseCloudEntryRow'), (('SELECT inode_number, filename, modified, checksum, size ' 'FROM local_entry WHERE modified IS NOT NULL;'), 'ParseLocalEntryRow')] # The required tables. 
REQUIRED_TABLES = frozenset([ 'cloud_entry', 'cloud_relations', 'local_entry', 'local_relations', 'mapping', 'overlay_status']) SCHEMAS = [{ 'cloud_entry': ( 'CREATE TABLE cloud_entry (resource_id TEXT, filename TEXT, ' 'modified INTEGER, created INTEGER, acl_role INTEGER, doc_type ' 'INTEGER, removed INTEGER, url TEXT, size INTEGER, checksum TEXT, ' 'shared INTEGER, PRIMARY KEY (resource_id))'), 'cloud_relations': ( 'CREATE TABLE cloud_relations (child_resource_id TEXT, ' 'parent_resource_id TEXT, UNIQUE (child_resource_id, ' 'parent_resource_id), FOREIGN KEY (child_resource_id) REFERENCES ' 'cloud_entry(resource_id), FOREIGN KEY (parent_resource_id) ' 'REFERENCES cloud_entry(resource_id))'), 'local_entry': ( 'CREATE TABLE local_entry (inode_number INTEGER, filename TEXT, ' 'modified INTEGER, checksum TEXT, size INTEGER, PRIMARY KEY ' '(inode_number))'), 'local_relations': ( 'CREATE TABLE local_relations (child_inode_number INTEGER, ' 'parent_inode_number INTEGER, UNIQUE (child_inode_number), FOREIGN ' 'KEY (parent_inode_number) REFERENCES local_entry(inode_number), ' 'FOREIGN KEY (child_inode_number) REFERENCES ' 'local_entry(inode_number))'), 'mapping': ( 'CREATE TABLE mapping (inode_number INTEGER, resource_id TEXT, ' 'UNIQUE (inode_number), FOREIGN KEY (inode_number) REFERENCES ' 'local_entry(inode_number), FOREIGN KEY (resource_id) REFERENCES ' 'cloud_entry(resource_id))'), 'overlay_status': ( 'CREATE TABLE overlay_status (path TEXT, overlay_status INTEGER, ' 'PRIMARY KEY (path))')}] # Queries used to build cache. LOCAL_PATH_CACHE_QUERY = ( 'SELECT local_relations.child_inode_number, ' 'local_relations.parent_inode_number, local_entry.filename ' 'FROM local_relations, local_entry ' 'WHERE local_relations.child_inode_number = local_entry.inode_number') CLOUD_PATH_CACHE_QUERY = ( 'SELECT cloud_entry.filename, cloud_entry.resource_id, ' 'cloud_relations.parent_resource_id AS parent ' 'FROM cloud_entry, cloud_relations ' 'WHERE cloud_entry.doc_type = 0 ' 'AND cloud_entry.resource_id = cloud_relations.child_resource_id') def GetLocalPath(self, inode, cache, database): """Return local path for a given inode. Args: inode: The inode number for the file. cache (SQLiteCache): cache. database (SQLiteDatabase): database. Returns: A full path, including the filename of the given inode value. """ local_path = cache.GetResults('local_path') if not local_path: results = database.Query(self.LOCAL_PATH_CACHE_QUERY) cache.CacheQueryResults( results, 'local_path', 'child_inode_number', ('parent_inode_number', 'filename')) local_path = cache.GetResults('local_path') parent, path = local_path.get(inode, [None, None]) # TODO: Read the local_sync_root from the sync_config.db and use that # for a root value. root_value = '%local_sync_root%/' if not path: return root_value paths = [] while path: paths.append(path) parent, path = local_path.get(parent, [None, None]) if not paths: return root_value # Paths are built top level to root so we need to reverse the list to # represent them in the traditional order. paths.reverse() return root_value + '/'.join(paths) def GetCloudPath(self, resource_id, cache, database): """Return cloud path given a resource id. Args: resource_id: The resource_id for the file. cache: The local cache object. database: A database object (instance of SQLiteDatabase). Returns: A full path to the resource value. 
""" cloud_path = cache.GetResults('cloud_path') if not cloud_path: results = database.Query(self.CLOUD_PATH_CACHE_QUERY) cache.CacheQueryResults( results, 'cloud_path', 'resource_id', ('filename', 'parent')) cloud_path = cache.GetResults('cloud_path') if resource_id == 'folder:root': return '/' paths = [] parent_path, parent_id = cloud_path.get(resource_id, ['', '']) while parent_path: if parent_path == 'folder:root': break paths.append(parent_path) parent_path, parent_id = cloud_path.get(parent_id, ['', '']) if not paths: return '/' # Paths are built top level to root so we need to reverse the list to # represent them in the traditional order. paths.reverse() return '/{0:s}/'.format('/'.join(paths)) def ParseCloudEntryRow( self, parser_mediator, query, row, cache=None, database=None, **unused_kwargs): """Parses a cloud entry row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. cache (SQLiteCache): cache. database (SQLiteDatabase): database. """ query_hash = hash(query) parent_resource_id = self._GetRowValue( query_hash, row, 'parent_resource_id') filename = self._GetRowValue(query_hash, row, 'filename') cloud_path = self.GetCloudPath(parent_resource_id, cache, database) cloud_filename = '{0:s}{1:s}'.format(cloud_path, filename) event_data = GoogleDriveSnapshotCloudEntryEventData() event_data.document_type = self._GetRowValue(query_hash, row, 'doc_type') event_data.path = cloud_filename event_data.query = query event_data.shared = bool(self._GetRowValue(query_hash, row, 'shared')) event_data.size = self._GetRowValue(query_hash, row, 'size') event_data.url = self._GetRowValue(query_hash, row, 'url') timestamp = self._GetRowValue(query_hash, row, 'modified') date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'created') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) def ParseLocalEntryRow( self, parser_mediator, query, row, cache=None, database=None, **unused_kwargs): """Parses a local entry row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. cache (Optional[SQLiteCache]): cache. database (Optional[SQLiteDatabase]): database. """ query_hash = hash(query) inode_number = self._GetRowValue(query_hash, row, 'inode_number') local_path = self.GetLocalPath(inode_number, cache, database) event_data = GoogleDriveSnapshotLocalEntryEventData() event_data.path = local_path event_data.query = query event_data.size = self._GetRowValue(query_hash, row, 'size') timestamp = self._GetRowValue(query_hash, row, 'modified') date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data) sqlite.SQLiteParser.RegisterPlugin(GoogleDrivePlugin)
the-stack_0_4323
import os

import pytest

from conda_build import api

from .utils import fail_dir, metadata_dir


@pytest.mark.parametrize("pkg_format,pkg_ext", [(None, ".tar.bz2"), ("2", ".conda")])
def test_conda_pkg_format(
    pkg_format, pkg_ext, testing_config, testing_workdir, monkeypatch, capfd
):
    """Conda package format "2" builds .conda packages."""

    # Build the "entry_points" recipe, which contains a test pass for package.
    recipe = os.path.join(metadata_dir, "entry_points")

    # These variables are defined solely for testing purposes,
    # so they can be checked within build scripts
    testing_config.activate = True
    testing_config.conda_pkg_format = pkg_format
    monkeypatch.setenv("CONDA_TEST_VAR", "conda_test")
    monkeypatch.setenv("CONDA_TEST_VAR_2", "conda_test_2")

    output_file, = api.get_output_file_paths(recipe, config=testing_config)
    assert output_file.endswith(pkg_ext)

    api.build(recipe, config=testing_config)
    assert os.path.exists(output_file)

    out, err = capfd.readouterr()

    # Verify that test pass ran through api
    assert "Manual entry point" in out
    assert "TEST END: %s" % output_file in out
the-stack_0_4324
from openpyxl import load_workbook
from docx import Document
from docx.oxml.ns import qn
import os


# Set the document font
def set_font(document):
    document.styles['Normal'].font.name = u'宋体'
    document.styles['Normal']._element.rPr.rFonts.set(qn('w:eastAsia'), u'宋体')


def get_ws(file_path):
    # Read the Excel xlsx file
    wb = load_workbook(file_path)
    # Open the existing workbook
    ws = wb.active  # operate on the first worksheet by default
    return ws


def get_title(ws):
    title = []
    for col_index in range(ws.max_column):
        title.append(ws.cell(row=1, column=col_index + 1).value)
    return title


def print_title(title):
    print("表头字段如下:")
    for t in title:
        print(t, end=" ")
    print()


def enter_choice():
    optional = '是否YyNn'
    sure = '是Yy'
    while True:
        try:
            choice = input("是否采用与字段内容无关的数值递增的文件命名方式?(是/否)(y/n):\n")
            if choice not in optional:  # if the input is not among the allowed characters
                raise ValueError("需输入'是'、'否'、'y'、'n'中的一个字符")
            break
        except Exception as err:
            print("输入不符合要求:{}\n请重新输入".format(repr(err)))
    if choice in sure:
        return True
    else:
        return False


def enter_name_rules(title):
    while True:
        try:
            print("请输入命名字段")
            name_title = input()
            if name_title not in title:
                raise ValueError("请原样输入表头中的一个字段")
            name_rules = title.index(name_title)
            break
        except Exception as err:
            print(err)
    return name_rules


# Convert each record into a Word table
def excel_to_table(ws, name_rules, default_value, save_dir='ExcelToWordResult'):
    '''
    :param ws: the workbook object returned by load_workbook
    :param name_rules: the naming rule (index of the column used for file names)
    :param default_value: sentinel value meaning "use the default numeric naming"
    :return:
    '''
    # Get the number of rows and columns
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    row_num = ws.max_row
    column_num = ws.max_column

    # for row in ws.rows:  # ws.rows is a tuple holding the cells of each row
    #     for ceil in row:
    #         print(ceil.value)

    # Write the Word files
    for row_index in range(1, row_num):  # skip the header and write one file per record
        # Create a Word document
        document = Document()
        # Set the document font
        set_font(document)
        # Add a table to the Word document
        tbobj = document.add_table(rows=2, cols=column_num, style="Table Grid")
        # Fill in the header and the record
        for col_index in range(column_num):
            tbobj.cell(0, col_index).text = str(ws.cell(row=1, column=col_index + 1).value)  # header
            tbobj.cell(1, col_index).text = str(ws.cell(row=row_index + 1, column=col_index + 1).value)  # record
        if name_rules == default_value:  # default naming: an increasing number
            filename = str(row_index) + '.docx'  # file name
        else:
            filename = str(ws.cell(row=row_index + 1, column=name_rules + 1).value) + '.docx'
        # Save the file
        save_path = save_dir + '\\' + filename
        try:  # file I/O is involved, so handle exceptions
            document.save(save_path)
            # Print a message with the file's storage path
            current_path = os.getcwd()  # current working directory
            print("当前路径是{}".format(current_path))
            print("{} 存储成功".format(save_path))
        except Exception as err:
            print(err)
            print("文件存储失败")


def main(file_path):
    ws = get_ws(file_path)      # get the workbook object
    title = get_title(ws)       # get its header fields
    print_title(title)
    choice = enter_choice()     # let the user decide whether to use numeric incremental naming
    default_value = -1          # sentinel for the default naming scheme
    if choice:
        name_rules = default_value
    else:
        name_rules = enter_name_rules(title)
    excel_to_table(ws, name_rules, default_value)  # write the records to Word files as tables


if __name__ == "__main__":
    file_path = '学生信息表.xlsx'
    main(file_path)
the-stack_0_4325
import datetime import inspect import json import logging import logging.config import os import pathlib from types import ModuleType from typing import Any, Callable, ContextManager, List, Optional, Union import dotenv import orjson # type: ignore import sentry_sdk import structlog import platform import tempfile from structlog_sentry_logger import structlog_sentry ROOT_DIR = pathlib.Path("/tmp" if platform.system() == "Darwin" else tempfile.gettempdir()) LOG_DATA_DIR = ROOT_DIR / ".logs" LOG_DATA_DIR.mkdir(exist_ok=True) DATETIME_FORMAT = "iso" _CONFIGS = {"USE_ORJSON": True} def _toggle_json_library(use_orjson: bool = True) -> None: _CONFIGS["USE_ORJSON"] = use_orjson def get_namespaced_module_name(__file__: Union[pathlib.Path, str]) -> str: fully_qualified_path = pathlib.Path(__file__).resolve() prefix_dir = str(ROOT_DIR) if str(ROOT_DIR) in str(fully_qualified_path) else "/" namespaces = fully_qualified_path.relative_to(prefix_dir).with_suffix("").parts return ".".join(namespaces) def get_caller_name(prev_stack_frame: inspect.FrameInfo) -> str: deduced_calling_module = deduce_module(prev_stack_frame) return ( deduced_calling_module.__name__ if deduced_calling_module and not is_caller_main(deduced_calling_module.__name__) else get_namespaced_module_name(prev_stack_frame.filename) ) def deduce_module(prev_stack_frame: inspect.FrameInfo) -> Optional[ModuleType]: return inspect.getmodule(prev_stack_frame[0]) def get_caller_name_from_frames(stack_frames: List[inspect.FrameInfo]) -> str: prev_stack_frame = stack_frames[1] if __file__.endswith(".py") else stack_frames[0] return get_caller_name(prev_stack_frame) def get_logger(name: Optional[str] = None) -> Any: """ Convenience function that returns a logger Returns: A proxy that creates a correctly configured logger bound to the __name__ of the calling module """ del name stack_frames = inspect.stack() caller_name = get_caller_name_from_frames(stack_frames) if not structlog.is_configured(): timestamper = structlog.processors.TimeStamper(fmt=DATETIME_FORMAT) set_logging_config(caller_name, timestamper) set_structlog_config(timestamper) logger = structlog.get_logger(caller_name) logger.setLevel(logging.DEBUG) return logger getLogger = get_logger """ CamelCase alias for `structlog_sentry_logger.get_logger`. """ def get_config_dict() -> dict: """ Convenience function to get the local logging configuration dictionary, e.g., to help configure loggers from other libraries. 
Returns: The logging configuration dictionary that would be used to configure the Python logging library component of the logger """ stack_frames = inspect.stack() caller_name = get_caller_name_from_frames(stack_frames) timestamper = structlog.processors.TimeStamper(fmt=DATETIME_FORMAT) return get_logging_config(caller_name, timestamper) def is_caller_main(caller_name: str) -> bool: return caller_name == "__main__" def get_logging_config( module_name: str, timestamper: structlog.processors.TimeStamper ) -> dict: handlers = get_handlers(module_name) return { "version": 1, "disable_existing_loggers": False, "formatters": (get_formatters(timestamper)), "handlers": handlers, "loggers": { "": { "handlers": list(handlers.keys()), "level": "WARNING", "propagate": True, } }, } def set_logging_config( module_name: str, timestamper: structlog.processors.TimeStamper ) -> None: config_dict = get_logging_config(module_name, timestamper) logging.config.dictConfig(config_dict) def get_formatters(timestamper: structlog.processors.TimeStamper) -> dict: pre_chain = [ # Add the log level and a timestamp to the event_dict if the log # entry is not from structlog. structlog.stdlib.add_log_level, timestamper, structlog.stdlib.add_logger_name, ] return { "plain": { "()": structlog.stdlib.ProcessorFormatter, "processor": structlog.processors.JSONRenderer( serializer=serializer, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_SORT_KEYS, ), "foreign_pre_chain": pre_chain, }, "colored": { "()": structlog.stdlib.ProcessorFormatter, "processor": structlog.dev.ConsoleRenderer(colors=True), "format": "%(message)s [in %(funcName)s]", "foreign_pre_chain": pre_chain, }, } def serializer( *args: Any, default: Optional[Callable[[Any], Any]] = None, option: Optional[int] = orjson.OPT_NON_STR_KEYS | orjson.OPT_SORT_KEYS, ) -> str: if _CONFIGS["USE_ORJSON"]: return orjson.dumps(*args, default=default, option=option).decode() # type: ignore[misc] return json.dumps(*args, sort_keys=True) def get_handlers(module_name: str) -> dict: default_key = "default" base_handlers = { default_key: { "level": "DEBUG", "class": "logging.StreamHandler", "stream": "ext://sys.stdout", } } if _ENV_VARS_REQUIRED_BY_LIBRARY[get_handlers] in os.environ: # Prettify stdout/stderr streams base_handlers[default_key]["formatter"] = "colored" # Add filename handler file_timestamp = datetime.datetime.utcnow().isoformat().replace(":", "-") log_file_name = f"{file_timestamp}_{module_name}.jsonl" log_file_path = LOG_DATA_DIR / log_file_name base_handlers["filename"] = { "level": "DEBUG", "class": "logging.handlers.RotatingFileHandler", "filename": str(log_file_path), # 1 MB "maxBytes": 1 << 20, # type: ignore[dict-item] "backupCount": 3, # type: ignore[dict-item] "formatter": "plain", } else: base_handlers[default_key]["formatter"] = "plain" return base_handlers def set_structlog_config(timestamper: structlog.processors.TimeStamper) -> None: structlog_processors = [ timestamper, structlog.processors.StackInfoRenderer(), add_severity_field_from_level_if_in_cloud_environment, ] stdlib_log_compatibility_processors = [ structlog.stdlib.filter_by_level, structlog.stdlib.add_log_level, structlog.stdlib.add_logger_name, structlog.stdlib.PositionalArgumentsFormatter(), SentryBreadcrumbJsonProcessor(level=logging.ERROR, tag_keys="__all__"), ] # Note: MUST come last! 
format_wrapper_processer = [structlog.stdlib.ProcessorFormatter.wrap_for_formatter] structlog.configure( processors=( stdlib_log_compatibility_processors # type: ignore[arg-type] + structlog_processors + format_wrapper_processer # type: ignore[arg-type,operator] ), # See [Performance](https://www.structlog.org/en/stable/performance.html) # for an in-depth explanation of the below settings context_class=dict, logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger, cache_logger_on_first_use=True, ) def add_severity_field_from_level_if_in_cloud_environment( logger: Any, # pylint: disable=unused-argument method: str, # pylint: disable=unused-argument event_dict: structlog.types.EventDict, ) -> structlog.types.EventDict: """A custom processor for structlog for Cloud Logging compatibility Since Cloud Logging infers log levels from the `severity` key, simply duplicates `level` to the `severity` field in the logger's event dictionary. """ if ( is_cloud_logging_compatibility_mode_requested() or is_probably_in_cloud_environment() ): cloud_logging_log_level_key, python_log_level_key = "severity", "level" if cloud_logging_log_level_key in event_dict: # Dogfood by instantiating a local logger with own library. # Note: NO infinite loop since the below log message does *NOT* use # `severity` as a key in the emitted event. local_logger = get_logger() local_logger.warning( "Existing log value being overwritten", src_key=python_log_level_key, dest_key=cloud_logging_log_level_key, old_value=event_dict[cloud_logging_log_level_key], new_value=event_dict[python_log_level_key], logger_name=logger.name, ) event_dict[cloud_logging_log_level_key] = event_dict[python_log_level_key] return event_dict def is_cloud_logging_compatibility_mode_requested() -> bool: return ( _ENV_VARS_REQUIRED_BY_LIBRARY[is_cloud_logging_compatibility_mode_requested] in os.environ ) def is_probably_in_cloud_environment() -> bool: """Returns True if it is *likely* (but not guaranteed) logging is occurring in the context of a Cloud Logging environment""" for env_var in [ # GKE # There are no GKE-specific environment variable that definitively imply we are # running in GKE... Falling back to detecting Kubernetes-injected environment # variables since those are the only ones present in GKE pods that *could* imply # we are running in GKE. # Kubernetes # see: https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables "KUBERNETES_SERVICE_HOST", # Cloud Function # see: https://cloud.google.com/functions/docs/configuring/env-var#runtime_environment_variables_set_automatically "GCP_PROJECT", # GAE # see: https://cloud.google.com/functions/docs/configuring/env-var#runtime_environment_variables_set_automatically "GOOGLE_CLOUD_PROJECT", ]: if env_var in os.environ: return True return False _ENV_VARS_REQUIRED_BY_LIBRARY = { get_handlers: "STRUCTLOG_SENTRY_LOGGER_LOCAL_DEVELOPMENT_LOGGING_MODE_ON", is_cloud_logging_compatibility_mode_requested: "STRUCTLOG_SENTRY_LOGGER_CLOUD_LOGGING_COMPATIBILITY_MODE_ON", sentry_sdk.init: "SENTRY_DSN", } class SentryBreadcrumbJsonProcessor(structlog_sentry.SentryJsonProcessor): """ Addresses: `SentryJsonProcessor breaks logging breadcrumbs #25`_ (source_) .. _`SentryJsonProcessor breaks logging breadcrumbs #25`: https://github.com/kiwicom/structlog-sentry/issues/25 .. 
_`source`: https://github.com/kiwicom/structlog-sentry/issues/25#issuecomment-660292563 """ def __init__( # pylint: disable=too-many-arguments self, breadcrumb_level: int = logging.INFO, level: int = logging.WARNING, active: bool = True, as_extra: bool = True, tag_keys: Union[List[str], str] = None, ) -> None: self.breadcrumb_level = breadcrumb_level super().__init__( level=level, active=active, as_extra=as_extra, tag_keys=tag_keys ) @staticmethod def save_breadcrumb(logger: Any, event_dict: structlog.types.EventDict) -> None: data = event_dict.copy() # type: ignore[attr-defined] data.pop("event") data.pop("logger", None) data.pop("level", None) data.pop("timestamp", None) breadcrumb = { "ty": "log", "level": event_dict["level"].lower(), "category": event_dict.get("logger") or logger.name, "message": event_dict["event"], "data": data, } sentry_sdk.add_breadcrumb(breadcrumb, hint={"event_dict": event_dict}) def __call__( self, logger: Any, method: str, event_dict: structlog.types.EventDict ) -> structlog.types.EventDict: do_breadcrumb = ( getattr(logging, event_dict["level"].upper()) >= self.breadcrumb_level ) if do_breadcrumb: self.save_breadcrumb(logger, event_dict) return super().__call__(logger=logger, method=method, event_dict=event_dict) def _load_library_specific_env_vars() -> None: # Inject into the environment ONLY the env vars required by the library; # we manually update/add to the the environment ONLY the keys in a user's `.env` for # which the library is inspecting (i.e., the set intersection between the # aforementioned), and only if they weren't already defined in the environment. users_dotenv_values = dotenv.dotenv_values(dotenv.find_dotenv()) legal_env_vars_keys = ( _ENV_VARS_REQUIRED_BY_LIBRARY.values() & users_dotenv_values.keys() ) for k in legal_env_vars_keys: v = users_dotenv_values[k] # Any env-var-to-add already defined in the environment will take precedent over # what is defined in a user's `.env` file. if k not in os.environ and v is not None: os.environ[k] = v def _init_sentry() -> ContextManager[Any]: # Note: if DSN isn't defined, will silently not transmit telemetry return sentry_sdk.init() # pylint: disable=abstract-class-instantiated
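# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module):
# a caller simply asks for a logger and emits structured events; get_logger()
# binds the logger to the calling module's name. The event name and keyword
# arguments below are examples.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    _demo_logger = get_logger()
    _demo_logger.info("structlog_sentry_logger demo event", example_key="example_value")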
the-stack_0_4326
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class PerlIntervaltree(PerlPackage):
    """Set::IntervalTree uses Interval Trees to store and efficiently
    look up ranges using a range-based lookup."""

    homepage = "https://metacpan.org/release/Set-IntervalTree"
    url = "https://cpan.metacpan.org/authors/id/B/BE/BENBOOTH/Set-IntervalTree-0.10.tar.gz"

    version('0.10', '42efe9369f1b30e7fd04e10c07226b06')

    depends_on('perl-extutils-makemaker', type='build')
the-stack_0_4327
from machine import mem32 # import time import sys import uasyncio from i2c_responder_base import I2CResponderBase import calc_icmpv6_chksum class I2CResponder(I2CResponderBase): """Implementation of a (polled) Raspberry Pico I2C Responder. Subclass of the original I2CResponder class which has been renamed I2CReponderBase. See that class for more info. This new version I2CResponder implments a protocol which both Controller and Responder must adhere to in order to send longer messages. Created: March 30, 2022 By: D. Garrett """ VERSION = "2.0.1" def __init__(self, i2c_device_id=0, sda_gpio=0, scl_gpio=1, responder_address=0x41): """Initialize. Args: i2c_device_id (int, optional): The internal Pico I2C device to use (0 or 1). sda_gpio (int, optional): The gpio number of the pin to use for SDA. scl_gpio (int, optional): The gpio number of the pin to use for SCL. responder_address (int, required): The I2C address to assign to this Responder. """ super().__init__(i2c_device_id=i2c_device_id, sda_gpio=sda_gpio, scl_gpio=scl_gpio, responder_address=responder_address) """ Send a long message to the Controller 16 bytes at a time. First send 4 byte length of message. Then send blocks of up to 16 bytes. """ async def send_msg(self, msg): # send length of message # UTF8 may have multibyte characters buff = bytearray(msg.encode('utf8')) rem_bytes = len(buff) len_buff = bytearray(rem_bytes.to_bytes(4,sys.byteorder)) await self.send_bytes(len_buff) # print("sending: " + str(len_buff)) # send message msg_pos = 0 # if controller no longer requesting input # stop sending data while rem_bytes > 0: # and self.read_is_pending(): if rem_bytes <= 16: await self.send_bytes(buff[msg_pos:]) return await self.send_bytes(buff[msg_pos:msg_pos+16]) msg_pos += 16 rem_bytes -= 16 """ Send a block bytes of up to 16 bytes of data """ async def send_bytes(self,buffer_out): for value in buffer_out: # loop (polling) until the Controller issues an I2C READ. while not self.read_is_pending(): await uasyncio.sleep_ms(0) # stop sending if controller no longer soliciting input # if not self.read_is_pending(): # return self.put_read_data(value) """ Read a long message from the Controller. Send an acknowledgment to the Controller of if the receive was successful. If receive failed, retry up to 5 times, then send 2 telling controller it was a permanent error and don't bother to resend. If failed receive, returns an empty string, else returns the string received. """ async def rcv_msg(self): if not self.write_data_is_available(): return "" retry = 8 ok = False while not ok and retry > 0: b_array, ok = await self.rcv_block() retry = retry - 1 if retry > 0: # Controller will resend if not okay await self.send_ack(int(ok)) if not ok: """ print("receive error... ",end="") print((5-retry)) print("received: ", end="") print(b_array) """ # await uasyncio.sleep_ms(0) else: # permanent error - don't resend print("***** permanent receive error *****") await self.send_ack(2) if ok: # don't try to decode invalid receive. # may result in decode error. return b_array.decode('utf8') else: return "" """ Send a 2 byte int acknowledgement to the Controller of message received. 1 = message received ok and checksum matched 0 = message not received ok, resend 2 = message not received ok, but don't resend """ async def send_ack(self, ok): b = bytearray(ok.to_bytes(2,sys.byteorder)) await self.send_bytes(b) """ Receive a byte array data where the first two bytes of the input stream contain the msg length and the next two contain a checksum. 
Return a byte array of data and True/False for if the checksum matched. """ async def rcv_block(self): # read length of message and checksum data = self.get_write_data(max_size=4) n_bytes = int.from_bytes(bytes(data[0:2]),sys.byteorder) chksum = int.from_bytes(bytes(data[2:4]),sys.byteorder) """ print("rcv bytes: ",end="") print(n_bytes, end="") print(", checksum: ",end="") print(chksum) """ r = await self.rcv_bytes(n_bytes) # print("returning results") # r = bytearray(data) cs = calc_icmpv6_chksum.calc_icmpv6_chksum(r) # wait until all sent data is received # and controller issues a read for the ack while not self.read_is_pending(): if self.write_data_is_available(): self.get_write_data(max_size=16) return r, cs == chksum """ Receive bytes in blocks of 16 bytes or less until n_bytes of data received or "times out". Here, "times out" means no bytes received for 50ms. Returns a list of bytes. """ async def rcv_bytes(self, rem_bytes): data = bytearray(rem_bytes) data_offset = 0 wait_cnt = 0 empty = [] while rem_bytes > 0: if self.write_data_is_available(): b = self.get_write_data(max_size=16) else: b = empty if len(b) == 0: print("+",end="") await uasyncio.sleep_ms(5) wait_cnt = wait_cnt + 1 if wait_cnt > 50: # time out receive - exit early # print("i2c_responder.rcv_msg() tired of waiting, exiting before EOD") return data[:data_offset] else: wait_cnt = 0 r_cnt = len(b) rem_bytes = rem_bytes - r_cnt for i in range(r_cnt): data[data_offset] = b[i] data_offset = data_offset + 1 if rem_bytes > 0 and r_cnt != 16: # received a short block print("**** <16 bytes in block: ", end="") print(len(b)) return data[:data_offset] """ print("v2 rcvd '", end="") print(bytearray(b),end="") print("' blk remain: ",end="") print(rem_bytes) """ return data
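# ---------------------------------------------------------------------------
# Illustrative responder loop (a sketch, not part of the original class):
# wait for a length-prefixed, checksummed message from the Controller and
# echo it back with send_msg(), using the block protocol implemented above.
# The GPIO pins and the I2C address are the defaults from __init__ and may
# need adjusting for a particular board.
# ---------------------------------------------------------------------------
async def _echo_forever():
    responder = I2CResponder(i2c_device_id=0, sda_gpio=0, scl_gpio=1, responder_address=0x41)
    while True:
        msg = await responder.rcv_msg()
        if msg:
            await responder.send_msg("echo: " + msg)
        await uasyncio.sleep_ms(10)

# To run on the Pico:
# uasyncio.run(_echo_forever())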
the-stack_0_4328
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from __future__ import print_function, unicode_literals from unittest import TestCase, skip from docutils.core import Publisher from docutils import io from m2rr import prolog, convert class RendererTestBase(TestCase): def conv(self, src, **kwargs): out = convert(src, **kwargs) self.check_rst(out) return out def conv_no_check(self, src, **kwargs): out = convert(src, **kwargs) return out def check_rst(self, rst): pub = Publisher(reader=None, parser=None, writer=None, settings=None, source_class=io.StringInput, destination_class=io.StringOutput) pub.set_components(reader_name='standalone', parser_name='restructuredtext', writer_name='pseudoxml') pub.process_programmatic_settings( settings_spec=None, settings_overrides={'output_encoding': 'unicode'}, config_section=None, ) pub.set_source(rst, source_path=None) pub.set_destination(destination=None, destination_path=None) output = pub.publish(enable_exit_status=False) self.assertLess(pub.document.reporter.max_level, 0) return output, pub class TestBasic(RendererTestBase): def test_fail_rst(self): with self.assertRaises(AssertionError): # This check should be failed and report warning self.check_rst('```') def test_simple_paragraph(self): src = 'this is a sentence.\n' out = self.conv(src) self.assertEqual(out, '\n' + src) def test_multiline_paragraph(self): src = '\n'.join([ 'first sentence.', 'second sentence.', ]) out = self.conv(src) self.assertEqual(out, '\n' + src + '\n') def test_multi_paragraph(self): src = '\n'.join([ 'first paragraph.', '', 'second paragraph.', ]) out = self.conv(src) self.assertEqual(out, '\n' + src + '\n') def test_hr(self): src = 'a\n\n---\n\nb' out = self.conv(src) self.assertEqual(out, '\na\n\n----\n\nb\n') def test_linebreak(self): src = 'abc def \nghi' out = self.conv(src) self.assertEqual( out, prolog + '\nabc def\\ :raw-html-m2rr:`<br>`\nghi' + '\n', ) class TestInlineMarkdown(RendererTestBase): def test_inline_code(self): src = '`a`' out = self.conv(src) self.assertEqual(out.replace('\n', ''), '``a``') def test_inline_code_with_backticks(self): src = '```a``a```' out = self.conv(src) self.assertEqual(out.strip(), '.. role:: raw-html-m2rr(raw)\n' ' :format: html\n\n\n' ':raw-html-m2rr:`<code class="docutils literal">' '<span class="pre">a&#96;&#96;a</span></code>`' ) def test_strikethrough(self): src = ('~~a~~') self.conv(src) def test_emphasis(self): src = '*a*' out = self.conv(src) self.assertEqual(out.replace('\n', ''), '*a*') def test_emphasis_(self): src = '_a_' out = self.conv(src) self.assertEqual(out.replace('\n', ''), '*a*') def test_emphasis_no_(self): src = '_a_' out = self.conv(src, no_underscore_emphasis=True) self.assertEqual(out.replace('\n', ''), '_a_') def test_double_emphasis(self): src = '**a**' out = self.conv(src) self.assertEqual(out.replace('\n', ''), '**a**') def test_double_emphasis__(self): src = '__a__' out = self.conv(src) self.assertEqual(out.replace('\n', ''), '**a**') def test_emphasis_no__(self): src = '__a__' out = self.conv(src, no_underscore_emphasis=True) self.assertEqual(out.replace('\n', ''), '__a__') def test_autolink(self): src = 'link to http://example.com/ in sentence.' out = self.conv(src) self.assertEqual(out, '\n' + src + '\n') def test_link(self): src = 'this is a [link](http://example.com/).' out = self.conv(src) self.assertEqual( out, '\nthis is a `link <http://example.com/>`_.\n') def test_anonymous_link(self): src = 'this is a [link](http://example.com/).' 
out = self.conv(src, anonymous_references=True) self.assertEqual( out, '\nthis is a `link <http://example.com/>`__.\n') def test_link_with_rel_link_enabled(self): src = 'this is a [link](http://example.com/).' out = self.conv_no_check( src, parse_relative_links=True ) self.assertEqual( out, '\nthis is a `link <http://example.com/>`_.\n') def test_anonymous_link_with_rel_link_enabled(self): src = 'this is a [link](http://example.com/).' out = self.conv_no_check( src, parse_relative_links=True, anonymous_references=True ) self.assertEqual( out, '\nthis is a `link <http://example.com/>`__.\n') def test_anchor(self): src = 'this is an [anchor](#anchor).' out = self.conv_no_check( src, parse_relative_links=True ) self.assertEqual( out, '\nthis is an :ref:`anchor <anchor>`.\n') def test_relative_link(self): src = 'this is a [relative link](a_file.md).' out = self.conv_no_check( src, parse_relative_links=True ) self.assertEqual( out, '\nthis is a :doc:`relative link <a_file>`.\n') def test_relative_link_with_anchor(self): src = 'this is a [relative link](a_file.md#anchor).' out = self.conv_no_check( src, parse_relative_links=True ) self.assertEqual( out, '\nthis is a :doc:`relative link <a_file>`.\n') def test_link_title(self): src = 'this is a [link](http://example.com/ "example").' out = self.conv(src) self.assertEqual( out, '.. role:: raw-html-m2rr(raw)\n' ' :format: html\n\n\n' 'this is a :raw-html-m2rr:' '`<a href="http://example.com/" title="example">link</a>`.\n' ) def test_image_link(self): src = '[![Alt Text](image_taget_url)](link_target_url)' out = self.conv(src) self.assertEqual( out, '\n\n.. image:: image_taget_url\n' ' :target: link_target_url\n :alt: Alt Text\n\n', ) def test_rest_role(self): src = 'a :code:`some code` inline.' out = self.conv(src) self.assertEqual(out, '\n' + src + '\n') def test_rest_role2(self): src = 'a `some code`:code: inline.' out = self.conv(src) self.assertEqual(out, '\n' + src + '\n') def test_rest_link(self): src = 'a `RefLink <http://example.com>`_ here.' out = self.conv(src) self.assertEqual(out, '\n' + src + '\n') def test_rest_link_and_role(self): src = 'a :code:`a` and `RefLink <http://example.com>`_ here.' out = self.conv(src) self.assertEqual(out, '\n' + src + '\n') def test_rest_link_and_role2(self): src = 'a `a`:code: and `RefLink <http://example.com>`_ here.' out = self.conv(src) self.assertEqual(out, '\n' + src + '\n') def test_rest_role_incomplete(self): src = 'a co:`de` and `RefLink <http://example.com>`_ here.' out = self.conv(src) self.assertEqual( out, '\na co:\\ ``de`` and `RefLink <http://example.com>`_ here.\n', ) def test_rest_role_incomplete2(self): src = 'a `RefLink <http://example.com>`_ and co:`de` here.' out = self.conv(src) self.assertEqual( out, '\na `RefLink <http://example.com>`_ and co:\\ ``de`` here.\n', ) def test_rest_role_with_code(self): src = 'a `code` and :code:`rest` here.' out = self.conv(src) self.assertEqual(out, '\na ``code`` and :code:`rest` here.\n') def test_rest2_role_with_code(self): src = 'a `code` and `rest`:code: here.' out = self.conv(src) self.assertEqual(out, '\na ``code`` and `rest`:code: here.\n') def test_code_with_rest_role(self): src = 'a :code:`rest` and `code` here.' out = self.conv(src) self.assertEqual(out, '\na :code:`rest` and ``code`` here.\n') def test_code_with_rest_role2(self): src = 'a `rest`:code: and `code` here.' out = self.conv(src) self.assertEqual(out, '\na `rest`:code: and ``code`` here.\n') def test_rest_link_with_code(self): src = 'a `RefLink <a>`_ and `code` here.' 
out = self.conv(src) self.assertEqual(out, '\na `RefLink <a>`_ and ``code`` here.\n') def test_code_with_rest_link(self): src = 'a `code` and `RefLink <a>`_ here.' out = self.conv(src) self.assertEqual(out, '\na ``code`` and `RefLink <a>`_ here.\n') def test_inline_math(self): src = 'this is `$E = mc^2$` inline math.' out = self.conv(src) self.assertEqual(out, '\nthis is :math:`E = mc^2` inline math.\n') def test_disable_inline_math(self): src = 'this is `$E = mc^2$` inline math.' out = self.conv(src, disable_inline_math=True) self.assertEqual(out, '\nthis is ``$E = mc^2$`` inline math.\n') def test_inline_html(self): src = 'this is <s>html</s>.' out = self.conv(src) self.assertEqual( out, prolog + '\nthis is :raw-html-m2rr:`<s>html</s>`.\n') def test_block_html(self): src = '<h1>title</h1>' out = self.conv(src) self.assertEqual(out, '\n\n.. raw:: html\n\n <h1>title</h1>\n\n') class TestBlockQuote(RendererTestBase): def test_block_quote(self): src = '> q1\n> q2' out = self.conv(src) self.assertEqual(out, '\n..\n\n q1\n q2\n\n') def test_block_quote_nested(self): src = '> q1\n> > q2' out = self.conv(src) # one extra empty line is inserted, but still valid rst anyway self.assertEqual(out, '\n..\n\n q1\n\n ..\n\n q2\n\n') @skip('markdown does not support dedent in block quote') def test_block_quote_nested_2(self): src = '> q1\n> > q2\n> q3' out = self.conv(src) self.assertEqual(out, '\n..\n\n q1\n\n ..\n q2\n\n q3\n\n') class TestCodeBlock(RendererTestBase): def test_plain_code_block(self): src = '\n'.join([ '```', 'pip install sphinx', '```', ]) out = self.conv(src) # Default to text block if non specified self.assertEqual(out, '\n.. code-block:: text\n\n\ pip install sphinx\n') def test_plain_code_block_tilda(self): src = '\n'.join([ '~~~', 'pip install sphinx', '~~~', ]) out = self.conv(src) self.assertEqual(out, '\n.. code-block:: text\n\n pip install \ sphinx\n') def test_code_block_math(self): src = '\n'.join([ '```math', 'E = mc^2', '```', ]) out = self.conv(src) self.assertEqual(out, '\n.. math::\n\n E = mc^2\n') def test_plain_code_block_indent(self): src = '\n'.join([ '```', 'pip install sphinx', ' new line', '```', ]) out = self.conv(src) self.assertEqual( out, '\n.. code-block:: text\n\n pip install \ sphinx\n new line\n', ) def test_python_code_block(self): src = '\n'.join([ '```python', 'print(1)', '```', ]) out = self.conv(src) self.assertEqual(out, '\n.. code-block:: python\n\n print(1)\n') def test_python_code_block_indent(self): src = '\n'.join([ '```python', 'def a(i):', ' print(i)', '```', ]) out = self.conv(src) self.assertEqual( out, '\n.. code-block:: python\n\n def a(i):\n print(i)\n', ) class TestImage(RendererTestBase): def test_image(self): src = '![alt text](a.png)' out = self.conv(src) # first and last newline is inserted by paragraph self.assertEqual( out, '\n\n.. image:: a.png\n :target: a.png\n :alt: alt text\n\n', ) def test_image_title(self): src = '![alt text](a.png "title")' self.conv(src) # title is not supported now class TestHeading(RendererTestBase): def test_heading(self): src = '# head 1' out = self.conv(src) self.assertEqual(out, '\nhead 1\n' + '=' * 6 + '\n') def test_heading_multibyte(self): src = '# マルチバイト文字\n' out = self.conv(src) self.assertEqual(out, '\nマルチバイト文字\n' + '=' * 16 + '\n') class TestList(RendererTestBase): def test_ul(self): src = '* list' out = self.conv(src) self.assertEqual(out, '\n\n* list\n') def test_ol(self): src = '1. list' out = self.conv(src) self.assertEqual(out, '\n\n#. 
list\n') def test_nested_ul(self): src = '\n'.join([ '* list 1', '* list 2', ' * list 2.1', ' * list 2.2', '* list 3', ]) out = self.conv(src) self.assertEqual( out, '\n\n* list 1\n' '* list 2\n\n' ' * list 2.1\n' ' * list 2.2\n\n' '* list 3\n', ) def test_nested_ul_2(self): src = '\n'.join([ '* list 1', '* list 2', ' * list 2.1', ' * list 2.2', ' * list 2.2.1', ' * list 2.2.2', '* list 3', ]) out = self.conv(src) self.assertEqual( out, '\n\n* list 1\n' '* list 2\n\n' ' * list 2.1\n' ' * list 2.2\n\n' ' * list 2.2.1\n' ' * list 2.2.2\n\n' '* list 3\n' ) def test_nested_ol(self): src = '\n'.join([ '1. list 1', '2. list 2', ' 2. list 2.1', ' 3. list 2.2', '3. list 3', ]) out = self.conv(src) self.assertEqual( out, '\n\n#. list 1\n' '#. list 2\n' '\n' ' #. list 2.1\n' ' #. list 2.2\n' '\n' '#. list 3\n', ) def test_nested_ol_2(self): src = '\n'.join([ '1. list 1', '2. list 2', ' 3. list 2.1', ' 4. list 2.2', ' 5. list 2.2.1', ' 6. list 2.2.2', '7. list 3', ]) out = self.conv(src) self.assertEqual( out, '\n'.join([ '\n\n#. list 1', '#. list 2', '', ' #. list 2.1', ' #. list 2.2', '', ' #. list 2.2.1', ' #. list 2.2.2', '', '#. list 3\n', ]) ) def test_nested_mixed_1(self): src = '\n'.join([ '1. list 1', '2. list 2', ' * list 2.1', ' * list 2.2', ' 1. list 2.2.1', ' 2. list 2.2.2', '7. list 3', ]) out = self.conv(src) self.assertEqual( out, '\n'.join([ '\n\n#. list 1', '#. list 2', '', ' * list 2.1', ' * list 2.2', '', ' #. list 2.2.1', ' #. list 2.2.2', '', '#. list 3\n', ]) ) def test_nested_multiline_1(self): src = '\n'.join([ '* list 1', ' list 1 cont', '* list 2', ' list 2 cont', ' * list 2.1', ' list 2.1 cont', ' * list 2.2', ' list 2.2 cont', ' * list 2.2.1', ' * list 2.2.2', '* list 3', ]) out = self.conv(src) self.assertEqual( out, '\n'.join([ '\n\n* list 1', ' list 1 cont', '* list 2', ' list 2 cont', '', ' * list 2.1', ' list 2.1 cont', ' * list 2.2', ' list 2.2 cont', '', ' * list 2.2.1', ' * list 2.2.2', '', '* list 3\n', ]) ) def test_nested_multiline_2(self): src = '\n'.join([ '1. list 1', ' list 1 cont', '1. list 2', ' list 2 cont', ' 1. list 2.1', ' list 2.1 cont', ' 1. list 2.2', ' list 2.2 cont', ' 1. list 2.2.1', ' 1. list 2.2.2', '1. list 3', ]) out = self.conv(src) self.assertEqual( out, '\n'.join([ '\n\n#. list 1', ' list 1 cont', '#. list 2', ' list 2 cont', '', ' #. list 2.1', ' list 2.1 cont', ' #. list 2.2', ' list 2.2 cont', '', ' #. list 2.2.1', ' #. list 2.2.2', '', '#. list 3\n', ]) ) def test_nested_multiline_3(self): src = '\n'.join([ '1. list 1', ' list 1 cont', '1. list 2', ' list 2 cont', ' * list 2.1', ' list 2.1 cont', ' * list 2.2', ' list 2.2 cont', ' 1. list 2.2.1', ' 1. list 2.2.2', '1. list 3', ]) out = self.conv(src) self.assertEqual( out, '\n'.join([ '\n\n#. list 1', ' list 1 cont', '#. list 2', ' list 2 cont', '', ' * list 2.1', ' list 2.1 cont', ' * list 2.2', ' list 2.2 cont', '', ' #. list 2.2.1', ' #. list 2.2.2', '', '#. list 3\n', ]) ) class TestConplexText(RendererTestBase): def test_code(self): src = ''' some sentence ```python print(1) ``` some sentence # title ```python print(1) ``` --- end ''' self.conv(src) class TestTable(RendererTestBase): def test_table(self): src = '''h1 | h2 | h3\n--- | --- | ---\n1 | 2 | 3\n4 | 5 | 6''' out = self.conv(src) self.assertEqual(out, '\n'.join([ '', '.. 
list-table::', ' :header-rows: 1', '', ' * - h1', ' - h2', ' - h3', ' * - 1', ' - 2', ' - 3', ' * - 4', ' - 5', ' - 6', '', '', ])) class TestFootNote(RendererTestBase): def test_footnote(self): src = '\n'.join([ 'This is a[^1] footnote[^2] ref[^ref] with rst [#a]_.', '', '[^1]: note 1', '[^2]: note 2', '[^ref]: note ref', '.. [#a] note rst', ]) out = self.conv(src) self.assertEqual(out, '\n'.join([ '', 'This is a\\ [#fn-1]_ ' 'footnote\\ [#fn-2]_ ref\\ [#fn-ref]_ with rst [#a]_.', '', '.. [#a] note rst', # one empty line inserted... '', '.. [#fn-1] note 1', '.. [#fn-2] note 2', '.. [#fn-ref] note ref', '', ])) def test_sphinx_ref(self): src = 'This is a sphinx [ref]_ global ref.\n\n.. [ref] ref text' out = self.conv(src) self.assertEqual(out, '\n' + src) class TestDirective(RendererTestBase): def test_comment_oneline(self): src = '.. a' out = self.conv(src) self.assertEqual(out, '\n.. a') def test_comment_indented(self): src = ' .. a' out = self.conv(src) self.assertEqual(out, '\n .. a') def test_comment_newline(self): src = '..\n\n comment\n\nnewline' out = self.conv(src) self.assertEqual(out, '\n..\n\n comment\n\nnewline\n') def test_comment_multiline(self): comment = ( '.. this is comment.\n' ' this is also comment.\n' '\n' '\n' ' comment may include empty line.\n' '\n\n') src = comment + '`eoc`' out = self.conv(src) self.assertEqual(out, '\n' + comment + '``eoc``\n') class TestRestCode(RendererTestBase): def test_rest_code_block_empty(self): src = '\n\n::\n\n' out = self.conv(src) self.assertEqual(out, '\n\n') def test_eol_marker(self): src = 'a::\n\n code\n' out = self.conv(src) self.assertEqual(out, '\na:\n\n.. code-block:: text\n\n code\n') def test_eol_marker_remove(self): src = 'a ::\n\n code\n' out = self.conv(src) self.assertEqual(out, '\na\n\n.. code-block:: text\n\n code\n')
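
# ---------------------------------------------------------------------------
# Usage sketch (appended note, not part of the original test module): calling
# m2rr.convert() directly, which is what RendererTestBase.conv() wraps above.
# The commented expectation mirrors the assertion in test_link.
from m2rr import convert as _convert

_rst = _convert('this is a [link](http://example.com/).\n')
# _rst == '\nthis is a `link <http://example.com/>`_.\n'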
the-stack_0_4330
# flake8: noqa import base64 import collections import datetime import inspect import os import os.path as osp import pickle import re import subprocess import sys import cloudpickle import dateutil.tz import numpy as np from garage.core import Serializable class AttrDict(dict): def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) self.__dict__ = self def flatten(l): return [item for sublist in l for item in sublist] class BinaryOp(Serializable): def __init__(self): Serializable.quick_init(self, locals()) def rdiv(self, a, b): return b / a # def __init__(self, opname, a, b): # self.opname = opname # self.a = a # self.b = b class VariantDict(AttrDict): def __init__(self, d, hidden_keys): super(VariantDict, self).__init__(d) self._hidden_keys = hidden_keys def dump(self): return {k: v for k, v in self.items() if k not in self._hidden_keys} class VariantGenerator: """ Usage: vg = VariantGenerator() vg.add("param1", [1, 2, 3]) vg.add("param2", ['x', 'y']) vg.variants() => # all combinations of [1,2,3] x ['x','y'] Supports noncyclic dependency among parameters: vg = VariantGenerator() vg.add("param1", [1, 2, 3]) vg.add("param2", lambda param1: [param1+1, param1+2]) vg.variants() => # .. """ def __init__(self): self._variants = [] self._populate_variants() self._hidden_keys = [] for k, vs, cfg in self._variants: if cfg.get('hide', False): self._hidden_keys.append(k) def add(self, key, vals, **kwargs): self._variants.append((key, vals, kwargs)) def _populate_variants(self): methods = inspect.getmembers( self.__class__, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x)) methods = [ x[1].__get__(self, self.__class__) for x in methods if getattr(x[1], '__is_variant', False) ] for m in methods: self.add(m.__name__, m, **getattr(m, '__variant_config', dict())) def variants(self, randomized=False): ret = list(self.ivariants()) if randomized: np.random.shuffle(ret) return list(map(self.variant_dict, ret)) def variant_dict(self, variant): return VariantDict(variant, self._hidden_keys) def to_name_suffix(self, variant): suffix = [] for k, vs, cfg in self._variants: if not cfg.get('hide', False): suffix.append(k + '_' + str(variant[k])) return '_'.join(suffix) def ivariants(self): dependencies = list() for key, vals, _ in self._variants: if hasattr(vals, '__call__'): args = inspect.getfullargspec(vals).args if hasattr(vals, 'im_self') or hasattr(vals, '__self__'): # remove the first 'self' parameter args = args[1:] dependencies.append((key, set(args))) else: dependencies.append((key, set())) sorted_keys = [] # topo sort all nodes while len(sorted_keys) < len(self._variants): # get all nodes with zero in-degree free_nodes = [k for k, v in dependencies if not v] if not free_nodes: error_msg = 'Invalid parameter dependency: \n' for k, v in dependencies: if v: error_msg += k + ' depends on ' + ' & '.join(v) + '\n' raise ValueError(error_msg) dependencies = [(k, v) for k, v in dependencies if k not in free_nodes] # remove the free nodes from the remaining dependencies for _, v in dependencies: v.difference_update(free_nodes) sorted_keys += free_nodes return self._ivariants_sorted(sorted_keys) def _ivariants_sorted(self, sorted_keys): if not sorted_keys: yield dict() else: first_keys = sorted_keys[:-1] first_variants = self._ivariants_sorted(first_keys) last_key = sorted_keys[-1] last_vals = [v for k, v, _ in self._variants if k == last_key][0] if hasattr(last_vals, '__call__'): last_val_keys = inspect.getfullargspec(last_vals).args if hasattr(last_vals, 'im_self') 
or hasattr( last_vals, '__self__'): last_val_keys = last_val_keys[1:] else: last_val_keys = None for variant in first_variants: if hasattr(last_vals, '__call__'): last_variants = last_vals( **{k: variant[k] for k in last_val_keys}) for last_choice in last_variants: yield AttrDict(variant, **{last_key: last_choice}) else: for last_choice in last_vals: yield AttrDict(variant, **{last_key: last_choice}) def variant(*args, **kwargs): def _variant(fn): fn.__is_variant = True fn.__variant_config = kwargs return fn if len(args) == 1 and isinstance(args[0], collections.Callable): return _variant(args[0]) return _variant def query_yes_no(question, default='yes'): """Ask a yes/no question via raw_input() and return their answer. "question" is a string that is presented to the user. "default" is the presumed answer if the user just hits <Enter>. It must be "yes" (the default), "no" or None (meaning an answer is required of the user). The "answer" return value is True for "yes" or False for "no". """ valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False} if default is None: prompt = ' [y/n] ' elif default == 'yes': prompt = ' [Y/n] ' elif default == 'no': prompt = ' [y/N] ' else: raise ValueError("invalid default answer: '%s'" % default) while True: sys.stdout.write(question + prompt) choice = input().lower() if default is not None and choice == '': return valid[default] elif choice in valid: return valid[choice] else: sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n") exp_count = 0 now = datetime.datetime.now(dateutil.tz.tzlocal()) timestamp = now.strftime('%Y_%m_%d_%H_%M_%S') def run_experiment(method_call=None, batch_tasks=None, exp_prefix='experiment', exp_name=None, log_dir=None, script='garage.experiment.experiment_wrapper', python_command='python', dry=False, env=None, variant=None, use_tf=False, use_gpu=False, pre_commands=None, **kwargs): """Serialize the method call and run the experiment using the specified mode. Args: method_call (callable): A method call. batch_tasks (list[dict]): A batch of method calls. exp_prefix (str): Name prefix for the experiment. exp_name (str): Name of the experiment. log_dir (str): Log directory for the experiment. script (str): The name of the entrance point python script. python_command (str): Python command to run the experiment. dry (bool): Whether to do a dry-run, which only prints the commands without executing them. env (dict): Extra environment variables. variant (dict): If provided, should be a dictionary of parameters. use_tf (bool): Used along with the Theano and GPU configuration when using TensorFlow use_gpu (bool): Whether the launched task is running on GPU. This triggers a few configuration changes including certain environment flags. pre_commands (str): Pre commands to run the experiment. 
""" if method_call is None and batch_tasks is None: raise Exception( 'Must provide at least either method_call or batch_tasks') for task in (batch_tasks or [method_call]): if not hasattr(task, '__call__'): raise ValueError('batch_tasks should be callable') # ensure variant exists if variant is None: variant = dict() if batch_tasks is None: batch_tasks = [ dict( kwargs, pre_commands=pre_commands, method_call=method_call, exp_name=exp_name, log_dir=log_dir, env=env, variant=variant) ] global exp_count if use_tf: if not use_gpu: os.environ['CUDA_VISIBLE_DEVICES'] = '' else: os.unsetenv('CUDA_VISIBLE_DEVICES') for task in batch_tasks: call = task.pop('method_call') data = base64.b64encode(cloudpickle.dumps(call)).decode('utf-8') task['args_data'] = data exp_count += 1 if task.get('exp_name', None) is None: task['exp_name'] = '{}_{}_{:04n}'.format(exp_prefix, timestamp, exp_count) if task.get('log_dir', None) is None: task['log_dir'] = ( '{log_dir}/local/{exp_prefix}/{exp_name}'.format( log_dir=osp.join(os.getcwd(), 'data'), exp_prefix=exp_prefix.replace('_', '-'), exp_name=task['exp_name'])) if task.get('variant', None) is not None: variant = task.pop('variant') if 'exp_name' not in variant: variant['exp_name'] = task['exp_name'] task['variant_data'] = base64.b64encode( pickle.dumps(variant)).decode('utf-8') elif 'variant' in task: del task['variant'] task['env'] = task.get('env', dict()) or dict() task['env']['GARAGE_USE_GPU'] = str(use_gpu) task['env']['GARAGE_USE_TF'] = str(use_tf) for task in batch_tasks: env = task.pop('env', None) command = to_local_command( task, python_command=python_command, script=script) print(command) if dry: return try: if env is None: env = dict() subprocess.call(command, shell=True, env=dict(os.environ, **env)) except Exception as e: print(e) if isinstance(e, KeyboardInterrupt): raise _find_unsafe = re.compile(r'[a-zA-Z0-9_^@%+=:,./-]').search def _shellquote(s): """Return a shell-escaped version of the string *s*.""" if not s: return "''" if _find_unsafe(s) is None: return s # use single quotes, and put single quotes into double quotes # the string $'b is then quoted as '$'"'"'b' return "'" + s.replace("'", "'\"'\"'") + "'" def _to_param_val(v): if v is None: return '' elif isinstance(v, list): return ' '.join(map(_shellquote, list(map(str, v)))) else: return _shellquote(str(v)) def to_local_command(params, python_command='python', script='garage.experiment.experiment_wrapper'): command = python_command + ' -m ' + script garage_env = eval(os.environ.get('GARAGE_ENV', '{}')) for k, v in garage_env.items(): command = '{}={} '.format(k, v) + command pre_commands = params.pop('pre_commands', None) post_commands = params.pop('post_commands', None) if pre_commands is not None or post_commands is not None: print('Not executing the pre_commands: ', pre_commands, ', nor post_commands: ', post_commands) for k, v in params.items(): if isinstance(v, dict): for nk, nv in v.items(): if str(nk) == '_name': command += ' --{} {}'.format(k, _to_param_val(nv)) else: command += \ ' --{}_{} {}'.format(k, nk, _to_param_val(nv)) else: command += ' --{} {}'.format(k, _to_param_val(v)) return command def concretize(obj): if isinstance(obj, dict): # make sure that there's no hidden caveat ret = dict() for k, v in obj.items(): ret[concretize(k)] = concretize(v) return ret elif isinstance(obj, (list, tuple)): return obj.__class__(list(map(concretize, obj))) else: return obj
the-stack_0_4332
# -*- coding: utf-8 -*- # Copyright 2019-2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from http import HTTPStatus from typing import TYPE_CHECKING, List, Optional, Tuple from urllib import parse as urlparse from synapse.api.constants import EventTypes, JoinRules, Membership from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.api.filtering import Filter from synapse.http.servlet import ( RestServlet, assert_params_in_dict, parse_integer, parse_json_object_from_request, parse_string, ) from synapse.http.site import SynapseRequest from synapse.rest.admin._base import ( admin_patterns, assert_requester_is_admin, assert_user_is_admin, ) from synapse.storage.databases.main.room import RoomSortOrder from synapse.types import JsonDict, RoomAlias, RoomID, UserID, create_requester from synapse.util import json_decoder if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) class ShutdownRoomRestServlet(RestServlet): """Shuts down a room by removing all local users from the room and blocking all future invites and joins to the room. Any local aliases will be repointed to a new room created by `new_room_user_id` and kicked users will be auto joined to the new room. """ PATTERNS = admin_patterns("/shutdown_room/(?P<room_id>[^/]+)") def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() self.room_shutdown_handler = hs.get_room_shutdown_handler() async def on_POST( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester.user) content = parse_json_object_from_request(request) assert_params_in_dict(content, ["new_room_user_id"]) ret = await self.room_shutdown_handler.shutdown_room( room_id=room_id, new_room_user_id=content["new_room_user_id"], new_room_name=content.get("room_name"), message=content.get("message"), requester_user_id=requester.user.to_string(), block=True, ) return (200, ret) class DeleteRoomRestServlet(RestServlet): """Delete a room from server. It is a combination and improvement of shutdown and purge room. Shuts down a room by removing all local users from the room. Blocking all future invites and joins to the room is optional. If desired any local aliases will be repointed to a new room created by `new_room_user_id` and kicked users will be auto- joined to the new room. If 'purge' is true, it will remove all traces of a room from the database. 
""" PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]+)/delete$") def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() self.room_shutdown_handler = hs.get_room_shutdown_handler() self.pagination_handler = hs.get_pagination_handler() async def on_POST( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester.user) content = parse_json_object_from_request(request) block = content.get("block", False) if not isinstance(block, bool): raise SynapseError( HTTPStatus.BAD_REQUEST, "Param 'block' must be a boolean, if given", Codes.BAD_JSON, ) purge = content.get("purge", True) if not isinstance(purge, bool): raise SynapseError( HTTPStatus.BAD_REQUEST, "Param 'purge' must be a boolean, if given", Codes.BAD_JSON, ) force_purge = content.get("force_purge", False) if not isinstance(force_purge, bool): raise SynapseError( HTTPStatus.BAD_REQUEST, "Param 'force_purge' must be a boolean, if given", Codes.BAD_JSON, ) ret = await self.room_shutdown_handler.shutdown_room( room_id=room_id, new_room_user_id=content.get("new_room_user_id"), new_room_name=content.get("room_name"), message=content.get("message"), requester_user_id=requester.user.to_string(), block=block, ) # Purge room if purge: await self.pagination_handler.purge_room(room_id, force=force_purge) return (200, ret) class ListRoomRestServlet(RestServlet): """ List all rooms that are known to the homeserver. Results are returned in a dictionary containing room information. Supports pagination. """ PATTERNS = admin_patterns("/rooms$") def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() self.auth = hs.get_auth() self.admin_handler = hs.get_admin_handler() async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester.user) # Extract query parameters start = parse_integer(request, "from", default=0) limit = parse_integer(request, "limit", default=100) order_by = parse_string(request, "order_by", default=RoomSortOrder.NAME.value) if order_by not in ( RoomSortOrder.ALPHABETICAL.value, RoomSortOrder.SIZE.value, RoomSortOrder.NAME.value, RoomSortOrder.CANONICAL_ALIAS.value, RoomSortOrder.JOINED_MEMBERS.value, RoomSortOrder.JOINED_LOCAL_MEMBERS.value, RoomSortOrder.VERSION.value, RoomSortOrder.CREATOR.value, RoomSortOrder.ENCRYPTION.value, RoomSortOrder.FEDERATABLE.value, RoomSortOrder.PUBLIC.value, RoomSortOrder.JOIN_RULES.value, RoomSortOrder.GUEST_ACCESS.value, RoomSortOrder.HISTORY_VISIBILITY.value, RoomSortOrder.STATE_EVENTS.value, ): raise SynapseError( 400, "Unknown value for order_by: %s" % (order_by,), errcode=Codes.INVALID_PARAM, ) search_term = parse_string(request, "search_term") if search_term == "": raise SynapseError( 400, "search_term cannot be an empty string", errcode=Codes.INVALID_PARAM, ) direction = parse_string(request, "dir", default="f") if direction not in ("f", "b"): raise SynapseError( 400, "Unknown direction: %s" % (direction,), errcode=Codes.INVALID_PARAM ) reverse_order = True if direction == "b" else False # Return list of rooms according to parameters rooms, total_rooms = await self.store.get_rooms_paginate( start, limit, order_by, reverse_order, search_term ) response = { # next_token should be opaque, so return a value the client can parse "offset": start, "rooms": rooms, "total_rooms": total_rooms, } # Are there more rooms to paginate through after this? 
if (start + limit) < total_rooms: # There are. Calculate where the query should start from next time # to get the next part of the list response["next_batch"] = start + limit # Is it possible to paginate backwards? Check if we currently have an # offset if start > 0: if start > limit: # Going back one iteration won't take us to the start. # Calculate new offset response["prev_batch"] = start - limit else: response["prev_batch"] = 0 return 200, response class RoomRestServlet(RestServlet): """Get room details. TODO: Add on_POST to allow room creation without joining the room """ PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]+)$") def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() self.store = hs.get_datastore() async def on_GET( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) ret = await self.store.get_room_with_stats(room_id) if not ret: raise NotFoundError("Room not found") members = await self.store.get_users_in_room(room_id) ret["joined_local_devices"] = await self.store.count_devices_by_users(members) return (200, ret) class RoomMembersRestServlet(RestServlet): """ Get members list of a room. """ PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]+)/members") def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() self.store = hs.get_datastore() async def on_GET( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) ret = await self.store.get_room(room_id) if not ret: raise NotFoundError("Room not found") members = await self.store.get_users_in_room(room_id) ret = {"members": members, "total": len(members)} return 200, ret class RoomStateRestServlet(RestServlet): """ Get full state within a room. """ PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]+)/state") def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() self.store = hs.get_datastore() self.clock = hs.get_clock() self._event_serializer = hs.get_event_client_serializer() async def on_GET( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester.user) ret = await self.store.get_room(room_id) if not ret: raise NotFoundError("Room not found") event_ids = await self.store.get_current_state_ids(room_id) events = await self.store.get_events(event_ids.values()) now = self.clock.time_msec() room_state = await self._event_serializer.serialize_events( events.values(), now, # We don't bother bundling aggregations in when asked for state # events, as clients won't use them. 
bundle_aggregations=False, ) ret = {"state": room_state} return 200, ret class JoinRoomAliasServlet(RestServlet): PATTERNS = admin_patterns("/join/(?P<room_identifier>[^/]*)") def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() self.room_member_handler = hs.get_room_member_handler() self.admin_handler = hs.get_admin_handler() self.state_handler = hs.get_state_handler() async def on_POST( self, request: SynapseRequest, room_identifier: str ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester.user) content = parse_json_object_from_request(request) assert_params_in_dict(content, ["user_id"]) target_user = UserID.from_string(content["user_id"]) if not self.hs.is_mine(target_user): raise SynapseError(400, "This endpoint can only be used with local users") if not await self.admin_handler.get_user(target_user): raise NotFoundError("User not found") if RoomID.is_valid(room_identifier): room_id = room_identifier try: remote_room_hosts = [ x.decode("ascii") for x in request.args[b"server_name"] ] # type: Optional[List[str]] except Exception: remote_room_hosts = None elif RoomAlias.is_valid(room_identifier): handler = self.room_member_handler room_alias = RoomAlias.from_string(room_identifier) room_id, remote_room_hosts = await handler.lookup_room_alias(room_alias) else: raise SynapseError( 400, "%s was not legal room ID or room alias" % (room_identifier,) ) fake_requester = create_requester( target_user, authenticated_entity=requester.authenticated_entity ) # send invite if room has "JoinRules.INVITE" room_state = await self.state_handler.get_current_state(room_id) join_rules_event = room_state.get((EventTypes.JoinRules, "")) if join_rules_event: if not (join_rules_event.content.get("join_rule") == JoinRules.PUBLIC): # update_membership with an action of "invite" can raise a # ShadowBanError. This is not handled since it is assumed that # an admin isn't going to call this API with a shadow-banned user. await self.room_member_handler.update_membership( requester=requester, target=fake_requester.user, room_id=room_id, action="invite", remote_room_hosts=remote_room_hosts, ratelimit=False, ) await self.room_member_handler.update_membership( requester=fake_requester, target=fake_requester.user, room_id=room_id, action="join", remote_room_hosts=remote_room_hosts, ratelimit=False, ) return 200, {"room_id": room_id} class MakeRoomAdminRestServlet(RestServlet): """Allows a server admin to get power in a room if a local user has power in a room. Will also invite the user if they're not in the room and it's a private room. Can specify another user (rather than the admin user) to be granted power, e.g.: POST/_synapse/admin/v1/rooms/<room_id_or_alias>/make_room_admin { "user_id": "@foo:example.com" } """ PATTERNS = admin_patterns("/rooms/(?P<room_identifier>[^/]*)/make_room_admin") def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() self.room_member_handler = hs.get_room_member_handler() self.event_creation_handler = hs.get_event_creation_handler() self.state_handler = hs.get_state_handler() self.is_mine_id = hs.is_mine_id async def on_POST(self, request, room_identifier): requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester.user) content = parse_json_object_from_request(request, allow_empty_body=True) # Resolve to a room ID, if necessary. 
if RoomID.is_valid(room_identifier): room_id = room_identifier elif RoomAlias.is_valid(room_identifier): room_alias = RoomAlias.from_string(room_identifier) room_id, _ = await self.room_member_handler.lookup_room_alias(room_alias) room_id = room_id.to_string() else: raise SynapseError( 400, "%s was not legal room ID or room alias" % (room_identifier,) ) # Which user to grant room admin rights to. user_to_add = content.get("user_id", requester.user.to_string()) # Figure out which local users currently have power in the room, if any. room_state = await self.state_handler.get_current_state(room_id) if not room_state: raise SynapseError(400, "Server not in room") create_event = room_state[(EventTypes.Create, "")] power_levels = room_state.get((EventTypes.PowerLevels, "")) if power_levels is not None: # We pick the local user with the highest power. user_power = power_levels.content.get("users", {}) admin_users = [ user_id for user_id in user_power if self.is_mine_id(user_id) ] admin_users.sort(key=lambda user: user_power[user]) if not admin_users: raise SynapseError(400, "No local admin user in room") admin_user_id = None for admin_user in reversed(admin_users): if room_state.get((EventTypes.Member, admin_user)): admin_user_id = admin_user break if not admin_user_id: raise SynapseError( 400, "No local admin user in room", ) pl_content = power_levels.content else: # If there is no power level events then the creator has rights. pl_content = {} admin_user_id = create_event.sender if not self.is_mine_id(admin_user_id): raise SynapseError( 400, "No local admin user in room", ) # Grant the user power equal to the room admin by attempting to send an # updated power level event. new_pl_content = dict(pl_content) new_pl_content["users"] = dict(pl_content.get("users", {})) new_pl_content["users"][user_to_add] = new_pl_content["users"][admin_user_id] fake_requester = create_requester( admin_user_id, authenticated_entity=requester.authenticated_entity, ) try: await self.event_creation_handler.create_and_send_nonmember_event( fake_requester, event_dict={ "content": new_pl_content, "sender": admin_user_id, "type": EventTypes.PowerLevels, "state_key": "", "room_id": room_id, }, ) except AuthError: # The admin user we found turned out not to have enough power. raise SynapseError( 400, "No local admin user in room with power to update power levels." ) # Now we check if the user we're granting admin rights to is already in # the room. If not and it's not a public room we invite them. member_event = room_state.get((EventTypes.Member, user_to_add)) is_joined = False if member_event: is_joined = member_event.content["membership"] in ( Membership.JOIN, Membership.INVITE, ) if is_joined: return 200, {} join_rules = room_state.get((EventTypes.JoinRules, "")) is_public = False if join_rules: is_public = join_rules.content.get("join_rule") == JoinRules.PUBLIC if is_public: return 200, {} await self.room_member_handler.update_membership( fake_requester, target=UserID.from_string(user_to_add), room_id=room_id, action=Membership.INVITE, ) return 200, {} class ForwardExtremitiesRestServlet(RestServlet): """Allows a server admin to get or clear forward extremities. Clearing does not require restarting the server. 
Clear forward extremities: DELETE /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities Get forward_extremities: GET /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities """ PATTERNS = admin_patterns("/rooms/(?P<room_identifier>[^/]*)/forward_extremities") def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() self.room_member_handler = hs.get_room_member_handler() self.store = hs.get_datastore() async def resolve_room_id(self, room_identifier: str) -> str: """Resolve to a room ID, if necessary.""" if RoomID.is_valid(room_identifier): resolved_room_id = room_identifier elif RoomAlias.is_valid(room_identifier): room_alias = RoomAlias.from_string(room_identifier) room_id, _ = await self.room_member_handler.lookup_room_alias(room_alias) resolved_room_id = room_id.to_string() else: raise SynapseError( 400, "%s was not legal room ID or room alias" % (room_identifier,) ) if not resolved_room_id: raise SynapseError( 400, "Unknown room ID or room alias %s" % room_identifier ) return resolved_room_id async def on_DELETE(self, request, room_identifier): requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester.user) room_id = await self.resolve_room_id(room_identifier) deleted_count = await self.store.delete_forward_extremities_for_room(room_id) return 200, {"deleted": deleted_count} async def on_GET(self, request, room_identifier): requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester.user) room_id = await self.resolve_room_id(room_identifier) extremities = await self.store.get_forward_extremities_for_room(room_id) return 200, {"count": len(extremities), "results": extremities} class RoomEventContextServlet(RestServlet): """ Provide the context for an event. This API is designed to be used when system administrators wish to look at an abuse report and understand what happened during and immediately prior to this event. """ PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]*)/context/(?P<event_id>[^/]*)$") def __init__(self, hs): super().__init__() self.clock = hs.get_clock() self.room_context_handler = hs.get_room_context_handler() self._event_serializer = hs.get_event_client_serializer() self.auth = hs.get_auth() async def on_GET(self, request, room_id, event_id): requester = await self.auth.get_user_by_req(request, allow_guest=False) await assert_user_is_admin(self.auth, requester.user) limit = parse_integer(request, "limit", default=10) # picking the API shape for symmetry with /messages filter_str = parse_string(request, b"filter", encoding="utf-8") if filter_str: filter_json = urlparse.unquote(filter_str) event_filter = Filter( json_decoder.decode(filter_json) ) # type: Optional[Filter] else: event_filter = None results = await self.room_context_handler.get_event_context( requester, room_id, event_id, limit, event_filter, use_admin_priviledge=True, ) if not results: raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND) time_now = self.clock.time_msec() results["events_before"] = await self._event_serializer.serialize_events( results["events_before"], time_now ) results["event"] = await self._event_serializer.serialize_event( results["event"], time_now ) results["events_after"] = await self._event_serializer.serialize_events( results["events_after"], time_now ) results["state"] = await self._event_serializer.serialize_events( results["state"], time_now ) return 200, results
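
# ---------------------------------------------------------------------------
# Usage sketch (appended note, not part of the original module): calling two
# of the endpoints registered above from a Python client. The
# /_synapse/admin/v1 prefix follows the docstrings in this file; the
# homeserver URL, access token, room id and body values are illustrative
# assumptions.
def _admin_api_examples():
    import requests

    base = "https://homeserver.example.com/_synapse/admin/v1"
    headers = {"Authorization": "Bearer <admin_access_token>"}

    # ListRoomRestServlet: paginated room listing.
    rooms = requests.get(
        base + "/rooms", headers=headers, params={"from": 0, "limit": 100}
    ).json()

    # DeleteRoomRestServlet: shut down a room, block re-joins and purge it.
    requests.post(
        base + "/rooms/!abcdefg:example.com/delete",
        headers=headers,
        json={"block": True, "purge": True, "message": "Room closed by admin"},
    )
    return rooms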
the-stack_0_4333
import smtplib
from smtplib import SMTPServerDisconnected
from email.message import EmailMessage
import mimetypes
import os
import logging


class MailClient(object):
    """
    Example mail client using SMTPlib

    Uses config
    """

    def __init__(self, config=None, logger=None):
        self.mailserver = None
        self.logger = logger if logger else logging.getLogger("MailClient")
        self.C = config
        self.fromaddr = self.C["mail.connection.user"]
        self.connect()

    def connect(self):
        self.mailserver = smtplib.SMTP(self.C["mail.connection.host"],
                                       self.C["mail.connection.port"])
        self.mailserver.ehlo()
        self.mailserver.starttls()
        self.mailserver.login(self.fromaddr, self.C["mail.connection.passwd"])
        self.logger.info("Connected successfully to mail server.")

    @staticmethod
    def add_attachment(msg, fpath):
        """ Liberated from docs """
        ctype, encoding = mimetypes.guess_type(fpath)
        if ctype is None or encoding is not None:
            ctype = 'application/octet-stream'
        maintype, subtype = ctype.split('/', 1)
        with open(fpath, "rb") as f:
            msg.add_attachment(f.read(),
                               maintype=maintype,
                               subtype=subtype,
                               filename=os.path.basename(fpath))

    def compose_mail(self, title, body, attachments=None, to=None):
        msg = EmailMessage()
        msg.set_content(body)
        msg["To"] = to if to else ", ".join(self.C["mail.recipients"])
        msg["From"] = self.fromaddr
        msg["Subject"] = title

        if attachments:
            if not isinstance(attachments, list):
                attachments = [attachments]
            self.logger.info("Found {} attachment(s). Processing".format(len(attachments)))
            for attachment in attachments:
                self.logger.info("Attaching \"{}\"".format(attachment))
                self.add_attachment(msg, attachment)
                self.logger.debug("Attached \"{}\"".format(attachment))
        return msg

    def send(self, msg):
        try:
            self.mailserver.send_message(msg)
            self.logger.info("Mail sent to the {} recipients".format(len(self.C["mail.recipients"])))
        except SMTPServerDisconnected:
            self.logger.warning("Mail server disconnected. Reconnecting.")
            self.connect()
            self.send(msg)


if __name__ == '__main__':
    from src.config.config import Config
    c = Config("mail.yaml")
    m = MailClient(config=c)
    mail = m.compose_mail("test mail",
                          "this is a test mail. \n Please ignore the content",
                          attachments=["attachments/1.txt", "attachments/2.txt"])
    m.send(mail)
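
# ---------------------------------------------------------------------------
# Configuration sketch (appended note, not part of the original module). The
# client reads the dotted keys "mail.connection.{host,port,user,passwd}" and
# "mail.recipients"; assuming src.config.config.Config maps dotted keys onto
# nested YAML, a matching mail.yaml could look like this (placeholder values):
EXAMPLE_MAIL_YAML = """
mail:
  connection:
    host: smtp.example.com
    port: 587
    user: bot@example.com
    passwd: app-specific-password
  recipients:
    - ops@example.com
    - alerts@example.com
"""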
the-stack_0_4335
from django.shortcuts import render

from posts.models import Post


# Create your views here.
def index(request):
    """Render the home page with the five most recently registered posts."""
    posts = Post.objects.all().order_by('-registered_at')[:5]
    context = {
        'posts': posts
    }
    return render(request, 'home/index.html', context)
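
# ---------------------------------------------------------------------------
# Wiring sketch (appended note, not part of the original file): routing the
# index view above from a urls.py. The "home" app/module name is an assumption
# chosen to match the 'home/index.html' template, and Django >= 2.0 path()
# syntax is assumed; the snippet belongs in urls.py, so it is quoted here as
# comments rather than executable code.
#
#   from django.urls import path
#   from home import views
#
#   urlpatterns = [
#       path('', views.index, name='index'),
#   ]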
the-stack_0_4336
from elegantrl.agents.AgentSAC import AgentSAC from elegantrl.agents.net import Critic, ActorSAC, ActorFixSAC, CriticREDQ import torch import numpy as np from copy import deepcopy class AgentREDQ(AgentSAC): # [ElegantRL.2021.11.11] """ Bases: ``AgentBase`` Randomized Ensemble Double Q-learning algorithm. “Randomized Ensembled Double Q-Learning: Learning Fast Without A Model”. Xinyue Chen et al.. 2021. :param net_dim[int]: the dimension of networks (the width of neural networks) :param state_dim[int]: the dimension of state (the number of state vector) :param action_dim[int]: the dimension of action (the number of discrete action) :param reward_scale: scale the reward to get a appropriate scale Q value :param gamma: the discount factor of Reinforcement Learning :param learning_rate: learning rate of optimizer :param if_per_or_gae: PER (off-policy) or GAE (on-policy) for sparse reward :param env_num: the env number of VectorEnv. env_num == 1 means don't use VectorEnv :param gpu_id: the gpu_id of the training device. Use CPU when cuda is not available. :param G: Update to date ratio :param M: subset size of critics :param N: ensemble number of critics """ def __init__(self, net_dim, state_dim, action_dim, gpu_id=0, args=None): self.ClassCri = Critic self.get_obj_critic = self.get_obj_critic_raw self.ClassAct = ActorSAC self.if_use_cri_target = True self.if_use_act_target = False self.alpha_log = None self.alpha_optim = None self.target_entropy = None self.obj_critic = (-np.log(0.5)) ** 0.5 # for reliable_lambda self.act_class = getattr(self, "act_class", ActorFixSAC) self.cri_class = getattr(self, "cri_class", CriticREDQ) super().__init__(net_dim, state_dim, action_dim, gpu_id, args) self.obj_c = (-np.log(0.5)) ** 0.5 # for reliable_lambda def init( self, net_dim=256, state_dim=8, action_dim=2, reward_scale=1.0, gamma=0.99, learning_rate=3e-4, if_per_or_gae=False, env_num=1, gpu_id=0, G=20, M=2, N=10, ): self.gamma = gamma self.state_dim = state_dim self.action_dim = action_dim self.reward_scale = reward_scale self.traj_list = [[] for _ in range(env_num)] self.G = G self.M = M self.N = N self.device = torch.device( f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu" ) self.cri_list = [ self.ClassCri(net_dim, state_dim, action_dim).to(self.device) for i in range(self.N) ] self.act = self.ClassAct(net_dim, state_dim, action_dim).to(self.device) self.cri_target_list = [deepcopy(self.cri_list[i]) for i in range(N)] self.cri_optim_list = [ torch.optim.Adam(self.cri_list[i].parameters(), learning_rate) for i in range(self.N) ] self.act_optim = torch.optim.Adam(self.act.parameters(), learning_rate) assert isinstance(if_per_or_gae, bool) if env_num == 1: self.explore_env = self.explore_one_env else: self.explore_env = self.explore_vec_env self.alpha_log = torch.zeros( 1, requires_grad=True, device=self.device ) # trainable parameter self.alpha_optim = torch.optim.Adam([self.alpha_log], lr=learning_rate) self.target_entropy = np.log(action_dim) self.criterion = torch.nn.MSELoss() def get_obj_critic_raw(self, buffer, batch_size): with torch.no_grad(): reward, mask, action, state, next_s = buffer.sample_batch(batch_size) next_a, next_log_prob = self.act_target.get_action_logprob( next_s ) # stochastic policy next_q = self.cri_target.get_q_min(next_s, next_a) alpha = self.alpha_log.exp().detach() q_label = reward + mask * (next_q + next_log_prob * alpha) qs = self.cri.get_q_values(state, action) obj_critic = self.criterion(qs, q_label * torch.ones_like(qs)) return obj_critic, 
state def get_obj_critic_per(self, buffer, batch_size): with torch.no_grad(): reward, mask, action, state, next_s, is_weights = buffer.sample_batch( batch_size ) next_a, next_log_prob = self.act_target.get_action_logprob( next_s ) # stochastic policy next_q = self.cri_target.get_q_min(next_s, next_a) alpha = self.alpha_log.exp().detach() q_label = reward + mask * (next_q + next_log_prob * alpha) qs = self.cri.get_q_values(state, action) td_error = self.criterion(qs, q_label * torch.ones_like(qs)).mean(dim=1) obj_critic = (td_error * is_weights).mean() buffer.td_error_update(td_error.detach()) return obj_critic, state def get_obj_critic_raw_(self, buffer, batch_size, alpha): """ Calculate the loss of networks with **uniform sampling**. :param buffer: the ReplayBuffer instance that stores the trajectories. :param batch_size: the size of batch data for Stochastic Gradient Descent (SGD). :param alpha: the trade-off coefficient of entropy regularization. :return: the loss of the network and states. """ with torch.no_grad(): batch = buffer.sample_batch(batch_size) state = torch.Tensor(batch["obs1"]).to(self.device) next_s = torch.Tensor(batch["obs2"]).to(self.device) action = torch.Tensor(batch["acts"]).to(self.device) reward = torch.Tensor(batch["rews"]).unsqueeze(1).to(self.device) mask = torch.Tensor(batch["done"]).unsqueeze(1).to(self.device) # state, next_s, actions, reward, mask = buffer.sample_batch(batch_size) # print(batch_size,reward.shape,mask.shape,action.shape, state.shape, next_s.shape) next_a, next_log_prob = self.act.get_action_logprob( next_s ) # stochastic policy g = torch.Generator() g.manual_seed(torch.randint(high=10000000, size=(1,))[0].item()) a = torch.randperm(self.N, generator=g) # a = np.random.choice(self.N, self.M, replace=False) # print(a[:M]) q_tmp = [self.cri_target_list[a[j]](next_s, next_a) for j in range(self.M)] q_prediction_next_cat = torch.cat(q_tmp, 1) min_q, min_indices = torch.min(q_prediction_next_cat, dim=1, keepdim=True) next_q_with_log_prob = min_q - alpha * next_log_prob y_q = reward + (1 - mask) * self.gamma * next_q_with_log_prob q_values = [ self.cri_list[j](state, action) for j in range(self.N) ] # todo ensemble q_values_cat = torch.cat(q_values, dim=1) y_q = y_q.expand(-1, self.N) if y_q.shape[1] == 1 else y_q obj_critic = self.criterion(q_values_cat, y_q) * self.N return obj_critic, state # return y_q, state,action def select_actions_(self, state, size, env): """ Select continuous actions for exploration :param state: states.shape==(batch_size, state_dim, ) :return: actions.shape==(batch_size, action_dim, ), -1 < action < +1 """ state = state.to(self.device) actions = self.act.get_action(state) return actions.detach().cpu() def cri_multi_train_(self, k): q_values = self.cri_list[k](self.state, self.action) obj = self.criterion(q_values, self.y_q) self.cri_optim_list[k].zero_grad() obj.backward() self.cri_optim_list[k].step() def update_net_(self, buffer, batch_size, soft_update_tau): # buffer.update_now_len() """ Update the neural networks by sampling batch data from ``ReplayBuffer``. :param buffer: the ReplayBuffer instance that stores the trajectories. :param batch_size: the size of batch data for Stochastic Gradient Descent (SGD). :param soft_update_tau: the soft update parameter. :return: a tuple of the log information. 
""" for i in range(self.G): alpha = self.alpha_log.cpu().exp().item() """objective of critic (loss function of critic)""" obj_critic, state = self.get_obj_critic(buffer, batch_size, alpha) # self.y_q, self.state,self.action = self.get_obj_critic(buffer, batch_size, alpha) for q_i in range(self.N): self.cri_optim_list[q_i].zero_grad() obj_critic.backward() if ((i + 1) % self.G == 0) or i == self.G - 1: a_noise_pg, logprob = self.act.get_action_logprob( state ) # policy gradient """objective of alpha (temperature parameter automatic adjustment)""" cri_tmp = [] for j in range(self.N): self.cri_list[j].requires_grad_(False) cri_tmp.append(self.cri_list[j](state, a_noise_pg)) q_value_pg = torch.cat(cri_tmp, 1) q_value_pg = torch.mean(q_value_pg, dim=1, keepdim=True) obj_actor = (-q_value_pg + logprob * alpha).mean() # todo ensemble self.act_optim.zero_grad() obj_actor.backward() for j in range(self.N): self.cri_list[j].requires_grad_(True) obj_alpha = -(self.alpha_log * (logprob - 1).detach()).mean() self.optim_update(self.alpha_optim, obj_alpha) for q_i in range(self.N): self.cri_optim_list[q_i].step() if ((i + 1) % self.G == 0) or i == self.G - 1: self.act_optim.step() for q_i in range(self.N): self.soft_update( self.cri_target_list[q_i], self.cri_list[q_i], soft_update_tau ) return obj_actor, alpha
the-stack_0_4338
""" Run a large scale benchmark. We measure: {dataset, encoder, model, train and test accuracy measures, train and test runtimes, feature count}. Note: A reasonably recent version of sklearn is required to run GradientBoostingClassifier and MLPClassifier. """ import os import warnings import pandas as pd import numpy as np from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.linear_model import SGDClassifier from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.neural_network import MLPClassifier from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier import category_encoders from examples.benchmarking_large import arff_loader from examples.benchmarking_large.util import train_model, train_encoder # The settings are taken from: # Data-driven advice for applying machine learning to bioinformatics problems, Olson et al. # Following models have high variance of results: SGD, SVC and DecisionTree. That is not a big deal. # Just be careful during result interpretation. # Also, following models are slow because of their configuration: GradientBoosting and RandomForest. # SGD and DecisionTree benefit from stronger regularization. models = [SGDClassifier(loss='modified_huber', max_iter=50, tol=1e-3), LogisticRegression(C=1.5, penalty='l1', fit_intercept=True), SVC(kernel='poly', probability=True, C=0.01, gamma=0.1, degree=3, coef0=10.0), KNeighborsClassifier(), GaussianNB(), DecisionTreeClassifier(max_depth=4), GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=500, max_depth=3, max_features='log2'), RandomForestClassifier(n_estimators=500, max_features=0.25, criterion='entropy'), MLPClassifier()] # We use Arff datasets on GitHub. But once OpenML loader will be part of scikit-learn: # https://github.com/scikit-learn/scikit-learn/pull/11419 # the plan is to move on OpenML. # We ignore datasets without any polynomial feature. # We also ignore 'splice.arff', 'anneal.arff', 'anneal.orig.arff' due to high runtime. 
# Datasets sensitive to amount of regularization are: # breast.cancer.arff Medium impact # bridges.version1.arff # bridges.version2.arff # car.arff # colic.arff # cylinder.bands.arff Medium impact # flags.arff Large impact # heart.c.arff # hepatitis.arff # hypothyroid.arff # kr.vs.kp.arff # labor.arff Large impact # lymph.arff # nursery.arff # postoperative.patient.data.arff Large impact # primary.tumor.arff # solar.flare1.arff Medium impact # solar.flare2.arff Medium impact # soybean.arff Large impact # sick.arff # spectrometer.arff Large impact # sponge.arff Large impact # tic-tac-toe.arff # trains.arff Medium impact (note that this is a tiny dataset -> with high variance) datasets = [#'audiology.arff', 'autos.arff', 'breast.cancer.arff', 'bridges.version1.arff', 'bridges.version2.arff', 'car.arff', # 'colic.arff', 'credit.a.arff', 'credit.g.arff', 'cylinder.bands.arff', 'flags.arff', 'heart.c.arff', 'heart.h.arff', 'hepatitis.arff', 'hypothyroid.arff', 'kr.vs.kp.arff', 'labor.arff', 'lymph.arff', 'mushroom.arff', 'nursery.arff', 'postoperative.patient.data.arff', 'primary.tumor.arff', 'sick.arff', 'solar.flare1.arff', 'solar.flare2.arff', 'soybean.arff', 'spectrometer.arff', 'sponge.arff', 'tic-tac-toe.arff', 'trains.arff', 'vote.arff', 'vowel.arff'] # datasets = ['postoperative.patient.data.arff'] # datasets = ['amazon.csv', 'carvana.csv', 'erasmus.csv', 'internetusage.csv', 'ipumsla97small.csv', 'kobe.csv', 'pbcseq.csv', 'phpvcoG8S.csv', 'westnile.csv'] # We ignore encoders {BackwardDifferenceEncoder, HelmertEncoder, PolynomialEncoder and SumEncoder} because of: # https://github.com/scikit-learn-contrib/categorical-encoding/issues/91 encoders = [ category_encoders.TargetEncoderV2()] # Initialization if os.path.isfile('./output/result.csv'): os.remove('./output/result.csv') # Ok... warnings.filterwarnings('ignore') # Loop over datasets, then over encoders, and finally, over the models for dataset_name in datasets: X, y, fold_count = arff_loader.load(dataset_name) non_numeric = list(X.select_dtypes(exclude=[np.number]).columns.values) for encoder in encoders: print("Encoding:", dataset_name, y.name, encoder.__class__.__name__) folds, fit_encoder_time, score_encoder_time = train_encoder(X, y, fold_count, encoder) for model in models: print('Evaluating:', dataset_name, encoder.__class__.__name__, model.__class__.__name__) scores, fit_model_time, score_model_time = train_model(folds, model) # Log into csv result = pd.DataFrame([dataset_name, y.name, encoder.__class__.__name__, model.__class__.__name__, X.shape[1], folds[0][0].shape[1], fit_encoder_time, score_encoder_time, fit_model_time, score_model_time] + list(scores)).T if not os.path.isfile('./output/result.csv'): result.to_csv('./output/result.csv', header=['dataset', 'target', 'encoder', 'model', 'input_features', 'output_features', 'fit_encoder_time', 'score_encoder_time', 'fit_model_time', 'score_model_time', 'test_matthews', 'train_matthews', 'test_auc', 'train_auc', 'test_brier', 'train_brier'], index=False) else: result.to_csv('./output/result.csv', mode='a', header=False, index=False) print('Finished. The result was stored into ./output/result.csv.')
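

# ---------------------------------------------------------------------------
# Added sketch (not part of the original benchmark): one possible way to
# summarise the generated ./output/result.csv afterwards. The column names
# come from the header written above; the groupby/aggregation itself is only
# an example, kept commented out so it does not alter the benchmark run.
#
# summary = (
#     pd.read_csv('./output/result.csv')
#       .groupby(['encoder', 'model'])[['test_auc', 'test_matthews', 'fit_encoder_time']]
#       .mean()
#       .sort_values('test_auc', ascending=False)
# )
# print(summary.head(10))
# ---------------------------------------------------------------------------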
the-stack_0_4339
#!/usr/bin/python # -*- coding: utf-8 -*- """ Django settings for scrapy_joy project. For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '$k70*9=58#2%p(!b_1ox*!96^&vuvwz)3oq8&-yvofetyjyy)#' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = ['*'] CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'unique-snowflake', } } # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'xadmin', 'crispy_forms', # 'reversion', 'kombu.transport.django', 'djcelery', 'dynamic_scraper', 'debug_toolbar', 'scrapy_joy', 'open_news', 'open_loan', 'open_insurance', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ) ROOT_URLCONF = 'scrapy_joy.urls' WSGI_APPLICATION = 'scrapy_joy.wsgi.application' # Database # https://docs.djangoproject.com/en/1.6/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': 'scrapy_joy2', 'USER': 'root', 'PASSWORD': '123456', 'HOST': '127.0.0.1', 'PORT': '3306', 'OPTIONS': {'init_command': 'SET storage_engine=INNODB;'} } } # Internationalization # https://docs.djangoproject.com/en/1.6/topics/i18n/ LANGUAGE_CODE = 'zh_cn' TIME_ZONE = 'Asia/Shanghai' USE_I18N = True USE_L10N = True USE_TZ = False DATE_FORMAT = 'Y-m-d' DATETIME_FORMAT = 'Y-m-d H:i' TIME_FORMAT = 'H:i' # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = ( os.path.join(BASE_DIR, "static"), ) TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. 
os.path.join(BASE_DIR, 'templates'),
)


# **** Email sending settings ****
EMAIL_HOST = 'smtp.163.com'
EMAIL_PORT = 25
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'dafcnranfmcvwrds'
EMAIL_SUBJECT_PREFIX = u'[Kaisa利率]'
DEFAULT_FROM_EMAIL = u'Kaisa利率 <[email protected]>'
SERVER_EMAIL = '[email protected]'

HOST_NAME = 'http://127.0.0.1:8000'


# **** cacheops cache settings ****
CACHEOPS_REDIS = {
    'host': 'localhost',
    'port': 6379,
    'db': 1,
    'socket_timeout': 3,
    'password': '',
}

CACHEOPS_DEFAULTS = {
    'timeout': 60*60
}

CACHEOPS = {
    'auth.user': {'ops': 'get', 'timeout': 60*15},
    'auth.*': {'ops': ('fetch', 'get')},
    'auth.permission': {'ops': 'all'},
    '*.*': {},
}

DEBUG_TOOLBAR_PATCH_SETTINGS = False
INTERNAL_IPS = '127.0.0.1'


# django-celery settings
import djcelery
djcelery.setup_loader()

BROKER_HOST = "localhost"
BROKER_PORT = 5672
BROKER_BACKEND = "django"
BROKER_USER = "guest"
BROKER_PASSWORD = "guest"
BROKER_VHOST = "/"

CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'


try:
    from local_settings import *
except:
    pass
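

# ---------------------------------------------------------------------------
# Added note (not part of the original settings): the try/except import above
# lets an optional local_settings.py module override anything defined here.
# A hypothetical local_settings.py for a deployment could look like:
#
#     DEBUG = False
#     ALLOWED_HOSTS = ['your.domain.com']
#     EMAIL_HOST_PASSWORD = 'real-password-here'
#
# The values above are placeholders; only the override mechanism comes from
# this file.
# ---------------------------------------------------------------------------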
the-stack_0_4341
from fractions import Fraction from unittest import TestCase from musurgia.fractaltree.fractaltree import FractalTree class Test(TestCase): def setUp(self) -> None: self.ft = FractalTree(proportions=[1, 2, 3], tree_permutation_order=[3, 1, 2], value=10) def test_0(self): with self.assertRaises(Exception): self.ft.get_layer(1) def test_1(self): self.assertEqual([self.ft], self.ft.get_layer(0)) def test_2(self): self.ft.add_layer() result = self.ft.get_children() self.assertEqual(result, self.ft.get_layer(1)) def test_3(self): for i in range(3): self.ft.add_layer() result = self.ft.get_children() self.assertEqual(result, self.ft.get_layer(1)) def test_4(self): for i in range(3): self.ft.add_layer() result = [child.get_children() for child in self.ft.get_children()] self.assertEqual(result, self.ft.get_layer(2)) def test_5(self): for i in range(3): self.ft.add_layer() result = self.ft.get_leaves() self.assertEqual(result, self.ft.get_layer(3)) def test_6(self): for i in range(3): self.ft.add_layer() with self.assertRaises(ValueError): self.ft.get_layer(4) def test_7(self): self.ft.add_layer() self.ft.add_layer(lambda n: True if n.fractal_order > 1 else False) self.ft.add_layer(lambda n: True if n.fractal_order > 1 else False) self.ft.add_layer(lambda n: True if n.fractal_order > 1 else False) self.ft.add_layer(lambda n: True if n.fractal_order > 1 else False) result = [[['1.1'], [['1.2.1'], ['1.2.2.1', '1.2.2.2', '1.2.2.3'], ['1.2.3.1', '1.2.3.2', '1.2.3.3']], [['1.3.1.1', '1.3.1.2', '1.3.1.3'], ['1.3.2'], ['1.3.3.1', '1.3.3.2', '1.3.3.3']]], '2', [[['3.1.1'], ['3.1.2.1', '3.1.2.2', '3.1.2.3'], ['3.1.3.1', '3.1.3.2', '3.1.3.3']], [['3.2.1.1', '3.2.1.2', '3.2.1.3'], ['3.2.2'], ['3.2.3.1', '3.2.3.2', '3.2.3.3']], ['3.3']]] self.assertEqual(result, [name for name in self.ft.get_layer(4, key='name')]) def test_7_1(self): self.ft.add_layer() self.ft.add_layer() self.assertEqual([10], self.ft.get_layer(0, key='value')) def test_7_2(self): self.ft.add_layer() self.ft.add_layer() result = [Fraction(5, 1), Fraction(5, 3), Fraction(10, 3)] self.assertEqual(result, self.ft.get_layer(1, key='value')) def test_7_3(self): self.ft.add_layer() self.ft.add_layer() result = [[Fraction(5, 6), Fraction(5, 3), Fraction(5, 2)], [Fraction(5, 6), Fraction(5, 18), Fraction(5, 9)], [Fraction(10, 9), Fraction(5, 3), Fraction(5, 9)]] self.assertEqual(result, self.ft.get_layer(2, key='value')) def test_get_layer_key_lambda(self): self.ft.add_layer() self.ft.add_layer() result = [[0.83, 1.67, 2.5], [0.83, 0.28, 0.56], [1.11, 1.67, 0.56]] self.assertEqual(result, self.ft.get_layer(2, key=lambda node: round(float(node.value), 2)))
the-stack_0_4346
import unittest import openfigi class MyTestCase(unittest.TestCase): def test_wkn_ticker_anonymous(self): """Get an ETF by WKN and check if response makes sense""" ofg = openfigi.OpenFigi() ofg.enqueue_request(id_type='ID_WERTPAPIER', id_value='A0YEDG') response = ofg.fetch_response() self.assertTrue(type(response) is list) self.assertTrue(len(response) > 0) self.assertTrue(type(response[0]) is dict) self.assertTrue('data' in response[0].keys()) self.assertTrue(len(response[0]['data']) > 0) if __name__ == '__main__': unittest.main()
the-stack_0_4348
import numpy as np import scipy.sparse as sp import tensorflow as tf from keras import backend as K modes = { 'S': 1, # Single (rank(A)=2, rank(B)=2) 'M': 2, # Mixed (rank(A)=2, rank(B)=3) 'iM': 3, # Inverted mixed (rank(A)=3, rank(B)=2) 'B': 4, # Batch (rank(A)=3, rank(B)=3) 'UNK': -1 # Unknown } ################################################################################ # Ops for convolutions / Laplacians ################################################################################ def filter_dot(fltr, features): """ Performs the multiplication of a graph filter (N x N) with the node features, automatically dealing with single, mixed, and batch modes. :param fltr: the graph filter(s) (N x N in single and mixed mode, batch x N x N in batch mode). :param features: the node features (N x F in single mode, batch x N x F in mixed and batch mode). :return: the filtered features. """ if len(K.int_shape(features)) == 2: # Single mode return K.dot(fltr, features) else: if len(K.int_shape(fltr)) == 3: # Batch mode return K.batch_dot(fltr, features) else: # Mixed mode return mixed_mode_dot(fltr, features) def normalize_A(A): """ Computes symmetric normalization of A, dealing with sparse A and batch mode automatically. :param A: Tensor or SparseTensor with rank k = {2, 3}. :return: SparseTensor of rank k. """ D = degrees(A) D = tf.sqrt(D)[:, None] + K.epsilon() if K.ndim(A) == 3: # Batch mode output = (A / D) / transpose(D, perm=(0, 2, 1)) else: # Single mode output = (A / D) / transpose(D) return output def degrees(A): """ Computes the degrees of each node in A, dealing with sparse A and batch mode automatically. :param A: Tensor or SparseTensor with rank k = {2, 3}. :return: Tensor or SparseTensor of rank k - 1. """ if K.is_sparse(A): D = tf.sparse.reduce_sum(A, axis=-1) else: D = tf.reduce_sum(A, axis=-1) return D def degree_matrix(A, return_sparse_batch=False): """ Computes the degree matrix of A, deals with sparse A and batch mode automatically. :param A: Tensor or SparseTensor with rank k = {2, 3}. :param return_sparse_batch: if operating in batch mode, return a SparseTensor. Note that the sparse degree tensor returned by this function cannot be used for sparse matrix multiplication afterwards. :return: SparseTensor of rank k. 
""" D = degrees(A) batch_mode = K.ndim(D) == 2 N = tf.shape(D)[-1] batch_size = tf.shape(D)[0] if batch_mode else 1 inner_index = tf.tile(tf.stack([tf.range(N)] * 2, axis=1), (batch_size, 1)) if batch_mode: if return_sparse_batch: outer_index = repeat( tf.range(batch_size), tf.ones(batch_size) * tf.cast(N, tf.float32) ) indices = tf.concat([outer_index[:, None], inner_index], 1) dense_shape = (batch_size, N, N) else: return tf.linalg.diag(D) else: indices = inner_index dense_shape = (N, N) indices = tf.cast(indices, tf.int64) values = tf.reshape(D, (-1, )) return tf.SparseTensor(indices, values, dense_shape) ################################################################################ # Scipy to tf.sparse conversion ################################################################################ def sp_matrix_to_sp_tensor_value(x): """ Converts a Scipy sparse matrix to a tf.SparseTensorValue :param x: a Scipy sparse matrix :return: tf.SparseTensorValue """ if not hasattr(x, 'tocoo'): try: x = sp.coo_matrix(x) except: raise TypeError('x must be convertible to scipy.coo_matrix') else: x = x.tocoo() return tf.SparseTensorValue( indices=np.array([x.row, x.col]).T, values=x.data, dense_shape=x.shape ) def sp_matrix_to_sp_tensor(x): """ Converts a Scipy sparse matrix to a tf.SparseTensor :param x: a Scipy sparse matrix :return: tf.SparseTensor """ if not hasattr(x, 'tocoo'): try: x = sp.coo_matrix(x) except: raise TypeError('x must be convertible to scipy.coo_matrix') else: x = x.tocoo() return tf.SparseTensor( indices=np.array([x.row, x.col]).T, values=x.data, dense_shape=x.shape ) ################################################################################ # Matrix multiplication ################################################################################ def matmul_A_B(A, B): """ Computes A * B, dealing with sparsity and single/batch/mixed modes automatically. Mixed mode multiplication also works when A has rank 3 and B has rank 2. Sparse multiplication does not work with batch mode. :param A: Tensor or SparseTensor with rank 2 or 3. :param B: Tensor or SparseTensor with rank 2 or 3. :return: """ mode = autodetect_mode(A, B) if mode == modes['S']: # Single mode output = single_mode_dot(A, B) elif mode == modes['M']: # Mixed mode output = mixed_mode_dot(A, B) elif mode == modes['iM']: # Inverted mixed (rank(A)=3, rank(B)=2) # Works only with dense tensors output = K.dot(A, B) elif mode == modes['B']: # Batch mode # Works only with dense tensors output = K.batch_dot(A, B) else: raise ValueError('A and B must have rank 2 or 3.') return output def matmul_AT_B_A(A, B): """ Computes A.T * B * A, dealing with sparsity and single/batch/mixed modes automatically. Mixed mode multiplication also works when A has rank 3 and B has rank 2. Sparse multiplication does not work with batch mode. :param A: Tensor or SparseTensor with rank 2 or 3. :param B: Tensor or SparseTensor with rank 2 or 3. 
:return: """ mode = autodetect_mode(A, B) if mode == modes['S']: # Single (rank(A)=2, rank(B)=2) output = single_mode_dot(single_mode_dot(transpose(A), B), A) elif mode == modes['M']: # Mixed (rank(A)=2, rank(B)=3) output = mixed_mode_dot(transpose(A), B) if K.is_sparse(A): output = transpose( mixed_mode_dot(transpose(A), transpose(output, (0, 2, 1))), (0, 2, 1) ) else: output = K.dot(output, A) elif mode == modes['iM']: # Inverted mixed (rank(A)=3, rank(B)=2) # Works only with dense tensors output = mixed_mode_dot(B, A) output = K.batch_dot(transpose(A, (0, 2, 1)), output) elif mode == modes['B']: # Batch (rank(A)=3, rank(B)=3) # Works only with dense tensors output = K.batch_dot( K.batch_dot( transpose(A, (0, 2, 1)), B ), A ) else: raise ValueError('A and B must have rank 2 or 3.') return output def matmul_AT_B(A, B): """ Computes A.T * B, dealing with sparsity and single/batch/mixed modes automatically. Mixed mode multiplication also works when A has rank 3 and B has rank 2. Sparse multiplication does not work with batch mode. :param A: Tensor or SparseTensor with rank 2 or 3. :param B: Tensor or SparseTensor with rank 2 or 3. :return: """ mode = autodetect_mode(A, B) if mode == modes['S']: # Single (rank(A)=2, rank(B)=2) output = single_mode_dot(transpose(A), B) elif mode == modes['M']: # Mixed (rank(A)=2, rank(B)=3) output = mixed_mode_dot(transpose(A), B) elif mode == modes['iM']: # Inverted mixed (rank(A)=3, rank(B)=2) # Works only with dense tensors output = K.dot(transpose(A, (0, 2, 1)), B) elif mode == modes['B']: # Batch (rank(A)=3, rank(B)=3) # Works only with dense tensors output = K.batch_dot(transpose(A, (0, 2, 1)), B) else: raise ValueError('A and B must have rank 2 or 3.') return output def matmul_A_BT(A, B): """ Computes A * B.T, dealing with sparsity and single/batch/mixed modes automatically. Mixed mode multiplication also works when A has rank 3 and B has rank 2. Sparse multiplication does not work with batch mode. :param A: Tensor or SparseTensor with rank 2 or 3. :param B: Tensor or SparseTensor with rank 2 or 3. :return: """ mode = autodetect_mode(A, B) if mode == modes['S']: # Single (rank(A)=2, rank(B)=2) output = single_mode_dot(A, transpose(B)) elif mode == modes['M']: # Mixed (rank(A)=2, rank(B)=3) output = mixed_mode_dot(A, transpose(B, (0, 2, 1))) elif mode == modes['iM']: # Inverted mixed (rank(A)=3, rank(B)=2) # Works only with dense tensors output = K.dot(A, transpose(B)) elif mode == modes['B']: # Batch (rank(A)=3, rank(B)=3) # Works only with dense tensors output = K.batch_dot(A, transpose(B, (0, 2, 1))) else: raise ValueError('A and B must have rank 2 or 3.') return output ################################################################################ # Ops related to the modes of operation (single, mixed, batch) ################################################################################ def autodetect_mode(A, X): """ Return a code identifying the mode of operation (single, mixed, batch), given A and X. See the modes variable for meaning of codes. :param A: Tensor. :param X: Tensor. :return: mode of operation. """ if K.ndim(X) == 2: if K.ndim(A) == 2: return modes['S'] elif K.ndim(A) == 3: return modes['iM'] else: return modes['UNK'] elif K.ndim(X) == 3: if K.ndim(A) == 2: return modes['M'] elif K.ndim(A) == 3: return modes['B'] else: return modes['UNK'] else: return modes['UNK'] def single_mode_dot(A, B): """ Dot product between two rank 2 matrices. Deals automatically with either A or B being sparse. :param A: rank 2 Tensor or SparseTensor. 
:param B: rank 2 Tensor or SparseTensor. :return: rank 2 Tensor or SparseTensor. """ a_sparse = K.is_sparse(A) b_sparse = K.is_sparse(B) if a_sparse and b_sparse: raise ValueError('Sparse x Sparse matmul is not implemented yet.') elif a_sparse: output = tf.sparse_tensor_dense_matmul(A, B) elif b_sparse: output = transpose( tf.sparse_tensor_dense_matmul( transpose(B), transpose(A) ) ) else: output = tf.matmul(A, B) return output def mixed_mode_dot(A, B): """ Computes the equivalent of `tf.einsum('ij,bjk->bik', fltr, output)`, but works for both dense and sparse input filters. :param A: rank 2 Tensor or SparseTensor. :param B: rank 3 Tensor or SparseTensor. :return: rank 3 Tensor or SparseTensor. """ s_0_, s_1_, s_2_ = K.int_shape(B) B_T = transpose(B, (1, 2, 0)) B_T = reshape(B_T, (s_1_, -1)) output = single_mode_dot(A, B_T) output = reshape(output, (s_1_, s_2_, -1)) output = transpose(output, (2, 0, 1)) return output ################################################################################ # Wrappers for automatic switching between dense and sparse ops ################################################################################ def transpose(A, perm=None, name=None): """ Transposes A according to perm, dealing with sparse A automatically. :param A: Tensor or SparseTensor with rank k. :param perm: permutation indices of size k. :param name: name for the operation. :return: Tensor or SparseTensor with rank k. """ if K.is_sparse(A): transpose_op = tf.sparse.transpose else: transpose_op = tf.transpose if perm is None: perm = (1, 0) # Make explicit so that shape will always be preserved return transpose_op(A, perm=perm, name=name) def reshape(A, shape=None, name=None): """ Reshapes A according to shape, dealing with sparse A automatically. :param A: Tensor or SparseTensor. :param shape: new shape. :param name: name for the operation. :return: Tensor or SparseTensor. """ if K.is_sparse(A): reshape_op = tf.sparse.reshape else: reshape_op = tf.reshape return reshape_op(A, shape=shape, name=name) ################################################################################ # Misc ops ################################################################################ def matrix_power(x, k): """ Computes the k-th power of a square matrix. :param x: a square matrix (Tensor or SparseTensor) :param k: exponent :return: matrix of same type and dtype as the input """ if K.ndim(x) != 2: raise ValueError('x must have rank 2.') sparse = K.is_sparse(x) if sparse: x_dense = tf.sparse.to_dense(x) else: x_dense = x x_k = x_dense for _ in range(k - 1): x_k = K.dot(x_k, x_dense) if sparse: return tf.contrib.layers.dense_to_sparse(x_k) else: return x_k def repeat(x, repeats): """ Repeats elements of a Tensor (equivalent to np.repeat, but only for 1D tensors). :param x: rank 1 tensor; :param repeats: rank 1 tensor with same shape as x, the number of repetitions for each element; :return: rank 1 tensor, of shape `(sum(repeats), )`. """ x = tf.expand_dims(x, 1) max_repeats = tf.reduce_max(repeats) tile_repeats = [1, max_repeats] arr_tiled = tf.tile(x, tile_repeats) mask = tf.less(tf.range(max_repeats), tf.expand_dims(repeats, 1)) result = tf.reshape(tf.boolean_mask(arr_tiled, mask), [-1]) return result def segment_top_k(x, I, ratio, top_k_var): """ Returns indices to get the top K values in x segment-wise, according to the segments defined in I. K is not fixed, but it is defined as a ratio of the number of elements in each segment. 
:param x: a rank 1 tensor; :param I: a rank 1 tensor with segment IDs for x; :param ratio: float, ratio of elements to keep for each segment; :param top_k_var: a tf.Variable created without shape validation (i.e., `tf.Variable(0.0, validate_shape=False)`); :return: a rank 1 tensor containing the indices to get the top K values of each segment in x. """ num_nodes = tf.segment_sum(tf.ones_like(I), I) # Number of nodes in each graph cumsum = tf.cumsum(num_nodes) # Cumulative number of nodes (A, A+B, A+B+C) cumsum_start = cumsum - num_nodes # Start index of each graph n_graphs = tf.shape(num_nodes)[0] # Number of graphs in batch max_n_nodes = tf.reduce_max(num_nodes) # Order of biggest graph in batch batch_n_nodes = tf.shape(I)[0] # Number of overall nodes in batch to_keep = tf.ceil(ratio * tf.cast(num_nodes, tf.float32)) to_keep = tf.cast(to_keep, tf.int32) # Nodes to keep in each graph index = tf.range(batch_n_nodes) index = (index - tf.gather(cumsum_start, I)) + (I * max_n_nodes) y_min = tf.reduce_min(x) dense_y = tf.ones((n_graphs * max_n_nodes,)) # subtract 1 to ensure that filler values do not get picked dense_y = dense_y * tf.cast(y_min - 1, tf.float32) # top_k_var is a variable with unknown shape defined in the elsewhere dense_y = tf.assign(top_k_var, dense_y, validate_shape=False) dense_y = tf.scatter_update(dense_y, index, x) dense_y = tf.reshape(dense_y, (n_graphs, max_n_nodes)) perm = tf.argsort(dense_y, direction='DESCENDING') perm = perm + cumsum_start[:, None] perm = tf.reshape(perm, (-1,)) to_rep = tf.tile(tf.constant([1., 0.]), (n_graphs,)) rep_times = tf.reshape(tf.concat((to_keep[:, None], (max_n_nodes - to_keep)[:, None]), -1), (-1,)) mask = repeat(to_rep, rep_times) perm = tf.boolean_mask(perm, mask) return perm
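

# ---------------------------------------------------------------------------
# Added sketch (not part of the original module): how filter_dot dispatches on
# tensor ranks, assuming the TF 1.x + standalone Keras environment implied by
# the imports above. Shapes are arbitrary examples; `np` and `tf` are the
# module's own imports. Kept commented out so the module import is unchanged.
#
# fltr = tf.constant(np.random.rand(5, 5), dtype=tf.float32)          # N x N
# x_single = tf.constant(np.random.rand(5, 3), dtype=tf.float32)      # N x F          -> single mode
# x_batch = tf.constant(np.random.rand(2, 5, 3), dtype=tf.float32)    # batch x N x F  -> mixed mode
#
# out_single = filter_dot(fltr, x_single)   # K.dot          -> shape (5, 3)
# out_mixed = filter_dot(fltr, x_batch)     # mixed_mode_dot -> shape (2, 5, 3)
#
# with tf.Session() as sess:
#     print(sess.run(out_single).shape, sess.run(out_mixed).shape)
# ---------------------------------------------------------------------------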
the-stack_0_4350
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals from six.moves import range import json, os from semantic_version import Version import frappe import requests import subprocess # nosec from frappe.utils import cstr from frappe.utils.gitutils import get_app_branch from frappe import _, safe_decode import git def get_change_log(user=None): if not user: user = frappe.session.user last_known_versions = frappe._dict(json.loads(frappe.db.get_value("User", user, "last_known_versions") or "{}")) current_versions = get_versions() if not last_known_versions: update_last_known_versions() return [] change_log = [] def set_in_change_log(app, opts, change_log): from_version = last_known_versions.get(app, {}).get("version") or "0.0.1" to_version = opts["version"] if from_version != to_version: app_change_log = get_change_log_for_app(app, from_version=from_version, to_version=to_version) if app_change_log: change_log.append({ "title": opts["title"], "description": opts["description"], "version": to_version, "change_log": app_change_log }) for app, opts in current_versions.items(): if app != "frappe": set_in_change_log(app, opts, change_log) if "frappe" in current_versions: set_in_change_log("frappe", current_versions["frappe"], change_log) return change_log def get_change_log_for_app(app, from_version, to_version): change_log_folder = os.path.join(frappe.get_app_path(app), "change_log") if not os.path.exists(change_log_folder): return from_version = Version(from_version) to_version = Version(to_version) # remove pre-release part to_version.prerelease = None major_version_folders = ["v{0}".format(i) for i in range(from_version.major, to_version.major + 1)] app_change_log = [] for folder in os.listdir(change_log_folder): if folder in major_version_folders: for file in os.listdir(os.path.join(change_log_folder, folder)): version = Version(os.path.splitext(file)[0][1:].replace("_", ".")) if from_version < version <= to_version: file_path = os.path.join(change_log_folder, folder, file) content = frappe.read_file(file_path) app_change_log.append([version, content]) app_change_log = sorted(app_change_log, key=lambda d: d[0], reverse=True) # convert version to string and send return [[cstr(d[0]), d[1]] for d in app_change_log] @frappe.whitelist() def update_last_known_versions(): frappe.db.set_value("User", frappe.session.user, "last_known_versions", json.dumps(get_versions()), update_modified=False) @frappe.whitelist() def get_versions(): """Get versions of all installed apps. 
Example: { "frappe": { "title": "Frappe Framework", "version": "5.0.0" } }""" versions = {} for app in frappe.get_installed_apps(sort=True): app_hooks = frappe.get_hooks(app_name=app) versions[app] = { "title": app_hooks.get("app_title")[0], "description": app_hooks.get("app_description")[0], "branch": get_app_branch(app) } if versions[app]['branch'] != 'master': branch_version = app_hooks.get('{0}_version'.format(versions[app]['branch'])) if branch_version: versions[app]['branch_version'] = branch_version[0] + ' ({0})'.format(get_app_last_commit_ref(app)) try: versions[app]["version"] = frappe.get_attr(app + ".__version__") except AttributeError: versions[app]["version"] = '0.0.1' return versions def get_app_branch(app): '''Returns branch of an app''' try: result = subprocess.check_output('cd ../apps/{0} && git rev-parse --abbrev-ref HEAD'.format(app), shell=True) result = safe_decode(result) result = result.strip() return result except Exception as e: return '' def get_app_last_commit_ref(app): try: result = subprocess.check_output('cd ../apps/{0} && git rev-parse HEAD --short 7'.format(app), shell=True) result = safe_decode(result) result = result.strip() return result except Exception as e: return '' def check_for_update(): updates = frappe._dict(major=[], minor=[], patch=[]) apps = get_versions() for app in apps: app_details = check_release_on_github(app) if not app_details: continue github_version, org_name = app_details # Get local instance's current version or the app branch_version = apps[app]['branch_version'].split(' ')[0] if apps[app].get('branch_version', '') else '' instance_version = Version(branch_version or apps[app].get('version')) # Compare and popup update message for update_type in updates: if github_version.__dict__[update_type] > instance_version.__dict__[update_type]: updates[update_type].append(frappe._dict( current_version = str(instance_version), available_version = str(github_version), org_name = org_name, app_name = app, title = apps[app]['title'], )) break if github_version.__dict__[update_type] < instance_version.__dict__[update_type]: break add_message_to_redis(updates) def parse_latest_non_beta_release(response): """ Pasrses the response JSON for all the releases and returns the latest non prerelease Parameters response (list): response object returned by github Returns json : json object pertaining to the latest non-beta release """ for release in response: if release['prerelease'] == True: continue return release def check_release_on_github(app): # Check if repo remote is on github from subprocess import CalledProcessError try: remote_url = subprocess.check_output("cd ../apps/{} && git ls-remote --get-url".format(app), shell=True).decode() except CalledProcessError: # Passing this since some apps may not have git initializaed in them return None if isinstance(remote_url, bytes): remote_url = remote_url.decode() if "github.com" not in remote_url: return None # Get latest version from github if 'https' not in remote_url: return None org_name = remote_url.split('/')[3] r = requests.get('https://api.github.com/repos/{}/{}/releases'.format(org_name, app)) if r.status_code == 200 and r.json(): lastest_non_beta_release = parse_latest_non_beta_release(r.json()) return Version(lastest_non_beta_release['tag_name'].strip('v')), org_name else: # In case of an improper response or if there are no releases return None def add_message_to_redis(update_json): # "update-message" will store the update message string # "update-user-set" will be a set of users cache = 
frappe.cache() cache.set_value("update-info", json.dumps(update_json)) user_list = [x.name for x in frappe.get_all("User", filters={"enabled": True})] system_managers = [user for user in user_list if 'System Manager' in frappe.get_roles(user)] cache.sadd("update-user-set", *system_managers) @frappe.whitelist() def show_update_popup(): cache = frappe.cache() user = frappe.session.user update_info = cache.get_value("update-info") if not update_info: return updates = json.loads(update_info) current_versions = get_versions() # Check if user is int the set of users to send update message to update_message = "" if cache.sismember("update-user-set", user): for update_type in updates: release_links = "" for app in updates[update_type]: app = frappe._dict(app) release_links += "<a href='https://github.com/{org_name}/{app_name}/releases/tag/v{available_version}'><b>{title}</b>: v{available_version}</a><br>".format( available_version = app.available_version, org_name = app.org_name, app_name = app.app_name, title = app.title ) if release_links: update_message += _("New {} releases for the following apps are available".format(update_type)) + ":<br><br>{}".format(release_links) if update_message: frappe.msgprint(update_message, title=_("New updates are available"), indicator='green') cache.srem("update-user-set", user)
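

# ---------------------------------------------------------------------------
# Added sketch (not part of the original module): the semantic_version
# comparison that check_for_update relies on, shown standalone. The version
# strings below are made-up examples; `Version` is the import used above.
#
# installed = Version('12.0.4')
# latest = Version('12.1.0')
# assert latest.major == installed.major and latest.minor > installed.minor
# # check_for_update would report this as a "minor" update for that app.
# ---------------------------------------------------------------------------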
the-stack_0_4351
#!/usr/bin/env python # -*- coding: utf-8 -*- import vim import re import os import os.path from functools import wraps from .utils import * from .explorer import * from .manager import * from .mru import * from .devicons import ( webDevIconsGetFileTypeSymbol, webDevIconsStrLen, webDevIconsBytesLen, matchaddDevIconsDefault, matchaddDevIconsExact, matchaddDevIconsExtension, ) #***************************************************** # BufferExplorer #***************************************************** class BufferExplorer(Explorer): def __init__(self): self._prefix_length = 0 self._max_bufname_len = 0 def getContent(self, *args, **kwargs): mru_bufnrs = [] for num in reversed(lfEval("g:Lf_MruBufnrs")): if num not in mru_bufnrs: mru_bufnrs.append(int(num)) for num in reversed(mru_bufnrs): mru.setBufferTimestamp(num) lfCmd("let g:Lf_MruBufnrs = []") if "--all" not in kwargs.get("arguments", {}): if "--tabpage" not in kwargs.get("arguments", {}): buffers = {b.number: b for b in vim.buffers if lfEval("buflisted(%d)" % b.number) == '1'} else: buffers = {w.buffer.number: w.buffer for w in vim.current.tabpage.windows if lfEval("buflisted(%d)" % w.buffer.number) == '1'} else: if "--tabpage" not in kwargs.get("arguments", {}): buffers = {b.number: b for b in vim.buffers if os.path.basename(b.name) != "LeaderF"} else: buffers = {w.buffer.number: w.buffer for w in vim.current.tabpage.windows if os.path.basename(w.buffer.name) != "LeaderF"} # e.g., 12 u %a+-  aaa.txt bufnr_len = len(lfEval("bufnr('$')")) self._prefix_length = bufnr_len + 8 if lfEval("get(g:, 'Lf_ShowDevIcons', 1)") == '1': self._prefix_length += webDevIconsStrLen() self._max_bufname_len = max([int(lfEval("strdisplaywidth('%s')" % escQuote(getBasename(buffers[nr].name)))) for nr in mru.getMruBufnrs() if nr in buffers] + [len('[No Name]')] or [0]) bufnames = [] for nr in mru.getMruBufnrs(): if nr in buffers: buf_name = buffers[nr].name if not buf_name: buf_name = "[No Name]" if lfEval("g:Lf_ShowRelativePath") == '1': buf_name = lfRelpath(buf_name) basename = getBasename(buf_name) dirname = getDirname(buf_name) space_num = self._max_bufname_len \ - int(lfEval("strdisplaywidth('%s')" % escQuote(basename))) if lfEval("get(g:, 'Lf_ShowDevIcons', 1)") == '1': icon = webDevIconsGetFileTypeSymbol(basename) else: icon = '' # e.g., 12 u %a+-  aaa.txt buf_name = '{:{width}d} {:1s} {:1s}{:1s}{:1s}{:1s} {}{}{} "{}"'.format(nr, '' if buffers[nr].options["buflisted"] else 'u', '%' if int(lfEval("bufnr('%')")) == nr else '#' if int(lfEval("bufnr('#')")) == nr else '', 'a' if lfEval("bufwinnr(%d)" % nr) != '-1' else 'h', '+' if buffers[nr].options["modified"] else '', '-' if not buffers[nr].options["modifiable"] else '', icon, basename, ' ' * space_num, dirname if dirname else '.' 
+ os.sep, width=bufnr_len) bufnames.append(buf_name) del buffers[nr] elif lfEval("bufnr(%d)" % nr) == '-1': mru.delMruBufnr(nr) return bufnames def getStlCategory(self): return 'Buffer' def getStlCurDir(self): return escQuote(lfEncode(os.getcwd())) def supportsNameOnly(self): return True def getPrefixLength(self): return self._prefix_length def getMaxBufnameLen(self): return self._max_bufname_len #***************************************************** # BufExplManager #***************************************************** class BufExplManager(Manager): def __init__(self): super(BufExplManager, self).__init__() def _getExplClass(self): return BufferExplorer def _defineMaps(self): lfCmd("call leaderf#Buffer#Maps()") def _acceptSelection(self, *args, **kwargs): if len(args) == 0: return line = args[0] buf_number = int(re.sub(r"^.*?(\d+).*$", r"\1", line)) if kwargs.get("mode", '') == 't': buf_name = lfEval("bufname(%s)" % buf_number) lfCmd("tab drop %s" % escSpecial(buf_name)) else: if lfEval("get(g:, 'Lf_JumpToExistingWindow', 0)") == '1': buf_name = lfEval("bufname(%s)" % buf_number) lfCmd("hide drop %s" % escSpecial(buf_name)) else: lfCmd("hide buffer %d" % buf_number) def _getDigest(self, line, mode): """ specify what part in the line to be processed and highlighted Args: mode: 0, return the full path 1, return the name only 2, return the directory name """ if not line: return '' prefix_len = self._getExplorer().getPrefixLength() if mode == 0: return line[prefix_len:] elif mode == 1: buf_number = int(re.sub(r"^.*?(\d+).*$", r"\1", line)) basename = getBasename(vim.buffers[buf_number].name) return basename if basename else "[No Name]" else: start_pos = line.find(' "') return line[start_pos+2 : -1] def _getDigestStartPos(self, line, mode): """ return the start position of the digest returned by _getDigest() Args: mode: 0, return the start postion of full path 1, return the start postion of name only 2, return the start postion of directory name """ if not line: return 0 prefix_len = self._getExplorer().getPrefixLength() - webDevIconsStrLen() + webDevIconsBytesLen() if mode == 0: return prefix_len elif mode == 1: return prefix_len else: buf_number = int(re.sub(r"^.*?(\d+).*$", r"\1", line)) basename = getBasename(vim.buffers[buf_number].name) space_num = self._getExplorer().getMaxBufnameLen() \ - int(lfEval("strdisplaywidth('%s')" % escQuote(basename))) return prefix_len + lfBytesLen(basename) + space_num + 2 def _createHelp(self): help = [] help.append('" <CR>/<double-click>/o : open file under cursor') help.append('" x : open file under cursor in a horizontally split window') help.append('" v : open file under cursor in a vertically split window') help.append('" t : open file under cursor in a new tabpage') help.append('" d : wipe out buffer under cursor') help.append('" D : delete buffer under cursor') help.append('" i/<Tab> : switch to input mode') help.append('" q : quit') help.append('" <F1> : toggle this help') help.append('" ---------------------------------------------------------') return help def _afterEnter(self): super(BufExplManager, self)._afterEnter() winid = None if self._getInstance().getWinPos() == 'popup': lfCmd("""call win_execute(%d, 'let matchid = matchadd(''Lf_hl_bufNumber'', ''^\s*\zs\d\+'')')""" % self._getInstance().getPopupWinId()) id = int(lfEval("matchid")) self._match_ids.append(id) lfCmd("""call win_execute(%d, 'let matchid = matchadd(''Lf_hl_bufIndicators'', ''^\s*\d\+\s*\zsu\=\s*[#%%]\=...'')')""" % self._getInstance().getPopupWinId()) id = 
int(lfEval("matchid")) self._match_ids.append(id) lfCmd("""call win_execute(%d, 'let matchid = matchadd(''Lf_hl_bufModified'', ''^\s*\d\+\s*u\=\s*[#%%]\=.+\s*\zs.*$'')')""" % self._getInstance().getPopupWinId()) id = int(lfEval("matchid")) self._match_ids.append(id) lfCmd("""call win_execute(%d, 'let matchid = matchadd(''Lf_hl_bufNomodifiable'', ''^\s*\d\+\s*u\=\s*[#%%]\=..-\s*\zs.*$'')')""" % self._getInstance().getPopupWinId()) id = int(lfEval("matchid")) self._match_ids.append(id) lfCmd("""call win_execute(%d, 'let matchid = matchadd(''Lf_hl_bufDirname'', '' \zs".*"$'')')""" % self._getInstance().getPopupWinId()) id = int(lfEval("matchid")) self._match_ids.append(id) winid = self._getInstance().getPopupWinId() else: id = int(lfEval("matchadd('Lf_hl_bufNumber', '^\s*\zs\d\+')")) self._match_ids.append(id) id = int(lfEval("matchadd('Lf_hl_bufIndicators', '^\s*\d\+\s*\zsu\=\s*[#%]\=...')")) self._match_ids.append(id) id = int(lfEval("matchadd('Lf_hl_bufModified', '^\s*\d\+\s*u\=\s*[#%]\=.+\s*\zs.*$')")) self._match_ids.append(id) id = int(lfEval("matchadd('Lf_hl_bufNomodifiable', '^\s*\d\+\s*u\=\s*[#%]\=..-\s*\zs.*$')")) self._match_ids.append(id) id = int(lfEval('''matchadd('Lf_hl_bufDirname', ' \zs".*"$')''')) self._match_ids.append(id) # devicons if lfEval("get(g:, 'Lf_ShowDevIcons', 1)") == '1': self._match_ids.extend(matchaddDevIconsExtension(r'__icon__\ze\s\+\S\+\.__name__\($\|\s\)', winid)) self._match_ids.extend(matchaddDevIconsExact(r'__icon__\ze\s\+__name__\($\|\s\)', winid)) self._match_ids.extend(matchaddDevIconsDefault(r'__icon__\ze\s\+\S\+\($\|\s\)', winid)) def _beforeExit(self): super(BufExplManager, self)._beforeExit() def deleteBuffer(self, wipe=0): instance = self._getInstance() if self._inHelpLines(): return if instance.getWinPos() == 'popup': lfCmd("call win_execute(%d, 'setlocal modifiable')" % instance.getPopupWinId()) else: lfCmd("setlocal modifiable") line = instance._buffer_object[instance.window.cursor[0] - 1] if len(self._content) > 0: self._content.remove(line) self._getInstance().setStlTotal(len(self._content)//self._getUnit()) self._getInstance().setStlResultsCount(len(self._content)//self._getUnit()) buf_number = int(re.sub(r"^.*?(\d+).*$", r"\1", line)) lfCmd("confirm %s %d" % ('bw' if wipe else 'bd', buf_number)) del instance._buffer_object[instance.window.cursor[0] - 1] if instance.getWinPos() == 'popup': instance.refreshPopupStatusline() lfCmd("call win_execute(%d, 'setlocal nomodifiable')" % instance.getPopupWinId()) else: lfCmd("setlocal nomodifiable") def _previewInPopup(self, *args, **kwargs): line = args[0] buf_number = int(re.sub(r"^.*?(\d+).*$", r"\1", line)) self._createPopupPreview(vim.buffers[buf_number].name, buf_number, 0) #***************************************************** # bufExplManager is a singleton #***************************************************** bufExplManager = BufExplManager() __all__ = ['bufExplManager']
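

# ---------------------------------------------------------------------------
# Added sketch (not part of the plugin): the buffer-number extraction used by
# _acceptSelection/deleteBuffer above, shown on a plain string. The sample
# line is made up; only the regular expression comes from this file.
#
# line = ' 12 u %a+-  aaa.txt        "."'
# buf_number = int(re.sub(r"^.*?(\d+).*$", r"\1", line))   # -> 12
# ---------------------------------------------------------------------------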
the-stack_0_4352
#! /usr/bin/python # -*- coding: utf-8 -*- import base64 import gzip import json import math import os import pickle import re import shutil # import ast import sys import tarfile import time import zipfile import cloudpickle import h5py import numpy as np import scipy.io as sio from six.moves import cPickle import progressbar import tensorflow as tf import tensorlayer as tl from tensorflow.python.keras.saving import model_config as model_config_lib from tensorflow.python.platform import gfile from tensorflow.python.util import serialization from tensorflow.python.util.tf_export import keras_export from tensorlayer import logging, nlp, utils, visualize import cloudpickle import base64 from tensorflow.python.keras.saving import model_config as model_config_lib from tensorflow.python.util.tf_export import keras_export from tensorflow.python.util import serialization import json import datetime # from six.moves import zip if sys.version_info[0] == 2: from urllib import urlretrieve else: from urllib.request import urlretrieve # import tensorflow.contrib.eager.python.saver as tfes # TODO: tf2.0 not stable, cannot import tensorflow.contrib.eager.python.saver __all__ = [ 'assign_weights', 'del_file', 'del_folder', 'download_file_from_google_drive', 'exists_or_mkdir', 'file_exists', 'folder_exists', 'load_and_assign_npz', 'load_and_assign_npz_dict', 'load_ckpt', 'load_cropped_svhn', 'load_file_list', 'load_folder_list', 'load_npy_to_any', 'load_npz', 'maybe_download_and_extract', 'natural_keys', 'npz_to_W_pdf', 'read_file', 'save_any_to_npy', 'save_ckpt', 'save_npz', 'save_npz_dict', 'tf_variables_to_numpy', 'assign_tf_variable', 'save_weights_to_hdf5', 'load_hdf5_to_weights_in_order', 'load_hdf5_to_weights', 'save_hdf5_graph', 'load_hdf5_graph', # 'net2static_graph', 'static_graph2net', # 'save_pkl_graph', # 'load_pkl_graph', ] def func2str(expr): b = cloudpickle.dumps(expr) s = base64.b64encode(b).decode() return s def str2func(s): b = base64.b64decode(s) expr = cloudpickle.loads(b) return expr # def net2static_graph(network): # saved_file = dict() # # if network._NameNone is True: # # saved_file.update({"name": None}) # # else: # # saved_file.update({"name": network.name}) # # if not isinstance(network.inputs, list): # # saved_file.update({"inputs": network.inputs._info[0].name}) # # else: # # saved_inputs = [] # # for saved_input in network.inputs: # # saved_inputs.append(saved_input._info[0].name) # # saved_file.update({"inputs": saved_inputs}) # # if not isinstance(network.outputs, list): # # saved_file.update({"outputs": network.outputs._info[0].name}) # # else: # # saved_outputs = [] # # for saved_output in network.outputs: # # saved_outputs.append(saved_output._info[0].name) # # saved_file.update({"outputs": saved_outputs}) # saved_file.update({"config": network.config}) # # return saved_file @keras_export('keras.models.save_model') def save_keras_model(model): # f.attrs['keras_model_config'] = json.dumps( # { # 'class_name': model.__class__.__name__, # 'config': model.get_config() # }, # default=serialization.get_json_type).encode('utf8') # # f.flush() return json.dumps( { 'class_name': model.__class__.__name__, 'config': model.get_config() }, default=serialization.get_json_type ).encode('utf8') @keras_export('keras.models.load_model') def load_keras_model(model_config): custom_objects = {} if model_config is None: raise ValueError('No model found in config.') model_config = json.loads(model_config.decode('utf-8')) model = model_config_lib.model_from_config(model_config, 
custom_objects=custom_objects) return model def save_hdf5_graph(network, filepath='model.hdf5', save_weights=False, customized_data=None): """Save the architecture of TL model into a hdf5 file. Support saving model weights. Parameters ----------- network : TensorLayer Model. The network to save. filepath : str The name of model file. save_weights : bool Whether to save model weights. customized_data : dict The user customized meta data. Examples -------- >>> # Save the architecture (with parameters) >>> tl.files.save_hdf5_graph(network, filepath='model.hdf5', save_weights=True) >>> # Save the architecture (without parameters) >>> tl.files.save_hdf5_graph(network, filepath='model.hdf5', save_weights=False) >>> # Load the architecture in another script (no parameters restore) >>> net = tl.files.load_hdf5_graph(filepath='model.hdf5', load_weights=False) >>> # Load the architecture in another script (restore parameters) >>> net = tl.files.load_hdf5_graph(filepath='model.hdf5', load_weights=True) """ if network.outputs is None: raise RuntimeError("save_hdf5_graph not support dynamic mode yet") logging.info("[*] Saving TL model into {}, saving weights={}".format(filepath, save_weights)) model_config = network.config # net2static_graph(network) model_config["version_info"]["save_date"] = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc ).isoformat() model_config_str = str(model_config) customized_data_str = str(customized_data) # version_info = { # "tensorlayer_version": tl.__version__, # "backend": "tensorflow", # "backend_version": tf.__version__, # "training_device": "gpu", # "save_date": datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat() # } # version_info_str = str(version_info) with h5py.File(filepath, 'w') as f: f.attrs["model_config"] = model_config_str.encode('utf8') f.attrs["customized_data"] = customized_data_str.encode('utf8') # f.attrs["version_info"] = version_info_str.encode('utf8') if save_weights: _save_weights_to_hdf5_group(f, network.all_layers) f.flush() logging.info("[*] Saved TL model into {}, saving weights={}".format(filepath, save_weights)) def generate_func(args): for key in args: if isinstance(args[key], tuple) and args[key][0] == 'is_Func': fn = str2func(args[key][1]) args[key] = fn # if key in ['act']: # # fn_dict = args[key] # # module_path = fn_dict['module_path'] # # func_name = fn_dict['func_name'] # # lib = importlib.import_module(module_path) # # fn = getattr(lib, func_name) # # args[key] = fn # fn = str2func(args[key]) # args[key] = fn # elif key in ['fn']: # fn = str2func(args[key]) # args[key] = fn def eval_layer(layer_kwargs): layer_class = layer_kwargs.pop('class') args = layer_kwargs['args'] layer_type = args.pop('layer_type') if layer_type == "normal": generate_func(args) return eval('tl.layers.' + layer_class)(**args) elif layer_type == "layerlist": ret_layer = [] layers = args["layers"] for layer_graph in layers: ret_layer.append(eval_layer(layer_graph)) args['layers'] = ret_layer return eval('tl.layers.' + layer_class)(**args) elif layer_type == "modellayer": M = static_graph2net(args['model']) args['model'] = M return eval('tl.layers.' + layer_class)(**args) elif layer_type == "keraslayer": M = load_keras_model(args['fn']) input_shape = args.pop('keras_input_shape') _ = M(np.random.random(input_shape).astype(np.float32)) args['fn'] = M args['fn_weights'] = M.trainable_variables return eval('tl.layers.' 
+ layer_class)(**args) else: raise RuntimeError("Unknown layer type.") def static_graph2net(model_config): layer_dict = {} model_name = model_config["name"] inputs_tensors = model_config["inputs"] outputs_tensors = model_config["outputs"] all_args = model_config["model_architecture"] for idx, layer_kwargs in enumerate(all_args): layer_class = layer_kwargs["class"] # class of current layer prev_layers = layer_kwargs.pop("prev_layer") # name of previous layers net = eval_layer(layer_kwargs) if layer_class in tl.layers.inputs.__all__: net = net._nodes[0].out_tensors[0] if prev_layers is not None: for prev_layer in prev_layers: if not isinstance(prev_layer, list): output = net(layer_dict[prev_layer]) layer_dict[output._info[0].name] = output else: list_layers = [layer_dict[layer] for layer in prev_layer] output = net(list_layers) layer_dict[output._info[0].name] = output else: layer_dict[net._info[0].name] = net if not isinstance(inputs_tensors, list): model_inputs = layer_dict[inputs_tensors] else: model_inputs = [] for inputs_tensor in inputs_tensors: model_inputs.append(layer_dict[inputs_tensor]) if not isinstance(outputs_tensors, list): model_outputs = layer_dict[outputs_tensors] else: model_outputs = [] for outputs_tensor in outputs_tensors: model_outputs.append(layer_dict[outputs_tensor]) from tensorlayer.models import Model M = Model(inputs=model_inputs, outputs=model_outputs, name=model_name) logging.info("[*] Load graph finished") return M def load_hdf5_graph(filepath='model.hdf5', load_weights=False): """Restore TL model archtecture from a a pickle file. Support loading model weights. Parameters ----------- filepath : str The name of model file. load_weights : bool Whether to load model weights. Returns -------- network : TensorLayer Model. Examples -------- - see ``tl.files.save_hdf5_graph`` """ logging.info("[*] Loading TL model from {}, loading weights={}".format(filepath, load_weights)) f = h5py.File(filepath, 'r') model_config_str = f.attrs["model_config"].decode('utf8') model_config = eval(model_config_str) # version_info_str = f.attrs["version_info"].decode('utf8') # version_info = eval(version_info_str) version_info = model_config["version_info"] backend_version = version_info["backend_version"] tensorlayer_version = version_info["tensorlayer_version"] if backend_version != tf.__version__: logging.warning( "Saved model uses tensorflow version {}, but now you are using tensorflow version {}".format( backend_version, tf.__version__ ) ) if tensorlayer_version != tl.__version__: logging.warning( "Saved model uses tensorlayer version {}, but now you are using tensorlayer version {}".format( tensorlayer_version, tl.__version__ ) ) M = static_graph2net(model_config) if load_weights: if not ('layer_names' in f.attrs.keys()): raise RuntimeError("Saved model does not contain weights.") M.load_weights(filepath=filepath) f.close() logging.info("[*] Loaded TL model from {}, loading weights={}".format(filepath, load_weights)) return M # def load_pkl_graph(name='model.pkl'): # """Restore TL model archtecture from a a pickle file. No parameters be restored. # # Parameters # ----------- # name : str # The name of graph file. # # Returns # -------- # network : TensorLayer Model. 
# # Examples # -------- # >>> # It is better to use load_hdf5_graph # """ # logging.info("[*] Loading TL graph from {}".format(name)) # with open(name, 'rb') as file: # saved_file = pickle.load(file) # # M = static_graph2net(saved_file) # # return M # # # def save_pkl_graph(network, name='model.pkl'): # """Save the architecture of TL model into a pickle file. No parameters be saved. # # Parameters # ----------- # network : TensorLayer layer # The network to save. # name : str # The name of graph file. # # Example # -------- # >>> # It is better to use save_hdf5_graph # """ # if network.outputs is None: # raise AssertionError("save_graph not support dynamic mode yet") # # logging.info("[*] Saving TL graph into {}".format(name)) # # saved_file = net2static_graph(network) # # with open(name, 'wb') as file: # pickle.dump(saved_file, file, protocol=pickle.HIGHEST_PROTOCOL) # logging.info("[*] Saved graph") # Load dataset functions def load_mnist_dataset(shape=(-1, 784), path='data'): """Load the original mnist. Automatically download MNIST dataset and return the training, validation and test set with 50000, 10000 and 10000 digit images respectively. Parameters ---------- shape : tuple The shape of digit images (the default is (-1, 784), alternatively (-1, 28, 28, 1)). path : str The path that the data is downloaded to. Returns ------- X_train, y_train, X_val, y_val, X_test, y_test: tuple Return splitted training/validation/test set respectively. Examples -------- >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1,784), path='datasets') >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) """ return _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/') def load_fashion_mnist_dataset(shape=(-1, 784), path='data'): """Load the fashion mnist. Automatically download fashion-MNIST dataset and return the training, validation and test set with 50000, 10000 and 10000 fashion images respectively, `examples <http://marubon-ds.blogspot.co.uk/2017/09/fashion-mnist-exploring.html>`__. Parameters ---------- shape : tuple The shape of digit images (the default is (-1, 784), alternatively (-1, 28, 28, 1)). path : str The path that the data is downloaded to. Returns ------- X_train, y_train, X_val, y_val, X_test, y_test: tuple Return splitted training/validation/test set respectively. Examples -------- >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_fashion_mnist_dataset(shape=(-1,784), path='datasets') >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_fashion_mnist_dataset(shape=(-1, 28, 28, 1)) """ return _load_mnist_dataset( shape, path, name='fashion_mnist', url='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/' ) def _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/'): """A generic function to load mnist-like dataset. Parameters: ---------- shape : tuple The shape of digit images. path : str The path that the data is downloaded to. name : str The dataset name you want to use(the default is 'mnist'). url : str The url of dataset(the default is 'http://yann.lecun.com/exdb/mnist/'). """ path = os.path.join(path, name) # Define functions for loading mnist-like data's images and labels. # For convenience, they also download the requested files if needed. 
def load_mnist_images(path, filename): filepath = maybe_download_and_extract(filename, path, url) logging.info(filepath) # Read the inputs in Yann LeCun's binary format. with gzip.open(filepath, 'rb') as f: data = np.frombuffer(f.read(), np.uint8, offset=16) # The inputs are vectors now, we reshape them to monochrome 2D images, # following the shape convention: (examples, channels, rows, columns) data = data.reshape(shape) # The inputs come as bytes, we convert them to float32 in range [0,1]. # (Actually to range [0, 255/256], for compatibility to the version # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.) return data / np.float32(256) def load_mnist_labels(path, filename): filepath = maybe_download_and_extract(filename, path, url) # Read the labels in Yann LeCun's binary format. with gzip.open(filepath, 'rb') as f: data = np.frombuffer(f.read(), np.uint8, offset=8) # The labels are vectors of integers now, that's exactly what we want. return data # Download and read the training and test set images and labels. logging.info("Load or Download {0} > {1}".format(name.upper(), path)) X_train = load_mnist_images(path, 'train-images-idx3-ubyte.gz') y_train = load_mnist_labels(path, 'train-labels-idx1-ubyte.gz') X_test = load_mnist_images(path, 't10k-images-idx3-ubyte.gz') y_test = load_mnist_labels(path, 't10k-labels-idx1-ubyte.gz') # We reserve the last 10000 training examples for validation. X_train, X_val = X_train[:-10000], X_train[-10000:] y_train, y_val = y_train[:-10000], y_train[-10000:] # We just return all the arrays in order, as expected in main(). # (It doesn't matter how we do this as long as we can read them again.) X_train = np.asarray(X_train, dtype=np.float32) y_train = np.asarray(y_train, dtype=np.int32) X_val = np.asarray(X_val, dtype=np.float32) y_val = np.asarray(y_val, dtype=np.int32) X_test = np.asarray(X_test, dtype=np.float32) y_test = np.asarray(y_test, dtype=np.int32) return X_train, y_train, X_val, y_val, X_test, y_test def load_cifar10_dataset(shape=(-1, 32, 32, 3), path='data', plotable=False): """Load CIFAR-10 dataset. It consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. The dataset is divided into five training batches and one test batch, each with 10000 images. The test batch contains exactly 1000 randomly-selected images from each class. The training batches contain the remaining images in random order, but some training batches may contain more images from one class than another. Between them, the training batches contain exactly 5000 images from each class. Parameters ---------- shape : tupe The shape of digit images e.g. (-1, 3, 32, 32) and (-1, 32, 32, 3). path : str The path that the data is downloaded to, defaults is ``data/cifar10/``. plotable : boolean Whether to plot some image examples, False as default. 
Examples -------- >>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3)) References ---------- - `CIFAR website <https://www.cs.toronto.edu/~kriz/cifar.html>`__ - `Data download link <https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz>`__ - `<https://teratail.com/questions/28932>`__ """ path = os.path.join(path, 'cifar10') logging.info("Load or Download cifar10 > {}".format(path)) # Helper function to unpickle the data def unpickle(file): fp = open(file, 'rb') if sys.version_info.major == 2: data = pickle.load(fp) elif sys.version_info.major == 3: data = pickle.load(fp, encoding='latin-1') fp.close() return data filename = 'cifar-10-python.tar.gz' url = 'https://www.cs.toronto.edu/~kriz/' # Download and uncompress file maybe_download_and_extract(filename, path, url, extract=True) # Unpickle file and fill in data X_train = None y_train = [] for i in range(1, 6): data_dic = unpickle(os.path.join(path, 'cifar-10-batches-py/', "data_batch_{}".format(i))) if i == 1: X_train = data_dic['data'] else: X_train = np.vstack((X_train, data_dic['data'])) y_train += data_dic['labels'] test_data_dic = unpickle(os.path.join(path, 'cifar-10-batches-py/', "test_batch")) X_test = test_data_dic['data'] y_test = np.array(test_data_dic['labels']) if shape == (-1, 3, 32, 32): X_test = X_test.reshape(shape) X_train = X_train.reshape(shape) elif shape == (-1, 32, 32, 3): X_test = X_test.reshape(shape, order='F') X_train = X_train.reshape(shape, order='F') X_test = np.transpose(X_test, (0, 2, 1, 3)) X_train = np.transpose(X_train, (0, 2, 1, 3)) else: X_test = X_test.reshape(shape) X_train = X_train.reshape(shape) y_train = np.array(y_train) if plotable: if sys.platform.startswith('darwin'): import matplotlib matplotlib.use('TkAgg') import matplotlib.pyplot as plt logging.info('\nCIFAR-10') fig = plt.figure(1) logging.info('Shape of a training image: X_train[0] %s' % X_train[0].shape) plt.ion() # interactive mode count = 1 for _ in range(10): # each row for _ in range(10): # each column _ = fig.add_subplot(10, 10, count) if shape == (-1, 3, 32, 32): # plt.imshow(X_train[count-1], interpolation='nearest') plt.imshow(np.transpose(X_train[count - 1], (1, 2, 0)), interpolation='nearest') # plt.imshow(np.transpose(X_train[count-1], (2, 1, 0)), interpolation='nearest') elif shape == (-1, 32, 32, 3): plt.imshow(X_train[count - 1], interpolation='nearest') # plt.imshow(np.transpose(X_train[count-1], (1, 0, 2)), interpolation='nearest') else: raise Exception("Do not support the given 'shape' to plot the image examples") plt.gca().xaxis.set_major_locator(plt.NullLocator()) # 不显示刻度(tick) plt.gca().yaxis.set_major_locator(plt.NullLocator()) count = count + 1 plt.draw() # interactive mode plt.pause(3) # interactive mode logging.info("X_train: %s" % X_train.shape) logging.info("y_train: %s" % y_train.shape) logging.info("X_test: %s" % X_test.shape) logging.info("y_test: %s" % y_test.shape) X_train = np.asarray(X_train, dtype=np.float32) X_test = np.asarray(X_test, dtype=np.float32) y_train = np.asarray(y_train, dtype=np.int32) y_test = np.asarray(y_test, dtype=np.int32) return X_train, y_train, X_test, y_test def load_cropped_svhn(path='data', include_extra=True): """Load Cropped SVHN. The Cropped Street View House Numbers (SVHN) Dataset contains 32x32x3 RGB images. Digit '1' has label 1, '9' has label 9 and '0' has label 0 (the original dataset uses 10 to represent '0'), see `ufldl website <http://ufldl.stanford.edu/housenumbers/>`__. 
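
    On first use the ``.mat`` files are downloaded and converted to ``.npz`` files cached
    under ``path``, so subsequent calls reuse the cached arrays.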
Parameters ---------- path : str The path that the data is downloaded to. include_extra : boolean If True (default), add extra images to the training set. Returns ------- X_train, y_train, X_test, y_test: tuple Return splitted training/test set respectively. Examples --------- >>> X_train, y_train, X_test, y_test = tl.files.load_cropped_svhn(include_extra=False) >>> tl.vis.save_images(X_train[0:100], [10, 10], 'svhn.png') """ start_time = time.time() path = os.path.join(path, 'cropped_svhn') logging.info("Load or Download Cropped SVHN > {} | include extra images: {}".format(path, include_extra)) url = "http://ufldl.stanford.edu/housenumbers/" np_file = os.path.join(path, "train_32x32.npz") if file_exists(np_file) is False: filename = "train_32x32.mat" filepath = maybe_download_and_extract(filename, path, url) mat = sio.loadmat(filepath) X_train = mat['X'] / 255.0 # to [0, 1] X_train = np.transpose(X_train, (3, 0, 1, 2)) y_train = np.squeeze(mat['y'], axis=1) y_train[y_train == 10] = 0 # replace 10 to 0 np.savez(np_file, X=X_train, y=y_train) del_file(filepath) else: v = np.load(np_file, allow_pickle=True) X_train = v['X'] y_train = v['y'] logging.info(" n_train: {}".format(len(y_train))) np_file = os.path.join(path, "test_32x32.npz") if file_exists(np_file) is False: filename = "test_32x32.mat" filepath = maybe_download_and_extract(filename, path, url) mat = sio.loadmat(filepath) X_test = mat['X'] / 255.0 X_test = np.transpose(X_test, (3, 0, 1, 2)) y_test = np.squeeze(mat['y'], axis=1) y_test[y_test == 10] = 0 np.savez(np_file, X=X_test, y=y_test) del_file(filepath) else: v = np.load(np_file, allow_pickle=True) X_test = v['X'] y_test = v['y'] logging.info(" n_test: {}".format(len(y_test))) if include_extra: logging.info(" getting extra 531131 images, please wait ...") np_file = os.path.join(path, "extra_32x32.npz") if file_exists(np_file) is False: logging.info(" the first time to load extra images will take long time to convert the file format ...") filename = "extra_32x32.mat" filepath = maybe_download_and_extract(filename, path, url) mat = sio.loadmat(filepath) X_extra = mat['X'] / 255.0 X_extra = np.transpose(X_extra, (3, 0, 1, 2)) y_extra = np.squeeze(mat['y'], axis=1) y_extra[y_extra == 10] = 0 np.savez(np_file, X=X_extra, y=y_extra) del_file(filepath) else: v = np.load(np_file, allow_pickle=True) X_extra = v['X'] y_extra = v['y'] # print(X_train.shape, X_extra.shape) logging.info(" adding n_extra {} to n_train {}".format(len(y_extra), len(y_train))) t = time.time() X_train = np.concatenate((X_train, X_extra), 0) y_train = np.concatenate((y_train, y_extra), 0) # X_train = np.append(X_train, X_extra, axis=0) # y_train = np.append(y_train, y_extra, axis=0) logging.info(" added n_extra {} to n_train {} took {}s".format(len(y_extra), len(y_train), time.time() - t)) else: logging.info(" no extra images are included") logging.info(" image size: %s n_train: %d n_test: %d" % (str(X_train.shape[1:4]), len(y_train), len(y_test))) logging.info(" took: {}s".format(int(time.time() - start_time))) return X_train, y_train, X_test, y_test def load_ptb_dataset(path='data'): """Load Penn TreeBank (PTB) dataset. It is used in many LANGUAGE MODELING papers, including "Empirical Evaluation and Combination of Advanced Language Modeling Techniques", "Recurrent Neural Network Regularization". It consists of 929k training words, 73k validation words, and 82k test words. It has 10k words in its vocabulary. Parameters ---------- path : str The path that the data is downloaded to, defaults is ``data/ptb/``. 
Returns -------- train_data, valid_data, test_data : list of int The training, validating and testing data in integer format. vocab_size : int The vocabulary size. Examples -------- >>> train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset() References --------------- - ``tensorflow.models.rnn.ptb import reader`` - `Manual download <http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz>`__ Notes ------ - If you want to get the raw data, see the source code. """ path = os.path.join(path, 'ptb') logging.info("Load or Download Penn TreeBank (PTB) dataset > {}".format(path)) # Maybe dowload and uncompress tar, or load exsisting files filename = 'simple-examples.tgz' url = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/' maybe_download_and_extract(filename, path, url, extract=True) data_path = os.path.join(path, 'simple-examples', 'data') train_path = os.path.join(data_path, "ptb.train.txt") valid_path = os.path.join(data_path, "ptb.valid.txt") test_path = os.path.join(data_path, "ptb.test.txt") word_to_id = nlp.build_vocab(nlp.read_words(train_path)) train_data = nlp.words_to_word_ids(nlp.read_words(train_path), word_to_id) valid_data = nlp.words_to_word_ids(nlp.read_words(valid_path), word_to_id) test_data = nlp.words_to_word_ids(nlp.read_words(test_path), word_to_id) vocab_size = len(word_to_id) # logging.info(nlp.read_words(train_path)) # ... 'according', 'to', 'mr.', '<unk>', '<eos>'] # logging.info(train_data) # ... 214, 5, 23, 1, 2] # logging.info(word_to_id) # ... 'beyond': 1295, 'anti-nuclear': 9599, 'trouble': 1520, '<eos>': 2 ... } # logging.info(vocabulary) # 10000 # exit() return train_data, valid_data, test_data, vocab_size def load_matt_mahoney_text8_dataset(path='data'): """Load Matt Mahoney's dataset. Download a text file from Matt Mahoney's website if not present, and make sure it's the right size. Extract the first file enclosed in a zip file as a list of words. This dataset can be used for Word Embedding. Parameters ---------- path : str The path that the data is downloaded to, defaults is ``data/mm_test8/``. Returns -------- list of str The raw text data e.g. [.... 'their', 'families', 'who', 'were', 'expelled', 'from', 'jerusalem', ...] Examples -------- >>> words = tl.files.load_matt_mahoney_text8_dataset() >>> print('Data size', len(words)) """ path = os.path.join(path, 'mm_test8') logging.info("Load or Download matt_mahoney_text8 Dataset> {}".format(path)) filename = 'text8.zip' url = 'http://mattmahoney.net/dc/' maybe_download_and_extract(filename, path, url, expected_bytes=31344016) with zipfile.ZipFile(os.path.join(path, filename)) as f: word_list = f.read(f.namelist()[0]).split() for idx, _ in enumerate(word_list): word_list[idx] = word_list[idx].decode() return word_list def load_imdb_dataset( path='data', nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113, start_char=1, oov_char=2, index_from=3 ): """Load IMDB dataset. Parameters ---------- path : str The path that the data is downloaded to, defaults is ``data/imdb/``. nb_words : int Number of words to get. skip_top : int Top most frequent words to ignore (they will appear as oov_char value in the sequence data). maxlen : int Maximum sequence length. Any longer sequence will be truncated. seed : int Seed for reproducible data shuffling. start_char : int The start of a sequence will be marked with this character. Set to 1 because 0 is usually the padding character. 
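    test_split : float
        Fraction of the dataset to reserve for the test set (default is 0.2).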
oov_char : int Words that were cut out because of the num_words or skip_top limit will be replaced with this character. index_from : int Index actual words with this index and higher. Examples -------- >>> X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset( ... nb_words=20000, test_split=0.2) >>> print('X_train.shape', X_train.shape) (20000,) [[1, 62, 74, ... 1033, 507, 27],[1, 60, 33, ... 13, 1053, 7]..] >>> print('y_train.shape', y_train.shape) (20000,) [1 0 0 ..., 1 0 1] References ----------- - `Modified from keras. <https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py>`__ """ path = os.path.join(path, 'imdb') filename = "imdb.pkl" url = 'https://s3.amazonaws.com/text-datasets/' maybe_download_and_extract(filename, path, url) if filename.endswith(".gz"): f = gzip.open(os.path.join(path, filename), 'rb') else: f = open(os.path.join(path, filename), 'rb') X, labels = cPickle.load(f) f.close() np.random.seed(seed) np.random.shuffle(X) np.random.seed(seed) np.random.shuffle(labels) if start_char is not None: X = [[start_char] + [w + index_from for w in x] for x in X] elif index_from: X = [[w + index_from for w in x] for x in X] if maxlen: new_X = [] new_labels = [] for x, y in zip(X, labels): if len(x) < maxlen: new_X.append(x) new_labels.append(y) X = new_X labels = new_labels if not X: raise Exception( 'After filtering for sequences shorter than maxlen=' + str(maxlen) + ', no sequence was kept. ' 'Increase maxlen.' ) if not nb_words: nb_words = max([max(x) for x in X]) # by convention, use 2 as OOV word # reserve 'index_from' (=3 by default) characters: 0 (padding), 1 (start), 2 (OOV) if oov_char is not None: X = [[oov_char if (w >= nb_words or w < skip_top) else w for w in x] for x in X] else: nX = [] for x in X: nx = [] for w in x: if (w >= nb_words or w < skip_top): nx.append(w) nX.append(nx) X = nX X_train = np.array(X[:int(len(X) * (1 - test_split))]) y_train = np.array(labels[:int(len(X) * (1 - test_split))]) X_test = np.array(X[int(len(X) * (1 - test_split)):]) y_test = np.array(labels[int(len(X) * (1 - test_split)):]) return X_train, y_train, X_test, y_test def load_nietzsche_dataset(path='data'): """Load Nietzsche dataset. Parameters ---------- path : str The path that the data is downloaded to, defaults is ``data/nietzsche/``. Returns -------- str The content. Examples -------- >>> see tutorial_generate_text.py >>> words = tl.files.load_nietzsche_dataset() >>> words = basic_clean_str(words) >>> words = words.split() """ logging.info("Load or Download nietzsche dataset > {}".format(path)) path = os.path.join(path, 'nietzsche') filename = "nietzsche.txt" url = 'https://s3.amazonaws.com/text-datasets/' filepath = maybe_download_and_extract(filename, path, url) with open(filepath, "r") as f: words = f.read() return words def load_wmt_en_fr_dataset(path='data'): """Load WMT'15 English-to-French translation dataset. It will download the data from the WMT'15 Website (10^9-French-English corpus), and the 2013 news test from the same site as development set. Returns the directories of training data and test data. Parameters ---------- path : str The path that the data is downloaded to, defaults is ``data/wmt_en_fr/``. References ---------- - Code modified from /tensorflow/models/rnn/translation/data_utils.py Notes ----- Usually, it will take a long time to download this dataset. """ path = os.path.join(path, 'wmt_en_fr') # URLs for WMT data. 
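    # The training archive is the large "10^9 French-English" (giga-fren) corpus hosted on the
    # WMT'10 page, and the development set is newstest2013 from the WMT'15 page (URLs below).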
_WMT_ENFR_TRAIN_URL = "http://www.statmt.org/wmt10/" _WMT_ENFR_DEV_URL = "http://www.statmt.org/wmt15/" def gunzip_file(gz_path, new_path): """Unzips from gz_path into new_path.""" logging.info("Unpacking %s to %s" % (gz_path, new_path)) with gzip.open(gz_path, "rb") as gz_file: with open(new_path, "wb") as new_file: for line in gz_file: new_file.write(line) def get_wmt_enfr_train_set(path): """Download the WMT en-fr training corpus to directory unless it's there.""" filename = "training-giga-fren.tar" maybe_download_and_extract(filename, path, _WMT_ENFR_TRAIN_URL, extract=True) train_path = os.path.join(path, "giga-fren.release2.fixed") gunzip_file(train_path + ".fr.gz", train_path + ".fr") gunzip_file(train_path + ".en.gz", train_path + ".en") return train_path def get_wmt_enfr_dev_set(path): """Download the WMT en-fr training corpus to directory unless it's there.""" filename = "dev-v2.tgz" dev_file = maybe_download_and_extract(filename, path, _WMT_ENFR_DEV_URL, extract=False) dev_name = "newstest2013" dev_path = os.path.join(path, "newstest2013") if not (gfile.Exists(dev_path + ".fr") and gfile.Exists(dev_path + ".en")): logging.info("Extracting tgz file %s" % dev_file) with tarfile.open(dev_file, "r:gz") as dev_tar: fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr") en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en") fr_dev_file.name = dev_name + ".fr" # Extract without "dev/" prefix. en_dev_file.name = dev_name + ".en" dev_tar.extract(fr_dev_file, path) dev_tar.extract(en_dev_file, path) return dev_path logging.info("Load or Download WMT English-to-French translation > {}".format(path)) train_path = get_wmt_enfr_train_set(path) dev_path = get_wmt_enfr_dev_set(path) return train_path, dev_path def load_flickr25k_dataset(tag='sky', path="data", n_threads=50, printable=False): """Load Flickr25K dataset. Returns a list of images by a given tag from Flick25k dataset, it will download Flickr25k from `the official website <http://press.liacs.nl/mirflickr/mirdownload.html>`__ at the first time you use it. Parameters ------------ tag : str or None What images to return. - If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search <https://www.flickr.com/search/>`__. - If you want to get all images, set to ``None``. path : str The path that the data is downloaded to, defaults is ``data/flickr25k/``. n_threads : int The number of thread to read image. printable : boolean Whether to print infomation when reading images, default is ``False``. Examples ----------- Get images with tag of sky >>> images = tl.files.load_flickr25k_dataset(tag='sky') Get all images >>> images = tl.files.load_flickr25k_dataset(tag=None, n_threads=100, printable=True) """ path = os.path.join(path, 'flickr25k') filename = 'mirflickr25k.zip' url = 'http://press.liacs.nl/mirflickr/mirflickr25k/' # download dataset if folder_exists(os.path.join(path, "mirflickr")) is False: logging.info("[*] Flickr25k is nonexistent in {}".format(path)) maybe_download_and_extract(filename, path, url, extract=True) del_file(os.path.join(path, filename)) # return images by the given tag. # 1. image path list folder_imgs = os.path.join(path, "mirflickr") path_imgs = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False) path_imgs.sort(key=natural_keys) # 2. tag path list folder_tags = os.path.join(path, "mirflickr", "meta", "tags") path_tags = load_file_list(path=folder_tags, regx='\\.txt', printable=False) path_tags.sort(key=natural_keys) # 3. 
select images if tag is None: logging.info("[Flickr25k] reading all images") else: logging.info("[Flickr25k] reading images with tag: {}".format(tag)) images_list = [] for idx, _v in enumerate(path_tags): tags = read_file(os.path.join(folder_tags, path_tags[idx])).split('\n') # logging.info(idx+1, tags) if tag is None or tag in tags: images_list.append(path_imgs[idx]) images = visualize.read_images(images_list, folder_imgs, n_threads=n_threads, printable=printable) return images def load_flickr1M_dataset(tag='sky', size=10, path="data", n_threads=50, printable=False): """Load Flick1M dataset. Returns a list of images by a given tag from Flickr1M dataset, it will download Flickr1M from `the official website <http://press.liacs.nl/mirflickr/mirdownload.html>`__ at the first time you use it. Parameters ------------ tag : str or None What images to return. - If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search <https://www.flickr.com/search/>`__. - If you want to get all images, set to ``None``. size : int integer between 1 to 10. 1 means 100k images ... 5 means 500k images, 10 means all 1 million images. Default is 10. path : str The path that the data is downloaded to, defaults is ``data/flickr25k/``. n_threads : int The number of thread to read image. printable : boolean Whether to print infomation when reading images, default is ``False``. Examples ---------- Use 200k images >>> images = tl.files.load_flickr1M_dataset(tag='zebra', size=2) Use 1 Million images >>> images = tl.files.load_flickr1M_dataset(tag='zebra') """ path = os.path.join(path, 'flickr1M') logging.info("[Flickr1M] using {}% of images = {}".format(size * 10, size * 100000)) images_zip = [ 'images0.zip', 'images1.zip', 'images2.zip', 'images3.zip', 'images4.zip', 'images5.zip', 'images6.zip', 'images7.zip', 'images8.zip', 'images9.zip' ] tag_zip = 'tags.zip' url = 'http://press.liacs.nl/mirflickr/mirflickr1m/' # download dataset for image_zip in images_zip[0:size]: image_folder = image_zip.split(".")[0] # logging.info(path+"/"+image_folder) if folder_exists(os.path.join(path, image_folder)) is False: # logging.info(image_zip) logging.info("[Flickr1M] {} is missing in {}".format(image_folder, path)) maybe_download_and_extract(image_zip, path, url, extract=True) del_file(os.path.join(path, image_zip)) # os.system("mv {} {}".format(os.path.join(path, 'images'), os.path.join(path, image_folder))) shutil.move(os.path.join(path, 'images'), os.path.join(path, image_folder)) else: logging.info("[Flickr1M] {} exists in {}".format(image_folder, path)) # download tag if folder_exists(os.path.join(path, "tags")) is False: logging.info("[Flickr1M] tag files is nonexistent in {}".format(path)) maybe_download_and_extract(tag_zip, path, url, extract=True) del_file(os.path.join(path, tag_zip)) else: logging.info("[Flickr1M] tags exists in {}".format(path)) # 1. image path list images_list = [] images_folder_list = [] for i in range(0, size): images_folder_list += load_folder_list(path=os.path.join(path, 'images%d' % i)) images_folder_list.sort(key=lambda s: int(s.split('/')[-1])) # folder/images/ddd for folder in images_folder_list[0:size * 10]: tmp = load_file_list(path=folder, regx='\\.jpg', printable=False) tmp.sort(key=lambda s: int(s.split('.')[-2])) # ddd.jpg images_list.extend([os.path.join(folder, x) for x in tmp]) # 2. 
tag path list tag_list = [] tag_folder_list = load_folder_list(os.path.join(path, "tags")) # tag_folder_list.sort(key=lambda s: int(s.split("/")[-1])) # folder/images/ddd tag_folder_list.sort(key=lambda s: int(os.path.basename(s))) for folder in tag_folder_list[0:size * 10]: tmp = load_file_list(path=folder, regx='\\.txt', printable=False) tmp.sort(key=lambda s: int(s.split('.')[-2])) # ddd.txt tmp = [os.path.join(folder, s) for s in tmp] tag_list += tmp # 3. select images logging.info("[Flickr1M] searching tag: {}".format(tag)) select_images_list = [] for idx, _val in enumerate(tag_list): tags = read_file(tag_list[idx]).split('\n') if tag in tags: select_images_list.append(images_list[idx]) logging.info("[Flickr1M] reading images with tag: {}".format(tag)) images = visualize.read_images(select_images_list, '', n_threads=n_threads, printable=printable) return images def load_cyclegan_dataset(filename='summer2winter_yosemite', path='data'): """Load images from CycleGAN's database, see `this link <https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/>`__. Parameters ------------ filename : str The dataset you want, see `this link <https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/>`__. path : str The path that the data is downloaded to, defaults is `data/cyclegan` Examples --------- >>> im_train_A, im_train_B, im_test_A, im_test_B = load_cyclegan_dataset(filename='summer2winter_yosemite') """ path = os.path.join(path, 'cyclegan') url = 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/' if folder_exists(os.path.join(path, filename)) is False: logging.info("[*] {} is nonexistent in {}".format(filename, path)) maybe_download_and_extract(filename + '.zip', path, url, extract=True) del_file(os.path.join(path, filename + '.zip')) def load_image_from_folder(path): path_imgs = load_file_list(path=path, regx='\\.jpg', printable=False) return visualize.read_images(path_imgs, path=path, n_threads=10, printable=False) im_train_A = load_image_from_folder(os.path.join(path, filename, "trainA")) im_train_B = load_image_from_folder(os.path.join(path, filename, "trainB")) im_test_A = load_image_from_folder(os.path.join(path, filename, "testA")) im_test_B = load_image_from_folder(os.path.join(path, filename, "testB")) def if_2d_to_3d(images): # [h, w] --> [h, w, 3] for i, _v in enumerate(images): if len(images[i].shape) == 2: images[i] = images[i][:, :, np.newaxis] images[i] = np.tile(images[i], (1, 1, 3)) return images im_train_A = if_2d_to_3d(im_train_A) im_train_B = if_2d_to_3d(im_train_B) im_test_A = if_2d_to_3d(im_test_A) im_test_B = if_2d_to_3d(im_test_B) return im_train_A, im_train_B, im_test_A, im_test_B def download_file_from_google_drive(ID, destination): """Download file from Google Drive. See ``tl.files.load_celebA_dataset`` for example. Parameters -------------- ID : str The driver ID. destination : str The destination for save file. """ try: from tqdm import tqdm except ImportError as e: print(e) raise ImportError("Module tqdm not found. Please install tqdm via pip or other package managers.") try: import requests except ImportError as e: print(e) raise ImportError("Module requests not found. 
Please install requests via pip or other package managers.") def save_response_content(response, destination, chunk_size=32 * 1024): total_size = int(response.headers.get('content-length', 0)) with open(destination, "wb") as f: for chunk in tqdm(response.iter_content(chunk_size), total=total_size, unit='B', unit_scale=True, desc=destination): if chunk: # filter out keep-alive new chunks f.write(chunk) def get_confirm_token(response): for key, value in response.cookies.items(): if key.startswith('download_warning'): return value return None URL = "https://docs.google.com/uc?export=download" session = requests.Session() response = session.get(URL, params={'id': ID}, stream=True) token = get_confirm_token(response) if token: params = {'id': ID, 'confirm': token} response = session.get(URL, params=params, stream=True) save_response_content(response, destination) def load_celebA_dataset(path='data'): """Load CelebA dataset Return a list of image path. Parameters ----------- path : str The path that the data is downloaded to, defaults is ``data/celebA/``. """ data_dir = 'celebA' filename, drive_id = "img_align_celeba.zip", "0B7EVK8r0v71pZjFTYXZWM3FlRnM" save_path = os.path.join(path, filename) image_path = os.path.join(path, data_dir) if os.path.exists(image_path): logging.info('[*] {} already exists'.format(save_path)) else: exists_or_mkdir(path) download_file_from_google_drive(drive_id, save_path) zip_dir = '' with zipfile.ZipFile(save_path) as zf: zip_dir = zf.namelist()[0] zf.extractall(path) os.remove(save_path) os.rename(os.path.join(path, zip_dir), image_path) data_files = load_file_list(path=image_path, regx='\\.jpg', printable=False) for i, _v in enumerate(data_files): data_files[i] = os.path.join(image_path, data_files[i]) return data_files def load_voc_dataset(path='data', dataset='2012', contain_classes_in_person=False): """Pascal VOC 2007/2012 Dataset. It has 20 objects: aeroplane, bicycle, bird, boat, bottle, bus, car, cat, chair, cow, diningtable, dog, horse, motorbike, person, pottedplant, sheep, sofa, train, tvmonitor and additional 3 classes : head, hand, foot for person. Parameters ----------- path : str The path that the data is downloaded to, defaults is ``data/VOC``. dataset : str The VOC dataset version, `2012`, `2007`, `2007test` or `2012test`. We usually train model on `2007+2012` and test it on `2007test`. contain_classes_in_person : boolean Whether include head, hand and foot annotation, default is False. Returns --------- imgs_file_list : list of str Full paths of all images. imgs_semseg_file_list : list of str Full paths of all maps for semantic segmentation. Note that not all images have this map! imgs_insseg_file_list : list of str Full paths of all maps for instance segmentation. Note that not all images have this map! imgs_ann_file_list : list of str Full paths of all annotations for bounding box and object class, all images have this annotations. classes : list of str Classes in order. classes_in_person : list of str Classes in person. classes_dict : dictionary Class label to integer. n_objs_list : list of int Number of objects in all images in ``imgs_file_list`` in order. objs_info_list : list of str Darknet format for the annotation of all images in ``imgs_file_list`` in order. ``[class_id x_centre y_centre width height]`` in ratio format. 
objs_info_dicts : dictionary The annotation of all images in ``imgs_file_list``, ``{imgs_file_list : dictionary for annotation}``, format from `TensorFlow/Models/object-detection <https://github.com/tensorflow/models/blob/master/object_detection/create_pascal_tf_record.py>`__. Examples ---------- >>> imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list, >>> classes, classes_in_person, classes_dict, >>> n_objs_list, objs_info_list, objs_info_dicts = tl.files.load_voc_dataset(dataset="2012", contain_classes_in_person=False) >>> idx = 26 >>> print(classes) ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] >>> print(classes_dict) {'sheep': 16, 'horse': 12, 'bicycle': 1, 'bottle': 4, 'cow': 9, 'sofa': 17, 'car': 6, 'dog': 11, 'cat': 7, 'person': 14, 'train': 18, 'diningtable': 10, 'aeroplane': 0, 'bus': 5, 'pottedplant': 15, 'tvmonitor': 19, 'chair': 8, 'bird': 2, 'boat': 3, 'motorbike': 13} >>> print(imgs_file_list[idx]) data/VOC/VOC2012/JPEGImages/2007_000423.jpg >>> print(n_objs_list[idx]) 2 >>> print(imgs_ann_file_list[idx]) data/VOC/VOC2012/Annotations/2007_000423.xml >>> print(objs_info_list[idx]) 14 0.173 0.461333333333 0.142 0.496 14 0.828 0.542666666667 0.188 0.594666666667 >>> ann = tl.prepro.parse_darknet_ann_str_to_list(objs_info_list[idx]) >>> print(ann) [[14, 0.173, 0.461333333333, 0.142, 0.496], [14, 0.828, 0.542666666667, 0.188, 0.594666666667]] >>> c, b = tl.prepro.parse_darknet_ann_list_to_cls_box(ann) >>> print(c, b) [14, 14] [[0.173, 0.461333333333, 0.142, 0.496], [0.828, 0.542666666667, 0.188, 0.594666666667]] References ------------- - `Pascal VOC2012 Website <http://host.robots.ox.ac.uk/pascal/VOC/voc2012/#devkit>`__. - `Pascal VOC2007 Website <http://host.robots.ox.ac.uk/pascal/VOC/voc2007/>`__. """ import xml.etree.ElementTree as ET try: import lxml.etree as etree except ImportError as e: print(e) raise ImportError("Module lxml not found. Please install lxml via pip or other package managers.") path = os.path.join(path, 'VOC') def _recursive_parse_xml_to_dict(xml): """Recursively parses XML contents to python dict. We assume that `object` tags are the only ones that can appear multiple times at the same level of a tree. Args: xml: xml tree obtained by parsing XML file contents using lxml.etree Returns: Python dictionary holding XML contents. 
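
        Example (illustrative): an annotation such as
        ``<annotation><folder>VOC2012</folder><object>...</object><object>...</object></annotation>``
        is returned as ``{'annotation': {'folder': 'VOC2012', 'object': [{...}, {...}]}}``;
        repeated ``object`` tags are gathered into a list, while any other tag maps to a single
        nested dict (or to its text for leaf tags).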
""" if not xml: # if xml is not None: return {xml.tag: xml.text} result = {} for child in xml: child_result = _recursive_parse_xml_to_dict(child) if child.tag != 'object': result[child.tag] = child_result[child.tag] else: if child.tag not in result: result[child.tag] = [] result[child.tag].append(child_result[child.tag]) return {xml.tag: result} if dataset == "2012": url = "http://host.robots.ox.ac.uk/pascal/VOC/voc2012/" tar_filename = "VOCtrainval_11-May-2012.tar" extracted_filename = "VOC2012" # "VOCdevkit/VOC2012" logging.info(" [============= VOC 2012 =============]") elif dataset == "2012test": extracted_filename = "VOC2012test" # "VOCdevkit/VOC2012" logging.info(" [============= VOC 2012 Test Set =============]") logging.info( " \nAuthor: 2012test only have person annotation, so 2007test is highly recommended for testing !\n" ) time.sleep(3) if os.path.isdir(os.path.join(path, extracted_filename)) is False: logging.info("For VOC 2012 Test data - online registration required") logging.info( " Please download VOC2012test.tar from: \n register: http://host.robots.ox.ac.uk:8080 \n voc2012 : http://host.robots.ox.ac.uk:8080/eval/challenges/voc2012/ \ndownload: http://host.robots.ox.ac.uk:8080/eval/downloads/VOC2012test.tar" ) logging.info(" unzip VOC2012test.tar,rename the folder to VOC2012test and put it into %s" % path) exit() # # http://host.robots.ox.ac.uk:8080/eval/downloads/VOC2012test.tar # url = "http://host.robots.ox.ac.uk:8080/eval/downloads/" # tar_filename = "VOC2012test.tar" elif dataset == "2007": url = "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/" tar_filename = "VOCtrainval_06-Nov-2007.tar" extracted_filename = "VOC2007" logging.info(" [============= VOC 2007 =============]") elif dataset == "2007test": # http://host.robots.ox.ac.uk/pascal/VOC/voc2007/index.html#testdata # http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar url = "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/" tar_filename = "VOCtest_06-Nov-2007.tar" extracted_filename = "VOC2007test" logging.info(" [============= VOC 2007 Test Set =============]") else: raise Exception("Please set the dataset aug to 2012, 2012test or 2007.") # download dataset if dataset != "2012test": _platform = sys.platform if folder_exists(os.path.join(path, extracted_filename)) is False: logging.info("[VOC] {} is nonexistent in {}".format(extracted_filename, path)) maybe_download_and_extract(tar_filename, path, url, extract=True) del_file(os.path.join(path, tar_filename)) if dataset == "2012": if _platform == "win32": os.system("mv {}\VOCdevkit\VOC2012 {}\VOC2012".format(path, path)) else: os.system("mv {}/VOCdevkit/VOC2012 {}/VOC2012".format(path, path)) elif dataset == "2007": if _platform == "win32": os.system("mv {}\VOCdevkit\VOC2007 {}\VOC2007".format(path, path)) else: os.system("mv {}/VOCdevkit/VOC2007 {}/VOC2007".format(path, path)) elif dataset == "2007test": if _platform == "win32": os.system("mv {}\VOCdevkit\VOC2007 {}\VOC2007test".format(path, path)) else: os.system("mv {}/VOCdevkit/VOC2007 {}/VOC2007test".format(path, path)) del_folder(os.path.join(path, 'VOCdevkit')) # object classes(labels) NOTE: YOU CAN CUSTOMIZE THIS LIST classes = [ "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor" ] if contain_classes_in_person: classes_in_person = ["head", "hand", "foot"] else: classes_in_person = [] classes += classes_in_person # use extra 3 classes for person 
classes_dict = utils.list_string_to_dict(classes) logging.info("[VOC] object classes {}".format(classes_dict)) # 1. image path list # folder_imgs = path+"/"+extracted_filename+"/JPEGImages/" folder_imgs = os.path.join(path, extracted_filename, "JPEGImages") imgs_file_list = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False) logging.info("[VOC] {} images found".format(len(imgs_file_list))) imgs_file_list.sort( key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2]) ) # 2007_000027.jpg --> 2007000027 imgs_file_list = [os.path.join(folder_imgs, s) for s in imgs_file_list] # logging.info('IM',imgs_file_list[0::3333], imgs_file_list[-1]) if dataset != "2012test": # ======== 2. semantic segmentation maps path list # folder_semseg = path+"/"+extracted_filename+"/SegmentationClass/" folder_semseg = os.path.join(path, extracted_filename, "SegmentationClass") imgs_semseg_file_list = load_file_list(path=folder_semseg, regx='\\.png', printable=False) logging.info("[VOC] {} maps for semantic segmentation found".format(len(imgs_semseg_file_list))) imgs_semseg_file_list.sort( key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2]) ) # 2007_000032.png --> 2007000032 imgs_semseg_file_list = [os.path.join(folder_semseg, s) for s in imgs_semseg_file_list] # logging.info('Semantic Seg IM',imgs_semseg_file_list[0::333], imgs_semseg_file_list[-1]) # ======== 3. instance segmentation maps path list # folder_insseg = path+"/"+extracted_filename+"/SegmentationObject/" folder_insseg = os.path.join(path, extracted_filename, "SegmentationObject") imgs_insseg_file_list = load_file_list(path=folder_insseg, regx='\\.png', printable=False) logging.info("[VOC] {} maps for instance segmentation found".format(len(imgs_semseg_file_list))) imgs_insseg_file_list.sort( key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2]) ) # 2007_000032.png --> 2007000032 imgs_insseg_file_list = [os.path.join(folder_insseg, s) for s in imgs_insseg_file_list] # logging.info('Instance Seg IM',imgs_insseg_file_list[0::333], imgs_insseg_file_list[-1]) else: imgs_semseg_file_list = [] imgs_insseg_file_list = [] # 4. annotations for bounding box and object class # folder_ann = path+"/"+extracted_filename+"/Annotations/" folder_ann = os.path.join(path, extracted_filename, "Annotations") imgs_ann_file_list = load_file_list(path=folder_ann, regx='\\.xml', printable=False) logging.info( "[VOC] {} XML annotation files for bounding box and object class found".format(len(imgs_ann_file_list)) ) imgs_ann_file_list.sort( key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2]) ) # 2007_000027.xml --> 2007000027 imgs_ann_file_list = [os.path.join(folder_ann, s) for s in imgs_ann_file_list] # logging.info('ANN',imgs_ann_file_list[0::3333], imgs_ann_file_list[-1]) if dataset == "2012test": # remove unused images in JPEG folder imgs_file_list_new = [] for ann in imgs_ann_file_list: ann = os.path.split(ann)[-1].split('.')[0] for im in imgs_file_list: if ann in im: imgs_file_list_new.append(im) break imgs_file_list = imgs_file_list_new logging.info("[VOC] keep %d images" % len(imgs_file_list_new)) # parse XML annotations def convert(size, box): dw = 1. / size[0] dh = 1. 
/ size[1] x = (box[0] + box[1]) / 2.0 y = (box[2] + box[3]) / 2.0 w = box[1] - box[0] h = box[3] - box[2] x = x * dw w = w * dw y = y * dh h = h * dh return x, y, w, h def convert_annotation(file_name): """Given VOC2012 XML Annotations, returns number of objects and info.""" in_file = open(file_name) out_file = "" tree = ET.parse(in_file) root = tree.getroot() size = root.find('size') w = int(size.find('width').text) h = int(size.find('height').text) n_objs = 0 for obj in root.iter('object'): if dataset != "2012test": difficult = obj.find('difficult').text cls = obj.find('name').text if cls not in classes or int(difficult) == 1: continue else: cls = obj.find('name').text if cls not in classes: continue cls_id = classes.index(cls) xmlbox = obj.find('bndbox') b = ( float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text) ) bb = convert((w, h), b) out_file += str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n' n_objs += 1 if cls in "person": for part in obj.iter('part'): cls = part.find('name').text if cls not in classes_in_person: continue cls_id = classes.index(cls) xmlbox = part.find('bndbox') b = ( float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text) ) bb = convert((w, h), b) # out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n') out_file += str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n' n_objs += 1 in_file.close() return n_objs, out_file logging.info("[VOC] Parsing xml annotations files") n_objs_list = [] objs_info_list = [] # Darknet Format list of string objs_info_dicts = {} for idx, ann_file in enumerate(imgs_ann_file_list): n_objs, objs_info = convert_annotation(ann_file) n_objs_list.append(n_objs) objs_info_list.append(objs_info) with tf.io.gfile.GFile(ann_file, 'r') as fid: xml_str = fid.read() xml = etree.fromstring(xml_str) data = _recursive_parse_xml_to_dict(xml)['annotation'] objs_info_dicts.update({imgs_file_list[idx]: data}) return imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list, classes, classes_in_person, classes_dict, n_objs_list, objs_info_list, objs_info_dicts def load_mpii_pose_dataset(path='data', is_16_pos_only=False): """Load MPII Human Pose Dataset. Parameters ----------- path : str The path that the data is downloaded to. is_16_pos_only : boolean If True, only return the peoples contain 16 pose keypoints. (Usually be used for single person pose estimation) Returns ---------- img_train_list : list of str The image directories of training data. ann_train_list : list of dict The annotations of training data. img_test_list : list of str The image directories of testing data. ann_test_list : list of dict The annotations of testing data. Examples -------- >>> import pprint >>> import tensorlayer as tl >>> img_train_list, ann_train_list, img_test_list, ann_test_list = tl.files.load_mpii_pose_dataset() >>> image = tl.vis.read_image(img_train_list[0]) >>> tl.vis.draw_mpii_pose_to_image(image, ann_train_list[0], 'image.png') >>> pprint.pprint(ann_train_list[0]) References ----------- - `MPII Human Pose Dataset. CVPR 14 <http://human-pose.mpi-inf.mpg.de>`__ - `MPII Human Pose Models. 
CVPR 16 <http://pose.mpi-inf.mpg.de>`__ - `MPII Human Shape, Poselet Conditioned Pictorial Structures and etc <http://pose.mpi-inf.mpg.de/#related>`__ - `MPII Keyponts and ID <http://human-pose.mpi-inf.mpg.de/#download>`__ """ path = os.path.join(path, 'mpii_human_pose') logging.info("Load or Download MPII Human Pose > {}".format(path)) # annotation url = "http://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/" tar_filename = "mpii_human_pose_v1_u12_2.zip" extracted_filename = "mpii_human_pose_v1_u12_2" if folder_exists(os.path.join(path, extracted_filename)) is False: logging.info("[MPII] (annotation) {} is nonexistent in {}".format(extracted_filename, path)) maybe_download_and_extract(tar_filename, path, url, extract=True) del_file(os.path.join(path, tar_filename)) # images url = "http://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/" tar_filename = "mpii_human_pose_v1.tar.gz" extracted_filename2 = "images" if folder_exists(os.path.join(path, extracted_filename2)) is False: logging.info("[MPII] (images) {} is nonexistent in {}".format(extracted_filename, path)) maybe_download_and_extract(tar_filename, path, url, extract=True) del_file(os.path.join(path, tar_filename)) # parse annotation, format see http://human-pose.mpi-inf.mpg.de/#download logging.info("reading annotations from mat file ...") # mat = sio.loadmat(os.path.join(path, extracted_filename, "mpii_human_pose_v1_u12_1.mat")) # def fix_wrong_joints(joint): # https://github.com/mitmul/deeppose/blob/master/datasets/mpii_dataset.py # if '12' in joint and '13' in joint and '2' in joint and '3' in joint: # if ((joint['12'][0] < joint['13'][0]) and # (joint['3'][0] < joint['2'][0])): # joint['2'], joint['3'] = joint['3'], joint['2'] # if ((joint['12'][0] > joint['13'][0]) and # (joint['3'][0] > joint['2'][0])): # joint['2'], joint['3'] = joint['3'], joint['2'] # return joint ann_train_list = [] ann_test_list = [] img_train_list = [] img_test_list = [] def save_joints(): # joint_data_fn = os.path.join(path, 'data.json') # fp = open(joint_data_fn, 'w') mat = sio.loadmat(os.path.join(path, extracted_filename, "mpii_human_pose_v1_u12_1.mat")) for _, (anno, train_flag) in enumerate( # all images zip(mat['RELEASE']['annolist'][0, 0][0], mat['RELEASE']['img_train'][0, 0][0])): img_fn = anno['image']['name'][0, 0][0] train_flag = int(train_flag) # print(i, img_fn, train_flag) # DEBUG print all images if train_flag: img_train_list.append(img_fn) ann_train_list.append([]) else: img_test_list.append(img_fn) ann_test_list.append([]) head_rect = [] if 'x1' in str(anno['annorect'].dtype): head_rect = zip( [x1[0, 0] for x1 in anno['annorect']['x1'][0]], [y1[0, 0] for y1 in anno['annorect']['y1'][0]], [x2[0, 0] for x2 in anno['annorect']['x2'][0]], [y2[0, 0] for y2 in anno['annorect']['y2'][0]] ) else: head_rect = [] # TODO if 'annopoints' in str(anno['annorect'].dtype): annopoints = anno['annorect']['annopoints'][0] head_x1s = anno['annorect']['x1'][0] head_y1s = anno['annorect']['y1'][0] head_x2s = anno['annorect']['x2'][0] head_y2s = anno['annorect']['y2'][0] for annopoint, head_x1, head_y1, head_x2, head_y2 in zip(annopoints, head_x1s, head_y1s, head_x2s, head_y2s): # if annopoint != []: # if len(annopoint) != 0: if annopoint.size: head_rect = [ float(head_x1[0, 0]), float(head_y1[0, 0]), float(head_x2[0, 0]), float(head_y2[0, 0]) ] # joint coordinates annopoint = annopoint['point'][0, 0] j_id = [str(j_i[0, 0]) for j_i in annopoint['id'][0]] x = [x[0, 0] for x in annopoint['x'][0]] y = [y[0, 0] for y in annopoint['y'][0]] joint_pos = {} for _j_id, (_x, 
_y) in zip(j_id, zip(x, y)):
                            joint_pos[int(_j_id)] = [float(_x), float(_y)]
                        # joint_pos = fix_wrong_joints(joint_pos)

                        # visibility list
                        if 'is_visible' in str(annopoint.dtype):
                            vis = [v[0] if v.size > 0 else [0] for v in annopoint['is_visible'][0]]
                            vis = dict([(k, int(v[0])) if len(v) > 0 else v for k, v in zip(j_id, vis)])
                        else:
                            vis = None

                        # keep only people with all 16 key points when is_16_pos_only is True, otherwise keep everyone
                        if (not is_16_pos_only) or (len(joint_pos) == 16):
                            data = {
                                'filename': img_fn,
                                'train': train_flag,
                                'head_rect': head_rect,
                                'is_visible': vis,
                                'joint_pos': joint_pos
                            }
                            # print(json.dumps(data), file=fp)  # py3
                            if train_flag:
                                ann_train_list[-1].append(data)
                            else:
                                ann_test_list[-1].append(data)

    # def write_line(datum, fp):
    #     joints = sorted([[int(k), v] for k, v in datum['joint_pos'].items()])
    #     joints = np.array([j for i, j in joints]).flatten()
    #
    #     out = [datum['filename']]
    #     out.extend(joints)
    #     out = [str(o) for o in out]
    #     out = ','.join(out)
    #
    #     print(out, file=fp)

    # def split_train_test():
    #     # fp_test = open('data/mpii/test_joints.csv', 'w')
    #     fp_test = open(os.path.join(path, 'test_joints.csv'), 'w')
    #     # fp_train = open('data/mpii/train_joints.csv', 'w')
    #     fp_train = open(os.path.join(path, 'train_joints.csv'), 'w')
    #     # all_data = open('data/mpii/data.json').readlines()
    #     all_data = open(os.path.join(path, 'data.json')).readlines()
    #     N = len(all_data)
    #     N_test = int(N * 0.1)
    #     N_train = N - N_test
    #
    #     print('N:{}'.format(N))
    #     print('N_train:{}'.format(N_train))
    #     print('N_test:{}'.format(N_test))
    #
    #     np.random.seed(1701)
    #     perm = np.random.permutation(N)
    #     test_indices = perm[:N_test]
    #     train_indices = perm[N_test:]
    #
    #     print('train_indices:{}'.format(len(train_indices)))
    #     print('test_indices:{}'.format(len(test_indices)))
    #
    #     for i in train_indices:
    #         datum = json.loads(all_data[i].strip())
    #         write_line(datum, fp_train)
    #
    #     for i in test_indices:
    #         datum = json.loads(all_data[i].strip())
    #         write_line(datum, fp_test)

    save_joints()
    # split_train_test()

    # read images dir
    logging.info("reading images list ...")
    img_dir = os.path.join(path, extracted_filename2)
    _img_list = load_file_list(path=os.path.join(path, extracted_filename2), regx='\\.jpg', printable=False)
    # ann_list = json.load(open(os.path.join(path, 'data.json')))

    # drop training/testing images (and their annotations) that are missing on disk;
    # build new lists instead of deleting from the lists while iterating over them
    _img_set = set(_img_list)
    kept_imgs, kept_anns = [], []
    for im, ann in zip(img_train_list, ann_train_list):
        if im in _img_set:
            kept_imgs.append(im)
            kept_anns.append(ann)
        else:
            print('missing training image {} in {} (remove from img(ann)_train_list)'.format(im, img_dir))
    img_train_list, ann_train_list = kept_imgs, kept_anns

    kept_imgs, kept_anns = [], []
    for im, ann in zip(img_test_list, ann_test_list):
        if im in _img_set:
            kept_imgs.append(im)
            kept_anns.append(ann)
        else:
            print('missing testing image {} in {} (remove from img(ann)_test_list)'.format(im, img_dir))
    img_test_list, ann_test_list = kept_imgs, kept_anns

    # check annotation and images
    n_train_images = len(img_train_list)
    n_test_images = len(img_test_list)
    n_images = n_train_images + n_test_images
    logging.info("n_images: {} n_train_images: {} n_test_images: {}".format(n_images, n_train_images, n_test_images))
    n_train_ann = len(ann_train_list)
    n_test_ann = len(ann_test_list)
    n_ann = n_train_ann + n_test_ann
    logging.info("n_ann: {} n_train_ann: {} n_test_ann: {}".format(n_ann, n_train_ann, n_test_ann))
    n_train_people = len(sum(ann_train_list, []))
    n_test_people = len(sum(ann_test_list, []))
    n_people = n_train_people + n_test_people
    logging.info("n_people: {} n_train_people: {} n_test_people: {}".format(n_people, n_train_people, n_test_people))
    # add path to all image file name
    for i, value in enumerate(img_train_list):
img_train_list[i] = os.path.join(img_dir, value) for i, value in enumerate(img_test_list): img_test_list[i] = os.path.join(img_dir, value) return img_train_list, ann_train_list, img_test_list, ann_test_list def save_npz(save_list=None, name='model.npz'): """Input parameters and the file name, save parameters into .npz file. Use tl.utils.load_npz() to restore. Parameters ---------- save_list : list of tensor A list of parameters (tensor) to be saved. name : str The name of the `.npz` file. Examples -------- Save model to npz >>> tl.files.save_npz(network.all_weights, name='model.npz') Load model from npz (Method 1) >>> load_params = tl.files.load_npz(name='model.npz') >>> tl.files.assign_weights(load_params, network) Load model from npz (Method 2) >>> tl.files.load_and_assign_npz(name='model.npz', network=network) References ---------- `Saving dictionary using numpy <http://stackoverflow.com/questions/22315595/saving-dictionary-of-header-information-using-numpy-savez>`__ """ logging.info("[*] Saving TL weights into %s" % name) if save_list is None: save_list = [] save_list_var = tf_variables_to_numpy(save_list) np.savez(name, params=save_list_var) save_list_var = None del save_list_var logging.info("[*] Saved") def load_npz(path='', name='model.npz'): """Load the parameters of a Model saved by tl.files.save_npz(). Parameters ---------- path : str Folder path to `.npz` file. name : str The name of the `.npz` file. Returns -------- list of array A list of parameters in order. Examples -------- - See ``tl.files.save_npz`` References ---------- - `Saving dictionary using numpy <http://stackoverflow.com/questions/22315595/saving-dictionary-of-header-information-using-numpy-savez>`__ """ d = np.load(os.path.join(path, name), allow_pickle=True) return d['params'] def assign_params(**kwargs): raise Exception("please change assign_params --> assign_weights") def assign_weights(weights, network): """Assign the given parameters to the TensorLayer network. Parameters ---------- weights : list of array A list of model weights (array) in order. network : :class:`Layer` The network to be assigned. Returns -------- 1) list of operations if in graph mode A list of tf ops in order that assign weights. Support sess.run(ops) manually. 2) list of tf variables if in eager mode A list of tf variables (assigned weights) in order. Examples -------- References ---------- - `Assign value to a TensorFlow variable <http://stackoverflow.com/questions/34220532/how-to-assign-value-to-a-tensorflow-variable>`__ """ ops = [] for idx, param in enumerate(weights): ops.append(network.all_weights[idx].assign(param)) return ops def load_and_assign_npz(name=None, network=None): """Load model from npz and assign to a network. Parameters ------------- name : str The name of the `.npz` file. network : :class:`Model` The network to be assigned. Examples -------- - See ``tl.files.save_npz`` """ if network is None: raise ValueError("network is None.") if not os.path.exists(name): logging.error("file {} doesn't exist.".format(name)) return False else: weights = load_npz(name=name) assign_weights(weights, network) logging.info("[*] Load {} SUCCESS!".format(name)) def save_npz_dict(save_list=None, name='model.npz'): """Input parameters and the file name, save parameters as a dictionary into .npz file. Use ``tl.files.load_and_assign_npz_dict()`` to restore. Parameters ---------- save_list : list of parameters A list of parameters (tensor) to be saved. name : str The name of the `.npz` file. 
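
    Examples
    --------
    A minimal save/restore round trip (assuming ``network`` is a built TensorLayer model):

    >>> tl.files.save_npz_dict(network.all_weights, name='model.npz')
    >>> tl.files.load_and_assign_npz_dict(name='model.npz', network=network)
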
""" if save_list is None: save_list = [] save_list_names = [tensor.name for tensor in save_list] save_list_var = tf_variables_to_numpy(save_list) save_var_dict = {save_list_names[idx]: val for idx, val in enumerate(save_list_var)} np.savez(name, **save_var_dict) save_list_var = None save_var_dict = None del save_list_var del save_var_dict logging.info("[*] Model saved in npz_dict %s" % name) def load_and_assign_npz_dict(name='model.npz', network=None, skip=False): """Restore the parameters saved by ``tl.files.save_npz_dict()``. Parameters ------------- name : str The name of the `.npz` file. network : :class:`Model` The network to be assigned. skip : boolean If 'skip' == True, loaded weights whose name is not found in network's weights will be skipped. If 'skip' is False, error will be raised when mismatch is found. Default False. """ if not os.path.exists(name): logging.error("file {} doesn't exist.".format(name)) return False weights = np.load(name, allow_pickle=True) if len(weights.keys()) != len(set(weights.keys())): raise Exception("Duplication in model npz_dict %s" % name) net_weights_name = [w.name for w in network.all_weights] for key in weights.keys(): if key not in net_weights_name: if skip: logging.warning("Weights named '%s' not found in network. Skip it." % key) else: raise RuntimeError( "Weights named '%s' not found in network. Hint: set argument skip=Ture " "if you want to skip redundant or mismatch weights." % key ) else: assign_tf_variable(network.all_weights[net_weights_name.index(key)], weights[key]) logging.info("[*] Model restored from npz_dict %s" % name) def save_ckpt(mode_name='model.ckpt', save_dir='checkpoint', var_list=None, global_step=None, printable=False): """Save parameters into `ckpt` file. Parameters ------------ mode_name : str The name of the model, default is ``model.ckpt``. save_dir : str The path / file directory to the `ckpt`, default is ``checkpoint``. var_list : list of tensor The parameters / variables (tensor) to be saved. If empty, save all global variables (default). global_step : int or None Step number. printable : boolean Whether to print all parameters information. See Also -------- load_ckpt """ if var_list is None: if sess is None: # FIXME: not sure whether global variables can be accessed in eager mode raise ValueError( "If var_list is None, sess must be specified. " "In eager mode, can not access global variables easily. " ) var_list = [] ckpt_file = os.path.join(save_dir, mode_name) if var_list == []: var_list = tf.global_variables() logging.info("[*] save %s n_weights: %d" % (ckpt_file, len(var_list))) if printable: for idx, v in enumerate(var_list): logging.info(" param {:3}: {:15} {}".format(idx, v.name, str(v.get_shape()))) if sess: # graph mode saver = tf.train.Saver(var_list) saver.save(sess, ckpt_file, global_step=global_step) else: # eager mode # saver = tfes.Saver(var_list) # saver.save(ckpt_file, global_step=global_step) # TODO: tf2.0 not stable, cannot import tensorflow.contrib.eager.python.saver pass def load_ckpt(sess=None, mode_name='model.ckpt', save_dir='checkpoint', var_list=None, is_latest=True, printable=False): """Load parameters from `ckpt` file. Parameters ------------ sess : Session TensorFlow Session. mode_name : str The name of the model, default is ``model.ckpt``. save_dir : str The path / file directory to the `ckpt`, default is ``checkpoint``. var_list : list of tensor The parameters / variables (tensor) to be saved. If empty, save all global variables (default). 
is_latest : boolean Whether to load the latest `ckpt`, if False, load the `ckpt` with the name of ```mode_name``. printable : boolean Whether to print all parameters information. Examples ---------- - Save all global parameters. >>> tl.files.save_ckpt(sess=sess, mode_name='model.ckpt', save_dir='model', printable=True) - Save specific parameters. >>> tl.files.save_ckpt(sess=sess, mode_name='model.ckpt', var_list=net.all_params, save_dir='model', printable=True) - Load latest ckpt. >>> tl.files.load_ckpt(sess=sess, var_list=net.all_params, save_dir='model', printable=True) - Load specific ckpt. >>> tl.files.load_ckpt(sess=sess, mode_name='model.ckpt', var_list=net.all_params, save_dir='model', is_latest=False, printable=True) """ # if sess is None: # raise ValueError("session is None.") if var_list is None: if sess is None: # FIXME: not sure whether global variables can be accessed in eager mode raise ValueError( "If var_list is None, sess must be specified. " "In eager mode, can not access global variables easily. " ) var_list = [] if is_latest: ckpt_file = tf.train.latest_checkpoint(save_dir) else: ckpt_file = os.path.join(save_dir, mode_name) if not var_list: var_list = tf.global_variables() logging.info("[*] load %s n_weights: %d" % (ckpt_file, len(var_list))) if printable: for idx, v in enumerate(var_list): logging.info(" weights {:3}: {:15} {}".format(idx, v.name, str(v.get_shape()))) try: if sess: # graph mode saver = tf.train.Saver(var_list) saver.restore(sess, ckpt_file) else: # eager mode # saver = tfes.Saver(var_list) # saver.restore(ckpt_file) # TODO: tf2.0 not stable, cannot import tensorflow.contrib.eager.python.saver pass except Exception as e: logging.info(e) logging.info("[*] load ckpt fail ...") def save_any_to_npy(save_dict=None, name='file.npy'): """Save variables to `.npy` file. Parameters ------------ save_dict : directory The variables to be saved. name : str File name. Examples --------- >>> tl.files.save_any_to_npy(save_dict={'data': ['a','b']}, name='test.npy') >>> data = tl.files.load_npy_to_any(name='test.npy') >>> print(data) {'data': ['a','b']} """ if save_dict is None: save_dict = {} np.save(name, save_dict) def load_npy_to_any(path='', name='file.npy'): """Load `.npy` file. Parameters ------------ path : str Path to the file (optional). name : str File name. Examples --------- - see tl.files.save_any_to_npy() """ file_path = os.path.join(path, name) try: return np.load(file_path, allow_pickle=True).item() except Exception: return np.load(file_path, allow_pickle=True) raise Exception("[!] Fail to load %s" % file_path) def file_exists(filepath): """Check whether a file exists by given file path.""" return os.path.isfile(filepath) def folder_exists(folderpath): """Check whether a folder exists by given folder path.""" return os.path.isdir(folderpath) def del_file(filepath): """Delete a file by given file path.""" os.remove(filepath) def del_folder(folderpath): """Delete a folder by given folder path.""" shutil.rmtree(folderpath) def read_file(filepath): """Read a file and return a string. Examples --------- >>> data = tl.files.read_file('data.txt') """ with open(filepath, 'r') as afile: return afile.read() def load_file_list(path=None, regx='\.jpg', printable=True, keep_prefix=False): r"""Return a file list in a folder by given a path and regular expression. Parameters ---------- path : str or None A folder path, if `None`, use the current directory. regx : str The regx of file name. printable : boolean Whether to print the files infomation. 
    keep_prefix : boolean
        Whether to keep path in the file name.

    Examples
    ----------
    >>> file_list = tl.files.load_file_list(path=None, regx='w1pre_[0-9]+\.(npz)')

    """
    if path is None:
        path = os.getcwd()
    file_list = os.listdir(path)
    return_list = []
    for _, f in enumerate(file_list):
        if re.search(regx, f):
            return_list.append(f)
    # return_list.sort()
    if keep_prefix:
        for i, f in enumerate(return_list):
            return_list[i] = os.path.join(path, f)

    if printable:
        logging.info('Match file list = %s' % return_list)
        logging.info('Number of files = %d' % len(return_list))
    return return_list


def load_folder_list(path=""):
    """Return a folder list in a folder by given a folder path.

    Parameters
    ----------
    path : str
        A folder path.

    """
    return [os.path.join(path, o) for o in os.listdir(path) if os.path.isdir(os.path.join(path, o))]


def exists_or_mkdir(path, verbose=True):
    """Check a folder by given name, if not exist, create the folder and return False,
    if directory exists, return True.

    Parameters
    ----------
    path : str
        A folder path.
    verbose : boolean
        If True (default), prints results.

    Returns
    --------
    boolean
        True if folder already exist, otherwise, returns False and create the folder.

    Examples
    --------
    >>> tl.files.exists_or_mkdir("checkpoints/train")

    """
    if not os.path.exists(path):
        if verbose:
            logging.info("[*] creates %s ..." % path)
        os.makedirs(path)
        return False
    else:
        if verbose:
            logging.info("[!] %s exists ..." % path)
        return True


def maybe_download_and_extract(filename, working_directory, url_source, extract=False, expected_bytes=None):
    """Checks if the file exists in working_directory, otherwise tries to download it,
    and optionally also tries to extract the file if the format is ".zip" or ".tar".

    Parameters
    -----------
    filename : str
        The name of the (to be) downloaded file.
    working_directory : str
        A folder path to search for the file in and download the file to.
    url_source : str
        The URL to download the file from.
    extract : boolean
        If True, tries to uncompress the downloaded file if it is a ".tar.gz/.tar.bz2" or ".zip" file, default is False.
    expected_bytes : int or None
        If set, tries to verify that the downloaded file is of the specified size, otherwise raises an Exception;
        the default is None, which corresponds to no check being performed.

    Returns
    ----------
    str
        File path of the downloaded (uncompressed) file.

    Examples
    --------
    >>> down_file = tl.files.maybe_download_and_extract(filename='train-images-idx3-ubyte.gz',
    ...                                            working_directory='data/',
    ...                                            url_source='http://yann.lecun.com/exdb/mnist/')
    >>> tl.files.maybe_download_and_extract(filename='ADEChallengeData2016.zip',
    ...                                             working_directory='data/',
    ...                                             url_source='http://sceneparsing.csail.mit.edu/data/',
    ...                                             extract=True)

    """
    # We first define a download function, supporting both Python 2 and 3.
    def _download(filename, working_directory, url_source):

        progress_bar = progressbar.ProgressBar()

        def _dlProgress(count, blockSize, totalSize, pbar=progress_bar):
            if (totalSize != 0):

                if not pbar.max_value:
                    totalBlocks = math.ceil(float(totalSize) / float(blockSize))
                    pbar.max_value = int(totalBlocks)

                pbar.update(count, force=True)

        filepath = os.path.join(working_directory, filename)

        logging.info('Downloading %s...\n' % filename)

        urlretrieve(url_source + filename, filepath, reporthook=_dlProgress)

    exists_or_mkdir(working_directory, verbose=False)
    filepath = os.path.join(working_directory, filename)

    if not os.path.exists(filepath):

        _download(filename, working_directory, url_source)
        statinfo = os.stat(filepath)
        logging.info('Successfully downloaded %s %s bytes.'
% (filename, statinfo.st_size)) # , 'bytes.') if (not (expected_bytes is None) and (expected_bytes != statinfo.st_size)): raise Exception('Failed to verify ' + filename + '. Can you get to it with a browser?') if (extract): if tarfile.is_tarfile(filepath): logging.info('Trying to extract tar file') tarfile.open(filepath, 'r').extractall(working_directory) logging.info('... Success!') elif zipfile.is_zipfile(filepath): logging.info('Trying to extract zip file') with zipfile.ZipFile(filepath) as zf: zf.extractall(working_directory) logging.info('... Success!') else: logging.info("Unknown compression_format only .tar.gz/.tar.bz2/.tar and .zip supported") return filepath def natural_keys(text): """Sort list of string with number in human order. Examples ---------- >>> l = ['im1.jpg', 'im31.jpg', 'im11.jpg', 'im21.jpg', 'im03.jpg', 'im05.jpg'] >>> l.sort(key=tl.files.natural_keys) ['im1.jpg', 'im03.jpg', 'im05', 'im11.jpg', 'im21.jpg', 'im31.jpg'] >>> l.sort() # that is what we dont want ['im03.jpg', 'im05', 'im1.jpg', 'im11.jpg', 'im21.jpg', 'im31.jpg'] References ---------- - `link <http://nedbatchelder.com/blog/200712/human_sorting.html>`__ """ # - alist.sort(key=natural_keys) sorts in human order # http://nedbatchelder.com/blog/200712/human_sorting.html # (See Toothy's implementation in the comments) def atoi(text): return int(text) if text.isdigit() else text return [atoi(c) for c in re.split('(\d+)', text)] # Visualizing npz files def npz_to_W_pdf(path=None, regx='w1pre_[0-9]+\.(npz)'): r"""Convert the first weight matrix of `.npz` file to `.pdf` by using `tl.visualize.W()`. Parameters ---------- path : str A folder path to `npz` files. regx : str Regx for the file name. Examples --------- Convert the first weight matrix of w1_pre...npz file to w1_pre...pdf. >>> tl.files.npz_to_W_pdf(path='/Users/.../npz_file/', regx='w1pre_[0-9]+\.(npz)') """ file_list = load_file_list(path=path, regx=regx) for f in file_list: W = load_npz(path, f)[0] logging.info("%s --> %s" % (f, f.split('.')[0] + '.pdf')) visualize.draw_weights(W, second=10, saveable=True, name=f.split('.')[0], fig_idx=2012) def tf_variables_to_numpy(variables): """Convert TF tensor or a list of tensors into a list of numpy array""" if not isinstance(variables, list): var_list = [variables] else: var_list = variables results = [v.numpy() for v in var_list] return results def assign_tf_variable(variable, value): """Assign value to a TF variable""" variable.assign(value) def _save_weights_to_hdf5_group(f, layers): """ Save layer/model weights into hdf5 group recursively. Parameters ---------- f: hdf5 group A hdf5 group created by h5py.File() or create_group(). layers: list A list of layers to save weights. 
""" f.attrs['layer_names'] = [layer.name.encode('utf8') for layer in layers] for layer in layers: g = f.create_group(layer.name) if isinstance(layer, tl.models.Model): _save_weights_to_hdf5_group(g, layer.all_layers) elif isinstance(layer, tl.layers.ModelLayer): _save_weights_to_hdf5_group(g, layer.model.all_layers) elif isinstance(layer, tl.layers.LayerList): _save_weights_to_hdf5_group(g, layer.layers) elif isinstance(layer, tl.layers.Layer): if layer.all_weights is not None: weight_values = tf_variables_to_numpy(layer.all_weights) weight_names = [w.name.encode('utf8') for w in layer.all_weights] else: weight_values = [] weight_names = [] g.attrs['weight_names'] = weight_names for name, val in zip(weight_names, weight_values): val_dataset = g.create_dataset(name, val.shape, dtype=val.dtype) if not val.shape: # scalar val_dataset[()] = val else: val_dataset[:] = val else: raise Exception("Only layer or model can be saved into hdf5.") def _load_weights_from_hdf5_group_in_order(f, layers): """ Load layer weights from a hdf5 group sequentially. Parameters ---------- f: hdf5 group A hdf5 group created by h5py.File() or create_group(). layers: list A list of layers to load weights. """ layer_names = [n.decode('utf8') for n in f.attrs["layer_names"]] for idx, name in enumerate(layer_names): g = f[name] layer = layers[idx] if isinstance(layer, tl.models.Model): _load_weights_from_hdf5_group_in_order(g, layer.all_layers) elif isinstance(layer, tl.layers.ModelLayer): _load_weights_from_hdf5_group_in_order(g, layer.model.all_layers) elif isinstance(layer, tl.layers.LayerList): _load_weights_from_hdf5_group_in_order(g, layer.layers) elif isinstance(layer, tl.layers.Layer): weight_names = [n.decode('utf8') for n in g.attrs['weight_names']] for iid, w_name in enumerate(weight_names): assign_tf_variable(layer.all_weights[iid], np.asarray(g[w_name])) else: raise Exception("Only layer or model can be saved into hdf5.") if idx == len(layers) - 1: break def _load_weights_from_hdf5_group(f, layers, skip=False): """ Load layer weights from a hdf5 group by layer name. Parameters ---------- f: hdf5 group A hdf5 group created by h5py.File() or create_group(). layers: list A list of layers to load weights. skip : boolean If 'skip' == True, loaded layer whose name is not found in 'layers' will be skipped. If 'skip' is False, error will be raised when mismatch is found. Default False. """ layer_names = [n.decode('utf8') for n in f.attrs["layer_names"]] layer_index = {layer.name: layer for layer in layers} for idx, name in enumerate(layer_names): if name not in layer_index.keys(): if skip: logging.warning("Layer named '%s' not found in network. Skip it." % name) else: raise RuntimeError( "Layer named '%s' not found in network. Hint: set argument skip=Ture " "if you want to skip redundant or mismatch Layers." 
% name ) else: g = f[name] layer = layer_index[name] if isinstance(layer, tl.models.Model): _load_weights_from_hdf5_group(g, layer.all_layers, skip) elif isinstance(layer, tl.layers.ModelLayer): _load_weights_from_hdf5_group(g, layer.model.all_layers, skip) elif isinstance(layer, tl.layers.LayerList): _load_weights_from_hdf5_group(g, layer.layers, skip) elif isinstance(layer, tl.layers.Layer): weight_names = [n.decode('utf8') for n in g.attrs['weight_names']] for iid, w_name in enumerate(weight_names): # FIXME : this is only for compatibility if isinstance(layer, tl.layers.BatchNorm) and np.asarray(g[w_name]).ndim > 1: assign_tf_variable(layer.all_weights[iid], np.asarray(g[w_name]).squeeze()) continue assign_tf_variable(layer.all_weights[iid], np.asarray(g[w_name])) else: raise Exception("Only layer or model can be saved into hdf5.") def save_weights_to_hdf5(filepath, network): """Input filepath and save weights in hdf5 format. Parameters ---------- filepath : str Filename to which the weights will be saved. network : Model TL model. Returns ------- """ logging.info("[*] Saving TL weights into %s" % filepath) with h5py.File(filepath, 'w') as f: _save_weights_to_hdf5_group(f, network.all_layers) logging.info("[*] Saved") def load_hdf5_to_weights_in_order(filepath, network): """Load weights sequentially from a given file of hdf5 format Parameters ---------- filepath : str Filename to which the weights will be loaded, should be of hdf5 format. network : Model TL model. Notes: If the file contains more weights than given 'weights', then the redundant ones will be ignored if all previous weights match perfectly. Returns ------- """ f = h5py.File(filepath, 'r') try: layer_names = [n.decode('utf8') for n in f.attrs["layer_names"]] except Exception: raise NameError( "The loaded hdf5 file needs to have 'layer_names' as attributes. " "Please check whether this hdf5 file is saved from TL." ) if len(network.all_layers) != len(layer_names): logging.warning( "Number of weights mismatch." "Trying to load a saved file with " + str(len(layer_names)) + " layers into a model with " + str(len(network.all_layers)) + " layers." ) _load_weights_from_hdf5_group_in_order(f, network.all_layers) f.close() logging.info("[*] Load %s SUCCESS!" % filepath) def load_hdf5_to_weights(filepath, network, skip=False): """Load weights by name from a given file of hdf5 format Parameters ---------- filepath : str Filename to which the weights will be loaded, should be of hdf5 format. network : Model TL model. skip : bool If 'skip' == True, loaded weights whose name is not found in 'weights' will be skipped. If 'skip' is False, error will be raised when mismatch is found. Default False. Returns ------- """ f = h5py.File(filepath, 'r') try: layer_names = [n.decode('utf8') for n in f.attrs["layer_names"]] except Exception: raise NameError( "The loaded hdf5 file needs to have 'layer_names' as attributes. " "Please check whether this hdf5 file is saved from TL." ) net_index = {layer.name: layer for layer in network.all_layers} if len(network.all_layers) != len(layer_names): logging.warning( "Number of weights mismatch." "Trying to load a saved file with " + str(len(layer_names)) + " layers into a model with " + str(len(network.all_layers)) + " layers." ) # check mismatch form network weights to hdf5 for name in net_index.keys(): if name not in layer_names: logging.warning("Network layer named '%s' not found in loaded hdf5 file. It will be skipped." 
% name) # load weights from hdf5 to network _load_weights_from_hdf5_group(f, network.all_layers, skip) f.close() logging.info("[*] Load %s SUCCESS!" % filepath)
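
# Illustrative usage sketch (not part of the original module). It assumes `net` is an
# already-built `tl.models.Model` and the file names below are placeholders; it only
# calls functions defined in this file.
#
#   save_npz_dict(net.all_weights, name='model.npz')           # name-keyed npz snapshot
#   load_and_assign_npz_dict(name='model.npz', network=net)    # restore weights by name
#
#   save_weights_to_hdf5('model.h5', net)                      # layer-wise hdf5 snapshot
#   load_hdf5_to_weights('model.h5', net, skip=True)           # restore by layer name, skipping extras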
the-stack_0_4353
import logging from typing import Dict from synch.factory import get_reader, get_writer from synch.settings import Settings logger = logging.getLogger("synch.replication.etl") def etl_full( alias: str, schema: str, tables_pk: Dict, renew=False, full=True ): """ full etl """ reader = get_reader(alias) source_db_database = Settings.get_source_db_database(alias, schema) schema = source_db_database.get("database") writer = get_writer() if not writer.check_database_exists(schema): if source_db_database.get("auto_create") is not False: writer.create_database(schema, Settings.cluster_name()) else: logger.warning( f"Can't etl since no database {schema} found in ClickHouse and auto_create=false" ) exit(-1) for table in source_db_database.get("tables"): if not full: if table['table'] not in list(tables_pk.keys()): continue if table.get("auto_full_etl") is False: continue table_name = table.get("table") pk = tables_pk.get(table_name) writer = get_writer(table.get("clickhouse_engine")) if not pk and not renew: logger.warning(f"No pk found in {schema}.{table_name}, skip") continue elif isinstance(pk, tuple): pk = f"({','.join(pk)})" if renew: drop_sql = f"drop table if exists {schema}.{table_name}" writer.execute(drop_sql) logger.info(f"drop table success:{schema}.{table_name}") if not writer.check_table_exists(schema, table_name): sign_column = table.get("sign_column") version_column = table.get("version_column") order_by = table.get("order_by") writer.execute( writer.get_table_create_sql( reader, schema, table_name, pk, table.get("partition_by"), table.get("engine_settings"), sign_column=sign_column, version_column=version_column, order_by=order_by, ) ) if Settings.is_cluster(): for w in get_writer(choice=False): w.execute( w.get_distributed_table_create_sql( schema, table_name, Settings.get("clickhouse.distributed_suffix") ) ) if reader.fix_column_type and not table.get("skip_decimal"): writer.fix_table_column_type(reader, schema, table_name) full_insert_sql = writer.get_full_insert_sql(reader, schema, table_name, sign_column) logger.info(f"{full_insert_sql}") writer.execute(full_insert_sql) logger.info(f"full data etl for {schema}.{table_name} success") else: logger.debug( f"{schema}.{table_name} exists, skip, or use --renew force etl with drop old tables" )
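
# Illustrative invocation sketch (not part of the original module). It assumes the synch
# settings for the given alias and schema have already been loaded elsewhere; the alias,
# schema, table and primary-key names below are placeholders for entries in that config.
#
#   etl_full(alias="mysql_db", schema="test", tables_pk={"access_log": "id"}, renew=False, full=True)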
the-stack_0_4354
#! /usr/bin/env python3 import struct import enum def printMessage(s): return ' '.join("{:02x}".format(c) for c in s) class MessageType(enum.Enum): Text = 0 Numeric = 1 Logic = 2 def decodeMessage(s, msgType): payloadSize = struct.unpack_from('<H', s, 0)[0] if payloadSize < 5: # includes the mailSize raise BufferError('Payload size is too small') a, b, c, d = struct.unpack_from('<4B', s, 2) if a != 1 or b != 0 or c != 0x81 or d != 0x9e: raise BufferError('Header is not correct. Expecting 01 00 81 9e') mailSize = struct.unpack_from('<B', s, 6)[0] if payloadSize < (5 + mailSize): # includes the valueSize raise BufferError('Payload size is too small') mailBytes = struct.unpack_from('<' + str(mailSize) + 's', s, 7)[0] mail = mailBytes.decode('ascii')[:-1] valueSize = struct.unpack_from('<H', s, 7 + mailSize)[0] if payloadSize < (7 + mailSize + valueSize): # includes the valueSize raise BufferError('Payload size does not match the packet') if msgType == MessageType.Logic: if valueSize != 1: raise BufferError('Value size is not one byte required for Logic Type') valueBytes = struct.unpack_from('<B', s, 9 + mailSize)[0] value = True if valueBytes != 0 else False elif msgType == MessageType.Numeric: if valueSize != 4: raise BufferError('Value size is not four bytes required for Numeric Type') value = struct.unpack_from('<f', s, 9 + mailSize)[0] else: valueBytes = struct.unpack_from('<' + str(valueSize) + 's', s, 9 + mailSize)[0] value = valueBytes.decode('ascii')[:-1] remnant = None if len(s) > (payloadSize + 2): remnant = s[(payloadSize) + 2:] return (mail, value, remnant) def encodeMessage(msgType, mail, value): mail = mail + '\x00' mailBytes = mail.encode('ascii') mailSize = len(mailBytes) fmt = '<H4BB' + str(mailSize) + 'sH' if msgType == MessageType.Logic: valueSize = 1 valueBytes = 1 if value is True else 0 fmt += 'B' elif msgType == MessageType.Numeric: valueSize = 4 valueBytes = float(value) fmt += 'f' else: value = value + '\x00' valueBytes = value.encode('ascii') valueSize = len(valueBytes) fmt += str(valueSize) + 's' payloadSize = 7 + mailSize + valueSize s = struct.pack(fmt, payloadSize, 0x01, 0x00, 0x81, 0x9e, mailSize, mailBytes, valueSize, valueBytes) return s if __name__ == "__main__": s = encodeMessage(MessageType.Text, 'abc', 'Hello') print(printMessage(s))
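
    # Illustrative round trip using only the functions defined above: decoding the
    # packet that was just encoded should recover the mailbox name and the value.
    mail, value, remnant = decodeMessage(s, MessageType.Text)
    print(mail, value, remnant)  # expected output: abc Hello None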
the-stack_0_4357
# Electrum - Lightweight Bitcoin Client # Copyright (c) 2015 Thomas Voegtlin # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import re import dns import json import traceback import sys from .address import Address from . import dnssec from .util import FileImportFailed, FileImportFailedEncrypted class Contacts(dict): def __init__(self, storage): self.storage = storage d = self.storage.get('contacts', {}) try: self.update(d) except: return # backward compatibility for k, v in self.items(): _type, n = v if _type == 'address' and Address.is_valid(n): self.pop(k) self[n] = ('address', k) def save(self): self.storage.put('contacts', dict(self)) def import_file(self, path): try: with open(path, 'r') as f: d = self._validate(json.loads(f.read())) except json.decoder.JSONDecodeError: traceback.print_exc(file=sys.stderr) raise FileImportFailedEncrypted() except BaseException: traceback.print_exc(file=sys.stdout) raise FileImportFailed() self.update(d) self.save() def __setitem__(self, key, value): dict.__setitem__(self, key, value) self.save() def pop(self, key): if key in self.keys(): dict.pop(self, key) self.save() def resolve(self, k): if Address.is_valid(k): return { 'address': Address.from_string(k), 'type': 'address' } if k in self.keys(): _type, addr = self[k] if _type == 'address': return { 'address': addr, 'type': 'contact' } out = self.resolve_openalias(k) if out: address, name, validated = out return { 'address': address, 'name': name, 'type': 'openalias', 'validated': validated } raise Exception("Invalid Bitcoin address or alias", k) def resolve_openalias(self, url): # support email-style addresses, per the OA standard url = url.replace('@', '.') records, validated = dnssec.query(url, dns.rdatatype.TXT) prefix = 'btc' for record in records: string = record.strings[0] if string.startswith('oa1:' + prefix): address = self.find_regex(string, r'recipient_address=([A-Za-z0-9]+)') name = self.find_regex(string, r'recipient_name=([^;]+)') if not name: name = address if not address: continue return Address.from_string(address), name, validated def find_regex(self, haystack, needle): regex = re.compile(needle) try: return regex.search(haystack).groups()[0] except AttributeError: return None def _validate(self, data): for k,v in list(data.items()): if k == 'contacts': return self._validate(v) if not Address.is_valid(k): data.pop(k) else: _type,_ = v if _type != 'address': data.pop(k) return data
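
# Illustrative usage sketch (not part of the original module; it cannot run standalone
# because of the relative imports above). `storage` stands for a wallet storage object
# providing .get(key, default) and .put(key, value); addresses/aliases are placeholders.
#
#   contacts = Contacts(storage)
#   contacts.resolve('<valid address string>')   # -> {'address': Address(...), 'type': 'address'}
#   contacts.resolve('user@example.org')         # OpenAlias lookup over DNSSEC ->
#                                                #    {'address': ..., 'name': ..., 'type': 'openalias', 'validated': ...}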
the-stack_0_4359
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os
import sys
import json

import numpy as np
from scipy import misc as scp_misc
import tensorflow as tf
import facenet
import align.detect_face as detect_face
# from PIL import Image


def initialize_mtcnn(gpu_memory_fraction):
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
    return pnet, rnet, onet


def align_image(input_image, output_image, pnet, rnet, onet, image_size=182, margin=44,
                random_order=True, gpu_memory_fraction=1.0, debug=False, just_count=False):
    minsize = 20  # minimum size of face
    threshold = [0.7, 0.7, 0.9]  # three steps's threshold
    factor = 0.709  # scale factor

    if not os.path.exists(output_image):
        try:
            img = scp_misc.imread(input_image)
        except (IOError, ValueError, IndexError) as e:
            errorMessage = '{}: {}'.format(input_image, e)
            if debug:
                print(errorMessage)
        else:
            if img.ndim < 2:
                if debug:
                    print('Unable to align "%s"' % input_image)

            if img.ndim == 2:
                img = facenet.to_rgb(img)
            img = img[:, :, 0:3]

            bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
            nrof_faces = bounding_boxes.shape[0]

            if just_count == True:
                return True, nrof_faces

            if nrof_faces > 0:
                det = bounding_boxes[:, 0:4]
                img_size = np.asarray(img.shape)[0:2]

                if nrof_faces > 1:
                    det = np.squeeze(det)
                    counter = 0
                    scaled_list = []
                    for d in det:
                        bb = np.zeros(4, dtype=np.int32)
                        bb[0] = np.maximum(d[0] - margin / 2, 0)
                        bb[1] = np.maximum(d[1] - margin / 2, 0)
                        bb[2] = np.minimum(d[2] + margin / 2, img_size[1])
                        bb[3] = np.minimum(d[3] + margin / 2, img_size[0])
                        cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                        scaled = scp_misc.imresize(cropped, (image_size, image_size), interp='bilinear')
                        filename = "{}_{}.jpg".format(output_image.split(".")[0] + "image", str(counter))
                        scp_misc.imsave(filename, scaled)
                        scaled_list.append(scaled)
                        counter = counter + 1
                    return True, scaled_list

                if nrof_faces == 1:
                    det = np.squeeze(det)
                    bb = np.zeros(4, dtype=np.int32)
                    bb[0] = np.maximum(det[0] - margin / 2, 0)
                    bb[1] = np.maximum(det[1] - margin / 2, 0)
                    bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
                    bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
                    cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                    scaled = scp_misc.imresize(cropped, (image_size, image_size), interp='bilinear')
                    scp_misc.imsave(output_image, scaled)
                    return True, scaled

            else:
                if debug:
                    print('Unable to align "%s"' % input_image)
                return False, 1


def main(args):
    # TODO Check why this was previously being initialised inside the image loop
    file_to_facecount = dict()
    pnet, rnet, onet = initialize_mtcnn(0.8)
    for filename in os.listdir(args.input_dir):
        input_image = filename
        output_image = filename

        if os.path.isfile(os.path.join(args.input_dir, input_image)) == False:
            continue

        input_image = os.path.join(args.input_dir, input_image)
        output_image = os.path.join(args.output_dir, output_image)

        _, result = align_image(input_image, output_image, pnet, rnet, onet,
                                image_size=args.image_size, margin=args.margin,
                                random_order=args.random_order,
                                gpu_memory_fraction=args.gpu_memory_fraction, debug=False,
                                just_count=args.just_count)
        if args.just_count == True:
            file_to_facecount[filename] = result

    if args.just_count:
        json.dump(file_to_facecount, open(os.path.join(args.output_dir, args.count_file), "w"))

def parse_arguments(argv): parser = argparse.ArgumentParser() parser.add_argument('input_dir', type=str, help='Directory with unaligned images.') parser.add_argument('output_dir', type=str, help='Directory with aligned face thumbnails.') parser.add_argument('--image_size', type=int, help='Image size (height, width) in pixels.', default=182) parser.add_argument('--margin', type=int, help='Margin for the crop around the bounding box (height, width) in pixels.', default=44) parser.add_argument('--random_order', help='Shuffles the order of images to enable alignment using multiple processes.', action='store_true') parser.add_argument('--gpu_memory_fraction', type=float, help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0) parser.add_argument('--has_classes', dest='has_classes', action='store_true', help='Input folder is split into class subfolders, and these should be replicated', default=True) parser.add_argument('--no_classes', dest='has_classes', action='store_false', help='Input folder is split into class subfolders, and these should be replicated', default=True) parser.add_argument('--just_count', dest='just_count', action='store_true', help='Just save out a JSON mapping filenames to counts of faces found', default=False) parser.add_argument('--count_file', type=str, help='Where to save counts of faces', default="face_counts.json") return parser.parse_args(argv) if __name__ == "__main__": main(parse_arguments(sys.argv[1:])) # #print(ads) #print("bleh"\ #print(os.listdir(path)) # # for filename in os.listdir(path): # print(filename) # x = filename.split('_')[0] # ads.append(x) # directory = (path + "/" + x) # if not os.path.exists(directory): # os.makedirs(directory) # #shutil.copy(newpath + "/" + filename, directory)
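
# Illustrative command lines (the script name is a placeholder; the flags map directly
# to parse_arguments() above):
#
#   python align_script.py raw_images/ aligned_images/ --image_size 182 --margin 44
#   python align_script.py raw_images/ aligned_images/ --just_count --count_file face_counts.json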
the-stack_0_4360
from sqlalchemy.testing import eq_, assert_raises, \ assert_raises_message, is_ from sqlalchemy.ext import declarative as decl import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy import Integer, String, ForeignKey from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import relationship, create_session, class_mapper, \ configure_mappers, clear_mappers, \ polymorphic_union, deferred, Session from sqlalchemy.ext.declarative import declared_attr, AbstractConcreteBase, \ ConcreteBase, has_inherited_table from sqlalchemy.testing import fixtures Base = None class DeclarativeTestBase(fixtures.TestBase, testing.AssertsExecutionResults): def setup(self): global Base Base = decl.declarative_base(testing.db) def teardown(self): Session.close_all() clear_mappers() Base.metadata.drop_all() class DeclarativeInheritanceTest(DeclarativeTestBase): def test_we_must_copy_mapper_args(self): class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator, 'polymorphic_identity': 'person'} class Engineer(Person): primary_language = Column(String(50)) assert 'inherits' not in Person.__mapper_args__ assert class_mapper(Engineer).polymorphic_identity is None assert class_mapper(Engineer).polymorphic_on is Person.__table__.c.type def test_we_must_only_copy_column_mapper_args(self): class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) a = Column(Integer) b = Column(Integer) c = Column(Integer) d = Column(Integer) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator, 'polymorphic_identity': 'person', 'version_id_col': 'a', 'column_prefix': 'bar', 'include_properties': ['id', 'a', 'b'], } assert class_mapper(Person).version_id_col == 'a' assert class_mapper(Person).include_properties == set(['id', 'a', 'b']) def test_custom_join_condition(self): class Foo(Base): __tablename__ = 'foo' id = Column('id', Integer, primary_key=True) class Bar(Foo): __tablename__ = 'bar' id = Column('id', Integer, primary_key=True) foo_id = Column('foo_id', Integer) __mapper_args__ = {'inherit_condition': foo_id == Foo.id} # compile succeeds because inherit_condition is honored configure_mappers() def test_joined(self): class Company(Base, fixtures.ComparableEntity): __tablename__ = 'companies' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) employees = relationship('Person') class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) company_id = Column('company_id', Integer, ForeignKey('companies.id')) name = Column('name', String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __tablename__ = 'engineers' __mapper_args__ = {'polymorphic_identity': 'engineer'} id = Column('id', Integer, ForeignKey('people.id'), primary_key=True) primary_language = Column('primary_language', String(50)) class Manager(Person): __tablename__ = 'managers' __mapper_args__ = {'polymorphic_identity': 'manager'} id = Column('id', Integer, ForeignKey('people.id'), primary_key=True) golf_swing = Column('golf_swing', String(50)) Base.metadata.create_all() sess = create_session() c1 = Company(name='MegaCorp, Inc.', employees=[Engineer(name='dilbert', primary_language='java'), Engineer(name='wally', primary_language='c++'), 
Manager(name='dogbert', golf_swing='fore!')]) c2 = Company(name='Elbonia, Inc.', employees=[Engineer(name='vlad', primary_language='cobol')]) sess.add(c1) sess.add(c2) sess.flush() sess.expunge_all() eq_(sess.query(Company).filter(Company.employees.of_type(Engineer). any(Engineer.primary_language == 'cobol')).first(), c2) # ensure that the Manager mapper was compiled with the Manager id # column as higher priority. this ensures that "Manager.id" # is appropriately treated as the "id" column in the "manager" # table (reversed from 0.6's behavior.) eq_( Manager.id.property.columns, [Manager.__table__.c.id, Person.__table__.c.id] ) # assert that the "id" column is available without a second # load. as of 0.7, the ColumnProperty tests all columns # in it's list to see which is present in the row. sess.expunge_all() def go(): assert sess.query(Manager).filter(Manager.name == 'dogbert' ).one().id self.assert_sql_count(testing.db, go, 1) sess.expunge_all() def go(): assert sess.query(Person).filter(Manager.name == 'dogbert' ).one().id self.assert_sql_count(testing.db, go, 1) def test_add_subcol_after_the_fact(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __tablename__ = 'engineers' __mapper_args__ = {'polymorphic_identity': 'engineer'} id = Column('id', Integer, ForeignKey('people.id'), primary_key=True) Engineer.primary_language = Column('primary_language', String(50)) Base.metadata.create_all() sess = create_session() e1 = Engineer(primary_language='java', name='dilbert') sess.add(e1) sess.flush() sess.expunge_all() eq_(sess.query(Person).first(), Engineer(primary_language='java', name='dilbert')) def test_add_parentcol_after_the_fact(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __tablename__ = 'engineers' __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language = Column(String(50)) id = Column('id', Integer, ForeignKey('people.id'), primary_key=True) Person.name = Column('name', String(50)) Base.metadata.create_all() sess = create_session() e1 = Engineer(primary_language='java', name='dilbert') sess.add(e1) sess.flush() sess.expunge_all() eq_(sess.query(Person).first(), Engineer(primary_language='java', name='dilbert')) def test_add_sub_parentcol_after_the_fact(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __tablename__ = 'engineers' __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language = Column(String(50)) id = Column('id', Integer, ForeignKey('people.id'), primary_key=True) class Admin(Engineer): __tablename__ = 'admins' __mapper_args__ = {'polymorphic_identity': 'admin'} workstation = Column(String(50)) id = Column('id', Integer, ForeignKey('engineers.id'), primary_key=True) Person.name = Column('name', String(50)) Base.metadata.create_all() sess = create_session() e1 = Admin(primary_language='java', name='dilbert', workstation='foo') sess.add(e1) 
sess.flush() sess.expunge_all() eq_(sess.query(Person).first(), Admin(primary_language='java', name='dilbert', workstation='foo')) def test_subclass_mixin(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True) name = Column('name', String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class MyMixin(object): pass class Engineer(MyMixin, Person): __tablename__ = 'engineers' __mapper_args__ = {'polymorphic_identity': 'engineer'} id = Column('id', Integer, ForeignKey('people.id'), primary_key=True) primary_language = Column('primary_language', String(50)) assert class_mapper(Engineer).inherits is class_mapper(Person) def test_with_undefined_foreignkey(self): class Parent(Base): __tablename__ = 'parent' id = Column('id', Integer, primary_key=True) tp = Column('type', String(50)) __mapper_args__ = dict(polymorphic_on=tp) class Child1(Parent): __tablename__ = 'child1' id = Column('id', Integer, ForeignKey('parent.id'), primary_key=True) related_child2 = Column('c2', Integer, ForeignKey('child2.id')) __mapper_args__ = dict(polymorphic_identity='child1') # no exception is raised by the ForeignKey to "child2" even # though child2 doesn't exist yet class Child2(Parent): __tablename__ = 'child2' id = Column('id', Integer, ForeignKey('parent.id'), primary_key=True) related_child1 = Column('c1', Integer) __mapper_args__ = dict(polymorphic_identity='child2') sa.orm.configure_mappers() # no exceptions here def test_foreign_keys_with_col(self): """Test that foreign keys that reference a literal 'id' subclass 'id' attribute behave intuitively. See [ticket:1892]. """ class Booking(Base): __tablename__ = 'booking' id = Column(Integer, primary_key=True) class PlanBooking(Booking): __tablename__ = 'plan_booking' id = Column(Integer, ForeignKey(Booking.id), primary_key=True) # referencing PlanBooking.id gives us the column # on plan_booking, not booking class FeatureBooking(Booking): __tablename__ = 'feature_booking' id = Column(Integer, ForeignKey(Booking.id), primary_key=True) plan_booking_id = Column(Integer, ForeignKey(PlanBooking.id)) plan_booking = relationship(PlanBooking, backref='feature_bookings') assert FeatureBooking.__table__.c.plan_booking_id.\ references(PlanBooking.__table__.c.id) assert FeatureBooking.__table__.c.id.\ references(Booking.__table__.c.id) def test_single_colsonbase(self): """test single inheritance where all the columns are on the base class.""" class Company(Base, fixtures.ComparableEntity): __tablename__ = 'companies' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) employees = relationship('Person') class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) company_id = Column('company_id', Integer, ForeignKey('companies.id')) name = Column('name', String(50)) discriminator = Column('type', String(50)) primary_language = Column('primary_language', String(50)) golf_swing = Column('golf_swing', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} class Manager(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} Base.metadata.create_all() sess = create_session() c1 = Company(name='MegaCorp, Inc.', employees=[Engineer(name='dilbert', primary_language='java'), Engineer(name='wally', primary_language='c++'), 
Manager(name='dogbert', golf_swing='fore!')]) c2 = Company(name='Elbonia, Inc.', employees=[Engineer(name='vlad', primary_language='cobol')]) sess.add(c1) sess.add(c2) sess.flush() sess.expunge_all() eq_(sess.query(Person).filter(Engineer.primary_language == 'cobol').first(), Engineer(name='vlad')) eq_(sess.query(Company).filter(Company.employees.of_type(Engineer). any(Engineer.primary_language == 'cobol')).first(), c2) def test_single_colsonsub(self): """test single inheritance where the columns are local to their class. this is a newer usage. """ class Company(Base, fixtures.ComparableEntity): __tablename__ = 'companies' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) employees = relationship('Person') class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) company_id = Column(Integer, ForeignKey('companies.id')) name = Column(String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language = Column(String(50)) class Manager(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} golf_swing = Column(String(50)) # we have here a situation that is somewhat unique. the Person # class is mapped to the "people" table, but it was mapped when # the table did not include the "primary_language" or # "golf_swing" columns. declarative will also manipulate the # exclude_properties collection so that sibling classes don't # cross-pollinate. assert Person.__table__.c.company_id is not None assert Person.__table__.c.golf_swing is not None assert Person.__table__.c.primary_language is not None assert Engineer.primary_language is not None assert Manager.golf_swing is not None assert not hasattr(Person, 'primary_language') assert not hasattr(Person, 'golf_swing') assert not hasattr(Engineer, 'golf_swing') assert not hasattr(Manager, 'primary_language') Base.metadata.create_all() sess = create_session() e1 = Engineer(name='dilbert', primary_language='java') e2 = Engineer(name='wally', primary_language='c++') m1 = Manager(name='dogbert', golf_swing='fore!') c1 = Company(name='MegaCorp, Inc.', employees=[e1, e2, m1]) e3 = Engineer(name='vlad', primary_language='cobol') c2 = Company(name='Elbonia, Inc.', employees=[e3]) sess.add(c1) sess.add(c2) sess.flush() sess.expunge_all() eq_(sess.query(Person).filter(Engineer.primary_language == 'cobol').first(), Engineer(name='vlad')) eq_(sess.query(Company).filter(Company.employees.of_type(Engineer). any(Engineer.primary_language == 'cobol')).first(), c2) eq_(sess.query(Engineer).filter_by(primary_language='cobol' ).one(), Engineer(name='vlad', primary_language='cobol')) @testing.skip_if(lambda: testing.against('oracle'), "Test has an empty insert in it at the moment") def test_columns_single_inheritance_conflict_resolution(self): """Test that a declared_attr can return the existing column and it will be ignored. this allows conditional columns to be added. See [ticket:2472]. 
""" class Person(Base): __tablename__ = 'person' id = Column(Integer, primary_key=True) class Engineer(Person): """single table inheritance""" @declared_attr def target_id(cls): return cls.__table__.c.get('target_id', Column(Integer, ForeignKey('other.id')) ) @declared_attr def target(cls): return relationship("Other") class Manager(Person): """single table inheritance""" @declared_attr def target_id(cls): return cls.__table__.c.get('target_id', Column(Integer, ForeignKey('other.id')) ) @declared_attr def target(cls): return relationship("Other") class Other(Base): __tablename__ = 'other' id = Column(Integer, primary_key=True) is_( Engineer.target_id.property.columns[0], Person.__table__.c.target_id ) is_( Manager.target_id.property.columns[0], Person.__table__.c.target_id ) # do a brief round trip on this Base.metadata.create_all() session = Session() o1, o2 = Other(), Other() session.add_all([ Engineer(target=o1), Manager(target=o2), Manager(target=o1) ]) session.commit() eq_(session.query(Engineer).first().target, o1) def test_joined_from_single(self): class Company(Base, fixtures.ComparableEntity): __tablename__ = 'companies' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) employees = relationship('Person') class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) company_id = Column(Integer, ForeignKey('companies.id')) name = Column(String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Manager(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} golf_swing = Column(String(50)) class Engineer(Person): __tablename__ = 'engineers' __mapper_args__ = {'polymorphic_identity': 'engineer'} id = Column(Integer, ForeignKey('people.id'), primary_key=True) primary_language = Column(String(50)) assert Person.__table__.c.golf_swing is not None assert not Person.__table__.c.has_key('primary_language') assert Engineer.__table__.c.primary_language is not None assert Engineer.primary_language is not None assert Manager.golf_swing is not None assert not hasattr(Person, 'primary_language') assert not hasattr(Person, 'golf_swing') assert not hasattr(Engineer, 'golf_swing') assert not hasattr(Manager, 'primary_language') Base.metadata.create_all() sess = create_session() e1 = Engineer(name='dilbert', primary_language='java') e2 = Engineer(name='wally', primary_language='c++') m1 = Manager(name='dogbert', golf_swing='fore!') c1 = Company(name='MegaCorp, Inc.', employees=[e1, e2, m1]) e3 = Engineer(name='vlad', primary_language='cobol') c2 = Company(name='Elbonia, Inc.', employees=[e3]) sess.add(c1) sess.add(c2) sess.flush() sess.expunge_all() eq_(sess.query(Person).with_polymorphic(Engineer). filter(Engineer.primary_language == 'cobol').first(), Engineer(name='vlad')) eq_(sess.query(Company).filter(Company.employees.of_type(Engineer). 
any(Engineer.primary_language == 'cobol')).first(), c2) eq_(sess.query(Engineer).filter_by(primary_language='cobol' ).one(), Engineer(name='vlad', primary_language='cobol')) def test_single_from_joined_colsonsub(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Manager(Person): __tablename__ = 'manager' __mapper_args__ = {'polymorphic_identity': 'manager'} id = Column(Integer, ForeignKey('people.id'), primary_key=True) golf_swing = Column(String(50)) class Boss(Manager): boss_name = Column(String(50)) is_( Boss.__mapper__.column_attrs['boss_name'].columns[0], Manager.__table__.c.boss_name ) def test_polymorphic_on_converted_from_inst(self): class A(Base): __tablename__ = 'A' id = Column(Integer, primary_key=True) discriminator = Column(String) @declared_attr def __mapper_args__(cls): return { 'polymorphic_identity': cls.__name__, 'polymorphic_on': cls.discriminator } class B(A): pass is_(B.__mapper__.polymorphic_on, A.__table__.c.discriminator) def test_add_deferred(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) Person.name = deferred(Column(String(10))) Base.metadata.create_all() sess = create_session() p = Person(name='ratbert') sess.add(p) sess.flush() sess.expunge_all() eq_(sess.query(Person).all(), [Person(name='ratbert')]) sess.expunge_all() person = sess.query(Person).filter(Person.name == 'ratbert' ).one() assert 'name' not in person.__dict__ def test_single_fksonsub(self): """test single inheritance with a foreign key-holding column on a subclass. 
""" class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language_id = Column(Integer, ForeignKey('languages.id')) primary_language = relationship('Language') class Language(Base, fixtures.ComparableEntity): __tablename__ = 'languages' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) assert not hasattr(Person, 'primary_language_id') Base.metadata.create_all() sess = create_session() java, cpp, cobol = Language(name='java'), Language(name='cpp'), \ Language(name='cobol') e1 = Engineer(name='dilbert', primary_language=java) e2 = Engineer(name='wally', primary_language=cpp) e3 = Engineer(name='vlad', primary_language=cobol) sess.add_all([e1, e2, e3]) sess.flush() sess.expunge_all() eq_(sess.query(Person).filter(Engineer.primary_language.has( Language.name == 'cobol')).first(), Engineer(name='vlad', primary_language=Language(name='cobol'))) eq_(sess.query(Engineer).filter(Engineer.primary_language.has( Language.name == 'cobol')).one(), Engineer(name='vlad', primary_language=Language(name='cobol'))) eq_(sess.query(Person).join(Engineer.primary_language).order_by( Language.name).all(), [Engineer(name='vlad', primary_language=Language(name='cobol')), Engineer(name='wally', primary_language=Language(name='cpp' )), Engineer(name='dilbert', primary_language=Language(name='java'))]) def test_single_three_levels(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column(Integer, primary_key=True) name = Column(String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language = Column(String(50)) class JuniorEngineer(Engineer): __mapper_args__ = \ {'polymorphic_identity': 'junior_engineer'} nerf_gun = Column(String(50)) class Manager(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} golf_swing = Column(String(50)) assert JuniorEngineer.nerf_gun assert JuniorEngineer.primary_language assert JuniorEngineer.name assert Manager.golf_swing assert Engineer.primary_language assert not hasattr(Engineer, 'golf_swing') assert not hasattr(Engineer, 'nerf_gun') assert not hasattr(Manager, 'nerf_gun') assert not hasattr(Manager, 'primary_language') def test_single_detects_conflict(self): class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) name = Column(String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language = Column(String(50)) # test sibling col conflict def go(): class Manager(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} golf_swing = Column(String(50)) primary_language = Column(String(50)) assert_raises(sa.exc.ArgumentError, go) # test parent col conflict def go(): class Salesman(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} name = Column(String(50)) assert_raises(sa.exc.ArgumentError, go) def test_single_no_special_cols(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True) name = Column('name', String(50)) 
discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} def go(): class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language = Column('primary_language', String(50)) foo_bar = Column(Integer, primary_key=True) assert_raises_message(sa.exc.ArgumentError, 'place primary key', go) def test_single_no_table_args(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True) name = Column('name', String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} def go(): class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language = Column('primary_language', String(50)) # this should be on the Person class, as this is single # table inheritance, which is why we test that this # throws an exception! __table_args__ = {'mysql_engine': 'InnoDB'} assert_raises_message(sa.exc.ArgumentError, 'place __table_args__', go) @testing.emits_warning("This declarative") def test_dupe_name_in_hierarchy(self): class A(Base): __tablename__ = "a" id = Column(Integer, primary_key=True) a_1 = A class A(a_1): __tablename__ = 'b' id = Column(Integer(), ForeignKey(a_1.id), primary_key=True) assert A.__mapper__.inherits is a_1.__mapper__ class OverlapColPrecedenceTest(DeclarativeTestBase): """test #1892 cases when declarative does column precedence.""" def _run_test(self, Engineer, e_id, p_id): p_table = Base.metadata.tables['person'] e_table = Base.metadata.tables['engineer'] assert Engineer.id.property.columns[0] is e_table.c[e_id] assert Engineer.id.property.columns[1] is p_table.c[p_id] def test_basic(self): class Person(Base): __tablename__ = 'person' id = Column(Integer, primary_key=True) class Engineer(Person): __tablename__ = 'engineer' id = Column(Integer, ForeignKey('person.id'), primary_key=True) self._run_test(Engineer, "id", "id") def test_alt_name_base(self): class Person(Base): __tablename__ = 'person' id = Column("pid", Integer, primary_key=True) class Engineer(Person): __tablename__ = 'engineer' id = Column(Integer, ForeignKey('person.pid'), primary_key=True) self._run_test(Engineer, "id", "pid") def test_alt_name_sub(self): class Person(Base): __tablename__ = 'person' id = Column(Integer, primary_key=True) class Engineer(Person): __tablename__ = 'engineer' id = Column("eid", Integer, ForeignKey('person.id'), primary_key=True) self._run_test(Engineer, "eid", "id") def test_alt_name_both(self): class Person(Base): __tablename__ = 'person' id = Column("pid", Integer, primary_key=True) class Engineer(Person): __tablename__ = 'engineer' id = Column("eid", Integer, ForeignKey('person.pid'), primary_key=True) self._run_test(Engineer, "eid", "pid") from test.orm.test_events import _RemoveListeners class ConcreteInhTest(_RemoveListeners, DeclarativeTestBase): def _roundtrip(self, Employee, Manager, Engineer, Boss, polymorphic=True, explicit_type=False): Base.metadata.create_all() sess = create_session() e1 = Engineer(name='dilbert', primary_language='java') e2 = Engineer(name='wally', primary_language='c++') m1 = Manager(name='dogbert', golf_swing='fore!') e3 = Engineer(name='vlad', primary_language='cobol') b1 = Boss(name="pointy haired") if polymorphic: for obj in [e1, e2, m1, e3, b1]: if explicit_type: eq_(obj.type, obj.__mapper__.polymorphic_identity) else: assert_raises_message( AttributeError, "does not implement attribute .?'type' " "at the instance level.", getattr, obj, "type" ) else: assert 
"type" not in Engineer.__dict__ assert "type" not in Manager.__dict__ assert "type" not in Boss.__dict__ sess.add_all([e1, e2, m1, e3, b1]) sess.flush() sess.expunge_all() if polymorphic: eq_(sess.query(Employee).order_by(Employee.name).all(), [Engineer(name='dilbert'), Manager(name='dogbert'), Boss(name='pointy haired'), Engineer(name='vlad'), Engineer(name='wally')]) else: eq_(sess.query(Engineer).order_by(Engineer.name).all(), [Engineer(name='dilbert'), Engineer(name='vlad'), Engineer(name='wally')]) eq_(sess.query(Manager).all(), [Manager(name='dogbert')]) eq_(sess.query(Boss).all(), [Boss(name='pointy haired')]) def test_explicit(self): engineers = Table('engineers', Base.metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('primary_language', String(50))) managers = Table('managers', Base.metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('golf_swing', String(50)) ) boss = Table('boss', Base.metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('golf_swing', String(50)) ) punion = polymorphic_union({ 'engineer': engineers, 'manager': managers, 'boss': boss}, 'type', 'punion') class Employee(Base, fixtures.ComparableEntity): __table__ = punion __mapper_args__ = {'polymorphic_on': punion.c.type} class Engineer(Employee): __table__ = engineers __mapper_args__ = {'polymorphic_identity': 'engineer', 'concrete': True} class Manager(Employee): __table__ = managers __mapper_args__ = {'polymorphic_identity': 'manager', 'concrete': True} class Boss(Manager): __table__ = boss __mapper_args__ = {'polymorphic_identity': 'boss', 'concrete': True} self._roundtrip(Employee, Manager, Engineer, Boss) def test_concrete_inline_non_polymorphic(self): """test the example from the declarative docs.""" class Employee(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) class Engineer(Employee): __tablename__ = 'engineers' __mapper_args__ = {'concrete': True} id = Column(Integer, primary_key=True, test_needs_autoincrement=True) primary_language = Column(String(50)) name = Column(String(50)) class Manager(Employee): __tablename__ = 'manager' __mapper_args__ = {'concrete': True} id = Column(Integer, primary_key=True, test_needs_autoincrement=True) golf_swing = Column(String(50)) name = Column(String(50)) class Boss(Manager): __tablename__ = 'boss' __mapper_args__ = {'concrete': True} id = Column(Integer, primary_key=True, test_needs_autoincrement=True) golf_swing = Column(String(50)) name = Column(String(50)) self._roundtrip(Employee, Manager, Engineer, Boss, polymorphic=False) def test_abstract_concrete_extension(self): class Employee(AbstractConcreteBase, Base, fixtures.ComparableEntity): pass class Manager(Employee): __tablename__ = 'manager' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) golf_swing = Column(String(40)) __mapper_args__ = { 'polymorphic_identity': 'manager', 'concrete': True} class Boss(Manager): __tablename__ = 'boss' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) golf_swing = Column(String(40)) __mapper_args__ = { 'polymorphic_identity': 'boss', 'concrete': True} class Engineer(Employee): __tablename__ = 'engineer' employee_id = Column(Integer, primary_key=True, 
test_needs_autoincrement=True) name = Column(String(50)) primary_language = Column(String(40)) __mapper_args__ = {'polymorphic_identity': 'engineer', 'concrete': True} self._roundtrip(Employee, Manager, Engineer, Boss) def test_concrete_extension(self): class Employee(ConcreteBase, Base, fixtures.ComparableEntity): __tablename__ = 'employee' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) __mapper_args__ = { 'polymorphic_identity': 'employee', 'concrete': True} class Manager(Employee): __tablename__ = 'manager' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) golf_swing = Column(String(40)) __mapper_args__ = { 'polymorphic_identity': 'manager', 'concrete': True} class Boss(Manager): __tablename__ = 'boss' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) golf_swing = Column(String(40)) __mapper_args__ = { 'polymorphic_identity': 'boss', 'concrete': True} class Engineer(Employee): __tablename__ = 'engineer' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) primary_language = Column(String(40)) __mapper_args__ = {'polymorphic_identity': 'engineer', 'concrete': True} self._roundtrip(Employee, Manager, Engineer, Boss) def test_has_inherited_table_doesnt_consider_base(self): class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) assert not has_inherited_table(A) class B(A): __tablename__ = 'b' id = Column(Integer, ForeignKey('a.id'), primary_key=True) assert has_inherited_table(B) def test_has_inherited_table_in_mapper_args(self): class Test(Base): __tablename__ = 'test' id = Column(Integer, primary_key=True) type = Column(String(20)) @declared_attr def __mapper_args__(cls): if not has_inherited_table(cls): ret = { 'polymorphic_identity': 'default', 'polymorphic_on': cls.type, } else: ret = {'polymorphic_identity': cls.__name__} return ret class PolyTest(Test): __tablename__ = 'poly_test' id = Column(Integer, ForeignKey(Test.id), primary_key=True) configure_mappers() assert Test.__mapper__.polymorphic_on is Test.__table__.c.type assert PolyTest.__mapper__.polymorphic_on is Test.__table__.c.type def test_ok_to_override_type_from_abstract(self): class Employee(AbstractConcreteBase, Base, fixtures.ComparableEntity): pass class Manager(Employee): __tablename__ = 'manager' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) golf_swing = Column(String(40)) @property def type(self): return "manager" __mapper_args__ = { 'polymorphic_identity': "manager", 'concrete': True} class Boss(Manager): __tablename__ = 'boss' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) golf_swing = Column(String(40)) @property def type(self): return "boss" __mapper_args__ = { 'polymorphic_identity': "boss", 'concrete': True} class Engineer(Employee): __tablename__ = 'engineer' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) primary_language = Column(String(40)) @property def type(self): return "engineer" __mapper_args__ = {'polymorphic_identity': "engineer", 'concrete': True} self._roundtrip(Employee, Manager, Engineer, Boss, explicit_type=True)
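

if __name__ == '__main__':
    # Minimal standalone sketch of the joined-table inheritance pattern exercised by
    # DeclarativeInheritanceTest.test_joined above. It is not part of the original test
    # module; it reuses the imports at the top of this file and an in-memory SQLite
    # engine instead of the test harness, and only runs when invoked as a script.
    DemoBase = decl.declarative_base()

    class DemoPerson(DemoBase):
        __tablename__ = 'demo_people'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator,
                           'polymorphic_identity': 'person'}

    class DemoEngineer(DemoPerson):
        __tablename__ = 'demo_engineers'
        id = Column(Integer, ForeignKey('demo_people.id'), primary_key=True)
        primary_language = Column(String(50))
        __mapper_args__ = {'polymorphic_identity': 'engineer'}

    engine = sa.create_engine('sqlite://')
    DemoBase.metadata.create_all(engine)

    session = Session(bind=engine)
    session.add(DemoEngineer(name='dilbert', primary_language='java'))
    session.commit()

    # Querying the base class loads the subclass row polymorphically.
    loaded = session.query(DemoPerson).first()
    print(type(loaded).__name__, loaded.name, loaded.primary_language)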