ext (stringclasses, 9 values)
sha (stringlengths, 40 to 40)
content (stringlengths, 3 to 1.04M)
py
1a39e037f4288271865061568120e95f606e09ea
""" Add an excerpt field to the page. """ from django.db import models from django.utils.translation import ugettext_lazy as _ from feincms import extensions class Extension(extensions.Extension): def handle_model(self): self.model.add_to_class('partner_login_required', models.BooleanField( _('partner login required'), help_text=_('If changed all children of this page will be marked with the same value. ' 'If checked viewing this page will be restricted to partner login only.'))) def handle_modeladmin(self, modeladmin): modeladmin.list_display.extend(['partner_login_required']) modeladmin.list_filter.extend(['partner_login_required']) modeladmin.add_extension_options('partner_login_required')
py
1a39e0f140a044e9e75aed3e31a8474f40a9ee40
from django.shortcuts import render

from vdw.raw.sources.models import Source


def sources(request):
    sources = Source.objects.filter(published=True, archived=False)\
        .select_related('stats')
    return render(request, 'sources/sources.html', {
        'sources': sources,
    })
py
1a39e113b9e363a015085e4adc7805958ad5d8a3
from setuptools import find_packages, setup

with open('README.md', 'r') as fh:
    long_description = fh.read()

setup(
    name='backoid',
    description='backoid',
    version="0.0.1",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=find_packages("src"),
    package_dir={"": "src"},
    install_requires=[
        'pymysql>=0.9.3',
        'azure-storage-blob',
        'pyyaml'
    ],
    classifiers=[
        "Natural Language :: English",
        "Operating System :: POSIX",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3.7",
    ],
    entry_points={
        'console_scripts': [
            'backoid = backoid.cli:main'
        ]
    }
)
py
1a39e1bad164394e7faf594536049754a76fb416
import numpy as np

'''
TODO:
- statistics in table as an attribute
- using the already built-in index to compute the attributes
- Set the MAX freq, NORMAL while building the index
'''


def get_olken_sample_2way(table1, table2, join_column):
    """Olken-style rejection sampling of one tuple pair from a 2-way join."""
    N1 = len(table1.data[join_column])
    retval = None
    flag = False
    while not flag:
        rand_idx1 = np.random.randint(low=0, high=N1)
        t1_val = table1.data[join_column][rand_idx1]
        joining_tups = table2.index[join_column][t1_val]  # using the table-index
        rand_idx2 = np.random.randint(low=0, high=len(joining_tups))
        # Get the frequency of t1_val in table2 (in the join_column)
        freq_v = table2.get_freq(join_column, t1_val)
        maxval = table2.get_max_freq(join_column)
        accept_prob = freq_v * 1.0 / maxval
        random_toss = np.random.random_sample()
        if random_toss <= accept_prob:
            flag = True
            retval = (rand_idx1, rand_idx2)
    return retval
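A minimal usage sketch under stated assumptions: the Table interface (data, index, get_freq, get_max_freq) is not defined in this file, so the tiny stand-in below is hypothetical and only illustrates the shape the sampler expects.

from collections import Counter, defaultdict


class TinyTable:
    """Hypothetical stand-in for the table interface assumed by the sampler."""

    def __init__(self, column, values):
        self.data = {column: values}
        self.index = {column: defaultdict(list)}
        for i, v in enumerate(values):
            self.index[column][v].append(i)
        self._freq = {column: Counter(values)}

    def get_freq(self, column, value):
        return self._freq[column][value]

    def get_max_freq(self, column):
        return max(self._freq[column].values())


t1 = TinyTable('k', [2, 3, 2])
t2 = TinyTable('k', [2, 2, 3, 3, 3])
# Returns (row index in t1, position within t2's rows matching that join value).
print(get_olken_sample_2way(t1, t2, 'k'))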
py
1a39e1f78725c172941fb9c8f1f858cfee9eb49e
# coding=utf-8 # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import absolute_import, division, print_function, unicode_literals import os from pants.base.file_system_project_tree import FileSystemProjectTree from pants_test.pants_run_integration_test import PantsRunIntegrationTest class FilemapIntegrationTest(PantsRunIntegrationTest): PATH_PREFIX = 'testprojects/tests/python/pants/file_sets/' TEST_EXCLUDE_FILES = { 'a.py', 'aa.py', 'aaa.py', 'ab.py', 'aabb.py', 'test_a.py', 'dir1/a.py', 'dir1/aa.py', 'dir1/aaa.py', 'dir1/ab.py', 'dir1/aabb.py', 'dir1/dirdir1/a.py', 'dir1/dirdir1/aa.py', 'dir1/dirdir1/ab.py' } def setUp(self): super(FilemapIntegrationTest, self).setUp() project_tree = FileSystemProjectTree(os.path.abspath(self.PATH_PREFIX), ['BUILD', '.*']) scan_set = set() def should_ignore(file): return file.endswith('.pyc') for root, dirs, files in project_tree.walk(''): scan_set.update({os.path.join(root, f) for f in files if not should_ignore(f)}) self.assertEqual(scan_set, self.TEST_EXCLUDE_FILES) def _mk_target(self, test_name): return '{}:{}'.format(self.PATH_PREFIX, test_name) def _extract_exclude_output(self, test_name): stdout_data = self.do_command('filemap', self._mk_target(test_name), success=True).stdout_data return {s.split(' ')[0].replace(self.PATH_PREFIX, '') for s in stdout_data.split('\n') if s.startswith(self.PATH_PREFIX)} def test_testprojects(self): self.do_command('filemap', 'testprojects::', success=True) def test_python_sources(self): run = self.do_command('filemap', 'testprojects/src/python/sources', success=True) self.assertIn('testprojects/src/python/sources/sources.py', run.stdout_data) def test_exclude_invalid_string(self): build_path = os.path.join(self.PATH_PREFIX, 'BUILD.invalid') build_content = '''python_library(name='exclude_strings_disallowed', sources=rglobs('*.py', exclude='aa.py'))''' with self.temporary_file_content(build_path, build_content): pants_run = self.do_command('filemap', self._mk_target('exclude_strings_disallowed'), success=False) self.assertRegexpMatches(pants_run.stderr_data, r'Excludes of type `.*` are not supported') def test_exclude_list_of_strings(self): test_out = self._extract_exclude_output('exclude_list_of_strings') self.assertEqual(self.TEST_EXCLUDE_FILES - {'aaa.py', 'dir1/aaa.py'}, test_out) def test_exclude_globs(self): test_out = self._extract_exclude_output('exclude_globs') self.assertEqual(self.TEST_EXCLUDE_FILES - {'aabb.py', 'dir1/dirdir1/aa.py'}, test_out) def test_exclude_strings(self): test_out = self._extract_exclude_output('exclude_strings') self.assertEqual(self.TEST_EXCLUDE_FILES - {'aa.py', 'ab.py'}, test_out) def test_exclude_set(self): test_out = self._extract_exclude_output('exclude_set') self.assertEqual(self.TEST_EXCLUDE_FILES - {'aaa.py', 'a.py'}, test_out) def test_exclude_rglobs(self): test_out = self._extract_exclude_output('exclude_rglobs') self.assertEqual(self.TEST_EXCLUDE_FILES - {'ab.py', 'aabb.py', 'dir1/ab.py', 'dir1/aabb.py', 'dir1/dirdir1/ab.py'}, test_out) def test_exclude_zglobs(self): test_out = self._extract_exclude_output('exclude_zglobs') self.assertEqual(self.TEST_EXCLUDE_FILES - {'ab.py', 'aabb.py', 'dir1/ab.py', 'dir1/aabb.py', 'dir1/dirdir1/ab.py'}, test_out) def test_exclude_composite(self): test_out = self._extract_exclude_output('exclude_composite') self.assertEqual(self.TEST_EXCLUDE_FILES - {'a.py', 'aaa.py', 'dir1/a.py', 'dir1/dirdir1/a.py'}, test_out) def 
test_implicit_sources(self): test_out = self._extract_exclude_output('implicit_sources') self.assertEqual({'a.py', 'aa.py', 'aaa.py', 'aabb.py', 'ab.py'}, test_out) test_out = self._extract_exclude_output('test_with_implicit_sources') self.assertEqual({'test_a.py'}, test_out)
py
1a39e2a7ee171056c233df29e26c37a8f29b4a64
"""AccountReports API Version 1.0. This API client was generated using a template. Make sure this code is valid before using it. """ import logging from datetime import date, datetime from .base import BaseCanvasAPI from .base import BaseModel class AccountReportsAPI(BaseCanvasAPI): """AccountReports API Version 1.0.""" def __init__(self, *args, **kwargs): """Init method for AccountReportsAPI.""" super(AccountReportsAPI, self).__init__(*args, **kwargs) self.logger = logging.getLogger("py3canvas.AccountReportsAPI") def list_available_reports(self, account_id): """ List Available Reports. Returns a paginated list of reports for the current context. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ ID """ path["account_id"] = account_id self.logger.debug( "GET /api/v1/accounts/{account_id}/reports with query params: {params} and form data: {data}".format( params=params, data=data, **path ) ) return self.generic_request( "GET", "/api/v1/accounts/{account_id}/reports".format(**path), data=data, params=params, no_data=True, ) def start_report( self, account_id, report, parameters=None, parameters_course_id=None, parameters_users=None, ): """ Start a Report. Generates a report instance for the account. Note that "report" in the request must match one of the available report names. To fetch a list of available report names and parameters for each report (including whether or not those parameters are required), see {api:AccountReportsController#available_reports List Available Reports}. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ ID """ path["account_id"] = account_id # REQUIRED - PATH - report """ ID """ path["report"] = report # OPTIONAL - parameters """ The parameters will vary for each report. To fetch a list of available parameters for each report, see {api:AccountReportsController#available_reports List Available Reports}. A few example parameters have been provided below. Note that the example parameters provided below may not be valid for every report. """ if parameters is not None: data["parameters"] = parameters # OPTIONAL - parameters[course_id] """ The id of the course to report on. Note: this parameter has been listed to serve as an example and may not be valid for every report. """ if parameters_course_id is not None: data["parameters[course_id]"] = parameters_course_id # OPTIONAL - parameters[users] """ If true, user data will be included. If false, user data will be omitted. Note: this parameter has been listed to serve as an example and may not be valid for every report. """ if parameters_users is not None: data["parameters[users]"] = parameters_users self.logger.debug( "POST /api/v1/accounts/{account_id}/reports/{report} with query params: {params} and form data: {data}".format( params=params, data=data, **path ) ) return self.generic_request( "POST", "/api/v1/accounts/{account_id}/reports/{report}".format(**path), data=data, params=params, single_item=True, ) def index_of_reports(self, account_id, report): """ Index of Reports. Shows all reports that have been run for the account of a specific type. 
""" path = {} data = {} params = {} # REQUIRED - PATH - account_id """ ID """ path["account_id"] = account_id # REQUIRED - PATH - report """ ID """ path["report"] = report self.logger.debug( "GET /api/v1/accounts/{account_id}/reports/{report} with query params: {params} and form data: {data}".format( params=params, data=data, **path ) ) return self.generic_request( "GET", "/api/v1/accounts/{account_id}/reports/{report}".format(**path), data=data, params=params, all_pages=True, ) def status_of_report(self, account_id, id, report): """ Status of a Report. Returns the status of a report. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ ID """ path["account_id"] = account_id # REQUIRED - PATH - report """ ID """ path["report"] = report # REQUIRED - PATH - id """ ID """ path["id"] = id self.logger.debug( "GET /api/v1/accounts/{account_id}/reports/{report}/{id} with query params: {params} and form data: {data}".format( params=params, data=data, **path ) ) return self.generic_request( "GET", "/api/v1/accounts/{account_id}/reports/{report}/{id}".format(**path), data=data, params=params, single_item=True, ) def delete_report(self, account_id, id, report): """ Delete a Report. Deletes a generated report instance. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ ID """ path["account_id"] = account_id # REQUIRED - PATH - report """ ID """ path["report"] = report # REQUIRED - PATH - id """ ID """ path["id"] = id self.logger.debug( "DELETE /api/v1/accounts/{account_id}/reports/{report}/{id} with query params: {params} and form data: {data}".format( params=params, data=data, **path ) ) return self.generic_request( "DELETE", "/api/v1/accounts/{account_id}/reports/{report}/{id}".format(**path), data=data, params=params, single_item=True, ) class Report(BaseModel): """Report Model.""" def __init__( self, id=None, report=None, file_url=None, attachment=None, status=None, created_at=None, started_at=None, ended_at=None, parameters=None, progress=None, current_line=None, ): """Init method for Report class.""" self._id = id self._report = report self._file_url = file_url self._attachment = attachment self._status = status self._created_at = created_at self._started_at = started_at self._ended_at = ended_at self._parameters = parameters self._progress = progress self._current_line = current_line self.logger = logging.getLogger("py3canvas.Report") @property def id(self): """The unique identifier for the report.""" return self._id @id.setter def id(self, value): """Setter for id property.""" self.logger.warn( "Setting values on id will NOT update the remote Canvas instance." ) self._id = value @property def report(self): """The type of report.""" return self._report @report.setter def report(self, value): """Setter for report property.""" self.logger.warn( "Setting values on report will NOT update the remote Canvas instance." ) self._report = value @property def file_url(self): """The url to the report download.""" return self._file_url @file_url.setter def file_url(self, value): """Setter for file_url property.""" self.logger.warn( "Setting values on file_url will NOT update the remote Canvas instance." ) self._file_url = value @property def attachment(self): """The attachment api object of the report. Only available after the report has completed.""" return self._attachment @attachment.setter def attachment(self, value): """Setter for attachment property.""" self.logger.warn( "Setting values on attachment will NOT update the remote Canvas instance." 
) self._attachment = value @property def status(self): """The status of the report.""" return self._status @status.setter def status(self, value): """Setter for status property.""" self.logger.warn( "Setting values on status will NOT update the remote Canvas instance." ) self._status = value @property def created_at(self): """The date and time the report was created.""" return self._created_at @created_at.setter def created_at(self, value): """Setter for created_at property.""" self.logger.warn( "Setting values on created_at will NOT update the remote Canvas instance." ) self._created_at = value @property def started_at(self): """The date and time the report started processing.""" return self._started_at @started_at.setter def started_at(self, value): """Setter for started_at property.""" self.logger.warn( "Setting values on started_at will NOT update the remote Canvas instance." ) self._started_at = value @property def ended_at(self): """The date and time the report finished processing.""" return self._ended_at @ended_at.setter def ended_at(self, value): """Setter for ended_at property.""" self.logger.warn( "Setting values on ended_at will NOT update the remote Canvas instance." ) self._ended_at = value @property def parameters(self): """The report parameters.""" return self._parameters @parameters.setter def parameters(self, value): """Setter for parameters property.""" self.logger.warn( "Setting values on parameters will NOT update the remote Canvas instance." ) self._parameters = value @property def progress(self): """The progress of the report.""" return self._progress @progress.setter def progress(self, value): """Setter for progress property.""" self.logger.warn( "Setting values on progress will NOT update the remote Canvas instance." ) self._progress = value @property def current_line(self): """This is the current line count being written to the report. It updates every 1000 records.""" return self._current_line @current_line.setter def current_line(self, value): """Setter for current_line property.""" self.logger.warn( "Setting values on current_line will NOT update the remote Canvas instance." ) self._current_line = value class Reportparameters(BaseModel): """Reportparameters Model. 
The parameters returned will vary for each report.""" def __init__( self, enrollment_term_id=None, include_deleted=None, course_id=None, order=None, users=None, accounts=None, terms=None, courses=None, sections=None, enrollments=None, groups=None, xlist=None, sis_terms_csv=None, sis_accounts_csv=None, include_enrollment_state=None, enrollment_state=None, start_at=None, end_at=None, ): """Init method for Reportparameters class.""" self._enrollment_term_id = enrollment_term_id self._include_deleted = include_deleted self._course_id = course_id self._order = order self._users = users self._accounts = accounts self._terms = terms self._courses = courses self._sections = sections self._enrollments = enrollments self._groups = groups self._xlist = xlist self._sis_terms_csv = sis_terms_csv self._sis_accounts_csv = sis_accounts_csv self._include_enrollment_state = include_enrollment_state self._enrollment_state = enrollment_state self._start_at = start_at self._end_at = end_at self.logger = logging.getLogger("py3canvas.Reportparameters") @property def enrollment_term_id(self): """The canvas id of the term to get grades from.""" return self._enrollment_term_id @enrollment_term_id.setter def enrollment_term_id(self, value): """Setter for enrollment_term_id property.""" self.logger.warn( "Setting values on enrollment_term_id will NOT update the remote Canvas instance." ) self._enrollment_term_id = value @property def include_deleted(self): """If true, deleted objects will be included. If false, deleted objects will be omitted.""" return self._include_deleted @include_deleted.setter def include_deleted(self, value): """Setter for include_deleted property.""" self.logger.warn( "Setting values on include_deleted will NOT update the remote Canvas instance." ) self._include_deleted = value @property def course_id(self): """The id of the course to report on.""" return self._course_id @course_id.setter def course_id(self, value): """Setter for course_id property.""" self.logger.warn( "Setting values on course_id will NOT update the remote Canvas instance." ) self._course_id = value @property def order(self): """The sort order for the csv, Options: 'users', 'courses', 'outcomes'.""" return self._order @order.setter def order(self, value): """Setter for order property.""" self.logger.warn( "Setting values on order will NOT update the remote Canvas instance." ) self._order = value @property def users(self): """If true, user data will be included. If false, user data will be omitted.""" return self._users @users.setter def users(self, value): """Setter for users property.""" self.logger.warn( "Setting values on users will NOT update the remote Canvas instance." ) self._users = value @property def accounts(self): """If true, account data will be included. If false, account data will be omitted.""" return self._accounts @accounts.setter def accounts(self, value): """Setter for accounts property.""" self.logger.warn( "Setting values on accounts will NOT update the remote Canvas instance." ) self._accounts = value @property def terms(self): """If true, term data will be included. If false, term data will be omitted.""" return self._terms @terms.setter def terms(self, value): """Setter for terms property.""" self.logger.warn( "Setting values on terms will NOT update the remote Canvas instance." ) self._terms = value @property def courses(self): """If true, course data will be included. 
If false, course data will be omitted.""" return self._courses @courses.setter def courses(self, value): """Setter for courses property.""" self.logger.warn( "Setting values on courses will NOT update the remote Canvas instance." ) self._courses = value @property def sections(self): """If true, section data will be included. If false, section data will be omitted.""" return self._sections @sections.setter def sections(self, value): """Setter for sections property.""" self.logger.warn( "Setting values on sections will NOT update the remote Canvas instance." ) self._sections = value @property def enrollments(self): """If true, enrollment data will be included. If false, enrollment data will be omitted.""" return self._enrollments @enrollments.setter def enrollments(self, value): """Setter for enrollments property.""" self.logger.warn( "Setting values on enrollments will NOT update the remote Canvas instance." ) self._enrollments = value @property def groups(self): """If true, group data will be included. If false, group data will be omitted.""" return self._groups @groups.setter def groups(self, value): """Setter for groups property.""" self.logger.warn( "Setting values on groups will NOT update the remote Canvas instance." ) self._groups = value @property def xlist(self): """If true, data for crosslisted courses will be included. If false, data for crosslisted courses will be omitted.""" return self._xlist @xlist.setter def xlist(self, value): """Setter for xlist property.""" self.logger.warn( "Setting values on xlist will NOT update the remote Canvas instance." ) self._xlist = value @property def sis_terms_csv(self): """sis_terms_csv.""" return self._sis_terms_csv @sis_terms_csv.setter def sis_terms_csv(self, value): """Setter for sis_terms_csv property.""" self.logger.warn( "Setting values on sis_terms_csv will NOT update the remote Canvas instance." ) self._sis_terms_csv = value @property def sis_accounts_csv(self): """sis_accounts_csv.""" return self._sis_accounts_csv @sis_accounts_csv.setter def sis_accounts_csv(self, value): """Setter for sis_accounts_csv property.""" self.logger.warn( "Setting values on sis_accounts_csv will NOT update the remote Canvas instance." ) self._sis_accounts_csv = value @property def include_enrollment_state(self): """If true, enrollment state will be included. If false, enrollment state will be omitted. Defaults to false.""" return self._include_enrollment_state @include_enrollment_state.setter def include_enrollment_state(self, value): """Setter for include_enrollment_state property.""" self.logger.warn( "Setting values on include_enrollment_state will NOT update the remote Canvas instance." ) self._include_enrollment_state = value @property def enrollment_state(self): """Include enrollment state. Defaults to 'all' Options: ['active'| 'invited'| 'creation_pending'| 'deleted'| 'rejected'| 'completed'| 'inactive'| 'all'].""" return self._enrollment_state @enrollment_state.setter def enrollment_state(self, value): """Setter for enrollment_state property.""" self.logger.warn( "Setting values on enrollment_state will NOT update the remote Canvas instance." ) self._enrollment_state = value @property def start_at(self): """The beginning date for submissions. Max time range is 2 weeks.""" return self._start_at @start_at.setter def start_at(self, value): """Setter for start_at property.""" self.logger.warn( "Setting values on start_at will NOT update the remote Canvas instance." ) self._start_at = value @property def end_at(self): """The end date for submissions. 
Max time range is 2 weeks.""" return self._end_at @end_at.setter def end_at(self, value): """Setter for end_at property.""" self.logger.warn( "Setting values on end_at will NOT update the remote Canvas instance." ) self._end_at = value
py
1a39e2d3204be70809d8aa8c51568e29d1a5e90b
from torch.nn import BCELoss
import time

import torch


def criterion(pred, gt):
    # weight_map = torch.ones_like(gt)
    # weight_map[gt >= 0.5] *= 100
    # weight_map[gt < 0.5] *= 0.01
    bceloss = BCELoss()
    loss = bceloss(pred, gt)
    return loss


def iou_loss(pred, gt):
    # B, _, H, W = pred.size()
    intersect = (pred * gt).sum()
    union = pred.sum() + gt.sum() - intersect
    loss = 1 - intersect / union
    return loss


def get_time():
    T = time.strftime('%m.%d.%H.%M.%S', time.localtime())
    return T


def get_info(head, loss_dict):
    for k, v in loss_dict.items():
        head += '{}:{:6f} '.format(k, v)
    return head
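A minimal usage sketch with made-up tensor shapes: BCELoss expects predictions already squashed to [0, 1] (e.g. sigmoid outputs) with matching-shape targets.

# Illustrative only; the 2x1x4x4 shape is an assumption.
pred = torch.sigmoid(torch.randn(2, 1, 4, 4))
gt = (torch.rand(2, 1, 4, 4) > 0.5).float()
print(criterion(pred, gt).item(), iou_loss(pred, gt).item())
print(get_info(get_time() + ' ', {'bce': criterion(pred, gt).item()}))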
py
1a39e2dd0dd907a18525e5bb52b946189c39d945
import json import zipfile import os import sys import pytest from click.testing import CliRunner import mock from chalice import cli from chalice.cli import factory from chalice.config import Config from chalice.utils import record_deployed_values from chalice import local from chalice.constants import DEFAULT_APIGATEWAY_STAGE_NAME @pytest.fixture def runner(): return CliRunner() @pytest.fixture def mock_cli_factory(): cli_factory = mock.Mock(spec=factory.CLIFactory) cli_factory.create_config_obj.return_value = Config.create(project_dir='.') cli_factory.create_botocore_session.return_value = mock.sentinel.Session return cli_factory def assert_chalice_app_structure_created(dirname): app_contents = os.listdir(os.path.join(os.getcwd(), dirname)) assert 'app.py' in app_contents assert 'requirements.txt' in app_contents assert '.chalice' in app_contents assert '.gitignore' in app_contents def _run_cli_command(runner, function, args, cli_factory=None): # Handles passing in 'obj' so we can get commands # that use @pass_context to work properly. # click doesn't support this natively so we have to duplicate # what 'def cli(...)' is doing. if cli_factory is None: cli_factory = factory.CLIFactory('.') result = runner.invoke( function, args, obj={'project_dir': '.', 'debug': False, 'factory': cli_factory}) return result def test_create_new_project_creates_app(runner): with runner.isolated_filesystem(): result = runner.invoke(cli.new_project, ['testproject']) assert result.exit_code == 0 # The 'new-project' command creates a directory based on # the project name assert os.listdir(os.getcwd()) == ['testproject'] assert_chalice_app_structure_created(dirname='testproject') def test_create_project_with_prompted_app_name(runner): with runner.isolated_filesystem(): result = runner.invoke(cli.new_project, input='testproject') assert result.exit_code == 0 assert os.listdir(os.getcwd()) == ['testproject'] assert_chalice_app_structure_created(dirname='testproject') def test_error_raised_if_dir_already_exists(runner): with runner.isolated_filesystem(): os.mkdir('testproject') result = runner.invoke(cli.new_project, ['testproject']) assert result.exit_code == 1 assert 'Directory already exists: testproject' in result.output def test_can_load_project_config_after_project_creation(runner): with runner.isolated_filesystem(): result = runner.invoke(cli.new_project, ['testproject']) assert result.exit_code == 0 config = factory.CLIFactory('testproject').load_project_config() assert config == { 'version': '2.0', 'app_name': 'testproject', 'stages': { 'dev': {'api_gateway_stage': DEFAULT_APIGATEWAY_STAGE_NAME}, } } def test_default_new_project_adds_index_route(runner): with runner.isolated_filesystem(): result = runner.invoke(cli.new_project, ['testproject']) assert result.exit_code == 0 app = factory.CLIFactory('testproject').load_chalice_app() assert '/' in app.routes def test_gen_policy_command_creates_policy(runner): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = runner.invoke(cli.cli, ['gen-policy'], obj={}) assert result.exit_code == 0 # The output should be valid JSON. parsed_policy = json.loads(result.output) # We don't want to validate the specific parts of the policy # (that's tested elsewhere), but we'll check to make sure # it looks like a policy document. 
assert 'Version' in parsed_policy assert 'Statement' in parsed_policy def test_can_package_command(runner): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = _run_cli_command(runner, cli.package, ['outdir']) assert result.exit_code == 0, result.output assert os.path.isdir('outdir') dir_contents = os.listdir('outdir') assert 'sam.json' in dir_contents assert 'deployment.zip' in dir_contents def test_can_package_with_single_file(runner): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = _run_cli_command( runner, cli.package, ['--single-file', 'package.zip']) assert result.exit_code == 0, result.output assert os.path.isfile('package.zip') with zipfile.ZipFile('package.zip', 'r') as f: assert sorted(f.namelist()) == ['deployment.zip', 'sam.json'] def test_does_deploy_with_default_api_gateway_stage_name(runner, mock_cli_factory): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') # This isn't perfect as we're assuming we know how to # create the config_obj like the deploy() command does, # it should give us more confidence that the api gateway # stage defaults are still working. cli_factory = factory.CLIFactory('.') config = cli_factory.create_config_obj( chalice_stage_name='dev', autogen_policy=None, api_gateway_stage=None ) assert config.api_gateway_stage == DEFAULT_APIGATEWAY_STAGE_NAME def test_can_specify_api_gateway_stage(runner, mock_cli_factory): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = _run_cli_command(runner, cli.deploy, ['--api-gateway-stage', 'notdev'], cli_factory=mock_cli_factory) assert result.exit_code == 0 mock_cli_factory.create_config_obj.assert_called_with( autogen_policy=None, chalice_stage_name='dev', api_gateway_stage='notdev' ) def test_can_deploy_specify_connection_timeout(runner, mock_cli_factory): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = _run_cli_command(runner, cli.deploy, ['--connection-timeout', 100], cli_factory=mock_cli_factory) assert result.exit_code == 0 mock_cli_factory.create_botocore_session.assert_called_with( connection_timeout=100 ) def test_can_retrieve_url(runner, mock_cli_factory): deployed_values_dev = { "schema_version": "2.0", "resources": [ {"rest_api_url": "https://dev-url/", "name": "rest_api", "resource_type": "rest_api"}, ] } deployed_values_prod = { "schema_version": "2.0", "resources": [ {"rest_api_url": "https://prod-url/", "name": "rest_api", "resource_type": "rest_api"}, ] } with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') deployed_dir = os.path.join('.chalice', 'deployed') os.makedirs(deployed_dir) record_deployed_values( deployed_values_dev, os.path.join(deployed_dir, 'dev.json') ) record_deployed_values( deployed_values_prod, os.path.join(deployed_dir, 'prod.json') ) result = _run_cli_command(runner, cli.url, [], cli_factory=mock_cli_factory) assert result.exit_code == 0 assert result.output == 'https://dev-url/\n' prod_result = _run_cli_command(runner, cli.url, ['--stage', 'prod'], cli_factory=mock_cli_factory) assert prod_result.exit_code == 0 assert prod_result.output == 'https://prod-url/\n' def test_error_when_no_deployed_record(runner, mock_cli_factory): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') 
os.chdir('testproject') result = _run_cli_command(runner, cli.url, [], cli_factory=mock_cli_factory) assert result.exit_code == 2 assert 'not find' in result.output @pytest.mark.skipif(sys.version_info[0] == 3, reason=('Python Version 3 cannot create pipelines due to ' 'CodeBuild not having a Python 3.6 image. This ' 'mark can be removed when that image exists.')) def test_can_generate_pipeline_for_all(runner): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = _run_cli_command( runner, cli.generate_pipeline, ['pipeline.json']) assert result.exit_code == 0, result.output assert os.path.isfile('pipeline.json') with open('pipeline.json', 'r') as f: template = json.load(f) # The actual contents are tested in the unit # tests. Just a sanity check that it looks right. assert "AWSTemplateFormatVersion" in template assert "Outputs" in template def test_no_errors_if_override_codebuild_image(runner): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = _run_cli_command( runner, cli.generate_pipeline, ['-i', 'python:3.6.1', 'pipeline.json']) assert result.exit_code == 0, result.output assert os.path.isfile('pipeline.json') with open('pipeline.json', 'r') as f: template = json.load(f) # The actual contents are tested in the unit # tests. Just a sanity check that it looks right. image = template['Parameters']['CodeBuildImage']['Default'] assert image == 'python:3.6.1' def test_can_configure_github(runner): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') # The -i option is provided so we don't have to skip this # test on python3.6 result = _run_cli_command( runner, cli.generate_pipeline, ['--source', 'github', '-i' 'python:3.6.1', 'pipeline.json']) assert result.exit_code == 0, result.output assert os.path.isfile('pipeline.json') with open('pipeline.json', 'r') as f: template = json.load(f) # The template is already tested in the unit tests # for template generation. We just want a basic # sanity check to make sure things are mapped # properly. assert 'GithubOwner' in template['Parameters'] assert 'GithubRepoName' in template['Parameters'] def test_can_extract_buildspec_yaml(runner): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = _run_cli_command( runner, cli.generate_pipeline, ['--buildspec-file', 'buildspec.yml', '-i', 'python:3.6.1', 'pipeline.json']) assert result.exit_code == 0, result.output assert os.path.isfile('buildspec.yml') with open('buildspec.yml') as f: data = f.read() # The contents of this file are tested elsewhere, # we just want a basic sanity check here. 
assert 'chalice package' in data def test_env_vars_set_in_local(runner, mock_cli_factory, monkeypatch): local_server = mock.Mock(spec=local.LocalDevServer) mock_cli_factory.create_local_server.return_value = local_server mock_cli_factory.create_config_obj.return_value = Config.create( project_dir='.', environment_variables={'foo': 'bar'}) actual_env = {} monkeypatch.setattr(os, 'environ', actual_env) with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') _run_cli_command(runner, cli.local, [], cli_factory=mock_cli_factory) assert actual_env['foo'] == 'bar' def test_can_specify_profile_for_logs(runner, mock_cli_factory): with runner.isolated_filesystem(): cli.create_new_project_skeleton('testproject') os.chdir('testproject') result = _run_cli_command( runner, cli.logs, ['--profile', 'my-profile'], cli_factory=mock_cli_factory ) assert result.exit_code == 0 assert mock_cli_factory.profile == 'my-profile'
py
1a39e2ee24ee867ea6888714b73baffde7db98c6
import os

import pandas as pd


def read_synchronisation_file(experiment_root):
    filepath = os.path.join(experiment_root, "labels", "synchronisation.csv")
    return pd.read_csv(filepath)


def convert_timestamps(experiment_root, timestamps, from_reference, to_reference):
    """
    Convert numeric timestamps (seconds from start of the video or posix timestamps) of a reference
    time (e.g. P3_eyetracker) to a different reference time (e.g. video time)

    Parameters
    ----------
    experiment_root: str
        Root of the current experiment (to find the right synchronisation matrix)
    timestamps: float or array like
        timestamps to be converted
    from_reference: str
        name of the reference of the original timestamps
    to_reference: str
        name of the reference time the timestamps have to be converted to

    Returns
    -------
    converted_timestamps: float or array like
        Timestamps given in to_reference time values
    """
    synchronisation_file = read_synchronisation_file(experiment_root)
    offset = synchronisation_file.loc[synchronisation_file["from"] == from_reference, to_reference].values[0]
    converted_timestamps = timestamps + offset
    return converted_timestamps


if __name__ == '__main__':
    exp_root = "/Volumes/DataDrive/igroups_recordings/igroups_experiment_8"
    print(convert_timestamps(exp_root, [1482326641, 1482326642], "P3_eyetracker", "video"))
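A hedged sketch of the lookup the function performs: synchronisation.csv is assumed to have a "from" column plus one column per target reference, each cell holding the additive offset. The column names and offset values below are invented for illustration.

import pandas as pd

# Hypothetical synchronisation table; only the "from" column name is taken from the code above.
sync = pd.DataFrame({"from": ["P3_eyetracker", "video"],
                     "video": [-1482326000.0, 0.0],
                     "P3_eyetracker": [0.0, 1482326000.0]})
offset = sync.loc[sync["from"] == "P3_eyetracker", "video"].values[0]
print([t + offset for t in [1482326641, 1482326642]])  # -> [641.0, 642.0]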
py
1a39e3456d4013db296f6e476742b11a2dd88d62
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.

import unittest

from pyiron_base.job.template import PythonTemplateJob
from pyiron_base._tests import TestWithProject


class ToyJob(PythonTemplateJob):
    def __init__(self, project, job_name):
        super(ToyJob, self).__init__(project, job_name)
        self.input['input_energy'] = 100

    def run_static(self):
        with self.project_hdf5.open("output/generic") as h5out:
            h5out["energy_tot"] = self.input["input_energy"]
        self.status.finished = True


class TestProjectData(TestWithProject):

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        for i, c in enumerate("abcd"):
            j = cls.project.create_job(ToyJob, f"test_{c}")
            j.input['input_energy'] = i
            j.run()

    def setUp(self):
        self.table = self.project.create.table('test_table')
        self.table.filter_function = lambda j: j.name in ["test_a", "test_b"]
        self.table.add['name'] = lambda j: j.name
        self.table.run()

    def tearDown(self):
        self.project.remove_job(self.table.name)

    def test_filter(self):
        """Filter functions should restrict jobs included in the table."""
        df = self.table.get_dataframe()
        self.assertEqual(2, len(df), "Table not correctly filtered.")
        self.assertEqual(["test_a", "test_b"], df.name.to_list(), "Table not correctly filtered.")

    def test_filter_reload(self):
        """Lambdas should work as filter functions even if read from HDF5."""
        try:
            table_loaded = self.project.load(self.table.name)
        except:
            self.fail("Error on reloading table with filter lambda.")


if __name__ == '__main__':
    unittest.main()
py
1a39e37c37b90f7e1b111d6d13a4f64b30df51f2
""" Calculations that deal with seismic moment tensors. Notes from Lay and Wallace Chapter 8: * Decomposition 1: Mij = isotropic + deviatoric * Decomposition 2: Mij = isotropic + 3 vector dipoles * Decomposition 3: Mij = isotropic + 3 double couples * Decomposition 4: Mij = isotropic + 3 CLVDs * Decomposition 5: Mij = isotropic + major DC + minor DC * Decomposition 6: Mij = isotropic + DC + CLVD The most useful in practice are Decomposition 1 and Decomposition 6. """ import numpy as np def get_MT(mrr, mtt, mpp, mrt, mrp, mtp): """Build a matrix from the six components of the moment tensor""" MT = np.array([[mrr, mrt, mrp], [mrt, mtt, mtp], [mrp, mtp, mpp]]); return MT; def diagonalize_MT(MT): """Return a diagonal matrix whose elements are the ordered eigenvalues.""" eigvals, eigvecs = np.linalg.eig(MT); eigvals = sorted(eigvals)[::-1]; return np.diag(eigvals); def get_deviatoric_MT(MT): """Get deviatoric MT (returns a matrix)""" iso_MT = get_iso_MT(MT); M_dev = np.subtract(MT, iso_MT); return M_dev; def get_iso_MT(MT): """Return the isotropic moment tensor (returns a matrix)""" x = (1 / 3) * np.trace(MT); iso_MT = np.multiply(np.eye(3), x); return iso_MT def get_clvd_dc_from_deviatoric_MT(MT): """ Return the dc and clvd components of a deviatoric MT, from Shearer Equation 9.14. Returns two matricies. """ eigenvalues = np.diag(MT); assert(eigenvalues[0] > eigenvalues[1] > eigenvalues[2]), ValueError("Deviatoric eigenvalues out of order.") dc_component = (1/2)*(eigenvalues[0]-eigenvalues[2]); clvd_component = eigenvalues[1]*(1/2); M_dc = np.diag([dc_component, 0, -dc_component]); M_clvd = np.diag([-clvd_component, 2*clvd_component, -clvd_component]); return M_clvd, M_dc; def decompose_iso_dc_clvd(MT): """ A useful function to decompose a full moment tensor into an isotropic part, a double-couple, and a CLVD component. Returns three matrices. """ diag_MT = diagonalize_MT(MT); # equivalent to a coordinate transformation M_iso = get_iso_MT(diag_MT); # get the trace M_dev = get_deviatoric_MT(diag_MT); M_dev = diagonalize_MT(M_dev); # diagonalized in the proper order M_clvd, M_dc = get_clvd_dc_from_deviatoric_MT(M_dev); return M_iso, M_clvd, M_dc; # def get_separate_scalar_moments(MT): # """return isotropic, clvd, and double couple moments. Not frequently used.""" # M_iso, M_clvd, M_dc = decompose_iso_dc_clvd(MT); # iso_moment = abs(M_iso[0][0]); # clvd_moment = abs(M_clvd[0][0]); # dc_moment = abs(M_dc[0][0]); # return iso_moment, clvd_moment, dc_moment; def get_total_scalar_moment(MT): """Shearer Equation 9.8: quadratic sum of element of moment tensor components, in newton-meters""" MT = np.divide(MT, 1e16); # done to prevent computer buffer overflow total = 0; for i in range(3): for j in range(3): total = total + MT[i][j]*MT[i][j]; Mo = (1/np.sqrt(2)) * np.sqrt(total); Mo = np.multiply(Mo, 1e16); return Mo; def get_percent_double_couple(MT): """Get the percent double couple and percent clvd moment from a deviatoric moment tensor. When isotropic term is involved, this can get more complicated and there are several approaches. See Shearer equation 9.17 for epsilon. See Vavrycuk, 2001 for other approaches when isotropic component is involved. """ m_dev = diagonalize_MT(get_deviatoric_MT(MT)); epsilon = np.diag(m_dev)[1] / np.max([np.abs(np.diag(m_dev)[0]), np.abs(np.diag(m_dev)[2])]); fraction = epsilon * 2; perc_clvd = 100 * (abs(fraction)); perc_dc = 100 - perc_clvd; return perc_dc, perc_clvd;
py
1a39e4db1eeb2e5f8952dd585719cf322469ad59
# Copyright 2019 ZTE corporation. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

from typing import Any, Mapping, NamedTuple, Optional, Sequence
from itertools import zip_longest

from . import utilities
from .models.data_format import DataFormat


def get_tensor_by_fuzzy_name(graph, name):
    if ':' in name:
        tensor = graph.get_tensor_by_name(name)
    else:
        tensor = graph.get_operation_by_name(name).outputs[0]
    return tensor


class Config(NamedTuple):
    input_names: Optional[Sequence[str]]
    data_formats: Sequence[Optional[DataFormat]]
    output_names: Optional[Sequence[str]]

    @staticmethod
    def from_json(value: Mapping[str, Any]) -> 'Config':
        return Config(input_names=value.get('input_names'),
                      data_formats=utilities.get_data_formats(value.get('input_formats')),
                      output_names=value.get('output_names'))

    @staticmethod
    def from_env(env: Mapping[str, str]) -> 'Config':
        return Config(input_names=utilities.split_by(env.get('INPUT_NAMES'), ','),
                      data_formats=utilities.get_data_formats(utilities.split_by(env.get('INPUT_FORMATS'), ',')),
                      output_names=utilities.split_by(env.get('OUTPUT_NAMES'), ','))

    def get_input_tensors_from_graph(self, graph):
        if self.input_names is None:
            input_tensors = [operation.outputs[0]
                             for operation in graph.get_operations()
                             if operation.type == 'Placeholder']
        else:
            input_tensors = [get_tensor_by_fuzzy_name(graph, name) for name in self.input_names]
        return input_tensors

    def get_output_tensors_from_graph(self, graph):
        if self.output_names is None:
            output_tensors = [output_tensor
                              for operation in graph.get_operations()
                              if operation.type not in ['Assign', 'Const', 'Identity',
                                                        'IsVariableInitialized', 'NoOp', 'Placeholder',
                                                        'SaveV2', 'VarIsInitializedOp']
                              for output_tensor in operation.outputs
                              if not output_tensor.consumers()]
        else:
            output_tensors = [get_tensor_by_fuzzy_name(graph, name) for name in self.output_names]
        return output_tensors


def get_inputs(graph, config):
    return zip_longest(config.get_input_tensors_from_graph(graph), config.data_formats)
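A hedged usage sketch: the exact behaviour of utilities.split_by and utilities.get_data_formats is assumed here (comma-splitting and mapping format strings to DataFormat values), since those helpers live in sibling modules not shown in this file, and the tensor/format names are invented.

# Hypothetical environment; names and formats are assumptions for illustration.
config = Config.from_env({
    'INPUT_NAMES': 'input_1:0,input_2:0',
    'INPUT_FORMATS': 'channels_last,channels_last',
    'OUTPUT_NAMES': 'softmax',
})
# With a loaded tf.Graph, (input tensor, data format) pairs would come from:
# inputs = list(get_inputs(graph, config))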
py
1a39e50257fbddc2eb675087dd06e0c681b1c2dd
#!/usr/bin/env python3
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Command line interface for mcuxpresso_builder."""

import argparse
import pathlib
import sys

from pw_build_mcuxpresso import components


def _parse_args() -> argparse.Namespace:
    """Setup argparse and parse command line args."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='command',
                                       metavar='<command>',
                                       required=True)

    project_parser = subparsers.add_parser(
        'project', help='output components of an MCUXpresso project')
    project_parser.add_argument('manifest_filename', type=pathlib.Path)
    project_parser.add_argument('--include', type=str, action='append')
    project_parser.add_argument('--exclude', type=str, action='append')
    project_parser.add_argument('--prefix', dest='path_prefix', type=str)

    return parser.parse_args()


def main():
    """Main command line function."""
    args = _parse_args()

    if args.command == 'project':
        components.project(args.manifest_filename,
                           include=args.include,
                           exclude=args.exclude,
                           path_prefix=args.path_prefix)

    sys.exit(0)


if __name__ == '__main__':
    main()
py
1a39e7fea3362154425660bbdeaa39f5d2a22858
# -*- coding: utf-8 -*- r""" Overconvergent `p`-adic modular forms for small primes This module implements computations of Hecke operators and `U_p`-eigenfunctions on `p`-adic overconvergent modular forms of tame level 1, where `p` is one of the primes `\{2, 3, 5, 7, 13\}`, using the algorithms described in [Loe2007]_. - [Loe2007]_ AUTHORS: - David Loeffler (August 2008): initial version - David Loeffler (March 2009): extensively reworked - Lloyd Kilford (May 2009): add :meth:`~sage.modular.overconvergent.genus0.OverconvergentModularFormsSpace.slopes` method - David Loeffler (June 2009): miscellaneous bug fixes and usability improvements The Theory ~~~~~~~~~~ Let `p` be one of the above primes, so `X_0(p)` has genus 0, and let .. MATH:: f_p = \sqrt[p-1]{\frac{\Delta(pz)}{\Delta(z)}} (an `\eta`-product of level `p` -- see module :mod:`sage.modular.etaproducts`). Then one can show that `f_p` gives an isomorphism `X_0(p) \to \mathbb{P}^1`. Furthermore, if we work over `\CC_p`, the `r`-overconvergent locus on `X_0(p)` (or of `X_0(1)`, via the canonical subgroup lifting), corresponds to the `p`-adic disc .. MATH:: |f_p|_p \le p^{\frac{12r}{p-1}}. (This is Theorem 1 of [Loe2007]_.) Hence if we fix an element `c` with `|c| = p^{-\frac{12r}{p-1}}`, the space `S_k^\dag(1, r)` of overconvergent `p`-adic modular forms has an orthonormal basis given by the functions `(cf)^n`. So any element can be written in the form `E_k \times \sum_{n \ge 0} a_n (cf)^n`, where `a_n \to 0` as `N \to \infty`, and any such sequence `a_n` defines a unique overconvergent form. One can now find the matrix of Hecke operators in this basis, either by calculating `q`-expansions, or (for the special case of `U_p`) using a recurrence formula due to Kolberg. An Extended Example ~~~~~~~~~~~~~~~~~~~ We create a space of 3-adic modular forms:: sage: M = OverconvergentModularForms(3, 8, 1/6, prec=60) Creating an element directly as a linear combination of basis vectors. .. link :: sage: f1 = M.3 + M.5; f1.q_expansion() 27*q^3 + 1055916/1093*q^4 + 19913121/1093*q^5 + 268430112/1093*q^6 + ... sage: f1.coordinates(8) [0, 0, 0, 1, 0, 1, 0, 0] We can coerce from elements of classical spaces of modular forms: .. link :: sage: f2 = M(CuspForms(3, 8).0); f2 3-adic overconvergent modular form of weight-character 8 with q-expansion q + 6*q^2 - 27*q^3 - 92*q^4 + 390*q^5 - 162*q^6 ... We express this in a basis, and see that the coefficients go to zero very fast: .. link :: sage: [x.valuation(3) for x in f2.coordinates(60)] [+Infinity, -1, 3, 6, 10, 13, 18, 20, 24, 27, 31, 34, 39, 41, 45, 48, 52, 55, 61, 62, 66, 69, 73, 76, 81, 83, 87, 90, 94, 97, 102, 104, 108, 111, 115, 118, 124, 125, 129, 132, 136, 139, 144, 146, 150, 153, 157, 160, 165, 167, 171, 174, 178, 181, 188, 188, 192, 195, 199, 202] This form has more level at `p`, and hence is less overconvergent: .. link :: sage: f3 = M(CuspForms(9, 8).0); [x.valuation(3) for x in f3.coordinates(60)] [+Infinity, -1, -1, 0, -4, -4, -2, -3, 0, 0, -1, -1, 1, 0, 3, 3, 3, 3, 5, 3, 7, 7, 6, 6, 8, 7, 10, 10, 8, 8, 10, 9, 12, 12, 12, 12, 14, 12, 17, 16, 15, 15, 17, 16, 19, 19, 18, 18, 20, 19, 22, 22, 22, 22, 24, 21, 25, 26, 24, 24] An error will be raised for forms which are not sufficiently overconvergent: .. link :: sage: M(CuspForms(27, 8).0) Traceback (most recent call last): ... ValueError: Form is not overconvergent enough (form is only 1/12-overconvergent) Let's compute some Hecke operators. Note that the coefficients of this matrix are `p`-adically tiny: .. 
link :: sage: M.hecke_matrix(3, 4).change_ring(Qp(3,prec=1)) [ 1 + O(3) 0 0 0] [ 0 2*3^3 + O(3^4) 2*3^3 + O(3^4) 3^2 + O(3^3)] [ 0 2*3^7 + O(3^8) 2*3^8 + O(3^9) 3^6 + O(3^7)] [ 0 2*3^10 + O(3^11) 2*3^10 + O(3^11) 2*3^9 + O(3^10)] We compute the eigenfunctions of a 4x4 truncation: .. link :: sage: efuncs = M.eigenfunctions(4) sage: for i in [1..3]: ....: print(efuncs[i].q_expansion(prec=4).change_ring(Qp(3,prec=20))) (1 + O(3^20))*q + (2*3 + 3^15 + 3^16 + 3^17 + 2*3^19 + 2*3^20 + O(3^21))*q^2 + (2*3^3 + 2*3^4 + 2*3^5 + 2*3^6 + 2*3^7 + 2*3^8 + 2*3^9 + 2*3^10 + 2*3^11 + 2*3^12 + 2*3^13 + 2*3^14 + 2*3^15 + 2*3^16 + 3^17 + 2*3^18 + 2*3^19 + 3^21 + 3^22 + O(3^23))*q^3 + O(q^4) (1 + O(3^20))*q + (3 + 2*3^2 + 3^3 + 3^4 + 3^12 + 3^13 + 2*3^14 + 3^15 + 2*3^17 + 3^18 + 3^19 + 3^20 + O(3^21))*q^2 + (3^7 + 3^13 + 2*3^14 + 2*3^15 + 3^16 + 3^17 + 2*3^18 + 3^20 + 2*3^21 + 2*3^22 + 2*3^23 + 2*3^25 + O(3^27))*q^3 + O(q^4) (1 + O(3^20))*q + (2*3 + 3^3 + 2*3^4 + 3^6 + 2*3^8 + 3^9 + 3^10 + 2*3^11 + 2*3^13 + 3^16 + 3^18 + 3^19 + 3^20 + O(3^21))*q^2 + (3^9 + 2*3^12 + 3^15 + 3^17 + 3^18 + 3^19 + 3^20 + 2*3^22 + 2*3^23 + 2*3^27 + 2*3^28 + O(3^29))*q^3 + O(q^4) The first eigenfunction is a classical cusp form of level 3: .. link :: sage: (efuncs[1] - M(CuspForms(3, 8).0)).valuation() 13 The second is an Eisenstein series! .. link :: sage: (efuncs[2] - M(EisensteinForms(3, 8).1)).valuation() 10 The third is a genuinely new thing (not a classical modular form at all); the coefficients are almost certainly not algebraic over `\QQ`. Note that the slope is 9, so Coleman's classicality criterion (forms of slope `< k-1` are classical) does not apply. .. link :: sage: a3 = efuncs[3].q_expansion()[3]; a3 3^9 + 2*3^12 + 3^15 + 3^17 + 3^18 + 3^19 + 3^20 + 2*3^22 + 2*3^23 + 2*3^27 + 2*3^28 + 3^32 + 3^33 + 2*3^34 + 3^38 + 2*3^39 + 3^40 + 2*3^41 + 3^44 + 3^45 + 3^46 + 2*3^47 + 2*3^48 + 3^49 + 3^50 + 2*3^51 + 2*3^52 + 3^53 + 2*3^54 + 3^55 + 3^56 + 3^57 + 2*3^58 + 2*3^59 + 3^60 + 2*3^61 + 2*3^63 + 2*3^64 + 3^65 + 2*3^67 + 3^68 + 2*3^69 + 2*3^71 + 3^72 + 2*3^74 + 3^75 + 3^76 + 3^79 + 3^80 + 2*3^83 + 2*3^84 + 3^85 + 2*3^87 + 3^88 + 2*3^89 + 2*3^90 + 2*3^91 + 3^92 + O(3^98) sage: efuncs[3].slope() 9 ----------- """ #***************************************************************************** # Copyright (C) 2008 William Stein <[email protected]> # 2008-9 David Loeffler <[email protected]> # # Distributed under the terms of the GNU General Public License (GPL) # https://www.gnu.org/licenses/ #***************************************************************************** from sage.matrix.all import matrix, MatrixSpace, diagonal_matrix from sage.misc.verbose import verbose from sage.misc.cachefunc import cached_method from sage.modular.all import (trivial_character, EtaProduct, j_invariant_qexp, hecke_operator_on_qexp) from sage.modular.arithgroup.all import is_Gamma0, is_Gamma1 from sage.modular.modform.element import ModularFormElement from sage.modules.all import vector from sage.modules.module import Module from sage.structure.element import Vector, ModuleElement from sage.structure.richcmp import richcmp from sage.plot.plot import plot from sage.rings.all import (O, Infinity, ZZ, QQ, pAdicField, PolynomialRing, PowerSeriesRing, is_pAdicField) import weakref from .weightspace import WeightSpace_constructor as WeightSpace, WeightCharacter __ocmfdict = {} #################### # Factory function # #################### def OverconvergentModularForms(prime, weight, radius, base_ring=QQ, prec = 20, char = None): r""" Create a space of 
overconvergent `p`-adic modular forms of level `\Gamma_0(p)`, over the given base ring. The base ring need not be a `p`-adic ring (the spaces we compute with typically have bases over `\QQ`). INPUT: - ``prime`` - a prime number `p`, which must be one of the primes `\{2, 3, 5, 7, 13\}`, or the congruence subgroup `\Gamma_0(p)` where `p` is one of these primes. - ``weight`` - an integer (which at present must be 0 or `\ge 2`), the weight. - ``radius`` - a rational number in the interval `\left( 0, \frac{p}{p+1} \right)`, the radius of overconvergence. - ``base_ring`` (default: `\QQ`), a ring over which to compute. This need not be a `p`-adic ring. - ``prec`` - an integer (default: 20), the number of `q`-expansion terms to compute. - ``char`` - a Dirichlet character modulo `p` or ``None`` (the default). Here ``None`` is interpreted as the trivial character modulo `p`. The character `\chi` and weight `k` must satisfy `(-1)^k = \chi(-1)`, and the base ring must contain an element `v` such that `{\rm ord}_p(v) = \frac{12 r}{p-1}` where `r` is the radius of overconvergence (and `{\rm ord}_p` is normalised so `{\rm ord}_p(p) = 1`). EXAMPLES:: sage: OverconvergentModularForms(3, 0, 1/2) Space of 3-adic 1/2-overconvergent modular forms of weight-character 0 over Rational Field sage: OverconvergentModularForms(3, 16, 1/2) Space of 3-adic 1/2-overconvergent modular forms of weight-character 16 over Rational Field sage: OverconvergentModularForms(3, 3, 1/2, char = DirichletGroup(3,QQ).0) Space of 3-adic 1/2-overconvergent modular forms of weight-character (3, 3, [-1]) over Rational Field """ if is_Gamma0(prime) or is_Gamma1(prime): prime = prime.level() else: prime = ZZ(prime) if char is None: char = trivial_character(prime, base_ring=QQ) if int(prime) not in [2, 3, 5, 7, 13]: raise ValueError("p must be one of {2, 3, 5, 7, 13}") key = (prime, weight, radius, base_ring, prec, char) if key in __ocmfdict: w = __ocmfdict[key] M = w() if not (M is None): return M M = OverconvergentModularFormsSpace(*key) __ocmfdict[key] = weakref.ref(M) return M ######################### # Main class definition # ######################### class OverconvergentModularFormsSpace(Module): r""" A space of overconvergent modular forms of level `\Gamma_0(p)`, where `p` is a prime such that `X_0(p)` has genus 0. Elements are represented as power series, with a formal power series `F` corresponding to the modular form `E_k^\ast \times F(g)` where `E_k^\ast` is the `p`-deprived Eisenstein series of weight-character `k`, and `g` is a uniformiser of `X_0(p)` normalised so that the `r`-overconvergent region `X_0(p)_{\ge r}` corresponds to `|g| \le 1`. TESTS:: sage: K.<w> = Qp(13).extension(x^2-13); M = OverconvergentModularForms(13, 20, radius=1/2, base_ring=K) sage: M is loads(dumps(M)) True """ ############### # Init script # ############### def __init__(self, prime, weight, radius, base_ring, prec, char): r""" Create a space of overconvergent `p`-adic modular forms of level `\Gamma_0(p)`, over the given base ring. The base ring need not be a `p`-adic ring (the spaces we compute with typically have bases over `\QQ`). 
EXAMPLES:: sage: OverconvergentModularForms(3, 0, 1/2) Space of 3-adic 1/2-overconvergent modular forms of weight-character 0 over Rational Field """ self._p = prime if not ( base_ring == QQ or is_pAdicField(base_ring) ): raise TypeError("Base ring must be QQ or a p-adic field") if base_ring != QQ and base_ring.prime() != self._p: raise TypeError("Residue characteristic of base ring (=%s) must be %s" % (base_ring, self._p)) if isinstance(weight, WeightCharacter): self._wtchar = weight else: self._wtchar = WeightSpace(prime, base_ring = char.base_ring())(weight, char, algebraic=True) if not self._wtchar.is_even(): raise ValueError("Weight-character must be even") Module.__init__(self, base_ring) self._prec = prec self._qsr = PowerSeriesRing(base_ring, 'q', prec) # q-series ring self._gsr = PowerSeriesRing(base_ring, 'g', prec) # g-adic expansions, g = c*f self._cached_recurrence_matrix = None self._set_radius(radius) self._basis_cache = [self._wtchar.pAdicEisensteinSeries(self._qsr, self.prec())] self._uniformiser = self._qsr(EtaProduct(prime, {prime: 24/ZZ(prime-1), ZZ(1):-24/ZZ(prime-1)}).qexp(self.prec())) for i in range(1, self.prec()): self._basis_cache.append(self._basis_cache[-1] * self._uniformiser * self._const) ##################################### # Methods called by the init script # ##################################### def _set_radius(self, radius): r""" Set the radius of overconvergence to be `r`, where `r` is a rational number in the interval `0 < r < \frac{p}{p+1}`. This only makes sense if the base ring contains an element of normalised valuation `\frac{12r}{p-1}`. If this valuation is an integer, we use the appropriate power of `p`. Otherwise, we assume the base ring has a ``uniformiser`` method and take an appropriate power of the uniformiser, raising an error if no such element exists. EXAMPLES:: sage: M = OverconvergentModularForms(3, 2, 1/2) # indirect doctest sage: M._set_radius(1/3); M Space of 3-adic 1/3-overconvergent modular forms of weight-character 2 over Rational Field sage: L.<w> = Qp(3).extension(x^5 - 3) sage: OverconvergentModularForms(3, 2, 1/30, base_ring=L).normalising_factor() # indirect doctest w + O(w^101) sage: OverconvergentModularForms(3, 2, 1/40, base_ring=L) Traceback (most recent call last): ... ValueError: no element of base ring (=3-adic Eisenstein Extension ...) has normalised valuation 3/20 """ p = ZZ(self.prime()) if (radius < 0 or radius > p/(p+1)): raise ValueError("radius (=%s) must be between 0 and p/(p+1)" % radius) d = 12/(p-1)*radius if d.is_integral(): self._const = p ** ZZ(d) self._radius = radius else: try: pi = self.base_ring().uniformiser() e = d / pi.normalized_valuation() except AttributeError: # base ring isn't a p-adic ring pi = p e = d if not e.is_integral(): raise ValueError("no element of base ring (=%s) has normalised valuation %s" % (self.base_ring(), radius * 12 /(p-1))) self._radius = radius self._const = pi ** ZZ(e) ############################################## # Boring functions that access internal data # ############################################## def is_exact(self): r""" True if elements of this space are represented exactly, i.e., there is no precision loss when doing arithmetic. As this is never true for overconvergent modular forms spaces, this returns False. EXAMPLES:: sage: OverconvergentModularForms(13, 12, 0).is_exact() False """ return False def change_ring(self, ring): r""" Return the space corresponding to self but over the given base ring. 
EXAMPLES:: sage: M = OverconvergentModularForms(2, 0, 1/2) sage: M.change_ring(Qp(2)) Space of 2-adic 1/2-overconvergent modular forms of weight-character 0 over 2-adic Field with ... """ return OverconvergentModularForms(self.prime(), self.weight(), self.radius(), ring, self.prec(), self.character()) def base_extend(self, ring): r""" Return the base extension of self to the given base ring. There must be a canonical map to this ring from the current base ring, otherwise a TypeError will be raised. EXAMPLES:: sage: M = OverconvergentModularForms(2, 0, 1/2, base_ring = Qp(2)) sage: M.base_extend(Qp(2).extension(x^2 - 2, names="w")) Space of 2-adic 1/2-overconvergent modular forms of weight-character 0 over 2-adic Eisenstein Extension ... sage: M.base_extend(QQ) Traceback (most recent call last): ... TypeError: Base extension of self (over '2-adic Field with capped relative precision 20') to ring 'Rational Field' not defined. """ if ring.has_coerce_map_from(self.base_ring()): return self.change_ring(ring) else: raise TypeError("Base extension of self (over '%s') to ring '%s' not defined." % (self.base_ring(), ring)) def _an_element_(self): r""" Return an element of this space (used by the coercion machinery). EXAMPLES:: sage: OverconvergentModularForms(3, 2, 1/3, prec=4).an_element() # indirect doctest 3-adic overconvergent modular form of weight-character 2 with q-expansion 9*q + 216*q^2 + 2430*q^3 + O(q^4) """ return OverconvergentModularFormElement(self, self._gsr.an_element()) def character(self): r""" Return the character of self. For overconvergent forms, the weight and the character are unified into the concept of a weight-character, so this returns exactly the same thing as self.weight(). EXAMPLES:: sage: OverconvergentModularForms(3, 0, 1/2).character() 0 sage: type(OverconvergentModularForms(3, 0, 1/2).character()) <class '...weightspace.AlgebraicWeight'> sage: OverconvergentModularForms(3, 3, 1/2, char=DirichletGroup(3,QQ).0).character() (3, 3, [-1]) """ return self._wtchar def weight(self): r""" Return the character of self. For overconvergent forms, the weight and the character are unified into the concept of a weight-character, so this returns exactly the same thing as self.character(). EXAMPLES:: sage: OverconvergentModularForms(3, 0, 1/2).weight() 0 sage: type(OverconvergentModularForms(3, 0, 1/2).weight()) <class '...weightspace.AlgebraicWeight'> sage: OverconvergentModularForms(3, 3, 1/2, char=DirichletGroup(3,QQ).0).weight() (3, 3, [-1]) """ return self._wtchar def normalising_factor(self): r""" The normalising factor `c` such that `g = c f` is a parameter for the `r`-overconvergent disc in `X_0(p)`, where `f` is the standard uniformiser. EXAMPLES:: sage: L.<w> = Qp(7).extension(x^2 - 7) sage: OverconvergentModularForms(7, 0, 1/4, base_ring=L).normalising_factor() w + O(w^41) """ return self._const def __eq__(self, other): r""" Check whether ``self`` is equal to ``other``. EXAMPLES:: sage: OverconvergentModularForms(3, 12, 1/2) == ModularForms(3, 12) False sage: OverconvergentModularForms(3, 0, 1/2) == OverconvergentModularForms(3, 0, 1/3) False sage: OverconvergentModularForms(3, 0, 1/2) == OverconvergentModularForms(3, 0, 1/2, base_ring = Qp(3)) False sage: OverconvergentModularForms(3, 0, 1/2) == OverconvergentModularForms(3, 0, 1/2) True """ if not isinstance(other, OverconvergentModularFormsSpace): return False else: return self._params() == other._params() def __ne__(self, other): """ Check whether ``self`` is not equal to ``other``. 
EXAMPLES:: sage: OverconvergentModularForms(3, 12, 1/2) != ModularForms(3, 12) True sage: OverconvergentModularForms(3, 0, 1/2) != OverconvergentModularForms(3, 0, 1/3) True sage: OverconvergentModularForms(3, 0, 1/2) != OverconvergentModularForms(3, 0, 1/2, base_ring = Qp(3)) True sage: OverconvergentModularForms(3, 0, 1/2) != OverconvergentModularForms(3, 0, 1/2) False """ return not (self == other) def __hash__(self): """ Return the hash of ``self``. EXAMPLES:: sage: h1 = hash(OverconvergentModularForms(3, 12, 1/2)) sage: h2 = hash(OverconvergentModularForms(3, 12, 1/2)) sage: h3 = hash(OverconvergentModularForms(3, 0, 1/2)) sage: h1 == h2 and h1 != h3 True """ return hash(self._params()) def _params(self): r""" Return the parameters that define this module uniquely: prime, weight, character, radius of overconvergence and base ring. Mostly used for pickling. EXAMPLES:: sage: L.<w> = Qp(7).extension(x^2 - 7) sage: OverconvergentModularForms(7, 0, 1/4, base_ring=L)._params() (7, 0, 1/4, 7-adic Eisenstein Extension Field in w defined by x^2 - 7, 20, Dirichlet character modulo 7 of conductor 1 mapping 3 |--> 1) """ return (self.prime(), self.weight().k(), self.radius(), self.base_ring(), self.prec(), self.weight().chi()) def __reduce__(self): r""" Return the function and arguments used to construct self. Used for pickling. EXAMPLES:: sage: L.<w> = Qp(7).extension(x^2 - 7) sage: OverconvergentModularForms(7, 0, 1/4, base_ring=L).__reduce__() (<function OverconvergentModularForms at ...>, (7, 0, 1/4, 7-adic Eisenstein Extension Field in w defined by x^2 - 7, 20, Dirichlet character modulo 7 of conductor 1 mapping 3 |--> 1)) """ return (OverconvergentModularForms, self._params()) def gen(self, i): r""" Return the ith module generator of self. EXAMPLES:: sage: M = OverconvergentModularForms(3, 2, 1/2, prec=4) sage: M.gen(0) 3-adic overconvergent modular form of weight-character 2 with q-expansion 1 + 12*q + 36*q^2 + 12*q^3 + O(q^4) sage: M.gen(1) 3-adic overconvergent modular form of weight-character 2 with q-expansion 27*q + 648*q^2 + 7290*q^3 + O(q^4) sage: M.gen(30) 3-adic overconvergent modular form of weight-character 2 with q-expansion O(q^4) """ return OverconvergentModularFormElement(self, gexp=self._gsr.gen()**i) def _repr_(self): r""" Return a string representation of self. EXAMPLES:: sage: OverconvergentModularForms(3, 0, 1/2)._repr_() 'Space of 3-adic 1/2-overconvergent modular forms of weight-character 0 over Rational Field' """ return "Space of %s-adic %s-overconvergent modular forms of weight-character %s over %s" % (self.prime(), self.radius(), self.weight(), self.base_ring()) def prime(self): r""" Return the residue characteristic of self, i.e. the prime `p` such that this is a `p`-adic space. EXAMPLES:: sage: OverconvergentModularForms(5, 12, 1/3).prime() 5 """ return self._p def radius(self): r""" The radius of overconvergence of this space. EXAMPLES:: sage: OverconvergentModularForms(3, 0, 1/3).radius() 1/3 """ return self._radius def gens(self): r""" Return a generator object that iterates over the (infinite) set of basis vectors of self. EXAMPLES:: sage: o = OverconvergentModularForms(3, 12, 1/2) sage: t = o.gens() sage: next(t) 3-adic overconvergent modular form of weight-character 12 with q-expansion 1 - 32760/61203943*q - 67125240/61203943*q^2 - ... sage: next(t) 3-adic overconvergent modular form of weight-character 12 with q-expansion 27*q + 19829193012/61203943*q^2 + 146902585770/61203943*q^3 + ... 
""" i = 0 while True: yield self.gen(i) i += 1 def prec(self): r""" Return the series precision of self. Note that this is different from the `p`-adic precision of the base ring. EXAMPLES:: sage: OverconvergentModularForms(3, 0, 1/2).prec() 20 sage: OverconvergentModularForms(3, 0, 1/2,prec=40).prec() 40 """ return self._prec ##################################### # Element construction and coercion # ##################################### def _element_constructor_(self, input): r""" Create an element of this space. Allowable inputs are: - elements of compatible spaces of modular forms or overconvergent modular forms - arbitrary power series in `q` - lists of elements of the base ring (interpreted as vectors in the basis given by self.gens()). Precision may be specified by padding lists at the end with zeros; inputs with a higher precision than the set precision of this space will be rounded. EXAMPLES: From a `q`-expansion:: sage: M = OverconvergentModularForms(3, 0, 1/2, prec=5) sage: R.<q> = QQ[[]] sage: f=M(q + q^2 - q^3 + O(q^16)); f 3-adic overconvergent modular form of weight-character 0 with q-expansion q + q^2 - q^3 + O(q^5) sage: M.coordinate_vector(f) (0, 1/27, -11/729, 173/19683, -3172/531441) From a list or a vector:: sage: M([1,0,1]) 3-adic overconvergent modular form of weight-character 0 with q-expansion 1 + 729*q^2 + O(q^3) sage: M([1,0,1,0,0]) 3-adic overconvergent modular form of weight-character 0 with q-expansion 1 + 729*q^2 + 17496*q^3 + 236196*q^4 + O(q^5) sage: f = M([1,0,1,0,0]); v = M.coordinate_vector(f); v (1, 0, 1, 0, 0) sage: M(v) == f True From a classical modular form:: sage: f = CuspForms(Gamma0(3), 12).0; f q - 176*q^4 + 2430*q^5 + O(q^6) sage: fdag = OverconvergentModularForms(3, 12, 1/3, prec=8)(f); fdag 3-adic overconvergent modular form of weight-character 12 with q-expansion q - 176*q^4 + 2430*q^5 - 5832*q^6 - 19336*q^7 + O(q^8) sage: fdag.parent().coordinate_vector(f)*(1 + O(3^2)) (0, 3^-2 + O(3^0), 2*3^-3 + 2*3^-2 + O(3^-1), 3^-4 + 3^-3 + O(3^-2), 2 + 3 + O(3^2), 2*3 + 3^2 + O(3^3), 2*3^4 + 2*3^5 + O(3^6), 3^5 + 3^6 + O(3^7)) sage: OverconvergentModularForms(3, 6, 1/3)(f) Traceback (most recent call last): ... TypeError: Cannot create an element of 'Space of 3-adic ...' from element of incompatible space 'Cuspidal subspace ...' 
We test that zero elements are handled properly:: sage: M(0) 3-adic overconvergent modular form of weight-character 0 with q-expansion O(q^5) sage: M(O(q^3)) 3-adic overconvergent modular form of weight-character 0 with q-expansion O(q^3) We test coercion between spaces of different precision:: sage: M10 = OverconvergentModularForms(3, 0, 1/2, prec=10) sage: f = M10.1 sage: M(f) 3-adic overconvergent modular form of weight-character 0 with q-expansion 27*q + 324*q^2 + 2430*q^3 + 13716*q^4 + O(q^5) sage: M10(M(f)) 3-adic overconvergent modular form of weight-character 0 with q-expansion 27*q + 324*q^2 + 2430*q^3 + 13716*q^4 + O(q^5) """ if isinstance(input, int): input = ZZ(input) if isinstance(input, OverconvergentModularFormElement): return self._coerce_from_ocmf(input) elif isinstance(input, ModularFormElement): if ( (input.level() == 1 or input.level().prime_factors() == [self.prime()]) and input.weight() == self.weight().k() and input.character().primitive_character() == self.weight().chi().primitive_character()): p = ZZ(self.prime()) nu = (input.level() == 1 and p/(p+1)) or (1 / (p + 1) * p**(2 - input.level().valuation(p))) if self.radius() > nu: raise ValueError("Form is not overconvergent enough (form is only %s-overconvergent)" % nu) else: return self(self._qsr(input.q_expansion(self.prec()))) else: raise TypeError("Cannot create an element of '%s' from element of incompatible space '%s'" % (self, input.parent())) elif isinstance(input, (list, tuple, Vector)): v = list(input) n = len(v) return OverconvergentModularFormElement(self, gexp=self._gsr(v).add_bigoh(n), qexp=None) elif self._qsr.has_coerce_map_from(input.parent()): return OverconvergentModularFormElement(self, gexp=None, qexp=self._qsr(input)) else: raise TypeError("Don't know how to create an overconvergent modular form from %s" % input) @cached_method def zero(self): """ Return the zero of this space. EXAMPLES:: sage: K.<w> = Qp(13).extension(x^2-13); M = OverconvergentModularForms(13, 20, radius=1/2, base_ring=K) sage: K.zero() 0 """ return self(0) def _coerce_from_ocmf(self, f): r""" Try to convert the overconvergent modular form `f` into an element of self. An error will be raised if this is obviously nonsense. EXAMPLES:: sage: M = OverconvergentModularForms(3, 0, 1/2) sage: MM = M.base_extend(Qp(3)) sage: R.<q> = Qp(3)[[]]; f = MM(q + O(q^2)); f 3-adic overconvergent modular form of weight-character 0 with q-expansion (1 + O(3^20))*q + O(q^2) sage: M._coerce_from_ocmf(f) 3-adic overconvergent modular form of weight-character 0 with q-expansion q + O(q^2) sage: f in M # indirect doctest True """ prime, weight, radius, base_ring, prec, char = f.parent()._params() if (prime, weight, char) != (self.prime(), self.weight().k(), self.weight().chi()): raise TypeError("Cannot create an element of '%s' from element of incompatible space '%s'" % (self, input.parent())) return self(self._qsr(f.q_expansion())) def _coerce_map_from_(self, other): r""" Canonical coercion of x into self. Here the possibilities for x are more restricted. TESTS:: sage: M = OverconvergentModularForms(3, 0, 1/2) sage: MM = M.base_extend(Qp(3)) sage: MM.has_coerce_map_from(M) # indirect doctest True sage: MM.coerce(M.1) 3-adic overconvergent modular form of weight-character 0 with q-expansion (3^3 + O(3^23))*q + (3^4 + 3^5 + O(3^24))*q^2 ... 
sage: M.has_coerce_map_from(MM) False sage: M.coerce(1) 3-adic overconvergent modular form of weight-character 0 with q-expansion 1 + O(q^20) """ if (isinstance(other, OverconvergentModularFormsSpace) and self.base_ring().has_coerce_map_from(other.base_ring())): return True else: return self.base_ring().has_coerce_map_from(other) def coordinate_vector(self, x): r""" Write x as a vector with respect to the basis given by self.basis(). Here x must be an element of this space or something that can be converted into one. If x has precision less than the default precision of self, then the returned vector will be shorter. EXAMPLES:: sage: M = OverconvergentModularForms(Gamma0(3), 0, 1/3, prec=4) sage: M.coordinate_vector(M.gen(2)) (0, 0, 1, 0) sage: q = QQ[['q']].gen(); M.coordinate_vector(q - q^2 + O(q^4)) (0, 1/9, -13/81, 74/243) sage: M.coordinate_vector(q - q^2 + O(q^3)) (0, 1/9, -13/81) """ if hasattr(x, 'base_ring') and x.base_ring() != self.base_ring(): return self.base_extend(x.base_ring()).coordinate_vector(x) if x.parent() != self: x = self(x) return vector(self.base_ring(), x.gexp().padded_list(x.gexp().prec())) ########################################################## # Pointless routines required by parent class definition # ########################################################## def ngens(self): r""" The number of generators of self (as a module over its base ring), i.e. infinity. EXAMPLES:: sage: M = OverconvergentModularForms(2, 4, 1/6) sage: M.ngens() +Infinity """ return Infinity def gens_dict(self): r""" Return a dictionary mapping the names of generators of this space to their values. (Required by parent class definition.) As this does not make any sense here, this raises a TypeError. EXAMPLES:: sage: M = OverconvergentModularForms(2, 4, 1/6) sage: M.gens_dict() Traceback (most recent call last): ... TypeError: gens_dict does not make sense as number of generators is infinite """ raise TypeError("gens_dict does not make sense as number of generators is infinite") ##################################### # Routines with some actual content # ##################################### def hecke_operator(self, f, m): r""" Given an element `f` and an integer `m`, calculates the Hecke operator `T_m` acting on `f`. The input may be either a "bare" power series, or an OverconvergentModularFormElement object; the return value will be of the same type. EXAMPLES:: sage: M = OverconvergentModularForms(3, 0, 1/2) sage: f = M.1 sage: M.hecke_operator(f, 3) 3-adic overconvergent modular form of weight-character 0 with q-expansion 2430*q + 265356*q^2 + 10670373*q^3 + 249948828*q^4 + 4113612864*q^5 + 52494114852*q^6 + O(q^7) sage: M.hecke_operator(f.q_expansion(), 3) 2430*q + 265356*q^2 + 10670373*q^3 + 249948828*q^4 + 4113612864*q^5 + 52494114852*q^6 + O(q^7) """ # This should just be an instance of hecke_operator_on_qexp but that # won't accept arbitrary power series as input, although it's clearly # supposed to, which seems rather to defy the point but never mind... 
if f.parent() is self: return self(self.hecke_operator(f.q_expansion(), m)) elif isinstance(f, OverconvergentModularFormElement): if f.parent() is self.base_extend(f.parent().base_ring()): return f.parent().hecke_operator(f, m) else: raise TypeError("Not an element of this space") else: return hecke_operator_on_qexp(f, m, self.weight().k(), eps=self.weight().chi()) def _convert_to_basis(self, qexp): r""" Given a `q`-expansion, converts it to a vector in the basis of this space, to the maximum possible precision (which is the minimum of the `q`-adic precision of the `q`-expansion and the precision of self). EXAMPLES:: sage: M = OverconvergentModularForms(2, 0, 1/2) sage: R.<q> = QQ[[]] sage: M._convert_to_basis(q + q^2 + O(q^4)) 1/64*g - 23/4096*g^2 + 201/65536*g^3 + O(g^4) """ n = min(qexp.prec(), self.prec()) x = qexp g = self._gsr.gen() answer = self._gsr(0) for i in range(n): assert(x.valuation() >= i) answer += (x[i] / self._basis_cache[i][i])*g**i x = x - self._basis_cache[i] * answer[i] return answer + O(g**n) def hecke_matrix(self, m, n, use_recurrence = False, exact_arith = False): r""" Calculate the matrix of the `T_m` operator in the basis of this space, truncated to an `n \times n` matrix. Conventions are that operators act on the left on column vectors (this is the opposite of the conventions of the sage.modules.matrix_morphism class!) Uses naive `q`-expansion arguments if use_recurrence=False and uses the Kolberg style recurrences if use_recurrence=True. The argument "exact_arith" causes the computation to be done with rational arithmetic, even if the base ring is an inexact `p`-adic ring. This is useful as there can be precision loss issues (particularly with use_recurrence=False). EXAMPLES:: sage: OverconvergentModularForms(2, 0, 1/2).hecke_matrix(2, 4) [ 1 0 0 0] [ 0 24 64 0] [ 0 32 1152 4608] [ 0 0 3072 61440] sage: OverconvergentModularForms(2, 12, 1/2, base_ring=pAdicField(2)).hecke_matrix(2, 3) * (1 + O(2^2)) [ 1 + O(2^2) 0 0] [ 0 2^3 + O(2^5) 2^6 + O(2^8)] [ 0 2^4 + O(2^6) 2^7 + 2^8 + O(2^9)] sage: OverconvergentModularForms(2, 12, 1/2, base_ring=pAdicField(2)).hecke_matrix(2, 3, exact_arith=True) [ 1 0 0] [ 0 33881928/1414477 64] [ 0 -192898739923312/2000745183529 1626332544/1414477] """ if exact_arith and not self.base_ring().is_exact(): return self.change_ring(QQ).hecke_matrix(m, n, use_recurrence) M = MatrixSpace(self.base_ring(), n) mat = M(0) for j in range(min(n, self.prime())): l = self._convert_to_basis(self.hecke_operator(self._basis_cache[j], m)) for i in range(n): try: mat[i,j] = l[i] except IndexError: if not self.weight().is_zero(): raise ValueError("n is too large for current precision") else: if i <= self.prime() * j: raise ValueError("n is too large computing initial conds: can't work out u[%s, %s]" % (i,j)) else: mat[i,j] = 0 # computations are exact for weight 0, and we know these terms are zero if use_recurrence: if m != self.prime(): raise ValueError("Recurrence method not valid when m != p") for j in range(self.prime(), n): # can only apply recurrence if have i,j both >= p. 
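                # For each column j >= p, the entries are produced in two steps:
                # the first p rows come from an explicit computation of U_p on the
                # j-th basis vector (or are known to vanish in weight 0 when j lies
                # beyond the working q-expansion precision), and the rows i >= p
                # are then filled in by the p x p linear recurrence
                # u_{i,j} = sum_{u,v} R[u,v] * u_{i-u-1, j-v-1}, with R given by
                # self.recurrence_matrix().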
if j >= self.prec(): for i in range(self.prime()): if self.weight() != 0: raise ValueError("n is too large for current precision") else: if j <= self.prime() * i: raise ValueError("n is too large computing initial conds: can't work out u[%s,%s]" % (i,j)) mat[i,j] = 0 else: l = self._convert_to_basis(self.hecke_operator(self._basis_cache[j], m)) for i in range(self.prime()): mat[i,j] = l[i] for i in range(self.prime(), n): for u in range(self.prime()): for v in range(self.prime()): mat[i,j] = mat[i,j] + mat[i-u-1, j-v-1]*self.recurrence_matrix()[u,v] else: if n * self.prime() > self.prec(): raise ValueError("n is too large") for j in range(self.prime(), n): l = self._convert_to_basis(self.hecke_operator(self._basis_cache[j], m)) for i in range(n): mat[i, j] = l[i] return mat def slopes(self, n, use_recurrence=False): r""" Compute the slopes of the `U_p` operator acting on self, using an n x n matrix. EXAMPLES:: sage: OverconvergentModularForms(5,2,1/3,base_ring=Qp(5),prec=100).slopes(5) [0, 2, 5, 6, 9] sage: OverconvergentModularForms(2,1,1/3,char=DirichletGroup(4,QQ).0).slopes(5) [0, 2, 4, 6, 8] """ if self.base_ring() == QQ: slopelist=self.cps_u(n).truncate().newton_slopes(self.prime()) elif is_pAdicField(self.base_ring()): slopelist=self.cps_u(n).truncate().newton_slopes() else: print("slopes are only defined for base field QQ or a p-adic field") return [-i for i in slopelist] def eigenfunctions(self, n, F = None, exact_arith=True): """ Calculate approximations to eigenfunctions of self. These are the eigenfunctions of self.hecke_matrix(p, n), which are approximations to the true eigenfunctions. Returns a list of OverconvergentModularFormElement objects, in increasing order of slope. INPUT: - ``n`` - integer. The size of the matrix to use. - ``F`` - None, or a field over which to calculate eigenvalues. If the field is None, the current base ring is used. If the base ring is not a `p`-adic ring, an error will be raised. - ``exact_arith`` - True or False (default True). If True, use exact rational arithmetic to calculate the matrix of the `U` operator and its characteristic power series, even when the base ring is an inexact `p`-adic ring. This is typically slower, but more numerically stable. NOTE: Try using ``set_verbose(1, 'sage/modular/overconvergent')`` to get more feedback on what is going on in this algorithm. For even more feedback, use 2 instead of 1. 
EXAMPLES:: sage: X = OverconvergentModularForms(2, 2, 1/6).eigenfunctions(8, Qp(2, 100)) sage: X[1] 2-adic overconvergent modular form of weight-character 2 with q-expansion (1 + O(2^74))*q + (2^4 + 2^5 + 2^9 + 2^10 + 2^12 + 2^13 + 2^15 + 2^17 + 2^19 + 2^20 + 2^21 + 2^23 + 2^28 + 2^30 + 2^31 + 2^32 + 2^34 + 2^36 + 2^37 + 2^39 + 2^40 + 2^43 + 2^44 + 2^45 + 2^47 + 2^48 + 2^52 + 2^53 + 2^54 + 2^55 + 2^56 + 2^58 + 2^59 + 2^60 + 2^61 + 2^67 + 2^68 + 2^70 + 2^71 + 2^72 + 2^74 + 2^76 + O(2^78))*q^2 + (2^2 + 2^7 + 2^8 + 2^9 + 2^12 + 2^13 + 2^16 + 2^17 + 2^21 + 2^23 + 2^25 + 2^28 + 2^33 + 2^34 + 2^36 + 2^37 + 2^42 + 2^45 + 2^47 + 2^49 + 2^50 + 2^51 + 2^54 + 2^55 + 2^58 + 2^60 + 2^61 + 2^67 + 2^71 + 2^72 + O(2^76))*q^3 + (2^8 + 2^11 + 2^14 + 2^19 + 2^21 + 2^22 + 2^24 + 2^25 + 2^26 + 2^27 + 2^28 + 2^29 + 2^32 + 2^33 + 2^35 + 2^36 + 2^44 + 2^45 + 2^46 + 2^47 + 2^49 + 2^50 + 2^53 + 2^54 + 2^55 + 2^56 + 2^57 + 2^60 + 2^63 + 2^66 + 2^67 + 2^69 + 2^74 + 2^76 + 2^79 + 2^80 + 2^81 + O(2^82))*q^4 + (2 + 2^2 + 2^9 + 2^13 + 2^15 + 2^17 + 2^19 + 2^21 + 2^23 + 2^26 + 2^27 + 2^28 + 2^30 + 2^33 + 2^34 + 2^35 + 2^36 + 2^37 + 2^38 + 2^39 + 2^41 + 2^42 + 2^43 + 2^45 + 2^58 + 2^59 + 2^60 + 2^61 + 2^62 + 2^63 + 2^65 + 2^66 + 2^68 + 2^69 + 2^71 + 2^72 + O(2^75))*q^5 + (2^6 + 2^7 + 2^15 + 2^16 + 2^21 + 2^24 + 2^25 + 2^28 + 2^29 + 2^33 + 2^34 + 2^37 + 2^44 + 2^45 + 2^48 + 2^50 + 2^51 + 2^54 + 2^55 + 2^57 + 2^58 + 2^59 + 2^60 + 2^64 + 2^69 + 2^71 + 2^73 + 2^75 + 2^78 + O(2^80))*q^6 + (2^3 + 2^8 + 2^9 + 2^10 + 2^11 + 2^12 + 2^14 + 2^15 + 2^17 + 2^19 + 2^20 + 2^21 + 2^23 + 2^25 + 2^26 + 2^34 + 2^37 + 2^38 + 2^39 + 2^40 + 2^41 + 2^45 + 2^47 + 2^49 + 2^51 + 2^53 + 2^54 + 2^55 + 2^57 + 2^58 + 2^59 + 2^60 + 2^61 + 2^66 + 2^69 + 2^70 + 2^71 + 2^74 + 2^76 + O(2^77))*q^7 + O(q^8) sage: [x.slope() for x in X] [0, 4, 8, 14, 16, 18, 26, 30] """ if F is None: F = self.base_ring() if F.is_exact(): #raise TypeError, "cannot calculate eigenfunctions over exact base fields" F = pAdicField(self.prime(), 100) m = self.hecke_matrix(self.prime(), n, use_recurrence=True, exact_arith=exact_arith) cp = m.charpoly() eigenvalues = cp.roots(F) eigenfunctions = [] verbose("Expected %s eigenvalues, got %s" % (n, len(eigenvalues))) for (r, d) in eigenvalues: if d != 1: continue mr = m.__pari__() - r.__pari__() # Annoying thing: r isn't quite as precise as it claims to be # (bug reported to sage-support list) while F(mr.matdet()) != 0: verbose("p-adic solver returned wrong result in slope %s; refining" % r.valuation(), level=2) r = r - cp(r)/cp.derivative()(r) mr2 = m.__pari__() - r.__pari__() if mr2.matdet().valuation(self.prime()) > mr.matdet().valuation(self.prime()): mr = mr2 else: mr = None break if mr is None: verbose("Unable to calculate exact root in slope %s" % r.valuation()) continue # now calculate the kernel using PARI v = mr.matker() if repr(v) == "[;]": verbose("PARI returned empty eigenspace in slope %s" % r.valuation()) continue # Can't happen? Does PARI always return a # nonempty kernel for matrices that have det # indistinguishable from 0? if v.ncols() != 1: verbose("PARI returned non-simple eigenspace in slope %s" % r.valuation()) continue gexp = self._gsr(0) for i in range(v.nrows()): gexp += self._gsr.gen()**i * F(v[i,0]) gexp = gexp + O(self._gsr.gen()**int(v.nrows())) if gexp[0] != 0: gexp = gexp/gexp[0] elif gexp[1] != 0: gexp = gexp/gexp[1]/self._const # This is slightly subtle. We want all eigenfunctions to have q-exps in Z_p. 
# Normalising the q-term to be 1 doesn't work for the Eisenstein series if # we're in the 0 component of weight-character space. But normalising the const term # to 1 works as *none of the small primes we deal with are irregular*! :-) else: raise ValueError("Constant and linear terms both zero!") # if this gets called something is very wrong. efunc = OverconvergentModularFormElement(self.base_extend(F), gexp=gexp) efunc._notify_eigen(r) assert efunc.is_integral() # This sometimes fails if n is too large -- last row of matrix fills # up with garbage. I don't know why. XXX FIX THIS XXX eigenfunctions.append((r.valuation(), efunc)) eigenfunctions.sort() # sort by slope return [f for _,f in eigenfunctions] def recurrence_matrix(self, use_smithline=True): r""" Return the recurrence matrix satisfied by the coefficients of `U`, that is a matrix `R =(r_{rs})_{r,s=1 \dots p}` such that `u_{ij} = \sum_{r,s=1}^p r_{rs} u_{i-r, j-s}`. Uses an elegant construction which I believe is due to Smithline. See [Loe2007]_. EXAMPLES:: sage: OverconvergentModularForms(2, 0, 0).recurrence_matrix() [ 48 1] [4096 0] sage: OverconvergentModularForms(2, 0, 1/2).recurrence_matrix() [48 64] [64 0] sage: OverconvergentModularForms(3, 0, 0).recurrence_matrix() [ 270 36 1] [ 26244 729 0] [531441 0 0] sage: OverconvergentModularForms(5, 0, 0).recurrence_matrix() [ 1575 1300 315 30 1] [ 162500 39375 3750 125 0] [ 4921875 468750 15625 0 0] [ 58593750 1953125 0 0 0] [244140625 0 0 0 0] sage: OverconvergentModularForms(7, 0, 0).recurrence_matrix() [ 4018 8624 5915 1904 322 28 1] [ 422576 289835 93296 15778 1372 49 0] [ 14201915 4571504 773122 67228 2401 0 0] [ 224003696 37882978 3294172 117649 0 0 0] [ 1856265922 161414428 5764801 0 0 0 0] [ 7909306972 282475249 0 0 0 0 0] [13841287201 0 0 0 0 0 0] sage: OverconvergentModularForms(13, 0, 0).recurrence_matrix() [ 15145 124852 354536 ... """ if self._cached_recurrence_matrix is not None: return self._cached_recurrence_matrix MM = OverconvergentModularForms(self.prime(), 0, 0, base_ring=QQ) m = MM._discover_recurrence_matrix(use_smithline = True).base_extend(self.base_ring()) r = diagonal_matrix([self._const**i for i in range(self.prime())]) self._cached_recurrence_matrix = (r**(-1)) * m * r self._cached_recurrence_matrix.set_immutable() return self._cached_recurrence_matrix def _discover_recurrence_matrix(self, use_smithline=True): r""" Does hard work of calculating recurrence matrix, which is cached to avoid doing this every time. 
EXAMPLES:: sage: o = OverconvergentModularForms(3,12,0) sage: o._discover_recurrence_matrix() == o.recurrence_matrix() True """ (f_ring, f) = PolynomialRing(self.base_ring(), "f").objgen() if use_smithline: # Compute Smithline's polynomial H_p jq = self._qsr(j_invariant_qexp(1+self.prime()).shift(1).power_series()) # avoid dividing by q so as not to instantiate a Laurent series h = self._uniformiser.shift(-1) * jq fi = self._qsr(1) coeffs = [] for i in range(self.prime()+2): if not h.valuation() >= i: raise ValueError("Something strange is happening here") coeffs.append(h[i] / fi[i]) h = h - coeffs[-1] * fi fi = fi*self._uniformiser SmiH = f_ring(coeffs) assert SmiH.degree() == self.prime() + 1 xyring = PolynomialRing(self.base_ring(), ["x","y"], 2) x,y = xyring.gens() cc = self.prime() ** (-12/(self.prime() - 1)) bigI = x*SmiH(y*cc)- y*cc*SmiH(x) smallI = xyring(bigI / (x - cc*y)) r = matrix(ZZ, self.prime(), self.prime()) for i in range(self.prime()): for j in range(self.prime()): r[i,j] = -smallI[i+1, j+1] return r else: # compute from U(f^j) for small j via Newton's identities # to be implemented when I can remember Newton's identities! raise NotImplementedError def cps_u(self, n, use_recurrence=False): r""" Compute the characteristic power series of `U_p` acting on self, using an n x n matrix. EXAMPLES:: sage: OverconvergentModularForms(3, 16, 1/2, base_ring=Qp(3)).cps_u(4) 1 + O(3^20) + (2 + 2*3 + 2*3^2 + 2*3^4 + 3^5 + 3^6 + 3^7 + 3^11 + 3^12 + 2*3^14 + 3^16 + 3^18 + O(3^19))*T + (2*3^3 + 3^5 + 3^6 + 3^7 + 2*3^8 + 2*3^9 + 2*3^10 + 3^11 + 3^12 + 2*3^13 + 2*3^16 + 2*3^18 + O(3^19))*T^2 + (2*3^15 + 2*3^16 + 2*3^19 + 2*3^20 + 2*3^21 + O(3^22))*T^3 + (3^17 + 2*3^18 + 3^19 + 3^20 + 3^22 + 2*3^23 + 2*3^25 + 3^26 + O(3^27))*T^4 sage: OverconvergentModularForms(3, 16, 1/2, base_ring=Qp(3), prec=30).cps_u(10) 1 + O(3^20) + (2 + 2*3 + 2*3^2 + 2*3^4 + 3^5 + 3^6 + 3^7 + 2*3^15 + O(3^16))*T + (2*3^3 + 3^5 + 3^6 + 3^7 + 2*3^8 + 2*3^9 + 2*3^10 + 2*3^11 + 2*3^12 + 2*3^13 + 3^14 + 3^15 + O(3^16))*T^2 + (3^14 + 2*3^15 + 2*3^16 + 3^17 + 3^18 + O(3^19))*T^3 + (3^17 + 2*3^18 + 3^19 + 3^20 + 3^21 + O(3^24))*T^4 + (3^29 + 2*3^32 + O(3^33))*T^5 + (2*3^44 + O(3^45))*T^6 + (2*3^59 + O(3^60))*T^7 + (2*3^78 + O(3^79))*T^8 .. NOTE:: Uses the Hessenberg form of the Hecke matrix to compute the characteristic polynomial. Because of the use of relative precision here this tends to give better precision in the p-adic coefficients. """ m = self.hecke_matrix(self.prime(), n, use_recurrence) A = PowerSeriesRing(self.base_ring(), 'T') # From a conversation with David Loeffler, apparently self.base_ring() # is either the field of rational numbers or some p-adic field. In the # first case we want to use the linbox algorithm, and in the second # case the Hessenberg form algorithm. # if self.base_ring().is_exact(): g = A(m.charpoly('T').reverse()) else: g = A(m.charpoly('T', "hessenberg").reverse()) return g class OverconvergentModularFormElement(ModuleElement): r""" A class representing an element of a space of overconvergent modular forms. EXAMPLES:: sage: K.<w> = Qp(5).extension(x^7 - 5); s = OverconvergentModularForms(5, 6, 1/21, base_ring=K).0 sage: s == loads(dumps(s)) True """ def __init__(self, parent, gexp=None, qexp=None): r""" Create an element of this space. 
EXAMPLES:: sage: OverconvergentModularForms(3, 2, 1/6,prec=5).an_element() # indirect doctest 3-adic overconvergent modular form of weight-character 2 with q-expansion 3*q + 72*q^2 + 810*q^3 + 6096*q^4 + O(q^5) """ ModuleElement.__init__(self, parent) self._p = self.parent().prime() #self.weight = self.parent().weight if (gexp is None and qexp is None) or (gexp is not None and qexp is not None): raise ValueError("Must supply exactly one of a q-expansion and a g-expansion") if gexp is not None: self._gexp = gexp.add_bigoh(self.parent().prec()) self._qexp = sum([self.parent()._basis_cache[i] * gexp[i] for i in range(min(gexp.prec(), self.parent().prec()))]) self._qexp = self._qexp.add_bigoh(self._gexp.prec()) else: # qexp is not None self._qexp = qexp.add_bigoh(self.parent().prec()) self._gexp = self.parent()._convert_to_basis(self._qexp) self._is_eigen = False self._eigenvalue = None self._slope = None def _add_(self, other): r""" Add self to other (where other has the same parent as self). EXAMPLES:: sage: M = OverconvergentModularForms(2, 12, 1/6) sage: f = M.0 sage: f + f # indirect doctest 2-adic overconvergent modular form of weight-character 12 with q-expansion 2 - 131040/1414477*q ... """ return OverconvergentModularFormElement(self.parent(), gexp = self.gexp() + other.gexp()) def _lmul_(self, x): r""" Left multiplication by other. EXAMPLES:: sage: M = OverconvergentModularForms(2, 12, 1/6) sage: f = M.0 sage: 2*f # indirect doctest 2-adic overconvergent modular form of weight-character 12 with q-expansion 2 - 131040/1414477*q ... """ return OverconvergentModularFormElement(self.parent(), gexp = x * self.gexp()) def _rmul_(self, x): r""" Right multiplication by other. EXAMPLES:: sage: M = OverconvergentModularForms(2, 12, 1/6) sage: f = M.0 sage: f * 3 # indirect doctest 2-adic overconvergent modular form of weight-character 12 with q-expansion 3 - 196560/1414477*q ... """ return OverconvergentModularFormElement(self.parent(), gexp = x * self.gexp()) def prec(self): r""" Return the series expansion precision of this overconvergent modular form. (This is not the same as the `p`-adic precision of the coefficients.) EXAMPLES:: sage: OverconvergentModularForms(5, 6, 1/3,prec=15).gen(1).prec() 15 """ return self.gexp().prec() def is_eigenform(self): r""" Return True if this is an eigenform. At present this returns False unless this element was explicitly flagged as an eigenform, using the _notify_eigen function. EXAMPLES:: sage: M = OverconvergentModularForms(3, 0, 1/2) sage: f = M.eigenfunctions(3)[1] sage: f.is_eigenform() True sage: M.gen(4).is_eigenform() False """ return self._is_eigen def slope(self): r""" Return the slope of this eigenform, i.e. the valuation of its `U_p`-eigenvalue. Raises an error unless this element was explicitly flagged as an eigenform, using the _notify_eigen function. EXAMPLES:: sage: M = OverconvergentModularForms(3, 0, 1/2) sage: f = M.eigenfunctions(3)[1] sage: f.slope() 2 sage: M.gen(4).slope() Traceback (most recent call last): ... TypeError: slope only defined for eigenfunctions """ if not self.is_eigenform(): raise TypeError("slope only defined for eigenfunctions") return self._slope def eigenvalue(self): r""" Return the `U_p`-eigenvalue of this eigenform. Raises an error unless this element was explicitly flagged as an eigenform, using the _notify_eigen function. 
EXAMPLES:: sage: M = OverconvergentModularForms(3, 0, 1/2) sage: f = M.eigenfunctions(3)[1] sage: f.eigenvalue() 3^2 + 3^4 + 2*3^6 + 3^7 + 3^8 + 2*3^9 + 2*3^10 + 3^12 + 3^16 + 2*3^17 + 3^18 + 3^20 + 2*3^21 + 3^22 + 2*3^23 + 3^25 + 3^26 + 2*3^27 + 2*3^29 + 3^30 + 3^31 + 3^32 + 3^33 + 3^34 + 3^36 + 3^40 + 2*3^41 + 3^43 + 3^44 + 3^45 + 3^46 + 3^48 + 3^49 + 3^50 + 2*3^51 + 3^52 + 3^54 + 2*3^57 + 2*3^59 + 3^60 + 3^61 + 2*3^63 + 2*3^66 + 2*3^67 + 3^69 + 2*3^72 + 3^74 + 2*3^75 + 3^76 + 2*3^77 + 2*3^78 + 2*3^80 + 3^81 + 2*3^82 + 3^84 + 2*3^85 + 2*3^86 + 3^87 + 3^88 + 2*3^89 + 2*3^91 + 3^93 + 3^94 + 3^95 + 3^96 + 3^98 + 2*3^99 + O(3^100) sage: M.gen(4).eigenvalue() Traceback (most recent call last): ... TypeError: eigenvalue only defined for eigenfunctions """ if not self.is_eigenform(): raise TypeError("eigenvalue only defined for eigenfunctions") return self._eigenvalue def q_expansion(self, prec=None): r""" Return the `q`-expansion of self, to as high precision as it is known. EXAMPLES:: sage: OverconvergentModularForms(3, 4, 1/2).gen(0).q_expansion() 1 - 120/13*q - 1080/13*q^2 - 120/13*q^3 - 8760/13*q^4 - 15120/13*q^5 - 1080/13*q^6 - 41280/13*q^7 - 5400*q^8 - 120/13*q^9 - 136080/13*q^10 - 159840/13*q^11 - 8760/13*q^12 - 263760/13*q^13 - 371520/13*q^14 - 15120/13*q^15 - 561720/13*q^16 - 45360*q^17 - 1080/13*q^18 - 823200/13*q^19 + O(q^20) """ if prec is None: return self._qexp elif prec > self.prec(): raise ValueError else: return self._qexp.add_bigoh(prec) def gexp(self): r""" Return the formal power series in `g` corresponding to this overconvergent modular form (so the result is `F` where this modular form is `E_k^\ast \times F(g)`, where `g` is the appropriately normalised parameter of `X_0(p)`). EXAMPLES:: sage: M = OverconvergentModularForms(3, 0, 1/2) sage: f = M.eigenfunctions(3)[1] sage: f.gexp() (3^-3 + O(3^95))*g + (3^-1 + 1 + 2*3 + 3^2 + 2*3^3 + 3^5 + 3^7 + 3^10 + 3^11 + 3^14 + 3^15 + 3^16 + 2*3^19 + 3^21 + 3^22 + 2*3^23 + 2*3^24 + 3^26 + 2*3^27 + 3^29 + 3^31 + 3^34 + 2*3^35 + 2*3^36 + 3^38 + 2*3^39 + 3^41 + 2*3^42 + 2*3^43 + 2*3^44 + 2*3^46 + 2*3^47 + 3^48 + 2*3^49 + 2*3^50 + 3^51 + 2*3^54 + 2*3^55 + 2*3^56 + 3^57 + 2*3^58 + 2*3^59 + 2*3^60 + 3^61 + 3^62 + 3^63 + 3^64 + 2*3^65 + 3^67 + 3^68 + 2*3^69 + 3^70 + 2*3^71 + 2*3^74 + 3^76 + 2*3^77 + 3^78 + 2*3^79 + 2*3^80 + 3^84 + 2*3^85 + 2*3^86 + 3^88 + 2*3^89 + 3^91 + 3^92 + 2*3^94 + 3^95 + O(3^97))*g^2 + O(g^3) """ return self._gexp def coordinates(self, prec=None): r""" Return the coordinates of this modular form in terms of the basis of this space. EXAMPLES:: sage: M = OverconvergentModularForms(3, 0, 1/2, prec=15) sage: f = (M.0 + M.3); f.coordinates() [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] sage: f.coordinates(6) [1, 0, 0, 1, 0, 0] sage: OverconvergentModularForms(3, 0, 1/6)(f).coordinates(6) [1, 0, 0, 729, 0, 0] sage: f.coordinates(100) Traceback (most recent call last): ... ValueError: Precision too large for space """ if prec > self.prec(): raise ValueError("Precision too large for space") if prec is None: prec = self.prec() return self._gexp.padded_list(prec) def prime(self): r""" If this is a `p`-adic modular form, return `p`. EXAMPLES:: sage: OverconvergentModularForms(2, 0, 1/2).an_element().prime() 2 """ return self._p def _notify_eigen(self, eigenvalue): """ Flags this element as an eigenform. It then remembers some extra data. EXAMPLES:: sage: OverconvergentModularForms(3, 16, 1/3).eigenfunctions(4) # indirect doctest [...] 
""" self._is_eigen = True self._eigenvalue = eigenvalue self._slope = eigenvalue.normalized_valuation() def is_integral(self): r""" Test whether or not this element has `q`-expansion coefficients that are `p`-adically integral. This should always be the case with eigenfunctions, but sometimes if n is very large this breaks down for unknown reasons! EXAMPLES:: sage: M = OverconvergentModularForms(2, 0, 1/3) sage: q = QQ[['q']].gen() sage: M(q - 17*q^2 + O(q^3)).is_integral() True sage: M(q - q^2/2 + 6*q^7 + O(q^9)).is_integral() False """ for co in self.q_expansion().list(): if (co * (1 + O(self.prime()))).valuation() < 0: # have to force it into ZZ_p return False return True def _repr_(self): r""" String representation of self. EXAMPLES:: sage: o=OverconvergentModularForms(3, 0, 1/2) sage: o([1, 0, 1, 3])._repr_() '3-adic overconvergent modular form of weight-character 0 with q-expansion 1 + 729*q^2 + 76545*q^3 + O(q^4)' """ return "%s-adic overconvergent modular form of weight-character %s with q-expansion %s" % (self.prime(), self.weight(), self.q_expansion()) def _richcmp_(self, other, op): r""" Compare self to other. EXAMPLES:: sage: o = OverconvergentModularForms(3, 0, 1/2) sage: o([1, 1, 1, 0, 0, 0, 0]) == o([2, 1, 0]) False sage: o([1, 1, 1, 0, 0, 0, 0]) == o([1,1]) True """ return richcmp(self.gexp(), other.gexp(), op) def r_ord(self, r): r""" The `p`-adic valuation of the norm of self on the `r`-overconvergent region. EXAMPLES:: sage: o=OverconvergentModularForms(3, 0, 1/2) sage: t = o([1, 1, 1/3]) sage: t.r_ord(1/2) 1 sage: t.r_ord(2/3) 3 """ ord = -Infinity p = self.prime() s = self.parent().radius() F = self.parent().base_ring() if not is_pAdicField(F): F = pAdicField(p) for i in range(self.prec()): ord = max( ord, 12/ZZ(p - 1)*i*(r - s) - F(self.gexp()[i]).normalized_valuation()) return ord def valuation(self): r""" Return the `p`-adic valuation of this form (i.e. the minimum of the `p`-adic valuations of its coordinates). EXAMPLES:: sage: M = OverconvergentModularForms(3, 0, 1/2) sage: (M.7).valuation() 0 sage: (3^18 * (M.2)).valuation() 18 """ if is_pAdicField(self.parent().base_ring()): v = lambda u: u.normalized_valuation() else: v = lambda u: u.valuation(self.parent().prime()) return min([v(x) for x in self.gexp().list()]) def governing_term(self, r): r""" The degree of the series term with largest norm on the `r`-overconvergent region. EXAMPLES:: sage: o=OverconvergentModularForms(3, 0, 1/2) sage: f=o.eigenfunctions(10)[1] sage: f.governing_term(1/2) 1 """ p = self.prime() F = self.parent().base_ring() if not is_pAdicField(F): F = pAdicField(p) s = self.parent().radius() p = self.prime() for i in range(self.gexp().prec()): if 12/ZZ(p - 1)*i*(r - s) - F(self.gexp()[i]).normalized_valuation() == self.r_ord(r): return i raise RuntimeError("Can't get here") def valuation_plot(self, rmax = None): r""" Draw a graph depicting the growth of the norm of this overconvergent modular form as it approaches the boundary of the overconvergent region. EXAMPLES:: sage: o=OverconvergentModularForms(3, 0, 1/2) sage: f=o.eigenfunctions(4)[1] sage: f.valuation_plot() Graphics object consisting of 1 graphics primitive """ if rmax is None: rmax = ZZ(self.prime())/ZZ(1 + self.prime()) return plot(self.r_ord, (0, rmax) ) def weight(self): r""" Return the weight of this overconvergent modular form. 
EXAMPLES:: sage: M = OverconvergentModularForms(13, 10, 1/2, base_ring = Qp(13).extension(x^2 - 13,names='a')) sage: M.gen(0).weight() 10 """ return self.parent().weight() def additive_order(self): r""" Return the additive order of this element (required attribute for all elements deriving from sage.modules.ModuleElement). EXAMPLES:: sage: M = OverconvergentModularForms(13, 10, 1/2, base_ring = Qp(13).extension(x^2 - 13,names='a')) sage: M.gen(0).additive_order() +Infinity sage: M(0).additive_order() 1 """ from sage.rings.infinity import Infinity if self.is_zero(): return ZZ(1) else: return Infinity def base_extend(self, R): r""" Return a copy of self but with coefficients in the given ring. EXAMPLES:: sage: M = OverconvergentModularForms(7, 10, 1/2, prec=5) sage: f = M.1 sage: f.base_extend(Qp(7, 4)) 7-adic overconvergent modular form of weight-character 10 with q-expansion (7 + O(7^5))*q + (6*7 + 4*7^2 + 7^3 + 6*7^4 + O(7^5))*q^2 + (5*7 + 5*7^2 + 7^4 + O(7^5))*q^3 + (7^2 + 4*7^3 + 3*7^4 + 2*7^5 + O(7^6))*q^4 + O(q^5) """ S = self.parent().base_extend(R) return S(self) def __pari__(self): r""" Return the Pari object corresponding to self, which is just the `q`-expansion of self as a formal power series. EXAMPLES:: sage: f = OverconvergentModularForms(3, 0, 1/2).1 sage: pari(f) # indirect doctest 27*q + 324*q^2 + 2430*q^3 + 13716*q^4 + 64557*q^5 + 265356*q^6 + 983556*q^7 + 3353076*q^8 + 10670373*q^9 + 32031288*q^10 + 91455804*q^11 + 249948828*q^12 + 657261999*q^13 + 1669898592*q^14 + 4113612864*q^15 + 9853898292*q^16 + 23010586596*q^17 + 52494114852*q^18 + 117209543940*q^19 + O(q^20) sage: pari(f.base_extend(Qp(3))) # indirect doctest (3^3 + O(3^23))*q + (3^4 + 3^5 + O(3^24))*q^2 + (3^5 + 3^7 + O(3^25))*q^3 + (3^3 + 3^4 + 2*3^5 + 2*3^8 + O(3^23))*q^4 + (2*3^4 + 3^5 + 3^6 + 2*3^7 + 3^10 + O(3^24))*q^5 + (3^6 + 3^7 + 3^8 + 3^9 + 3^10 + 3^11 + O(3^26))*q^6 + (2*3^3 + 3^4 + 2*3^6 + 2*3^7 + 2*3^8 + 3^9 + 3^10 + 2*3^11 + 3^12 + O(3^23))*q^7 + (2*3^4 + 3^5 + 3^8 + 2*3^9 + 2*3^10 + 2*3^13 + O(3^24))*q^8 + (3^7 + 2*3^9 + 2*3^12 + 2*3^14 + O(3^27))*q^9 + (2*3^5 + 3^8 + 3^9 + 2*3^10 + 2*3^13 + 2*3^15 + O(3^25))*q^10 + (3^4 + 2*3^5 + 2*3^6 + 3^8 + 2*3^9 + 3^12 + 3^14 + 2*3^16 + O(3^24))*q^11 + (3^5 + 3^6 + 2*3^8 + 2*3^9 + 2*3^10 + 2*3^12 + 3^14 + 2*3^15 + 2*3^16 + 3^17 + O(3^25))*q^12 + (2*3^3 + 2*3^4 + 2*3^5 + 3^8 + 2*3^9 + 2*3^11 + 3^13 + 2*3^14 + 2*3^17 + 3^18 + O(3^23))*q^13 + (2*3^4 + 2*3^6 + 2*3^7 + 3^8 + 2*3^9 + 3^10 + 3^12 + 3^14 + 2*3^15 + 2*3^16 + 3^18 + 3^19 + O(3^24))*q^14 + (2*3^6 + 3^7 + 3^9 + 3^10 + 3^11 + 2*3^14 + 3^15 + 2*3^16 + 3^17 + 3^18 + 3^20 + O(3^26))*q^15 + (3^3 + 2*3^4 + 2*3^7 + 2*3^8 + 3^9 + 3^10 + 2*3^11 + 3^12 + 2*3^14 + 2*3^15 + 3^17 + 3^18 + 2*3^19 + 2*3^20 + O(3^23))*q^16 + (2*3^5 + 2*3^7 + 2*3^8 + 3^10 + 3^11 + 2*3^12 + 2*3^13 + 3^14 + 3^15 + 3^17 + 2*3^18 + 3^19 + 2*3^21 + O(3^25))*q^17 + (3^8 + 3^9 + 2*3^10 + 2*3^11 + 3^12 + 3^14 + 3^15 + 3^16 + 3^17 + 2*3^21 + 3^22 + O(3^28))*q^18 + (2*3^3 + 3^5 + 2*3^6 + 2*3^8 + 2*3^9 + 3^11 + 2*3^12 + 3^13 + 3^14 + 2*3^15 + 3^16 + 3^17 + 2*3^18 + 3^19 + 2*3^21 + O(3^23))*q^19 + O(q^20) """ return self.q_expansion().__pari__()
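

# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal end-to-end use
# of the classes above, kept inside a never-called helper so that importing
# this file is unaffected.  It relies only on names already present in this
# module (OverconvergentModularForms, QQ, pAdicField); the parameters chosen
# (p = 3, weight-character 0, radius 1/2, prec = 20) are the ones appearing
# in the doctests above.
def _usage_sketch():
    # Space of 3-adic 1/2-overconvergent modular forms of weight-character 0,
    # computed over QQ with 20 terms of q-expansion.
    M = OverconvergentModularForms(3, 0, QQ(1) / 2, prec=20)

    # Truncated matrix of U_3 in the standard basis, computed via the Kolberg
    # recurrences, and the slopes read off from its characteristic series
    # (n is kept small so that n * p stays within the q-expansion precision).
    A = M.hecke_matrix(3, 6, use_recurrence=True)
    slopes = M.slopes(6)

    # Approximate U_3-eigenfunctions over Q_3, returned in increasing slope.
    eigenforms = M.eigenfunctions(4, pAdicField(3, 60))
    return A, slopes, [f.slope() for f in eigenforms]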
py
1a39e832253b1494af238a4fca91613cc3e33228
import cv2 import os import scipy as scp import scipy.misc import matplotlib from sklearn.cluster import KMeans import numpy as np import evaluationClass_tools as evTools import random from sklearn import svm from sklearn import preprocessing import pickle import triangle_detection as triang def oneClass(image_seg): rows, cols = image_seg.shape[:2] color = [0, 0, 0] for i in range(rows): for j in range(cols): if (image_seg[i][j][0] == 0 and image_seg[i][j][1] == 0 and image_seg[i][j][2] == 0): continue else: if (color[0] == 0 and color[1] == 0 and color[2] == [0]): color[0] = image_seg[i][j][0] color[1] = image_seg[i][j][1] color[2] = image_seg[i][j][2] continue if (image_seg[i][j][0] != color[0] or image_seg[i][j][1] != color[1] and image_seg[i][j][2] != color[2]): return False return True, color # def triangStats(img, singleColor = True): # imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # ret, imbw = cv2.threshold(imggray, 10, 255, 0) # _, contours, _ = cv2.findContours(imbw, 1, 2) # maxArea = 0; # Ax = Ay = Bx = By = Cx = Cy = 0 # areaCnt = 0 # maxCnt = None # idx = -1 # for cnt in contours: # idx += 1 # retval, triangle = cv2.minEnclosingTriangle(cnt) # if (triangle is None): # continue # areaCnt = cv2.contourArea(cnt) # if (areaCnt <= maxArea): # continue # maxArea = areaCnt # maxCnt = idx # Ax = triangle[0][0][0] # Ay = triangle[0][0][1] # Bx = triangle[1][0][0] # By = triangle[1][0][1] # Cx = triangle[2][0][0] # Cy = triangle[2][0][1] # if (maxArea < 0.1 * imggray.shape[0] * imggray.shape[1]): # return False, None, None, None # v1x = 0 # v1y = 0 # v2x = 0 # v2y = 0 # v3x = 0 # v3y = 0 # imgCnt = np.zeros((img.shape[0], img.shape[1], 3), np.uint8) # mask = np.zeros((img.shape[0], img.shape[1], 3), np.uint8) # cv2.drawContours(mask, contours, maxCnt, color=(255, 255, 255), thickness=cv2.FILLED) # color = [0, 0, 0] # contActivePixels = 0 # valret = True # for i in range(mask.shape[0]): # for j in range(mask.shape[1]): # if (mask[i, j, 0] == 255 and mask[i, j, 1] == 255 and mask[i, j, 2] == 255): # if(img[i, j, 0] != 0 or img[i, j, 1] != 0 or img[i, j, 2] != 0): # contActivePixels+=1 # if (color[0] == 0 and color[1] == 0 and color[2] == 0): # color[0] = int(img[i][j][0]) # color[1] = int(img[i][j][1]) # color[2] = int(img[i][j][2]) # else: # if (img[i][j][0] != color[0] or img[i][j][1] != color[1] or img[i][j][2] != color[2]): # valret = False # if(singleColor == True and valret == False): # return False, None, None, None # cv2.drawContours(imgCnt, contours, maxCnt, color=color, thickness=cv2.FILLED) # if (Cy < By and Cy < Ay): # v1y = Cy # v1x = Cx # if (Ax < Bx): # v2x = Ax # v2y = Ay # v3x = Bx # v3y = By # else: # v2x = Bx # v2y = By # v3x = Ax # v3y = Ay # elif (By < Cy and By < Ay): # v1y = By # v1x = Bx # if (Ax < Cx): # v2x = Ax # v2y = Ay # v3x = Cx # v3y = Cy # else: # v2x = Cx # v2y = Cy # v3x = Ax # v3y = Ay # else: # v1y = Ay # v1x = Ax # if (Bx < Cx): # v2x = Bx # v2y = By # v3x = Cx # v3y = Cy # else: # v2x = Cx # v2y = Cy # v3x = Bx # v3y = By # # (x,y),radius = cv2.minEnclosingCircle(cnt) # triangleArea = abs((v2x * (v1y - v3y) + v1x * (v3y - v2y) + v3x * (v2y - v1y)) / 2) # # print(f"({v1x},{v1y}) ({v2x},{v2y}) ({v3x},{v3y}) {maxArea} {triangleArea}") # # a=input('pare') # # center = (int(x),int(y)) # # radius = int(radius) # # cv2.circle(img,center,radius,(255,255,0),2) # #desc = [maxArea / triangleArea, 0 if v3y - v1y == 0 else (v2y - v1y) / (v3y - v1y), # #1 if v1x - v2x > 0 and v3x - v1x > 0 else 0, np.rad2deg(np.arctan( abs(v3y-v2y) / (v3x - v2x)))] # desc = 
[contActivePixels/triangleArea, np.rad2deg(np.arctan(abs(v3y - v2y) / (v3x - v2x))), 1 if v1x - v2x > 0 and v3x - v1x > 0 else 0 ] # return True, np.array([desc]), imgCnt, color def applySmv(desc, svmModel): return svmModel.predict(desc) def sortImgByNumberOfActivePixels(elem): return elem[1] def sortImgByFilledTriangPerc(elem): return elem[1] def allPxDominantStreet(image_seg, avgPavedPx, avgRockPx, avgNonPavedPx, th): rows,cols = image_seg.shape[:2] endLoop = 0 validNonZeroPx = 0 for i in range(rows): for j in range(cols): if (image_seg[i][j][0] == 0 and image_seg[i][j][1] == 0 and image_seg[i][j][2] == 0): continue if avgPavedPx >= th: if (image_seg[i][j][0] != 0 or image_seg[i][j][1] != 0 or image_seg[i][j][2] != 255): return False else: validNonZeroPx = validNonZeroPx + 1 if avgRockPx >= th: if (image_seg[i][j][0] != 255 or image_seg[i][j][1] != 0 or image_seg[i][j][2] != 0): return False else: validNonZeroPx = validNonZeroPx + 1 if avgNonPavedPx >= th: if (image_seg[i][j][0] != 0 or (image_seg[i][j][1] != 255 and image_seg[i][j][1] != 100) or image_seg[i][j][2] != 0): return False else: validNonZeroPx = validNonZeroPx + 1 if validNonZeroPx == 0: return False, 0 else: return True, validNonZeroPx file_smv_model = 'svm_model3.sav' svm_model = pickle.load(open(file_smv_model, 'rb')) baseGTtrainfile = 'gt_image_4_balanced_train.txt' baseGTvalfile = 'gt_image_4_balanced_val.txt' file_scaler = 'scaler3.sav' scaler = pickle.load(open(file_scaler, 'rb')) classes = evTools.ClassesDef.PAVED_NONPAVED_ROCK basedirretrain='retrain_SVM_balanced_novo_1' dir_test = 'C:\\Pesquisa\\codigos\\KittiSeg_shivam\\KittiSeg\\data\\dataset_Olinda_varHeading_fov90\\teste2\\'; dir_segmented = 'C:\\Pesquisa\\codigos\\KittiSeg_shivam\\KittiSeg\\RUNS\\'+basedirretrain+'\\results\\'; path_retraindataset = 'C:\\Pesquisa\\codigos\\KittiSeg_shivam\\KittiSeg\\data\\data_road\\training\\' dir_retraindataset = 'image_4retrain' dir_gt_retraindataset = 'retrain_SVM_balanced_novo' dirpath = 'RUNS\\'+basedirretrain txt_retrain_name = 'retrain_SVM_balanced_novo_2_train.txt' txt_val_retrain_name = 'retrain_SVM_balanced_novo_2_val.txt' txtBestResults = 'retrain_SVM_balanced_novo_updatedResults.txt'; firstRetrain = False try: fileBestResults = open(txtBestResults, 'r') except IOError: firstRetrain = True fileBestResults = open(txtBestResults, 'w') fileBestResults.close() fileBestResults = open(txtBestResults, 'r') fileBestResults.close() newBestResults = [] resFile = open(os.path.join(dirpath,'results.txt'),'r', encoding="utf8") line = resFile.readline() labelGT = '?' 
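# Main pass over results.txt: iterate over the per-street result lines; for
# every frame of a street, score the newly produced segmentation and the
# previously stored "best" segmentation with the triangle-fill descriptor from
# triang.triangStats() (the SVM quality model gates the new frame), keep the
# better of the two, and bucket the kept frames by dominant colour into the
# paved / non-paved / rock lists used further down to write a balanced
# retraining train/val split.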
count = 0 streetsPaved = [] streetsRock = [] streetsNP = [] while line: streetname = line.replace('\t',' ') streetname = streetname.split(' [')[0] print('PROCESSING STREET: '+streetname) th = 0.99 fileBestResults = open(txtBestResults, 'r') lineBestResult = fileBestResults.readline() newLineBestResult = line bestResultUpdated = False while lineBestResult: streetnameBestResult = lineBestResult.replace('\t',' ') streetnameBestResult = streetnameBestResult.split(' [')[0] if streetname == streetnameBestResult: break lineBestResult = fileBestResults.readline() fileBestResults.close() # label, npav, nrock, nnonp, avgPavedPx, avgRockPx, avgNonPavedPx = evTools.getNumberOfImagesFromClass(line,classes,0) # labelBR, npavBR, nrockBR, nnonpBR, avgPavedPxBR, avgRockPxBR, avgNonPavedPxBR = evTools.getNumberOfImagesFromClass(lineBestResult,classes,0) # print(lineBestResult) # if (avgPavedPxBR >= th and avgPavedPxBR > avgPavedPx) or (avgRockPxBR >= th and avgRockPxBR > avgRockPx) or (avgNonPavedPxBR >= th and avgNonPavedPxBR >= avgNonPavedPx): # newLineBestResult = lineBestResult # if avgNonPavedPx < th and avgRockPx < th and avgPavedPx < th and avgNonPavedPxBR < th and avgRockPxBR < th and avgPavedPxBR < th: # line = resFile.readline() # newBestResults.append(newLineBestResult) # continue streetpath_test = os.path.join(dir_test,streetname) streetpath_seg = os.path.join(dir_segmented,streetname) for filename in os.listdir(streetpath_test): filename_seg = filename.replace('.png','_raw.png'); # print(filename_seg) dirToSaveResult = path_retraindataset+dir_gt_retraindataset+'\\'+streetname+'\\'; # currentImgForegPix = 0 # BRimgForegPix = 0 # try: # current_image_seg = scp.misc.imread(streetpath_seg+'\\'+filename_seg,mode='') # if avgNonPavedPx < th and avgRockPx < th and avgPavedPx < th: # currentimgOK = False # print('currentimgok = -false') # else: # currentimgOK, currentImgForegPix = allPxDominantStreet(current_image_seg, avgPavedPx, avgRockPx, avgNonPavedPx, th) # #currentimgOK = currentimgOK and evTools.good_res_image(current_image_seg) # print('currentimgok = '+str(currentimgOK)) # except Exception as e: # print('exc curr: '+ str(e)) # print('error to read curr img: '+os.path.join(streetpath_seg,filename_seg)) # currentimgOK = False # try: # BR_image_seg = scp.misc.imread(dirToSaveResult+filename_seg,mode='') # if avgNonPavedPxBR < th and avgRockPxBR < th and avgPavedPxBR < th: # BRimgOK = False # print('brimgok = -false') # else: # BRimgOK, BRimgForegPix = allPxDominantStreet(BR_image_seg, avgPavedPxBR, avgRockPxBR, avgNonPavedPxBR, th) # #BRimgOK = BRimgOK and evTools.good_res_image(BR_image_seg) # print('brimgok = '+str(BRimgOK)) # except Exception as e: # print('Exc: '+ str(e)) # print('error to read BR: '+os.path.join(dirToSaveResult+filename_seg)) # BRimgOK = False # if currentimgOK == False and BRimgOK == False: # print('continue') # continue try: current_image_seg = scp.misc.imread(streetpath_seg+'\\'+filename_seg,mode='') currentimgOK = True except Exception as e: currentimgOK = False try: BR_image_seg = scp.misc.imread(dirToSaveResult+filename_seg,mode='') BRimgOK = True except Exception as e: BRimgOK = False perfCurrentImage = 0 if currentimgOK: oneClassContour,descCurrImg, _, curImgTriang, colorCurrImg = triang.triangStats(current_image_seg) if(oneClassContour): goodCurImg = applySmv(scaler.transform(descCurrImg),svm_model) if(goodCurImg==1): perfCurrentImage = descCurrImg[0][0] perfBRImage = 0 if BRimgOK: oneClassContour, descBRImg,_, brImgTriang, colorBRImg = 
triang.triangStats(BR_image_seg) #goodBRImage = applySmv(scaler.transform(descBRImg),svm_model) #if(goodBRImage==1): perfBRImage = descBRImg[0][0] #if avgPavedPx >= th or avgPavedPxBR >= th: if perfCurrentImage > perfBRImage: if not os.path.exists(dirToSaveResult): os.makedirs(dirToSaveResult) print('image updated') print('path saved to file:') print('training/'+ dir_retraindataset +'/'+streetname+ '/' +filename+ ' '+'training/'+ dir_gt_retraindataset+'/'+streetname+ '/' +filename_seg+'\n') arr = ['training/'+ dir_retraindataset +'/'+streetname+ '/' +filename+ ' '+'training/'+ dir_gt_retraindataset+'/'+streetname+ '/' +filename_seg+'\n',perfCurrentImage] if (colorCurrImg[0] == 255 and colorCurrImg[1] == 0 and colorCurrImg[2] == 0): streetsPaved.append(arr) elif colorCurrImg[0] == 0 and colorCurrImg[1] == 255 and colorCurrImg[2] == 0: streetsNP.append(arr) else: streetsRock.append(arr) scp.misc.imsave(dirToSaveResult+filename_seg, curImgTriang) elif BRimgOK: arr = ['training/'+ dir_retraindataset +'/'+streetname+ '/' +filename+ ' '+'training/'+ dir_gt_retraindataset+'/'+streetname+ '/' +filename_seg+'\n',perfBRImage] if (colorBRImg[0] == 255 and colorBRImg[1] == 0 and colorBRImg[2] == 0): streetsPaved.append(arr) elif (colorBRImg[0] == 0 and colorBRImg[1] == 255 and colorBRImg[2] == 0): streetsNP.append(arr) else: streetsRock.append(arr) #scp.misc.imsave(dirToSaveResult+filename_seg, BR_image_seg) newBestResults.append(newLineBestResult) line = resFile.readline() fileBestResults = open(txtBestResults, 'w') for l in newBestResults: fileBestResults.write(l) fileBestResults.close() #trainfile = open(os.path.join(dirpath,'train4_retrainpercsemlateral3.txt'),'a') #valfile = open(os.path.join(dirpath,'val4_retrainpercsemlateral3.txt'),'a') trainfile = open(os.path.join(dirpath,txt_retrain_name),'w') valfile = open(os.path.join(dirpath,txt_val_retrain_name),'w') with open(baseGTtrainfile) as f: for line in f: trainfile.write(line) trainfile.write('\n') with open(baseGTvalfile) as f: for line in f: valfile.write(line) valfile.write('\n') #streetsPaved.sort(key=sortImgByFilledTriangPerc, reverse=True) #streetsNP.sort(key=sortImgByFilledTriangPerc, reverse=True) #streetsRock.sort(key=sortImgByFilledTriangPerc, reverse=True) print(len(streetsPaved)) print(len(streetsNP)) print(len(streetsRock)) #minClass = min(len(streetsPaved),len(streetsNP)) #minClass = min(minClass,len(streetsRock)) # for i in range(minClass): # if i % 3 != 0: # trainfile.write(streetsPaved[i][0]) # trainfile.write(streetsNP[i][0]) # #trainfile.write(streetsRock[i][0]) # else: # valfile.write(streetsPaved[i][0]) # valfile.write(streetsNP[i][0]) # #valfile.write(streetsRock[i][0]) # maxClass = max(len(streetsPaved),len(streetsNP)) # maxClass= max(maxClass,len(streetsRock)) # for i in range(maxClass): # if i % 3 != 0: # if i < len(streetsPaved): # trainfile.write(streetsPaved[i][0]) # print(f"train paved: {streetsPaved[i][0]}") # if i < len(streetsRock): # trainfile.write(streetsRock[i][0]) # print(f"train rock: {streetsRock[i][0]}") # if i < len(streetsNP): # trainfile.write(streetsNP[i][0]) # print(f"train nonpaved: {streetsNP[i][0]}") # else: # if i < len(streetsPaved): # valfile.write(streetsPaved[i][0]) # print(f"val paved: {streetsPaved[i][0]}") # if i < len(streetsRock): # valfile.write(streetsRock[i][0]) # print(f"val rock: {streetsRock[i][0]}") # if i < len(streetsNP): # valfile.write(streetsNP[i][0]) # print(f"val nonpaved: {streetsNP[i][0]}") minClass = min(len(streetsPaved)+len(streetsRock),len(streetsNP)) flagRock = 
False ipaved = 0 irock = 0 inonpaved = 0 for i in range(minClass): if i % 3 != 0: if( i % 2 == 0): trainfile.write(streetsNP[inonpaved][0]) inonpaved += 1 else: if(flagRock == True or ipaved >= len(streetsPaved)): trainfile.write(streetsRock[irock][0]) flagRock = False irock += 1 elif(flagRock == False or irock >= len(streetsRock)): trainfile.write(streetsPaved[ipaved][0]) flagRock = True ipaved += 1 else: if( i % 2 == 0): valfile.write(streetsNP[inonpaved][0]) inonpaved += 1 else: if(flagRock == True or ipaved >= len(streetsPaved)): valfile.write(streetsRock[irock][0]) flagRock = False irock += 1 elif(flagRock == False or irock >= len(streetsRock)): valfile.write(streetsPaved[ipaved][0]) flagRock = True ipaved += 1 trainfile.close(); valfile.close();
py
1a39e85d3ef5725ff5b30875f4dced382897f2f8
# encoding: utf-8 import datetime from django.test import TestCase from haystack import connections from haystack.inputs import AltParser, Exact from haystack.models import SearchResult from haystack.query import SQ, SearchQuerySet from ..core.models import AnotherMockModel, MockModel class SolrSearchQueryTestCase(TestCase): fixtures = ["base_data"] def setUp(self): super(SolrSearchQueryTestCase, self).setUp() self.sq = connections["solr"].get_query() def test_build_query_all(self): self.assertEqual(self.sq.build_query(), "*:*") def test_build_query_single_word(self): self.sq.add_filter(SQ(content="hello")) self.assertEqual(self.sq.build_query(), "(hello)") def test_build_query_boolean(self): self.sq.add_filter(SQ(content=True)) self.assertEqual(self.sq.build_query(), "(true)") def test_build_query_datetime(self): self.sq.add_filter(SQ(content=datetime.datetime(2009, 5, 8, 11, 28))) self.assertEqual(self.sq.build_query(), "(2009-05-08T11:28:00Z)") def test_build_query_multiple_words_and(self): self.sq.add_filter(SQ(content="hello")) self.sq.add_filter(SQ(content="world")) self.assertEqual(self.sq.build_query(), "((hello) AND (world))") def test_build_query_multiple_words_not(self): self.sq.add_filter(~SQ(content="hello")) self.sq.add_filter(~SQ(content="world")) self.assertEqual(self.sq.build_query(), "(NOT ((hello)) AND NOT ((world)))") def test_build_query_multiple_words_or(self): self.sq.add_filter(~SQ(content="hello")) self.sq.add_filter(SQ(content="hello"), use_or=True) self.assertEqual(self.sq.build_query(), "(NOT ((hello)) OR (hello))") def test_build_query_multiple_words_mixed(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(content="hello"), use_or=True) self.sq.add_filter(~SQ(content="world")) self.assertEqual( self.sq.build_query(), "(((why) OR (hello)) AND NOT ((world)))" ) def test_build_query_phrase(self): self.sq.add_filter(SQ(content="hello world")) self.assertEqual(self.sq.build_query(), "(hello AND world)") self.sq.add_filter(SQ(content__exact="hello world")) self.assertEqual( self.sq.build_query(), '((hello AND world) AND ("hello world"))' ) def test_build_query_boost(self): self.sq.add_filter(SQ(content="hello")) self.sq.add_boost("world", 5) self.assertEqual(self.sq.build_query(), "(hello) world^5") def test_correct_exact(self): self.sq.add_filter(SQ(content=Exact("hello world"))) self.assertEqual(self.sq.build_query(), '("hello world")') def test_build_query_multiple_filter_types(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(pub_date__lte=Exact("2009-02-10 01:59:00"))) self.sq.add_filter(SQ(author__gt="daniel")) self.sq.add_filter(SQ(created__lt=Exact("2009-02-12 12:13:00"))) self.sq.add_filter(SQ(title__gte="B")) self.sq.add_filter(SQ(id__in=[1, 2, 3])) self.sq.add_filter(SQ(rating__range=[3, 5])) self.assertEqual( self.sq.build_query(), '((why) AND pub_date:([* TO "2009-02-10 01:59:00"]) AND author:({"daniel" TO *}) AND created:({* TO "2009-02-12 12:13:00"}) AND title:(["B" TO *]) AND id:("1" OR "2" OR "3") AND rating:(["3" TO "5"]))', ) def test_build_complex_altparser_query(self): self.sq.add_filter(SQ(content=AltParser("dismax", "Don't panic", qf="text"))) self.sq.add_filter(SQ(pub_date__lte=Exact("2009-02-10 01:59:00"))) self.sq.add_filter(SQ(author__gt="daniel")) self.sq.add_filter(SQ(created__lt=Exact("2009-02-12 12:13:00"))) self.sq.add_filter(SQ(title__gte="B")) self.sq.add_filter(SQ(id__in=[1, 2, 3])) self.sq.add_filter(SQ(rating__range=[3, 5])) query = self.sq.build_query() self.assertTrue('(_query_:"{!dismax 
qf=text}Don\'t panic")' in query) self.assertTrue('pub_date:([* TO "2009-02-10 01:59:00"])' in query) self.assertTrue('author:({"daniel" TO *})' in query) self.assertTrue('created:({* TO "2009-02-12 12:13:00"})' in query) self.assertTrue('title:(["B" TO *])' in query) self.assertTrue('id:("1" OR "2" OR "3")' in query) self.assertTrue('rating:(["3" TO "5"])' in query) def test_build_query_multiple_filter_types_with_datetimes(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(pub_date__lte=datetime.datetime(2009, 2, 10, 1, 59, 0))) self.sq.add_filter(SQ(author__gt="daniel")) self.sq.add_filter(SQ(created__lt=datetime.datetime(2009, 2, 12, 12, 13, 0))) self.sq.add_filter(SQ(title__gte="B")) self.sq.add_filter(SQ(id__in=[1, 2, 3])) self.sq.add_filter(SQ(rating__range=[3, 5])) self.assertEqual( self.sq.build_query(), '((why) AND pub_date:([* TO "2009-02-10T01:59:00Z"]) AND author:({"daniel" TO *}) AND created:({* TO "2009-02-12T12:13:00Z"}) AND title:(["B" TO *]) AND id:("1" OR "2" OR "3") AND rating:(["3" TO "5"]))', ) def test_build_query_in_filter_multiple_words(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(title__in=["A Famous Paper", "An Infamous Article"])) self.assertEqual( self.sq.build_query(), '((why) AND title:("A Famous Paper" OR "An Infamous Article"))', ) def test_build_query_in_filter_datetime(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(pub_date__in=[datetime.datetime(2009, 7, 6, 1, 56, 21)])) self.assertEqual( self.sq.build_query(), '((why) AND pub_date:("2009-07-06T01:56:21Z"))' ) def test_build_query_in_with_set(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(title__in=set(["A Famous Paper", "An Infamous Article"]))) query = self.sq.build_query() self.assertTrue("(why)" in query) # Because ordering in Py3 is now random. if 'title:("A ' in query: self.assertTrue( 'title:("A Famous Paper" OR "An Infamous Article")' in query ) else: self.assertTrue( 'title:("An Infamous Article" OR "A Famous Paper")' in query ) def test_build_query_with_contains(self): self.sq.add_filter(SQ(content="circular")) self.sq.add_filter(SQ(title__contains="haystack")) self.assertEqual(self.sq.build_query(), "((circular) AND title:(*haystack*))") def test_build_query_with_endswith(self): self.sq.add_filter(SQ(content="circular")) self.sq.add_filter(SQ(title__endswith="haystack")) self.assertEqual(self.sq.build_query(), "((circular) AND title:(*haystack))") def test_build_query_wildcard_filter_types(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(title__startswith="haystack")) self.assertEqual(self.sq.build_query(), "((why) AND title:(haystack*))") def test_build_query_fuzzy_filter_types(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(title__fuzzy="haystack")) self.assertEqual(self.sq.build_query(), "((why) AND title:(haystack~))") def test_clean(self): self.assertEqual(self.sq.clean("hello world"), "hello world") self.assertEqual(self.sq.clean("hello AND world"), "hello and world") self.assertEqual( self.sq.clean( 'hello AND OR NOT TO + - && || ! ( ) { } [ ] ^ " ~ * ? : \ / world' ), 'hello and or not to \\+ \\- \\&& \\|| \\! \\( \\) \\{ \\} \\[ \\] \\^ \\" \\~ \\* \\? 
\\: \\\\ \\/ world', ) self.assertEqual( self.sq.clean("so please NOTe i am in a bAND and bORed"), "so please NOTe i am in a bAND and bORed", ) def test_build_query_with_models(self): self.sq.add_filter(SQ(content="hello")) self.sq.add_model(MockModel) self.assertEqual(self.sq.build_query(), "(hello)") self.sq.add_model(AnotherMockModel) self.assertEqual(self.sq.build_query(), "(hello)") def test_set_result_class(self): # Assert that we're defaulting to ``SearchResult``. self.assertTrue(issubclass(self.sq.result_class, SearchResult)) # Custom class. class IttyBittyResult(object): pass self.sq.set_result_class(IttyBittyResult) self.assertTrue(issubclass(self.sq.result_class, IttyBittyResult)) # Reset to default. self.sq.set_result_class(None) self.assertTrue(issubclass(self.sq.result_class, SearchResult)) def test_in_filter_values_list(self): self.sq.add_filter(SQ(content="why")) self.sq.add_filter(SQ(title__in=MockModel.objects.values_list("id", flat=True))) self.assertEqual(self.sq.build_query(), '((why) AND title:("1" OR "2" OR "3"))') def test_narrow_sq(self): sqs = SearchQuerySet(using="solr").narrow(SQ(foo="moof")) self.assertTrue(isinstance(sqs, SearchQuerySet)) self.assertEqual(len(sqs.query.narrow_queries), 1) self.assertEqual(sqs.query.narrow_queries.pop(), "foo:(moof)") def test_query__in(self): sqs = SearchQuerySet(using="solr").filter(id__in=[1, 2, 3]) self.assertEqual(sqs.query.build_query(), 'id:("1" OR "2" OR "3")') def test_query__in_empty_list(self): """Confirm that an empty list avoids a Solr exception""" sqs = SearchQuerySet(using="solr").filter(id__in=[]) self.assertEqual(sqs.query.build_query(), "id:(!*:*)")
py
1a39e85f2ff65ac2065bc019cb520c0c0c09617e
from select import select

from scapy.all import conf, ETH_P_ALL, MTU, plist


# Stop sniff() asynchronously
# Source: https://github.com/secdev/scapy/issues/989#issuecomment-380044430
def sniff(store=False, prn=None, lfilter=None,
          stop_event=None, refresh=.1, *args, **kwargs):
    """Sniff packets

    sniff([count=0,] [prn=None,] [store=1,] [offline=None,]
          [lfilter=None,] + L2ListenSocket args)

    store: whether to store sniffed packets or discard them
    prn: function to apply to each packet. If something is returned,
         it is displayed. Ex: prn = lambda x: x.summary()
    lfilter: python function applied to each packet to determine
             if further action may be done.
             Ex: lfilter = lambda x: x.haslayer(Padding)
    stop_event: Event that stops the function when set
    refresh: check stop_event.set() every refresh seconds
    """
    s = conf.L2listen(type=ETH_P_ALL, *args, **kwargs)
    lst = []
    try:
        while True:
            if stop_event and stop_event.is_set():
                break
            sel = select([s], [], [], refresh)
            if s in sel[0]:  # if the socket s is ready to be read from
                p = s.recv(MTU)  # receive one packet from the socket
                if p is None:
                    break
                if lfilter and not lfilter(p):
                    continue
                if store:
                    lst.append(p)
                if prn:
                    r = prn(p)
                    if r is not None:
                        print(r)
    except KeyboardInterrupt:
        pass
    finally:
        s.close()

    return plist.PacketList(lst, "Sniffed")
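# Illustrative usage sketch (added; not part of the original module): run
# sniff() in a worker thread and stop it from the main thread through a
# threading.Event. The interface name "eth0" and the 0.5 s refresh value are
# assumptions for the example only.
#
#     import threading
#     stop = threading.Event()
#     t = threading.Thread(
#         target=sniff,
#         kwargs=dict(iface="eth0", prn=lambda p: p.summary(),
#                     stop_event=stop, refresh=0.5))
#     t.start()
#     ...
#     stop.set()   # sniff() notices the event within ~refresh seconds
#     t.join()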
py
1a39e9a9d81e187fae028ec673101cc5b4d43472
#!/usr/bin/env python import unittest from framework import VppTestCase, VppTestRunner from vpp_ip import DpoProto from vpp_ip_route import VppIpMRoute, VppMRoutePath, VppMFibSignal, \ MRouteItfFlags, MRouteEntryFlags, VppIpTable from scapy.packet import Raw from scapy.layers.l2 import Ether from scapy.layers.inet import IP, UDP, getmacbyip from scapy.layers.inet6 import IPv6, getmacbyip6 # # The number of packets sent is set to 91 so that when we replicate more than 3 # times, which we do for some entries, we will generate more than 256 packets # to the next node in the VLIB graph. Thus we are testing the code's # correctness handling this over-flow. # It's also an odd number so we hit any single loops. # N_PKTS_IN_STREAM = 91 class TestMFIB(VppTestCase): """ MFIB Test Case """ def setUp(self): super(TestMFIB, self).setUp() def test_mfib(self): """ MFIB Unit Tests """ error = self.vapi.cli("test mfib") if error: self.logger.critical(error) self.assertEqual(error.find("Failed"), -1) class TestIPMcast(VppTestCase): """ IP Multicast Test Case """ def setUp(self): super(TestIPMcast, self).setUp() # create 8 pg interfaces self.create_pg_interfaces(range(9)) # setup interfaces for i in self.pg_interfaces[:8]: i.admin_up() i.config_ip4() i.config_ip6() i.resolve_arp() i.resolve_ndp() # one more in a vrf tbl4 = VppIpTable(self, 10) tbl4.add_vpp_config() self.pg8.set_table_ip4(10) self.pg8.config_ip4() tbl6 = VppIpTable(self, 10, is_ip6=1) tbl6.add_vpp_config() self.pg8.set_table_ip6(10) self.pg8.config_ip6() def tearDown(self): for i in self.pg_interfaces: i.unconfig_ip4() i.unconfig_ip6() i.admin_down() self.pg8.set_table_ip4(0) self.pg8.set_table_ip6(0) super(TestIPMcast, self).tearDown() def create_stream_ip4(self, src_if, src_ip, dst_ip, payload_size=0): pkts = [] # default to small packet sizes p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) / IP(src=src_ip, dst=dst_ip) / UDP(sport=1234, dport=1234)) if not payload_size: payload_size = 64 - len(p) p = p / Raw('\xa5' * payload_size) for i in range(0, N_PKTS_IN_STREAM): pkts.append(p) return pkts def create_stream_ip6(self, src_if, src_ip, dst_ip): pkts = [] for i in range(0, N_PKTS_IN_STREAM): info = self.create_packet_info(src_if, src_if) payload = self.info_to_payload(info) p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) / IPv6(src=src_ip, dst=dst_ip) / UDP(sport=1234, dport=1234) / Raw(payload)) info.data = p.copy() pkts.append(p) return pkts def verify_filter(self, capture, sent): if not len(capture) == len(sent): # filter out any IPv6 RAs from the captur for p in capture: if (p.haslayer(IPv6)): capture.remove(p) return capture def verify_capture_ip4(self, rx_if, sent, dst_mac=None): rxd = rx_if.get_capture(len(sent)) try: capture = self.verify_filter(rxd, sent) self.assertEqual(len(capture), len(sent)) for i in range(len(capture)): tx = sent[i] rx = capture[i] eth = rx[Ether] self.assertEqual(eth.type, 0x800) tx_ip = tx[IP] rx_ip = rx[IP] if dst_mac is None: dst_mac = getmacbyip(rx_ip.dst) # check the MAC address on the RX'd packet is correctly formed self.assertEqual(eth.dst, dst_mac) self.assertEqual(rx_ip.src, tx_ip.src) self.assertEqual(rx_ip.dst, tx_ip.dst) # IP processing post pop has decremented the TTL self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl) except: raise def verify_capture_ip6(self, rx_if, sent): capture = rx_if.get_capture(len(sent)) self.assertEqual(len(capture), len(sent)) for i in range(len(capture)): tx = sent[i] rx = capture[i] eth = rx[Ether] self.assertEqual(eth.type, 0x86DD) tx_ip = tx[IPv6] rx_ip = 
rx[IPv6] # check the MAC address on the RX'd packet is correctly formed self.assertEqual(eth.dst, getmacbyip6(rx_ip.dst)) self.assertEqual(rx_ip.src, tx_ip.src) self.assertEqual(rx_ip.dst, tx_ip.dst) # IP processing post pop has decremented the TTL self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim) def test_ip_mcast(self): """ IP Multicast Replication """ # # a stream that matches the default route. gets dropped. # self.vapi.cli("clear trace") tx = self.create_stream_ip4(self.pg0, "1.1.1.1", "232.1.1.1") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() self.pg0.assert_nothing_captured( remark="IP multicast packets forwarded on default route") # # A (*,G). # one accepting interface, pg0, 7 forwarding interfaces # many forwarding interfaces test the case where the replicare DPO # needs to use extra cache lines for the buckets. # route_232_1_1_1 = VppIpMRoute( self, "0.0.0.0", "232.1.1.1", 32, MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, [VppMRoutePath(self.pg0.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT), VppMRoutePath(self.pg1.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD), VppMRoutePath(self.pg2.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD), VppMRoutePath(self.pg3.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD), VppMRoutePath(self.pg4.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD), VppMRoutePath(self.pg5.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD), VppMRoutePath(self.pg6.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD), VppMRoutePath(self.pg7.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)]) route_232_1_1_1.add_vpp_config() # # An (S,G). # one accepting interface, pg0, 2 forwarding interfaces # route_1_1_1_1_232_1_1_1 = VppIpMRoute( self, "1.1.1.1", "232.1.1.1", 64, MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, [VppMRoutePath(self.pg0.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT), VppMRoutePath(self.pg1.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD), VppMRoutePath(self.pg2.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)]) route_1_1_1_1_232_1_1_1.add_vpp_config() # # An (S,G). # one accepting interface, pg0, 2 forwarding interfaces # that use unicast next-hops # route_1_1_1_1_232_1_1_2 = VppIpMRoute( self, "1.1.1.1", "232.1.1.2", 64, MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, [VppMRoutePath(self.pg0.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT), VppMRoutePath(self.pg1.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD, nh=self.pg1.remote_ip4), VppMRoutePath(self.pg2.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD, nh=self.pg2.remote_ip4)]) route_1_1_1_1_232_1_1_2.add_vpp_config() # # An (*,G/m). 
# one accepting interface, pg0, 1 forwarding interfaces # route_232 = VppIpMRoute( self, "0.0.0.0", "232.0.0.0", 8, MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, [VppMRoutePath(self.pg0.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT), VppMRoutePath(self.pg1.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)]) route_232.add_vpp_config() # # a stream that matches the route for (1.1.1.1,232.1.1.1) # small packets # self.vapi.cli("clear trace") tx = self.create_stream_ip4(self.pg0, "1.1.1.1", "232.1.1.1") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() self.assertEqual(route_1_1_1_1_232_1_1_1.get_stats()['packets'], len(tx)) # We expect replications on Pg1->7 self.verify_capture_ip4(self.pg1, tx) self.verify_capture_ip4(self.pg2, tx) # no replications on Pg0 self.pg0.assert_nothing_captured( remark="IP multicast packets forwarded on PG0") self.pg3.assert_nothing_captured( remark="IP multicast packets forwarded on PG3") # # a stream that matches the route for (1.1.1.1,232.1.1.1) # large packets # self.vapi.cli("clear trace") tx = self.create_stream_ip4(self.pg0, "1.1.1.1", "232.1.1.1", payload_size=1024) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() # We expect replications on Pg1->7 self.verify_capture_ip4(self.pg1, tx) self.verify_capture_ip4(self.pg2, tx) self.assertEqual(route_1_1_1_1_232_1_1_1.get_stats()['packets'], 2*len(tx)) # no replications on Pg0 self.pg0.assert_nothing_captured( remark="IP multicast packets forwarded on PG0") self.pg3.assert_nothing_captured( remark="IP multicast packets forwarded on PG3") # # a stream to the unicast next-hops # self.vapi.cli("clear trace") tx = self.create_stream_ip4(self.pg0, "1.1.1.1", "232.1.1.2") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() # We expect replications on Pg1->7 self.verify_capture_ip4(self.pg1, tx, dst_mac=self.pg1.remote_mac) self.verify_capture_ip4(self.pg2, tx, dst_mac=self.pg2.remote_mac) # no replications on Pg0 nor pg3 self.pg0.assert_nothing_captured( remark="IP multicast packets forwarded on PG0") self.pg3.assert_nothing_captured( remark="IP multicast packets forwarded on PG3") # # a stream that matches the route for (*,232.0.0.0/8) # Send packets with the 9th bit set so we test the correct clearing # of that bit in the mac rewrite # self.vapi.cli("clear trace") tx = self.create_stream_ip4(self.pg0, "1.1.1.1", "232.255.255.255") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() # We expect replications on Pg1 only self.verify_capture_ip4(self.pg1, tx) self.assertEqual(route_232.get_stats()['packets'], len(tx)) # no replications on Pg0, Pg2 not Pg3 self.pg0.assert_nothing_captured( remark="IP multicast packets forwarded on PG0") self.pg2.assert_nothing_captured( remark="IP multicast packets forwarded on PG2") self.pg3.assert_nothing_captured( remark="IP multicast packets forwarded on PG3") # # a stream that matches the route for (*,232.1.1.1) # self.vapi.cli("clear trace") tx = self.create_stream_ip4(self.pg0, "1.1.1.2", "232.1.1.1") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() # We expect replications on Pg1->7 self.verify_capture_ip4(self.pg1, tx) self.verify_capture_ip4(self.pg2, tx) self.verify_capture_ip4(self.pg3, tx) self.verify_capture_ip4(self.pg4, tx) self.verify_capture_ip4(self.pg5, tx) self.verify_capture_ip4(self.pg6, tx) self.verify_capture_ip4(self.pg7, tx) # no replications on Pg0 self.pg0.assert_nothing_captured( remark="IP multicast 
packets forwarded on PG0") def test_ip6_mcast(self): """ IPv6 Multicast Replication """ # # a stream that matches the default route. gets dropped. # self.vapi.cli("clear trace") tx = self.create_stream_ip6(self.pg0, "2001::1", "ff01::1") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() self.pg0.assert_nothing_captured( remark="IPv6 multicast packets forwarded on default route") # # A (*,G). # one accepting interface, pg0, 3 forwarding interfaces # route_ff01_1 = VppIpMRoute( self, "::", "ff01::1", 128, MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, [VppMRoutePath(self.pg0.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT, proto=DpoProto.DPO_PROTO_IP6), VppMRoutePath(self.pg1.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD, proto=DpoProto.DPO_PROTO_IP6), VppMRoutePath(self.pg2.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD, proto=DpoProto.DPO_PROTO_IP6), VppMRoutePath(self.pg3.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_ff01_1.add_vpp_config() # # An (S,G). # one accepting interface, pg0, 2 forwarding interfaces # route_2001_ff01_1 = VppIpMRoute( self, "2001::1", "ff01::1", 256, MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, [VppMRoutePath(self.pg0.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT, proto=DpoProto.DPO_PROTO_IP6), VppMRoutePath(self.pg1.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD, proto=DpoProto.DPO_PROTO_IP6), VppMRoutePath(self.pg2.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_2001_ff01_1.add_vpp_config() # # An (*,G/m). # one accepting interface, pg0, 1 forwarding interface # route_ff01 = VppIpMRoute( self, "::", "ff01::", 16, MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, [VppMRoutePath(self.pg0.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT, proto=DpoProto.DPO_PROTO_IP6), VppMRoutePath(self.pg1.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD, proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_ff01.add_vpp_config() # # a stream that matches the route for (*, ff01::/16) # sent on the non-accepting interface # self.vapi.cli("clear trace") tx = self.create_stream_ip6(self.pg1, "2002::1", "ff01:2::255") self.send_and_assert_no_replies(self.pg1, tx, "RPF miss") # # a stream that matches the route for (*, ff01::/16) # sent on the accepting interface # self.vapi.cli("clear trace") tx = self.create_stream_ip6(self.pg0, "2002::1", "ff01:2::255") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() # We expect replications on Pg1 self.verify_capture_ip6(self.pg1, tx) # no replications on Pg0, Pg3 self.pg0.assert_nothing_captured( remark="IP multicast packets forwarded on PG0") self.pg2.assert_nothing_captured( remark="IP multicast packets forwarded on PG2") self.pg3.assert_nothing_captured( remark="IP multicast packets forwarded on PG3") # # Bounce the interface and it should still work # self.pg1.admin_down() self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() self.pg1.assert_nothing_captured( remark="IP multicast packets forwarded on down PG1") self.pg1.admin_up() self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() self.verify_capture_ip6(self.pg1, tx) # # a stream that matches the route for (*,ff01::1) # self.vapi.cli("clear trace") tx = self.create_stream_ip6(self.pg0, "2002::2", "ff01::1") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() # We expect replications on Pg1, 2, 3. 
self.verify_capture_ip6(self.pg1, tx) self.verify_capture_ip6(self.pg2, tx) self.verify_capture_ip6(self.pg3, tx) # no replications on Pg0 self.pg0.assert_nothing_captured( remark="IPv6 multicast packets forwarded on PG0") # # a stream that matches the route for (2001::1, ff00::1) # self.vapi.cli("clear trace") tx = self.create_stream_ip6(self.pg0, "2001::1", "ff01::1") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() # We expect replications on Pg1, 2, self.verify_capture_ip6(self.pg1, tx) self.verify_capture_ip6(self.pg2, tx) # no replications on Pg0, Pg3 self.pg0.assert_nothing_captured( remark="IP multicast packets forwarded on PG0") self.pg3.assert_nothing_captured( remark="IP multicast packets forwarded on PG3") def _mcast_connected_send_stream(self, dst_ip): self.vapi.cli("clear trace") tx = self.create_stream_ip4(self.pg0, self.pg0.remote_ip4, dst_ip) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() # We expect replications on Pg1. self.verify_capture_ip4(self.pg1, tx) return tx def test_ip_mcast_connected(self): """ IP Multicast Connected Source check """ # # A (*,G). # one accepting interface, pg0, 1 forwarding interfaces # route_232_1_1_1 = VppIpMRoute( self, "0.0.0.0", "232.1.1.1", 32, MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, [VppMRoutePath(self.pg0.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT), VppMRoutePath(self.pg1.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)]) route_232_1_1_1.add_vpp_config() route_232_1_1_1.update_entry_flags( MRouteEntryFlags.MFIB_ENTRY_FLAG_CONNECTED) # # Now the (*,G) is present, send from connected source # tx = self._mcast_connected_send_stream("232.1.1.1") # # Constrct a representation of the signal we expect on pg0 # signal_232_1_1_1_itf_0 = VppMFibSignal(self, route_232_1_1_1, self.pg0.sw_if_index, tx[0]) # # read the only expected signal # signals = self.vapi.mfib_signal_dump() self.assertEqual(1, len(signals)) signal_232_1_1_1_itf_0.compare(signals[0]) # # reading the signal allows for the generation of another # so send more packets and expect the next signal # tx = self._mcast_connected_send_stream("232.1.1.1") signals = self.vapi.mfib_signal_dump() self.assertEqual(1, len(signals)) signal_232_1_1_1_itf_0.compare(signals[0]) # # A Second entry with connected check # one accepting interface, pg0, 1 forwarding interfaces # route_232_1_1_2 = VppIpMRoute( self, "0.0.0.0", "232.1.1.2", 32, MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, [VppMRoutePath(self.pg0.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT), VppMRoutePath(self.pg1.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)]) route_232_1_1_2.add_vpp_config() route_232_1_1_2.update_entry_flags( MRouteEntryFlags.MFIB_ENTRY_FLAG_CONNECTED) # # Send traffic to both entries. One read should net us two signals # signal_232_1_1_2_itf_0 = VppMFibSignal(self, route_232_1_1_2, self.pg0.sw_if_index, tx[0]) tx = self._mcast_connected_send_stream("232.1.1.1") tx2 = self._mcast_connected_send_stream("232.1.1.2") # # read the only expected signal # signals = self.vapi.mfib_signal_dump() self.assertEqual(2, len(signals)) signal_232_1_1_1_itf_0.compare(signals[1]) signal_232_1_1_2_itf_0.compare(signals[0]) route_232_1_1_1.update_entry_flags( MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE) route_232_1_1_2.update_entry_flags( MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE) def test_ip_mcast_signal(self): """ IP Multicast Signal """ # # A (*,G). 
# one accepting interface, pg0, 1 forwarding interfaces # route_232_1_1_1 = VppIpMRoute( self, "0.0.0.0", "232.1.1.1", 32, MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, [VppMRoutePath(self.pg0.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT), VppMRoutePath(self.pg1.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)]) route_232_1_1_1.add_vpp_config() route_232_1_1_1.update_entry_flags( MRouteEntryFlags.MFIB_ENTRY_FLAG_SIGNAL) # # Now the (*,G) is present, send from connected source # tx = self._mcast_connected_send_stream("232.1.1.1") # # Constrct a representation of the signal we expect on pg0 # signal_232_1_1_1_itf_0 = VppMFibSignal(self, route_232_1_1_1, self.pg0.sw_if_index, tx[0]) # # read the only expected signal # signals = self.vapi.mfib_signal_dump() self.assertEqual(1, len(signals)) signal_232_1_1_1_itf_0.compare(signals[0]) # # reading the signal allows for the generation of another # so send more packets and expect the next signal # tx = self._mcast_connected_send_stream("232.1.1.1") signals = self.vapi.mfib_signal_dump() self.assertEqual(1, len(signals)) signal_232_1_1_1_itf_0.compare(signals[0]) # # Set the negate-signal on the accepting interval - the signals # should stop # route_232_1_1_1.update_path_flags( self.pg0.sw_if_index, (MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT | MRouteItfFlags.MFIB_ITF_FLAG_NEGATE_SIGNAL)) self.vapi.cli("clear trace") tx = self._mcast_connected_send_stream("232.1.1.1") signals = self.vapi.mfib_signal_dump() self.assertEqual(0, len(signals)) # # Clear the SIGNAL flag on the entry and the signals should # come back since the interface is still NEGATE-SIGNAL # route_232_1_1_1.update_entry_flags( MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE) tx = self._mcast_connected_send_stream("232.1.1.1") signals = self.vapi.mfib_signal_dump() self.assertEqual(1, len(signals)) signal_232_1_1_1_itf_0.compare(signals[0]) # # Lastly remove the NEGATE-SIGNAL from the interface and the # signals should stop # route_232_1_1_1.update_path_flags(self.pg0.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT) tx = self._mcast_connected_send_stream("232.1.1.1") signals = self.vapi.mfib_signal_dump() self.assertEqual(0, len(signals)) def test_ip_mcast_vrf(self): """ IP Multicast Replication in non-default table""" # # An (S,G). # one accepting interface, pg0, 2 forwarding interfaces # route_1_1_1_1_232_1_1_1 = VppIpMRoute( self, "1.1.1.1", "232.1.1.1", 64, MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, [VppMRoutePath(self.pg8.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT), VppMRoutePath(self.pg1.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD), VppMRoutePath(self.pg2.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)], table_id=10) route_1_1_1_1_232_1_1_1.add_vpp_config() # # a stream that matches the route for (1.1.1.1,232.1.1.1) # small packets # self.vapi.cli("clear trace") tx = self.create_stream_ip4(self.pg8, "1.1.1.1", "232.1.1.1") self.pg8.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() # We expect replications on Pg1 & 2 self.verify_capture_ip4(self.pg1, tx) self.verify_capture_ip4(self.pg2, tx) def test_ip6_mcast_vrf(self): """ IPv6 Multicast Replication in non-default table""" # # An (S,G). 
# one accepting interface, pg0, 2 forwarding interfaces # route_2001_ff01_1 = VppIpMRoute( self, "2001::1", "ff01::1", 256, MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, [VppMRoutePath(self.pg8.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT, proto=DpoProto.DPO_PROTO_IP6), VppMRoutePath(self.pg1.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD, proto=DpoProto.DPO_PROTO_IP6), VppMRoutePath(self.pg2.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_FORWARD, proto=DpoProto.DPO_PROTO_IP6)], table_id=10, is_ip6=1) route_2001_ff01_1.add_vpp_config() # # a stream that matches the route for (2001::1, ff00::1) # self.vapi.cli("clear trace") tx = self.create_stream_ip6(self.pg8, "2001::1", "ff01::1") self.pg8.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() # We expect replications on Pg1, 2, self.verify_capture_ip6(self.pg1, tx) self.verify_capture_ip6(self.pg2, tx) def test_bidir(self): """ IP Multicast Bi-directional """ # # A (*,G). The set of accepting interfaces matching the forwarding # route_232_1_1_1 = VppIpMRoute( self, "0.0.0.0", "232.1.1.1", 32, MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, [VppMRoutePath(self.pg0.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT | MRouteItfFlags.MFIB_ITF_FLAG_FORWARD), VppMRoutePath(self.pg1.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT | MRouteItfFlags.MFIB_ITF_FLAG_FORWARD), VppMRoutePath(self.pg2.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT | MRouteItfFlags.MFIB_ITF_FLAG_FORWARD), VppMRoutePath(self.pg3.sw_if_index, MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT | MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)]) route_232_1_1_1.add_vpp_config() tx = self.create_stream_ip4(self.pg0, "1.1.1.1", "232.1.1.1") self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) self.pg_start() # We expect replications on Pg1, 2, 3, but not on pg0 self.verify_capture_ip4(self.pg1, tx) self.verify_capture_ip4(self.pg2, tx) self.verify_capture_ip4(self.pg3, tx) self.pg0.assert_nothing_captured( remark="IP multicast packets forwarded on PG0") if __name__ == '__main__': unittest.main(testRunner=VppTestRunner)
py
1a39e9d6625f856625b0b787215d54e78cf23a1e
import logging as log

from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
from django_keycloak.keycloak import Connect


class Command(BaseCommand):
    help = "Synchronize users with keycloak"

    def handle(self, *args, **options):
        keycloak = Connect()
        User = get_user_model()

        remote_users = set([user.get("id") for user in keycloak.get_users()])
        local_users = set(str(_u.id) for _u in User.objects.all())

        users_to_remove = local_users.difference(remote_users)
        users_to_add = remote_users.difference(local_users)

        # Delete users that are no longer in keycloak
        User.objects.filter(id__in=list(users_to_remove)).delete()

        log.info(
            "Removed {} users".format(len(users_to_remove)),
            "and there are {} new users in keycloak that are not"
            " locally".format(len(users_to_add)),
        )
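# Usage note (added; illustrative only): as a Django management command this
# module is invoked by name, e.g. "python manage.py sync_keycloak_users".
# The actual command name depends on the file name under management/commands/,
# which this record does not show.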
py
1a39ea85dbfd0eec1f8681faf30cdf664614a7d1
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor

XPATH = {
    'name' : "//div[@id='team_images']/div[@class='mid']/ul/li[1]/img/@alt",
    'price' : "//div[@class='deal-buy']/p[@class='deal-price']/strong",
    'category' : "",
    'description' : "//div[@id='diem-noi-bat']/div[@class='digest']/p",
    'images' : "//div[@id='team_images']/div[@class='mid']/ul/li/img/@src",
    'canonical' : "",
    'base_url' : "",
    'brand' : ""
}
name = 'sieuthithoitrang.vn'
allowed_domains = ['sieuthithoitrang.vn']
start_urls = ['http://sieuthithoitrang.vn']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
    Rule(LinkExtractor(allow=['/\d+$']), 'parse_item'),
    Rule(LinkExtractor(allow=['/danh-muc/']), 'parse'),
    #Rule(LinkExtractor(), 'parse_item_and_links'),
]
py
1a39ead249bdf4fee1ac5679700e67989c23afd2
import os import logging from functools import partial import pandas as pd from solarforecastarbiter.io.fetch import eia from solarforecastarbiter.io.reference_observations import ( common, default_forecasts) from requests.exceptions import HTTPError logger = logging.getLogger('reference_data') def initialize_site_observations(api, site): """Creates an observation at the site. Parameters ---------- api : solarforecastarbiter.io.api.APISession API Session object, authenticated for the Reference user. site : solarforecastarbiter.datamodel.Site The site object for which to create the Observations. Notes ----- Currently only creates observations for net load [MW] (`f"EBA.{eia_site_id}.D.H"`), but EIA contains other variables that may be incorporated later (e.g. solar generation: `f"EBA.{eia_site_id}.NG.SUN.H"`). """ sfa_var = "net_load" logger.info(f'Creating {sfa_var} at {site.name}') try: common.create_observation(api, site, sfa_var) except HTTPError as e: logger.error(f'Could not create Observation for "{sfa_var}" ' f'at EIA site {site.name}') logger.debug(f'Error: {e.response.text}') def initialize_site_forecasts(api, site): """Creates a forecast at the site. Parameters ---------- api : solarforecastarbiter.io.api.APISession API Session object, authenticated for the Reference user. site : solarforecastarbiter.datamodel.Site The site object for which to create the Observations. """ common.create_forecasts( api, site, ["net_load"], default_forecasts.TEMPLATE_NETLOAD_PERSISTENCE_FORECASTS) def fetch(api, site, start, end, *, eia_api_key): """Retrieve observation data for a EIA site between start and end. Parameters ---------- api : solarforecastarbiter.io.APISession Unused but conforms to common.update_site_observations call site : solarforecastarbiter.datamodel.Site Site object with the appropriate metadata. start : datetime The beginning of the period to request data for. end : datetime The end of the period to request data for. eia_api_key : str API key for api.eia.gov Returns ------- data : pandas.DataFrame All of the requested data as a single DataFrame. Notes ----- Currently only fetches observations for net load [MW] (`f"EBA.{eia_site_id}.D.H"`), but EIA contains other variables that may be incorporated later (e.g. solar generation: `f"EBA.{eia_site_id}.NG.SUN.H"`). """ try: site_extra_params = common.decode_extra_parameters(site) except ValueError: return pd.DataFrame() eia_site_id = site_extra_params['network_api_id'] series_id = f"EBA.{eia_site_id}.D.H" # hourly net load (demand) obs_df = eia.get_eia_data( series_id, eia_api_key, start, end ) if obs_df.empty: logger.warning(f'Data for site {site.name} contained no ' f'entries from {start} to {end}.') return pd.DataFrame() obs_df = obs_df.rename(columns={"value": "net_load"}) return obs_df def update_observation_data(api, sites, observations, start, end): """Retrieve data from the network, and then format and post it to each observation at the site. Parameters ---------- api : solarforecastarbiter.io.api.APISession An active Reference user session. sites: list of solarforecastarbiter.datamodel.Site List of all reference sites. observations: list of solarforecastarbiter.datamodel.Observation List of all reference observations. start : datetime The beginning of the period to request data for. end : datetime The end of the period to request data for. Raises ------ KeyError If EIA_API_KEY environmental variable is not set. 
""" eia_api_key = os.getenv("EIA_API_KEY") if eia_api_key is None: raise KeyError('"EIA_API_KEY" environment variable must be ' 'set to update EIA observation data.') eia_sites = common.filter_by_networks(sites, ['EIA']) for site in eia_sites: common.update_site_observations( api, partial(fetch, eia_api_key=eia_api_key), site, observations, start, end)
py
1a39eadfcd6195b2245d62fe16281d8853bbe621
"""
Uhh... Here we import stuff
"""
from .do import do
from .api3 import API as api3
from .api3 import JWT as jwt
from .sub import blueprint as subs
py
1a39eafa45528d91971755bbf825ff6496015779
from django.contrib import admin

from .models import Listing


class ListingAdmin(admin.ModelAdmin):
    list_display = ('id', 'title', 'price', 'is_published', 'list_date', 'realtor')
    list_display_links = ('id', 'title')
    list_filter = ('realtor',)
    list_editable = ('is_published',)
    search_fields = ('title', 'description', 'address', 'city', 'state', 'zipcode', 'price')
    list_per_page = 25


admin.site.register(Listing, ListingAdmin)
py
1a39eb548b61d6fbc6303d2827c9c2d9705df980
import math

file = open('day-5.input')
result = 0

# F,  ==> lower half ==> [min, math.floor((max - min) / 2)]
# B,R ==> upper half


def get_row(expression):
    min = 0
    max = 127
    for i in range(7):
        selector = expression[i]
        if selector == 'F':
            max = math.floor((max + min) / 2)
        elif selector == 'B':
            min = math.ceil((max + min) / 2)
    return max


def get_column(expression):
    min = 0
    max = 7
    for i in range(3):
        selector = expression[i]
        if selector == 'L':
            max = math.floor((max + min) / 2)
        elif selector == 'R':
            min = math.ceil((max + min) / 2)
    return max


def get_seat_id(seat_row, seat_column):
    return 8 * seat_row + seat_column


all_seats = []
min_found_seat_row = 127
max_found_seat_row = 0
for row in range(1, 126):  # exclude very front and very back rows
    for column in range(8):
        all_seats.append((row, column))

for line in file:
    line = line.strip()
    seat_row = get_row(line[0:7])
    if seat_row < min_found_seat_row:
        min_found_seat_row = seat_row
    if seat_row > max_found_seat_row:
        max_found_seat_row = seat_row
    seat_column = get_column(line[-3:])
    all_seats.remove((seat_row, seat_column))

for (row, column) in all_seats[:]:
    if row < min_found_seat_row + 1:
        all_seats.remove((row, column))
    if row > max_found_seat_row - 1:
        all_seats.remove((row, column))

print(min_found_seat_row)
print(max_found_seat_row)

found_seat = all_seats[0]
result = get_seat_id(found_seat[0], found_seat[1])
print(result)
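# Worked example (added for illustration; the sample boarding pass comes from
# the Advent of Code 2020 day 5 problem statement, not from this input file):
# "FBFBBFFRLR" decodes with get_row/get_column to row 44 and column 5, so
# get_seat_id(44, 5) == 8 * 44 + 5 == 357.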
py
1a39eb72cb6ccf2f02e7e6aa60c05ea03196f4dd
# coding: utf-8 """ NiFi Rest Api The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service. OpenAPI spec version: 1.11.1-SNAPSHOT Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class PositionDTO(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'x': 'float', 'y': 'float' } attribute_map = { 'x': 'x', 'y': 'y' } def __init__(self, x=None, y=None): """ PositionDTO - a model defined in Swagger """ self._x = None self._y = None if x is not None: self.x = x if y is not None: self.y = y @property def x(self): """ Gets the x of this PositionDTO. The x coordinate. :return: The x of this PositionDTO. :rtype: float """ return self._x @x.setter def x(self, x): """ Sets the x of this PositionDTO. The x coordinate. :param x: The x of this PositionDTO. :type: float """ self._x = x @property def y(self): """ Gets the y of this PositionDTO. The y coordinate. :return: The y of this PositionDTO. :rtype: float """ return self._y @y.setter def y(self, y): """ Sets the y of this PositionDTO. The y coordinate. :param y: The y of this PositionDTO. :type: float """ self._y = y def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, PositionDTO): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
py
1a39eba12b1e4088a4b3b81fcd9aee9902347b20
#!/usr/bin/env python3 #------------------------------------------------------------------------------- # ============LICENSE_START======================================================= # Copyright (C) 2018 Sven van der Meer. All rights reserved. # ================================================================================ # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # SPDX-License-Identifier: Apache-2.0 # ============LICENSE_END========================================================= #------------------------------------------------------------------------------- ## ## acronyms-val - validates YAML files of SKB acronyms ## ## @author Sven van der Meer <[email protected]> ## @version v0.0.0 ## ## ## Includes, all we need ## import yaml ## parsing YAML files import os ## operating system, e.g. file handling from os import walk ## for walking directories import functools ## some tools for functions import sys, getopt ## system for exit, getopt for CLI parsing import glob ## gobal globbing to get YAML files recursively import pathlib ## mkdirs in Python import datetime ## to get date/time for ADOC files ## ## Global variables ## task_level = "warn" ## warning level yaml_dir = '' ## YAML directory acronyms = {} ## dictionary of acronyms ## ## DO NOT CHANGE CODE BELOW, unless you know what you are doing ## ## ## function: print help, for empty or wrong command line ## def help(): print("") print("acronyms-val - validates YAML files of SKB acronyms\n") print(" Usage: acronyms-val [options]\n") print(" Options") print(" [-h | --help] - this help screen") print(" [-T | --task-level] <level> - task log level: error, warn, warn-strict, info, debug, trace") print(" [-y | --yaml-directory] <dir> - top YAML directory") print("\n") ## ## function: parse command line ## def cli(argv): global yaml_dir global task_level try: opts, args = getopt.getopt(argv,"hT:y:",["yaml-directory=","help","task-level="]) except getopt.GetoptError: help() sys.exit(70) for opt, arg in opts: if opt in ("-h", "--help"): help() sys.exit(0) elif opt in ("-T", "--task-level"): task_level = arg elif opt in ("-y", "--yaml-directory"): yaml_dir = arg ## ## function: validates a single YAML file ## def validate_file(file, entries, key): ## check for required keys found_keys = True expected_keys = ( 'short' , 'short-target', 'long', 'long-target', 'description', 'notes', 'urls') errors = "" if not 'short' in entries: errors += " --> did not find key 'short'\n" found_keys = False else: if len(entries['short']) == 0: errors += " --> key 'short' with no entry\n" found_keys = False if not 'long' in entries: errors += " --> did not find key 'long'\n" found_keys = False else: if len(entries['long']) == 0: errors += " --> key 'long' with no entry\n" found_keys = False if 'long-target' in entries and len(entries['long-target']) == 0: errors += " --> key 'long-target' with no entry\n" found_keys = False if 'description' in entries and len(entries['description']) == 0: errors += " --> key 'description' with no entry\n" found_keys = False if 'notes' 
in entries and len(entries['notes']) == 0: errors += " --> key 'notes' with no entry\n" found_keys = False if 'urls' in entries and len(entries['urls']) == 0: errors += " --> key 'urls' with no entry\n" found_keys = False if not all(elem in expected_keys for elem in entries): errors += " --> unknown key\n" found_keys = False file_short = file[len(yaml_dir)+1:] dir_short = file_short.rsplit('/', 1)[0] key_short = key.rsplit('/', 1)[0] if not key_short == dir_short: errors += " --> something wrong in key path (" + key_short + ") and directory (" + dir_short + ")\n" found_keys = False if found_keys == False: print(" -> validation failed") print("%s" % errors) sys.exit(80) ## ## function: process a single YAML file ## def process_file(file): file_exists = os.path.isfile(file) if file_exists == True: stream = open(file,'r') data = yaml.load(stream) stream.close() entries = data[list(data.keys())[0]] ## dictionary with all entries key = list(data.keys())[0] ## key name of the YAML spec validate_file(file, entries, key) if not key in acronyms: entries['src-file'] = file acronyms[key] = entries else: print(" -> key %s already in dictionary, defined in %s" % (key, acronyms[key]['src-file'])) sys.exit(80) else: print("error: could not open file: %s" % file) sys.exit(72) ## ## function: main function ## def main(argv): cli(argv) print(" > YAML directory: %s" % yaml_dir) dir_exists = os.path.isdir(yaml_dir) if dir_exists == True: files = glob.glob(yaml_dir + '/**/*.yaml', recursive=True) for file in files: print("\n > processing: .../%s" % file[len(yaml_dir)+1:]) process_file(file) print("\n > processed %d YAML files, found %d acronyms" % (len(files), len(acronyms))) else: print("error: could not open YAML directory: %s" % yaml_dir) sys.exit(71) ## ## Call main ## if __name__ == "__main__": main(sys.argv[1:]) print(" > done")
py
1a39eba6c9dd2c862004673052d25edec6e29ad2
# Generated by Django 4.0 on 2022-01-11 17:28

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('account', '0006_alter_business_options'),
    ]

    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(blank=True, null=True)),
                ('updated_on', models.DateTimeField(blank=True, null=True)),
                ('created_by', models.CharField(blank=True, max_length=255, null=True)),
                ('updated_by', models.CharField(blank=True, max_length=255, null=True)),
                ('total_amount', models.DecimalField(decimal_places=2, default=0, max_digits=9)),
                ('net_amount', models.DecimalField(decimal_places=2, default=0, max_digits=9)),
                ('paid_amount', models.DecimalField(decimal_places=2, default=0, max_digits=9)),
                ('discount', models.DecimalField(decimal_places=1, default=0, max_digits=4)),
                ('delivery_date', models.DateTimeField()),
                ('order_status', models.CharField(default='Not yet started', max_length=64)),
                ('comments', models.TextField(blank=True, default='', max_length=1024)),
                ('is_one_time_delivery', models.BooleanField(default=True)),
                ('is_deleted', models.BooleanField(default=False)),
                ('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='account.user')),
            ],
            options={
                'ordering': ('-created_on',),
            },
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(blank=True, null=True)),
                ('updated_on', models.DateTimeField(blank=True, null=True)),
                ('created_by', models.CharField(blank=True, max_length=255, null=True)),
                ('updated_by', models.CharField(blank=True, max_length=255, null=True)),
                ('item_type', models.CharField(max_length=255)),
                ('item_price', models.DecimalField(decimal_places=2, default=0, max_digits=7)),
                ('quantity', models.IntegerField(default=1)),
                ('status', models.CharField(default='Not yet started', max_length=64)),
                ('delivery_date', models.DateTimeField()),
                ('comments', models.TextField(blank=True, default='', max_length=512)),
                ('is_deleted', models.BooleanField(default=False)),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_items', related_query_name='order_item', to='order.order')),
            ],
            options={
                'ordering': ('-id',),
            },
        ),
    ]
py
1a39ec048da7f24aa9147424d449ace62f3d8826
"""
The Sponge Roll Problem with Columnwise Column Generation for the PuLP Modeller

Authors: Antony Phillips, Dr Stuart Mitchell    2008
"""

# Import Column Generation functions
from CGcolumnwise import *

# The Master Problem is created
prob, obj, constraints = createMaster()

# A list of starting patterns is created
newPatterns = [[1,0,0],[0,1,0],[0,0,1]]

# New patterns will be added until newPatterns is an empty list
while newPatterns:
    # The new patterns are added to the problem
    addPatterns(obj, constraints, newPatterns)

    # The master problem is solved, and the dual variables are returned
    duals = masterSolve(prob)

    # The sub problem is solved and a new pattern will be returned if there is
    # one which can reduce the master objective function
    newPatterns = subSolve(duals)

# The master problem is solved with Integer Constraints not relaxed
solution, varsdict = masterSolve(prob, relax=False)

# Display Solution
for i, j in list(varsdict.items()):
    print(i, "=", j)

print("objective = ", solution)
py
1a39ec7e2c003f2805f3a3931a3ca6337af5393a
# Generated by Django 2.2.9 on 2021-11-05 01:51

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('posts', '0005_auto_20211104_0012'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='group',
            options={'ordering': ['title']},
        ),
        migrations.AlterModelOptions(
            name='post',
            options={'ordering': ['-pub_date', 'author']},
        ),
        migrations.AlterField(
            model_name='group',
            name='description',
            field=models.TextField(blank=True, default='', verbose_name='Описание'),
        ),
        migrations.AlterField(
            model_name='group',
            name='slug',
            field=models.SlugField(max_length=200, unique=True, verbose_name='Подзаголовок'),
        ),
        migrations.AlterField(
            model_name='group',
            name='title',
            field=models.CharField(max_length=200, verbose_name='Заголовок'),
        ),
        migrations.AlterField(
            model_name='post',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL, verbose_name='Автор'),
        ),
        migrations.AlterField(
            model_name='post',
            name='group',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='groups', to='posts.Group', verbose_name='Группа'),
        ),
        migrations.AlterField(
            model_name='post',
            name='pub_date',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Дата'),
        ),
        migrations.AlterField(
            model_name='post',
            name='text',
            field=models.TextField(verbose_name='Текст'),
        ),
        migrations.AddIndex(
            model_name='group',
            index=models.Index(fields=['title'], name='title_idx'),
        ),
        migrations.AddIndex(
            model_name='post',
            index=models.Index(fields=['author'], name='author_idx'),
        ),
        migrations.AddIndex(
            model_name='post',
            index=models.Index(fields=['text'], name='search_text_idx'),
        ),
        migrations.AlterModelTable(
            name='group',
            table='groups',
        ),
        migrations.AlterModelTable(
            name='post',
            table='posts',
        ),
    ]
py
1a39ed0b29a68325fe667fffcf8ba3dc488a5978
from django.conf.urls import url, include

from front_end.views.user import sign_in, sign_up, settings
from front_end.views.user import training_information
from front_end.views.user import search_user
from front_end.views.user import following_followers
from front_end.views.user import user_group

urlpatterns = [
    url(r'^login/', sign_in, name='sign_in'),
    url(r'^register/', sign_up, name='sign_up'),
    url(r'^settings/(\S+)/', settings, name='user_settings'),
    url(r'^info/(\S+)/', training_information, name='user_training_info'),
    url(r'^follow/(\S+)/', following_followers, name='following_followers'),
    url(r'^group/(\S+)/', user_group, name='user_group'),
    url(r'^search/(\S+)/(\d+)/(\d+)/', search_user, name='search_user'),
    url(r'^submissions/', include('front_end.url.submissions')),
    url(r'^problems/', include('front_end.url.problems')),
    url(r'^categories/', include('front_end.url.categories')),
]
py
1a39ee4b48491a7784db9b304b583e188e934161
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2019-Present Datadog, Inc. import re # noqa: F401 import sys # noqa: F401 from datadog_api_client.v2.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, ) class LogsListRequestPage(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = {} validations = { ("limit",): { "inclusive_maximum": 1000, }, } additional_properties_type = None _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ return { "cursor": (str,), # noqa: E501 "limit": (int,), # noqa: E501 } @cached_property def discriminator(): return None attribute_map = { "cursor": "cursor", # noqa: E501 "limit": "limit", # noqa: E501 } _composed_schemas = {} required_properties = set( [ "_data_store", "_check_type", "_spec_property_naming", "_path_to_item", "_configuration", "_visited_composed_classes", ] ) @convert_js_args_to_python_args def __init__(self, *args, **kwargs): # noqa: E501 """LogsListRequestPage - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. 
For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) cursor (str): List following results with a cursor provided in the previous query.. [optional] # noqa: E501 limit (int): Maximum number of logs in the response.. [optional] if omitted the server will use the default value of 10 # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) _spec_property_naming = kwargs.pop("_spec_property_naming", False) _path_to_item = kwargs.pop("_path_to_item", ()) _configuration = kwargs.pop("_configuration", None) _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map and self._configuration is not None and self._configuration.discard_unknown_keys and self.additional_properties_type is None ): # discard variable. continue setattr(self, var_name, var_value)
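# A minimal usage sketch for the model above; the cursor string is a
# placeholder, and `limit` must stay within the inclusive_maximum (1000)
# declared in `validations`.
if __name__ == "__main__":
    page = LogsListRequestPage(cursor="eyJhZnRlciI6In...", limit=50)  # placeholder cursor from a previous query
    print(page.to_dict())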
py
1a39ef7fc27b09c1cb763099bbe711822a642bc1
# Copyright 2021, The TensorFlow Federated Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pytype: skip-file # This modules disables the Pytype analyzer, see # https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more # information. """A set of utility methods for serializing Value protos using pybind11 bindings.""" import collections import os import os.path import tempfile from typing import Any, Collection, List, Mapping, Optional, Sequence, Tuple, Union import warnings import zipfile import numpy as np import tensorflow as tf from tensorflow_federated.proto.v0 import computation_pb2 from tensorflow_federated.proto.v0 import executor_pb2 from tensorflow_federated.python.common_libs import py_typecheck from tensorflow_federated.python.common_libs import structure from tensorflow_federated.python.common_libs import tracing from tensorflow_federated.python.core.impl.computation import computation_impl from tensorflow_federated.python.core.impl.executors import executor_bindings from tensorflow_federated.python.core.impl.executors import executor_utils from tensorflow_federated.python.core.impl.types import computation_types from tensorflow_federated.python.core.impl.types import placements from tensorflow_federated.python.core.impl.types import type_analysis from tensorflow_federated.python.core.impl.types import type_conversions from tensorflow_federated.python.core.impl.types import type_serialization from tensorflow_federated.python.core.impl.types import type_transformations from tensorflow_federated.python.core.impl.utils import tensorflow_utils _SerializeReturnType = Tuple[executor_pb2.Value, computation_types.Type] _DeserializeReturnType = Tuple[Any, computation_types.Type] # The maximum size allowed for serialized sequence values. Sequence that # serialize to values larger than this will result in errors being raised. This # likely occurs when the sequence is dependent on, and thus pulling in, many of # variables from the graph. _DEFAULT_MAX_SERIALIZED_SEQUENCE_SIZE_BYTES = 20 * (1024**2) # 20 MB class DatasetSerializationError(Exception): """Error raised during Dataset serialization or deserialization.""" pass @tracing.trace def _serialize_computation( comp: computation_pb2.Computation, type_spec: Optional[computation_types.Type]) -> _SerializeReturnType: """Serializes a TFF computation.""" type_spec = executor_utils.reconcile_value_type_with_type_spec( type_serialization.deserialize_type(comp.type), type_spec) return executor_pb2.Value(computation=comp), type_spec @tracing.trace def _serialize_tensor_value( value: Any, type_spec: computation_types.TensorType ) -> Tuple[executor_pb2.Value, computation_types.TensorType]: """Serializes a tensor value into `executor_pb2.Value`. Args: value: A Numpy array or other object understood by `tf.make_tensor_proto`. type_spec: A `tff.TensorType`. 
Returns: A tuple `(value_proto, ret_type_spec)` in which `value_proto` is an instance of `executor_pb2.Value` with the serialized content of `value`, and `ret_type_spec` is the type of the serialized value. The `ret_type_spec` is the same as the argument `type_spec` if that argument was not `None`. If the argument was `None`, `ret_type_spec` is a type determined from `value`. Raises: TypeError: If the arguments are of the wrong types. ValueError: If the value is malformed. """ original_value = value if tf.is_tensor(value): if isinstance(value, tf.Variable): value = value.read_value() if tf.executing_eagerly(): value = value.numpy() else: # Attempt to extract the value using the current graph context. with tf.compat.v1.Session() as sess: value = sess.run(value) # If we got a string or bytes scalar, wrap it in numpy so it has a dtype and # shape. if isinstance(value, bytes): value = np.bytes_(value) elif isinstance(value, str): value = np.str_(value) else: value = np.asarray(value) if not tf.TensorShape(value.shape).is_compatible_with(type_spec.shape): raise TypeError(f'Cannot serialize tensor with shape {value.shape} to ' f'shape {type_spec.shape}.') if value.dtype != type_spec.dtype.as_numpy_dtype: try: value = value.astype(type_spec.dtype.as_numpy_dtype, casting='same_kind') except TypeError as te: value_type_string = py_typecheck.type_string(type(original_value)) raise TypeError( f'Failed to serialize value of Python type {value_type_string} to ' f'a tensor of type {type_spec}.\nValue: {original_value}') from te return executor_bindings.serialize_tensor_value(value), type_spec def _serialize_dataset( dataset, max_serialized_size_bytes=_DEFAULT_MAX_SERIALIZED_SEQUENCE_SIZE_BYTES): """Serializes a `tf.data.Dataset` value into a `bytes` object. Args: dataset: A `tf.data.Dataset`. max_serialized_size_bytes: An `int` size in bytes designating the threshold on when to raise an error if the resulting serialization is too big. Returns: A `bytes` object that can be sent to `tensorflow_serialization.deserialize_dataset` to recover the original `tf.data.Dataset`. Raises: SerializationError: if there was an error in TensorFlow during serialization. """ py_typecheck.check_type(dataset, type_conversions.TF_DATASET_REPRESENTATION_TYPES) dataset_graph = tf.raw_ops.DatasetToGraphV2( input_dataset=tf.data.experimental.to_variant(dataset)) if tf.executing_eagerly(): dataset_graph_def_bytes = dataset_graph.numpy() else: dataset_graph_def_bytes = tf.compat.v1.Session().run(dataset_graph) if len(dataset_graph_def_bytes) > max_serialized_size_bytes: raise ValueError('Serialized size of Dataset ({:d} bytes) exceeds maximum ' 'allowed ({:d} bytes)'.format( len(dataset_graph_def_bytes), max_serialized_size_bytes)) return dataset_graph_def_bytes def _check_container_compat_with_tf_nest(type_spec: computation_types.Type): """Asserts that all `StructTypes` with names have OrderedDict containers.""" def _names_are_in_sorted_order(name_sequence: Sequence[str]) -> bool: return sorted(name_sequence) == name_sequence def _check_ordereddict_container_for_struct(type_to_check): if not type_to_check.is_struct(): return type_to_check, False # We can't use `dir` here, since it sorts the names before returning. We # also must filter to names which are actually present. 
names_in_sequence_order = structure.name_list(type_to_check) names_are_sorted = _names_are_in_sorted_order(names_in_sequence_order) has_no_names = not bool(names_in_sequence_order) if has_no_names or (names_in_sequence_order and names_are_sorted): # If alphabetical order matches sequence order, TFF's deserialization will # traverse the structure correctly; there is no ambiguity here. On the # other hand, if there are no names, sequence order is the only method of # traversal, so there is no ambiguity here either. return type_to_check, False elif not type_to_check.is_struct_with_python(): raise ValueError('Attempting to serialize a named struct type with ' 'ambiguous traversal order (sequence order distinct ' 'from alphabetical order) without a Python container; ' 'this is an unsafe operation, as TFF cannot determine ' 'the intended traversal order after deserializing the ' 'proto due to inconsistent behavior of tf.nest.') container_type = computation_types.StructWithPythonType.get_container_type( type_to_check) if (not names_are_sorted) and container_type is not collections.OrderedDict: raise ValueError('Attempted to serialize a dataset yielding named ' 'elements in non-sorted sequence order with ' f'non-OrderedDict container (type {container_type}). ' 'This is an ambiguous operation; `tf.nest` behaves in ' 'a manner which depends on the Python type of this ' 'container, so coercing the dataset reconstructed ' 'from the resulting Value proto depends on assuming a ' 'single Python type here. Please prefer to use ' '`collections.OrderedDict` containers for the elements ' 'your dataset yields.') return type_to_check, False type_transformations.transform_type_postorder( type_spec, _check_ordereddict_container_for_struct) @tracing.trace def _serialize_sequence_value( value: Union[Union[type_conversions.TF_DATASET_REPRESENTATION_TYPES], List[Any]], type_spec: computation_types.SequenceType ) -> computation_types.SequenceType: """Serializes a `tf.data.Dataset` value into `executor_pb2.Value`. Args: value: A `tf.data.Dataset`, or equivalent list of values convertible to (potentially structures of) tensors. type_spec: A `computation_types.Type` specifying the TFF sequence type of `value.` Returns: A tuple `(value_proto, type_spec)` in which `value_proto` is an instance of `executor_pb2.Value` with the serialized content of `value`, and `type_spec` is the type of the serialized value. """ if isinstance(value, list): value = tensorflow_utils.make_data_set_from_elements( None, value, type_spec.element) if not isinstance(value, type_conversions.TF_DATASET_REPRESENTATION_TYPES): raise TypeError( 'Cannot serialize Python type {!s} as TFF type {!s}.'.format( py_typecheck.type_string(type(value)), type_spec if type_spec is not None else 'unknown')) element_type = computation_types.to_type(value.element_spec) _check_container_compat_with_tf_nest(element_type) value_type = computation_types.SequenceType(element_type) if not type_spec.is_assignable_from(value_type): raise TypeError( 'Cannot serialize dataset with elements of type {!s} as TFF type {!s}.' .format(value_type, type_spec if type_spec is not None else 'unknown')) value_proto = executor_pb2.Value() # TFF must store the type spec here because TF will lose the ordering of the # names for `tf.data.Dataset` that return elements of # `collections.abc.Mapping` type. This allows TFF to preserve and restore the # key ordering upon deserialization. 
value_proto.sequence.serialized_graph_def = _serialize_dataset(value) value_proto.sequence.element_type.CopyFrom( type_serialization.serialize_type(element_type)) return value_proto, type_spec @tracing.trace def _serialize_struct_type( struct_typed_value: Any, type_spec: computation_types.StructType, ) -> computation_types.StructType: """Serializes a value of tuple type.""" value_structure = structure.from_container(struct_typed_value) if len(value_structure) != len(type_spec): raise TypeError('Cannot serialize a struct value of ' f'{len(value_structure)} elements to a struct type ' f'requiring {len(type_spec)} elements. Trying to serialize' f'\n{struct_typed_value!r}\nto\n{type_spec}.') type_elem_iter = structure.iter_elements(type_spec) val_elem_iter = structure.iter_elements(value_structure) elements = [] for (e_name, e_type), (_, e_val) in zip(type_elem_iter, val_elem_iter): e_value, _ = serialize_value(e_val, e_type) if e_name: element = executor_pb2.Value.Struct.Element(name=e_name, value=e_value) else: element = executor_pb2.Value.Struct.Element(value=e_value) elements.append(element) value_proto = executor_pb2.Value( struct=executor_pb2.Value.Struct(element=elements)) return value_proto, type_spec @tracing.trace def _serialize_federated_value( federated_value: Any, type_spec: computation_types.FederatedType ) -> computation_types.FederatedType: """Serializes a value of federated type.""" if type_spec.all_equal: value = [federated_value] else: value = federated_value py_typecheck.check_type(value, list) value_proto = executor_pb2.Value() for v in value: federated_value_proto, it_type = serialize_value(v, type_spec.member) type_spec.member.check_assignable_from(it_type) value_proto.federated.value.append(federated_value_proto) value_proto.federated.type.CopyFrom( type_serialization.serialize_type(type_spec).federated) return value_proto, type_spec @tracing.trace def serialize_value( value: Any, type_spec: Optional[computation_types.Type] = None, ) -> _SerializeReturnType: """Serializes a value into `executor_pb2.Value`. We use a switch/function pattern in the body here (and in `deserialize_value` below in order to persist more information in traces and profiling. Args: value: A value to be serialized. type_spec: Optional type spec, a `tff.Type` or something convertible to it. Returns: A 2-tuple of serialized value and `tff.Type` that represents the TFF type of the serialized value. Raises: TypeError: If the arguments are of the wrong types. ValueError: If the value is malformed. """ type_spec = computation_types.to_type(type_spec) if isinstance(value, computation_pb2.Computation): return _serialize_computation(value, type_spec) elif isinstance(value, computation_impl.ConcreteComputation): return _serialize_computation( computation_impl.ConcreteComputation.get_proto(value), executor_utils.reconcile_value_with_type_spec(value, type_spec)) elif type_spec is None: raise TypeError('A type hint is required when serializing a value which ' 'is not a TFF computation. 
Asked to serialized value {v} ' ' of type {t} with None type spec.'.format( v=value, t=type(value))) elif type_spec.is_tensor(): return _serialize_tensor_value(value, type_spec) elif type_spec.is_sequence(): return _serialize_sequence_value(value, type_spec) elif type_spec.is_struct(): return _serialize_struct_type(value, type_spec) elif type_spec.is_federated(): return _serialize_federated_value(value, type_spec) else: raise ValueError( 'Unable to serialize value with Python type {} and {} TFF type.'.format( str(py_typecheck.type_string(type(value))), str(type_spec) if type_spec is not None else 'unknown')) @tracing.trace def _deserialize_computation( value_proto: executor_pb2.Value) -> _DeserializeReturnType: """Deserializes a TFF computation.""" return (value_proto.computation, type_serialization.deserialize_type(value_proto.computation.type)) @tracing.trace def _deserialize_tensor_value( value_proto: executor_pb2.Value) -> _DeserializeReturnType: """Deserializes a tensor value from `.Value`. Args: value_proto: An instance of `executor_pb2.Value`. Returns: A tuple `(value, type_spec)`, where `value` is a Numpy array that represents the deserialized value, and `type_spec` is an instance of `tff.TensorType` that represents its type. Raises: TypeError: If the arguments are of the wrong types. ValueError: If the value is malformed. """ value = executor_bindings.deserialize_tensor_value(value_proto) value_type = computation_types.TensorType( dtype=value.dtype, shape=value.shape) if not value.shape: # Unwrap the scalar array as just a primitive numeric. value = value.dtype.type(value) return value, value_type def _deserialize_dataset_from_zipped_saved_model(serialized_bytes): """Deserializes a zipped SavedModel `bytes` object to a `tf.data.Dataset`. DEPRECATED: this method is deprecated and replaced by `_deserialize_dataset_from_graph_def`. Args: serialized_bytes: `bytes` object produced by older versions of `tensorflow_serialization.serialize_dataset` that produced zipped SavedModel `bytes` strings. Returns: A `tf.data.Dataset` instance. Raises: SerializationError: if there was an error in TensorFlow during serialization. """ py_typecheck.check_type(serialized_bytes, bytes) temp_dir = tempfile.mkdtemp('dataset') fd, temp_zip = tempfile.mkstemp('zip') os.close(fd) try: with open(temp_zip, 'wb') as f: f.write(serialized_bytes) with zipfile.ZipFile(temp_zip, 'r') as z: z.extractall(path=temp_dir) loaded = tf.saved_model.load(temp_dir) # TODO(b/156302055): Follow up here when bug is resolved, either remove # if this function call stops failing by default, or leave if this is # working as intended. with tf.device('cpu'): ds = loaded.dataset_fn() except Exception as e: # pylint: disable=broad-except raise DatasetSerializationError( 'Error deserializing tff.Sequence value. Inner error: {!s}'.format( e)) from e finally: tf.io.gfile.rmtree(temp_dir) tf.io.gfile.remove(temp_zip) return ds def _deserialize_dataset_from_graph_def(serialized_graph_def: bytes, element_type: computation_types.Type): """Deserializes a serialized `tf.compat.v1.GraphDef` to a `tf.data.Dataset`. Args: serialized_graph_def: `bytes` object produced by `tensorflow_serialization.serialize_dataset` element_type: a `tff.Type` object representing the type structure of the elements yielded from the dataset. Returns: A `tf.data.Dataset` instance. 
""" py_typecheck.check_type(element_type, computation_types.Type) type_analysis.check_tensorflow_compatible_type(element_type) def transform_to_tff_known_type( type_spec: computation_types.Type) -> Tuple[computation_types.Type, bool]: """Transforms `StructType` to `StructWithPythonType`.""" if type_spec.is_struct() and not type_spec.is_struct_with_python(): field_is_named = tuple( name is not None for name, _ in structure.iter_elements(type_spec)) has_names = any(field_is_named) is_all_named = all(field_is_named) if is_all_named: return computation_types.StructWithPythonType( elements=structure.iter_elements(type_spec), container_type=collections.OrderedDict), True elif not has_names: return computation_types.StructWithPythonType( elements=structure.iter_elements(type_spec), container_type=tuple), True else: raise TypeError('Cannot represent TFF type in TF because it contains ' f'partially named structures. Type: {type_spec}') return type_spec, False if element_type.is_struct(): # TF doesn't support `structure.Struct` types, so we must transform the # `StructType` into a `StructWithPythonType` for use as the # `tf.data.Dataset.element_spec` later. tf_compatible_type, _ = type_transformations.transform_type_postorder( element_type, transform_to_tff_known_type) else: # We've checked this is only a struct or tensors, so we know this is a # `TensorType` here and will use as-is. tf_compatible_type = element_type def type_to_tensorspec(t: computation_types.TensorType) -> tf.TensorSpec: return tf.TensorSpec(shape=t.shape, dtype=t.dtype) element_spec = type_conversions.structure_from_tensor_type_tree( type_to_tensorspec, tf_compatible_type) ds = tf.data.experimental.from_variant( tf.raw_ops.DatasetFromGraph(graph_def=serialized_graph_def), structure=element_spec) # If a serialized dataset had elements of nested structes of tensors (e.g. # `dict`, `OrderedDict`), the deserialized dataset will return `dict`, # `tuple`, or `namedtuple` (loses `collections.OrderedDict` in a conversion). # # Since the dataset will only be used inside TFF, we wrap the dictionary # coming from TF in an `OrderedDict` when necessary (a type that both TF and # TFF understand), using the field order stored in the TFF type stored during # serialization. return tensorflow_utils.coerce_dataset_elements_to_tff_type_spec( ds, tf_compatible_type) @tracing.trace def _deserialize_sequence_value( sequence_value_proto: executor_pb2.Value.Sequence, type_hint: Optional[computation_types.Type] = None ) -> _DeserializeReturnType: """Deserializes a `tf.data.Dataset`. Args: sequence_value_proto: `Sequence` protocol buffer message. type_hint: A `computation_types.Type` that hints at what the value type should be for executors that only return values. If the `sequence_value_proto.element_type` field was not set, the `type_hint` is used instead. Returns: A tuple of `(tf.data.Dataset, tff.Type)`. """ if sequence_value_proto.HasField('element_type'): element_type = type_serialization.deserialize_type( sequence_value_proto.element_type) elif type_hint is not None: element_type = type_hint.element else: raise ValueError( 'Cannot deserialize a sequence Value proto that without one of ' '`element_type` proto field or `element_type_hint`') which_value = sequence_value_proto.WhichOneof('value') if which_value == 'zipped_saved_model': warnings.warn( 'Deserializng a sequence value that was encoded as a zipped SavedModel.' 
' This is a deprecated path, please update the binary that is ' 'serializing the sequences.', DeprecationWarning) ds = _deserialize_dataset_from_zipped_saved_model( sequence_value_proto.zipped_saved_model) ds = tensorflow_utils.coerce_dataset_elements_to_tff_type_spec( ds, element_type) elif which_value == 'serialized_graph_def': ds = _deserialize_dataset_from_graph_def( sequence_value_proto.serialized_graph_def, element_type) else: raise NotImplementedError( 'Deserializing Sequences enocded as {!s} has not been implemented' .format(which_value)) return ds, computation_types.SequenceType(element=element_type) @tracing.trace def _deserialize_struct_value( value_proto: executor_pb2.Value, type_hint: Optional[computation_types.Type] = None ) -> _DeserializeReturnType: """Deserializes a value of struct type.""" val_elems = [] type_elems = [] if type_hint is not None: element_types = tuple(type_hint) else: element_types = [None] * len(value_proto.struct.element) for e, e_type in zip(value_proto.struct.element, element_types): name = e.name if e.name else None e_val, e_type = deserialize_value(e.value, e_type) val_elems.append((name, e_val)) type_elems.append((name, e_type) if name else e_type) return (structure.Struct(val_elems), computation_types.StructType(type_elems)) def _ensure_deserialized_types_compatible( previous_type: Optional[computation_types.Type], next_type: computation_types.Type) -> computation_types.Type: """Ensures one of `previous_type` or `next_type` is assignable to the other. Returns the type which is assignable from the other. Args: previous_type: Instance of `computation_types.Type` or `None`. next_type: Instance of `computation_types.Type`. Returns: The supertype of `previous_type` and `next_type`. Raises: TypeError if neither type is assignable from the other. """ if previous_type is None: return next_type else: if next_type.is_assignable_from(previous_type): return next_type elif previous_type.is_assignable_from(next_type): return previous_type raise TypeError('Type mismatch checking member assignability under a ' 'federated value. Deserialized type {} is incompatible ' 'with previously deserialized {}.'.format( next_type, previous_type)) @tracing.trace def _deserialize_federated_value( value_proto: executor_pb2.Value, type_hint: Optional[computation_types.Type] = None ) -> _DeserializeReturnType: """Deserializes a value of federated type.""" if not value_proto.federated.value: raise ValueError('Attempting to deserialize federated value with no data.') # The C++ runtime doesn't use the `all_equal` boolean (and doesn't report it # in returned values), however the type_hint on the computation may contain # it. if type_hint is not None: all_equal = type_hint.all_equal else: all_equal = value_proto.federated.type.all_equal placement_uri = value_proto.federated.type.placement.value.uri # item_type will represent a supertype of all deserialized member types in the # federated value. This will be the hint used for deserialize member values. if type_hint is not None: item_type_hint = type_hint.member else: item_type_hint = None item_type = None if all_equal: # As an optimization, we only deserialize the first value of an # `all_equal=True` federated value. 
items = [value_proto.federated.value[0]] else: items = value_proto.federated.value value = [] for item in items: item_value, next_item_type = deserialize_value(item, item_type_hint) item_type = _ensure_deserialized_types_compatible(item_type, next_item_type) value.append(item_value) type_spec = computation_types.FederatedType( item_type, placement=placements.uri_to_placement_literal(placement_uri), all_equal=all_equal) if all_equal: value = value[0] return value, type_spec @tracing.trace def deserialize_value( value_proto: executor_pb2.Value, type_hint: Optional[computation_types.Type] = None ) -> _DeserializeReturnType: """Deserializes a value (of any type) from `executor_pb2.Value`. Args: value_proto: An instance of `executor_pb2.Value`. type_hint: A `comptuations_types.Type` that hints at what the value type should be for executors that only return values. Returns: A tuple `(value, type_spec)`, where `value` is a deserialized representation of the transmitted value (e.g., Numpy array, or a `pb.Computation` instance), and `type_spec` is an instance of `tff.TensorType` that represents its type. Raises: TypeError: If the arguments are of the wrong types. ValueError: If the value is malformed. """ if not hasattr(value_proto, 'WhichOneof'): raise TypeError('`value_proto` must be a protocol buffer message with a ' '`value` oneof field.') which_value = value_proto.WhichOneof('value') if which_value == 'tensor': return _deserialize_tensor_value(value_proto) elif which_value == 'computation': return _deserialize_computation(value_proto) elif which_value == 'sequence': return _deserialize_sequence_value(value_proto.sequence, type_hint) elif which_value == 'struct': return _deserialize_struct_value(value_proto, type_hint) elif which_value == 'federated': return _deserialize_federated_value(value_proto, type_hint) else: raise ValueError( 'Unable to deserialize a value of type {}.'.format(which_value)) CardinalitiesType = Mapping[placements.PlacementLiteral, int] def serialize_cardinalities( cardinalities: CardinalitiesType) -> List[executor_pb2.Cardinality]: serialized_cardinalities = [] for placement, cardinality in cardinalities.items(): cardinality_message = executor_pb2.Cardinality( placement=computation_pb2.Placement(uri=placement.uri), cardinality=cardinality) serialized_cardinalities.append(cardinality_message) return serialized_cardinalities def deserialize_cardinalities( serialized_cardinalities: Collection[executor_pb2.Cardinality] ) -> CardinalitiesType: cardinalities_dict = {} for cardinality_spec in serialized_cardinalities: literal = placements.uri_to_placement_literal( cardinality_spec.placement.uri) cardinalities_dict[literal] = cardinality_spec.cardinality return cardinalities_dict
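# A minimal round-trip sketch using serialize_value/deserialize_value defined
# above; the tensor value and its TensorType are illustrative, and running it
# requires the TFF executor bindings imported at the top of this module.
if __name__ == '__main__':
    example_value = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    example_type = computation_types.TensorType(tf.float32, [3])
    value_proto, _ = serialize_value(example_value, example_type)
    round_tripped, round_tripped_type = deserialize_value(value_proto, example_type)
    assert example_type.is_assignable_from(round_tripped_type)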
py
1a39efc164cd0e339f9d879a1bf7c35098b436e4
#!/usr/bin/env python3
"""
Utility functions for testing.
"""
import copy

import numpy as np

SEED = 42


def round_dict(d, precision=3):
    """Round all numerical values in a dictionary recursively."""
    d = copy.deepcopy(d)
    if isinstance(d, dict):
        for k, v in d.items():
            try:
                d[k] = round(v, precision)
            except TypeError:
                d[k] = round_dict(v)
        return d
    elif isinstance(d, list):
        return [round_dict(v) for v in d]
    elif isinstance(d, tuple):
        return tuple([round_dict(v) for v in d])
    return d


def random_real_series(x, add_null=False, limit_from=0, limit_to=5, seed=SEED):
    np.random.seed(seed)
    s = np.random.normal(x['mean'], x['std'], size=limit_to)
    s = np.minimum(np.maximum(s, x['minValue']), x['maxValue'])
    if add_null and len(s):
        s[np.random.choice(limit_to)] = None
    return list(s)[limit_from:]


def random_integer_series(x, **kwargs):
    s = random_real_series(x, **kwargs)
    return [int(e) if e is not None else None for e in s]


def random_nominal_series(x, add_null=False, limit_from=0, limit_to=5, seed=SEED):
    np.random.seed(seed)
    s = np.random.choice(x['type']['enumeration'], size=limit_to)
    if add_null and len(s):
        s[np.random.choice(limit_to)] = None
    return list(s)[limit_from:]


def independent(include_real=True, include_integer=True, include_nominal=False, **kwargs):
    if 'add_independent_null' in kwargs:
        kwargs['add_null'] = kwargs.pop('add_independent_null')

    ret = []
    if include_real:
        x = {
            'name': 'subjectage',
            'type': {
                'name': 'real'
            },
            'series': [],
            'mean': 70.4,
            'std': 8.3,
            'minValue': 30.,
            'maxValue': 90.,
            'label': 'Exact age'
        }
        x['series'] = random_real_series(x, seed=1, **kwargs)
        ret.append(x)
    if include_integer:
        x = {
            'name': 'minimentalstate',
            'type': {
                'name': 'integer'
            },
            'series': [],
            'mean': 24.4,
            'std': 5.2,
            'minValue': 0,
            'maxValue': 30,
            'label': 'MMSE Total scores'
        }
        x['series'] = random_integer_series(x, seed=2, **kwargs)
        ret.append(x)
    if include_nominal:
        x = {
            'name': 'agegroup',
            'type': {
                'name': 'polynominal',
                'enumeration': ['-50y', '50-59y']
            },
            'label': 'Age Group',
            'series': []
        }
        x['series'] = random_nominal_series(x, seed=3, **kwargs)
        ret.append(x)
    return ret


def inputs_regression(add_null=False, limit_from=0, limit_to=5, **kwargs):
    x = {
        'name': 'lefthippocampus',
        'label': 'Left Hippocampus',
        'type': {
            'name': 'real'
        },
        'series': [],
        'mean': 3.,
        'std': 0.39,
        'minValue': 1.,
        'maxValue': 5.,
    }
    x['series'] = random_real_series(x, seed=4, add_null=add_null, limit_from=limit_from, limit_to=limit_to)
    return {
        'data': {
            'dependent': [x],
            'independent': independent(limit_from=limit_from, limit_to=limit_to, **kwargs)
        },
        'parameters': []
    }


def inputs_classification(add_null=False, limit_from=0, limit_to=5, **kwargs):
    x = {
        'name': 'adnicategory',
        'label': 'ADNI category',
        'type': {
            'name': 'polynominal',
            'enumeration': ['AD', 'CN', 'Other'],
            'enumeration_labels': ['Alzheimers disease', 'Cognitively Normal', 'Other']
        },
        'series': []
    }
    x['series'] = random_nominal_series(x, seed=5, add_null=add_null, limit_from=limit_from, limit_to=limit_to)
    return {
        'data': {
            'dependent': [x],
            'independent': independent(limit_from=limit_from, limit_to=limit_to, **kwargs)
        },
        'parameters': []
    }
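# A quick self-check sketch for the generators above; the exact numbers depend
# only on the fixed seeds used internally, so repeated runs print the same
# payloads.
if __name__ == '__main__':
    regression_inputs = inputs_regression(include_nominal=True, limit_to=3)
    classification_inputs = inputs_classification(add_null=True, limit_to=4)
    print(round_dict(regression_inputs, precision=2))
    print(len(classification_inputs['data']['independent']))  # real + integer variables by default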
py
1a39f04e988e6466059ad098998e977b4b610afe
import random import os.path import sys import logging import gtk import gs import gs.ui.rtgraph as rtgraph import gs.config as config LOG = logging.getLogger("graph") class FieldChannel(rtgraph.Channel): def __init__(self, msg, field): rtgraph.Channel.__init__(self) i = 0 for f in msg.fields: if f.name == field.name: self._fidx = i i += 1 self._val = 0 def getValue(self): return self._val def update_msg_value(self, vals): self._val = vals[self._fidx] class RandomChannel(FieldChannel): def getValue(self): return random.random() class Graph(rtgraph.HScrollLineGraph): def __init__(self, source, msg, field, double_buffer, ymin=0.0, ymax=1.0, width=150, height=50, rate=30): rtgraph.HScrollLineGraph.__init__(self, scrollRate=rate, size=(width,height), range=(ymin,ymax), autoScale=True, axisLabel=True, channels=[FieldChannel(msg, field)], doubleBuffer=double_buffer ) self._source = source self._source.register_interest(self._on_msg, 0, msg.name) def _on_msg(self, msg, header, payload): vals = msg.unpack_values(payload) for f in self.channels: f.update_msg_value(vals) def get_scroll_rate_widget(self): return self.getTweakControls()[0] def delete(self): self._source.unregister_interest(self._on_msg) class _GraphRange(gtk.VBox): def __init__(self, graph): gtk.VBox.__init__(self) graph.connect("range-changed", self._on_range_changed) mal = gtk.Label("Max:") self.maxadj = gtk.Adjustment() self._update_adjustment(self.maxadj) masb = gtk.SpinButton(self.maxadj) masb.props.digits = 1 self.maxadj.connect("value-changed", self._on_adj_changed, graph, 1) mil = gtk.Label("Min:") self.minadj = gtk.Adjustment() self._update_adjustment(self.minadj) misb = gtk.SpinButton(self.minadj) misb.props.digits = 1 self.minadj.connect("value-changed", self._on_adj_changed, graph, 0) self.pack_start(mal, False) self.pack_start(masb, False) self.pack_start(mil, False) self.pack_start(misb, False) def _update_adjustment(self, adj, value=0.0, lower=0.0, upper=0.0): adj.lower = lower adj.page_increment = 1.0 adj.step_increment = 0.1 adj.upper = upper adj.value = value def _on_range_changed(self, graph, min_, max_): self._update_adjustment(self.maxadj, value=max_, lower=min_, upper=(max_*1.5)) self._update_adjustment(self.minadj, value=min_, lower=(min_*1.5), upper=max_) def _on_adj_changed(self, adj, graph, idx): graph.handler_block_by_func(self._on_range_changed) graph.rescale(adj.get_value(), idx) graph.handler_unblock_by_func(self._on_range_changed) class GraphHolder(gtk.HBox): """ Composite widget holding a rtgraph and controls graph is a hbox: frame | [\___ ] | vertical buttons (pause, remove, etc) [ \ ] | range widgets """ def __init__(self, g, name, adjustable, on_pause, on_print, on_remove, on_fullscreen, on_log_data): gtk.HBox.__init__(self, spacing=5) self.graph = g frame = gtk.Frame(name) vb = gtk.VBox() vb.pack_start(g, True, True) tweak = None if adjustable: tweak = g.get_scroll_rate_widget() vb.pack_start(tweak.widget, False, False) frame.add(vb) self.pack_start(frame) vb = gtk.VBox() bbox = gtk.VButtonBox() bbox.set_layout(gtk.BUTTONBOX_END) vb.pack_start(bbox, True, True) if on_pause: b = gs.ui.get_button(stock=gtk.STOCK_MEDIA_PAUSE, xalign=0) b.connect("clicked", on_pause, tweak) bbox.pack_start(b, False, False) if on_print: b = gs.ui.get_button(stock=gtk.STOCK_PRINT, xalign=0) b.connect("clicked", on_print, g, name) bbox.pack_start(b, False, False) if on_remove: b = gs.ui.get_button(stock=gtk.STOCK_REMOVE, xalign=0) b.connect("clicked", on_remove, name) bbox.pack_start(b, False, False) if on_fullscreen: b = 
gs.ui.get_button(stock=gtk.STOCK_FULLSCREEN, xalign=0) b.connect("clicked", on_fullscreen, name) bbox.pack_start(b, False, False) if on_log_data: b = gs.ui.get_button("Log Message",image_stock=gtk.STOCK_FILE, xalign=0) b.connect("clicked", on_log_data, name) bbox.pack_start(b, False, False) if adjustable: r = _GraphRange(g) vb.pack_start(r, False, False) self.pack_start(vb, False, False) self.show_all() class GraphManager(config.ConfigurableIface): CONFIG_SECTION = "GRAPHMANAGER" def __init__(self, conf, source, messages, box, main_window): config.ConfigurableIface.__init__(self, conf) self._source = source self._messages = messages self._box = box self._main_window = main_window self._graphs = {} def _on_log_data(self, sender, name): self._source.register_csv_logger(None, name.split(':')[0]) def _on_pause(self, sender, tweakScrollRate): if tweakScrollRate: tweakScrollRate.setValue(0) tweakScrollRate.refresh() def _on_remove(self, sender, name): gh = self._graphs[name] gh.graph.delete() self._box.remove(gh) del(self._graphs[name]) def _on_print(self, sender, graph, name): def on_print_page(operation, context, page_nr): cr = context.get_cairo_context() graph.drawIntoCairoContext(cr, name=name) print_op = gtk.PrintOperation() print_op.set_n_pages(1) print_op.connect("draw_page", on_print_page) res = print_op.run(gtk.PRINT_OPERATION_ACTION_PRINT_DIALOG, None) def _on_fs_window_closed(self, widget, event, name, btn): gh = self._graphs[name] gh.hide() gh.reparent(self._box) gh.show_all() btn.set_sensitive(True) def _on_fullscreen(self, btn, name): gh = self._graphs[name] w = gtk.Window() w.connect("delete-event", self._on_fs_window_closed, name, btn) w.set_title(name) gh.hide() gh.reparent(w) w.show_all() btn.set_sensitive(False) def update_state_from_config(self): num = self.config_get("num_graphs", 0) if num: LOG.info("Restoring %s graphs" % num) for i in range(0, int(num)): name = self.config_get("graph_%d" % i, ":") try: msg_name, field_name = name.split(":") if msg_name and field_name: msg = self._messages.get_message_by_name(msg_name) field = msg.get_field_by_name(field_name) if msg and field: self.add_graph(msg, field) except Exception: LOG.warn("Error adding graph", exc_info=True) def update_config_from_state(self): self.config_delete_keys_in_section() num = 0 for name in self._graphs: self.config_set("graph_%d" % num, name) num += 1 LOG.info("Saved %s graphs" % num) self.config_set("num_graphs", num) def add_graph(self, msg, field, adjustable=True, double_buffer=False): name = "%s:%s" % (msg.name, field.name) if name not in self._graphs: LOG.info("Adding graph: %s" % name) gh = GraphHolder( Graph(self._source, msg, field, double_buffer), name, adjustable, self._on_pause, self._on_print, self._on_remove, self._on_fullscreen, self._on_log_data) self._box.pack_start(gh) self._graphs[name] = gh
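# A rough wiring sketch for GraphManager; `conf`, `source`, `messages`, `box`
# and `main_window` are application objects supplied by the surrounding gs
# package, and the message/field names are hypothetical.
#
#     manager = GraphManager(conf, source, messages, box, main_window)
#     msg = messages.get_message_by_name("AHRS")   # hypothetical message name
#     field = msg.get_field_by_name("roll")        # hypothetical field name
#     manager.add_graph(msg, field, adjustable=True)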
py
1a39f0ac1b52e80601d37b28ec8d2bbc57258c03
#!/usr/bin/env python3 # coding: utf8 """ Description: Using fasta files (scaffold/chromosme/contig file, protein file), gff file, annotation tsv file and the species name this script writes a genbank file. The annotation tsv file contains association between gene and annotation (EC number, GO term, Interpro) to add information to the genbank. The species name needs to be compatible with the taxonomy of the EBI. Informations need a good formating: gene ID should be correctly written (like XXX_001 and no XXX_1 if you got more thant 100 genes). Currently when there is multiple GO terms/InterPro/EC the script split them when they are separated by ";" or by "," like GO:0006979;GO:0020037;GO:0004601, if you use another separator add to the re.split(',|;'). For the gff file ensure that the element start position is at least 1. If it's 0 gffutils will return an error (source : https://github.com/daler/gffutils/issues/104). Other informations can be added by adding a dictionary with gene ID as key and the information as value and adapt the condition used for the others annotations (EC, Interpro, Go term). Usage: gbk_creator_from_gff.py -fg <Genome fasta file> -fp <Protein Fasta file> -a <Annotation TSV file> -g <GFF file> -s <Species name> -o <GBK Output file name> """ import argparse import datetime import gffutils import numpy as np import os import pandas as pa import pronto import re import requests import shutil from Bio import SeqFeature as sf from Bio import SeqIO from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from collections import OrderedDict try: from Bio.Alphabet import IUPAC except ImportError: IUPAC = None def merging_mini_gff(gff_folder): """ Merge multiple gff files into one. Return the path to the merged file. """ mini_gff_path = os.path.dirname(os.path.realpath(os.listdir(gff_folder)[0])) + "/" + gff_folder + "/" gff_merged_path = mini_gff_path + 'merged_gff.gff' with open(gff_merged_path, 'w') as gff_file_merged: gff_files = os.listdir(gff_folder) gff_files.remove('merged_gff.gff') for mini_gff in gff_files: with open(mini_gff_path + mini_gff, 'rb') as mini_gff_file: shutil.copyfileobj(mini_gff_file, gff_file_merged) return gff_merged_path def create_GO_dataframes(): """ Use pronto to query the Gene Ontology and to create the Ontology. Create a dataframe which contains for all GO terms their GO namespaces (molecular_function, ..). Create a second dataframe containing alternative ID for some GO terms (deprecated ones). """ go_ontology = pronto.Ontology('http://purl.obolibrary.org/obo/go/go-basic.obo') # For each GO terms look to the namespaces associated with them. go_namespaces = {} for go_term in go_ontology: if 'GO:' in go_term: go_namespaces[go_term] = go_ontology[go_term].namespace df_go_namespace = pa.DataFrame.from_dict(go_namespaces, orient='index') df_go_namespace.reset_index(inplace=True) df_go_namespace.columns = ['GO', 'namespace'] # For each GO terms look if there is an alternative ID fo them. go_alt_ids = {} for go_term in go_ontology: if go_ontology[go_term].alternate_ids != frozenset(): for go_alt in go_ontology[go_term].alternate_ids: go_alt_ids[go_alt] = go_term df_go_alternative = pa.DataFrame.from_dict(go_alt_ids, orient='index') df_go_alternative.reset_index(inplace=True) df_go_alternative.columns = ['GO', 'alternative_GO'] return df_go_namespace, df_go_alternative def create_taxonomic_data(species_name): """ Query the EBI with the species name to create a dictionary containing taxon id, taxonomy and some other informations. 
""" species_informations = {} species_name_url = species_name.replace(' ', '%20') url = 'https://www.ebi.ac.uk/ena/data/taxonomy/v1/taxon/scientific-name/' + species_name_url response = requests.get(url) temp_species_informations = response.json()[0] for temp_species_information in temp_species_informations: if temp_species_information == 'lineage': species_informations['taxonomy'] = temp_species_informations[temp_species_information].split('; ')[:-1] elif temp_species_information == 'division': species_informations['data_file_division'] = temp_species_informations[temp_species_information] elif temp_species_information == 'taxId': species_informations['db_xref'] = 'taxon:' + str(temp_species_informations[temp_species_information]) else: species_informations[temp_species_information] = temp_species_informations[temp_species_information] compatible_species_name = species_name.replace('/', '_') species_informations['description'] = compatible_species_name + ' genome' species_informations['organism'] = compatible_species_name species_informations['keywords'] = [compatible_species_name] return species_informations def find_column_of_interest(df): ''' Gene column is supposed to be the first one. Detect columns containing GO number, EC number and Interpro ID. To do this, regular expression are used, for each types of data. The occurrence of each regular expression is counted. Then the column containing the maximum of occurrence for a type of data is associated with it by returning it's name. ''' columns = df.columns.tolist() gene_column = columns[0] go_number_expression = r"[FPC]?:?GO[:_][\d]{7}" ec_expression = r"[Ee]?[Cc]?:?[\d]{1}[\.]{1}[\d]{,2}[\.]{,1}[\d]{,2}[\.]{,1}[\d]{,3}" ipr_expression = r"IPR[\d]{6}" go_number_columns = {} ec_columns = {} ipr_columns = {} for column in columns: df[column] = df[column].astype(str) go_number_columns[column] = len(df[df[column].str.match(go_number_expression)]) ec_columns[column] = len(df[df[column].str.match(ec_expression)]) ipr_columns[column] = len(df[df[column].str.match(ipr_expression)]) if go_number_columns: go_number_column = max(go_number_columns, key=go_number_columns.get) go_column = go_number_column if ec_columns != []: ec_column = max(ec_columns, key=ec_columns.get) else: ec_column = np.nan if ipr_columns != []: ipr_column = max(ipr_columns, key=ipr_columns.get) else: ipr_column = np.nan return gene_column, go_column, ec_column, ipr_column def contig_info(contig_id, contig_seq, species_informations): """ Create contig information from species_informations dictionary and contig id and contig seq. """ record = SeqRecord(contig_seq, id=contig_id, name=contig_id, description=species_informations['description'], annotations={"molecule_type": "DNA"}) if IUPAC: record.seq.alphabet = IUPAC.ambiguous_dna if 'data_file_division' in species_informations: record.annotations['data_file_division'] = species_informations['data_file_division'] record.annotations['date'] = datetime.date.today().strftime('%d-%b-%Y').upper() if 'topology' in species_informations: record.annotations['topology'] = species_informations['topology'] record.annotations['accessions'] = contig_id if 'organism' in species_informations: record.annotations['organism'] = species_informations['organism'] # Use of literal_eval for taxonomy and keywords to retrieve list. 
if 'taxonomy' in species_informations: record.annotations['taxonomy'] = species_informations['taxonomy'] if 'keywords' in species_informations: record.annotations['keywords'] = species_informations['keywords'] if 'source' in species_informations: record.annotations['source'] = species_informations['source'] new_feature_source = sf.SeqFeature(sf.FeatureLocation(1-1, len(contig_seq)), type="source") new_feature_source.qualifiers['scaffold'] = contig_id if 'isolate' in species_informations: new_feature_source.qualifiers['isolate'] = species_informations['isolate'] # db_xref corresponds to the taxon NCBI ID. # Important if you want to use Pathway Tools after. if 'db_xref' in species_informations: new_feature_source.qualifiers['db_xref'] = species_informations['db_xref'] if 'cell_type' in species_informations: new_feature_source.qualifiers['cell_type'] = species_informations['cell_type'] if 'dev_stage' in species_informations: new_feature_source.qualifiers['dev_stage'] = species_informations['dev_stage'] if 'mol_type' in species_informations: new_feature_source.qualifiers['mol_type'] = species_informations['mol_type'] record.features.append(new_feature_source) return record def strand_change(input_strand): """ The input is strand in str ('-', '+') modify it to be a strand in int (-1, +1) to be compatible with SeqIO strand reading. """ if isinstance(input_strand, str): if input_strand == '-': new_strand = -1 elif input_strand == '+': new_strand = +1 if input_strand == '.': new_strand = None elif input_strand == '?': new_strand = 0 elif isinstance(input_strand, int): if input_strand == -1: new_strand = input_strand elif input_strand == +1: new_strand = input_strand return new_strand def search_and_add_RNA(gff_database, gene_informations, record, type_RNA): """ Search in the gff_database if the gene have RNA of the (type_RNA). For the RNA it will add a feature to the contig record of the genbank. Then it returns the contig record. gene_informations contain: [0] -> gene feature [1] -> gene ID cleaned [2] -> gene start position [3] -> gene end postion [4] -> gene strand modified (str -> int) """ for rna in gff_database.children(gene_informations[0], featuretype=type_RNA, order_by='start'): new_feature_RNA = sf.SeqFeature(sf.FeatureLocation(gene_informations[2], gene_informations[3], gene_informations[4]), type=type_RNA) new_feature_RNA.qualifiers['locus_tag'] = gene_informations[1] record.features.append(new_feature_RNA) return record def search_and_add_pseudogene(gff_database, gene, record, df_exons, gene_protein_seq): """ Search in the gff_database if the gene is a pseudogene. Add it to the record. """ location_exons = [] for pseudogene in gff_database.children(gene, featuretype="pseudogene", order_by='start'): # Select exon corresponding to the gene. # Then iterate for each exon and extract information. 
df_temp = df_exons[df_exons['gene_id'] == pseudogene.id] for _, row in df_temp.iterrows(): new_feature_location_exons = sf.FeatureLocation(row['start'], row['end'], row['strand']) location_exons.append(new_feature_location_exons) if location_exons and len(location_exons)>=2: exon_compound_locations = sf.CompoundLocation(location_exons, operator='join') new_feature_cds = sf.SeqFeature(exon_compound_locations, type='CDS') else: start_position = gene.start -1 end_position = gene.end strand = strand_change(gene.strand) new_feature_cds = sf.SeqFeature(sf.FeatureLocation(start_position, end_position, strand), type="CDS") new_feature_cds.qualifiers['translation'] = gene_protein_seq[pseudogene.id] new_feature_cds.qualifiers['locus_tag'] = gene.id new_feature_cds.qualifiers['pseudo'] = None record.features.append(new_feature_cds) return record def gff_to_gbk(genome_fasta, prot_fasta, annot_table, gff_file, species_name, gbk_out): """ From a genome fasta (containing each contigs of the genome), a protein fasta (containing each protein sequence), an annotation table (containing gene name associated with GO terms, InterPro and EC), a gff file (containing gene, exon, mRNA, ncRNA, tRNA), a contig information table (containing species name, taxon ID, ..) create a genbank file. """ print('Creating GFF database (gffutils)') # Create the gff database file. # gffutils use sqlite3 file-based database to access data inside GFF. # ':memory:' ask gffutils to keep database in memory instead of writting in a file. gff_database = gffutils.create_db(gff_file, ':memory:', force=True, keep_order=True, merge_strategy='merge', sort_attribute_values=True) # Length of your gene ID. # Catch it in the GFF database. # It's pretty dumb as we go into a loop for one information. # But I don't find another way to catch the length of gene_id. length_gene_id = 0 for gene in gff_database.features_of_type('gene'): length_gene_id = len(gene.id.replace('gene:', '')) break # Get the longest contig ID to check if all contig IDs have the # same length, if not add 0 (at the supposed position of the number). longest_contig_id = "" for contig_for_length_id in gff_database.features_of_type('sequence_assembly'): if len(longest_contig_id) < len(contig_for_length_id.id): longest_contig_id = contig_for_length_id.id print('Formatting fasta and annotation file') # Dictionary with scaffold/chromosome id as key and sequence as value. contig_seqs = OrderedDict() for record in SeqIO.parse(genome_fasta, "fasta"): id_contig = record.id contig_seqs[id_contig] = record.seq # Dictionary with gene id as key and protein sequence as value. gene_protein_seq = {} for record in SeqIO.parse(prot_fasta, "fasta"): gene_protein_seq[record.id] = record.seq # Create a taxonomy dictionary querying the EBI. species_informations = create_taxonomic_data(species_name) # Read a tsv file containing GO terms, Interpro and EC associated with gene name. mapping_data = pa.read_csv(annot_table, sep='\t') mapping_data.replace(np.nan, '', inplace=True) gene_column, go_column, ec_column, ipr_column = find_column_of_interest(mapping_data) mapping_data.set_index(gene_column, inplace=True) # Dictionary with gene id as key and GO terms/Interpro/EC as value. annot_GOs = mapping_data[go_column].to_dict() annot_IPRs = mapping_data[ipr_column].to_dict() annot_ECs = mapping_data[ec_column].to_dict() # Query Gene Ontology to extract namespaces and alternative IDs. df_go_namespace, df_go_alternative = create_GO_dataframes() # Dictionary GO id as term and GO namespace as value. 
df_go_namespace.set_index('GO', inplace=True) go_namespaces = df_go_namespace['namespace'].to_dict() # Dictionary GO id as term and GO alternatives id as value. df_go_alternative.set_index('GO', inplace=True) go_alternatives = df_go_alternative['alternative_GO'].to_dict() # Create a dataframe containing each exon with informations (gene, start, end and strand) df_exons = pa.DataFrame(columns=['exon_id', 'gene_id', 'start', 'end', 'strand']) print('Searching for exons') temporary_datas = [] # Search for all exons in gff database and extract start position (have to minus one to get the right position) # the end position, the strand (have to change from str to int) and the gene ID. # Then add it to a list of dictionary that will be added to the dataframe. for exon in gff_database.features_of_type('exon'): start_position = exon.start - 1 end_position = exon.end strand = strand_change(exon.strand) gene_id = exon.id.replace('exon:', '')[:-2] temporary_datas.append({'exon_id': exon.id, 'gene_id': gene_id, 'start': start_position, 'end':end_position, 'strand': strand}) df_exons = df_exons.append(temporary_datas) # All SeqRecord objects will be stored in a list and then give to the SeqIO writer to create the genbank. seq_objects = [] print('Assembling Genbank informations') # Iterate through each contig. # Then iterate through gene and throug RNA linked with the gene. # Then look if protein informations are available. for contig_id in sorted(contig_seqs): # Data for each contig. record = contig_info(contig_id, contig_seqs[contig_id], species_informations) for gene in gff_database.features_of_type('gene'): gene_contig = gene.chrom if gene_contig == contig_id: id_gene = gene.id start_position = gene.start -1 end_position = gene.end strand = strand_change(gene.strand) new_feature_gene = sf.SeqFeature(sf.FeatureLocation(start_position, end_position, strand), type="gene") new_feature_gene.qualifiers['locus_tag'] = id_gene # Add gene information to contig record. record.features.append(new_feature_gene) # Search and add RNAs. gene_informations = [gene, id_gene, start_position, end_position, strand] record = search_and_add_RNA(gff_database, gene_informations, record, 'mRNA') record = search_and_add_RNA(gff_database, gene_informations, record,'tRNA') record = search_and_add_RNA(gff_database, gene_informations, record, 'ncRNA') record = search_and_add_RNA(gff_database, gene_informations, record, 'lncRNA') # Search for pseudogene and add them. record = search_and_add_pseudogene(gff_database, gene, record, df_exons, gene_protein_seq) # Create CDS using exons, if no exon use gene information location_exons = [] # Use parent mRNA in gff to find CDS. # With this we take the isoform of gene. for mrna in gff_database.children(gene, featuretype="mRNA", order_by='start'): mrna_id = mrna.id # Select exon corresponding to the gene. # Then iterate for each exon and extract information. 
df_temp = df_exons[df_exons['gene_id'] == mrna_id] for _, row in df_temp.iterrows(): new_feature_location_exons = sf.FeatureLocation(row['start'], row['end'], row['strand']) location_exons.append(new_feature_location_exons) if location_exons and len(location_exons)>=2: exon_compound_locations = sf.CompoundLocation(location_exons, operator='join') new_feature_cds = sf.SeqFeature(exon_compound_locations, type='CDS') else: new_feature_cds = sf.SeqFeature(sf.FeatureLocation(start_position, end_position, strand), type="CDS") new_feature_cds.qualifiers['translation'] = gene_protein_seq[mrna_id] new_feature_cds.qualifiers['locus_tag'] = id_gene # Add GO annotation according to the namespace. if mrna_id in annot_GOs: gene_gos = re.split(';|,', annot_GOs[mrna_id]) if gene_gos != [""]: go_components = [] go_functions = [] go_process = [] for go in gene_gos: # Check if GO term is not a deprecated one. # If yes take the corresponding one in alternative GO. if go not in go_namespaces: go_test = go_alternatives[go] else: go_test = go if go_namespaces[go_test] == 'cellular_component': go_components.append(go) if go_namespaces[go_test] == 'molecular_function': go_functions.append(go) if go_namespaces[go_test] == 'biological_process': go_process.append(go) new_feature_cds.qualifiers['go_component'] = go_components new_feature_cds.qualifiers['go_function'] = go_functions new_feature_cds.qualifiers['go_process'] = go_process # Add InterPro annotation. if mrna_id in annot_IPRs: gene_iprs = re.split(';|,', annot_IPRs[mrna_id]) if gene_iprs != [""]: new_feature_cds.qualifiers['db_xref'] = ["InterPro:"+interpro for interpro in gene_iprs] # Add EC annotation. if mrna_id in annot_ECs: gene_ecs = re.split(';|,', annot_ECs[mrna_id]) if gene_ecs != [""]: new_feature_cds.qualifiers['EC_number'] = [ec.replace('ec:', '') for ec in gene_ecs] # Add CDS information to contig record record.features.append(new_feature_cds) seq_objects.append(record) # Create Genbank with the list of SeqRecord. SeqIO.write(seq_objects, gbk_out, 'genbank') def main(genome_fasta, prot_fasta, annot_table, gff_file_folder, species_name, gbk_out): # Check if gff is a file or is multiple files in a folder. # If it's multiple files, it wil merge them in one. 
if os.path.isfile(gff_file_folder): gff_file = gff_file_folder if not os.path.isfile(gff_file_folder): gff_file = merging_mini_gff(gff_file_folder) gff_to_gbk(genome_fasta, prot_fasta, annot_table, gff_file, species_name, gbk_out) def run(): parser = argparse.ArgumentParser(prog = "gbk_creator_from_gff.py") parser.add_argument("-fg", "--fgen", dest = "genome_fasta", metavar = "FILE", help = "contig fasta file", required = True) parser.add_argument("-fp", "--fprot", dest = "prot_fasta", metavar = "FILE", help = "protein fasta file", required = True) parser.add_argument("-a", "--annot", dest = "annot_table", metavar = "FILE", help = "annotation tsv file", required = True) parser.add_argument("-g", "--gff", dest = "gff_file_folder", metavar = "FILE or FOLDER", help = "gff file or folder containing multiple gff", required = True) parser.add_argument("-s", "--speciesname", dest = "species_name", metavar = "STRING", help = "species scientific name", required = True) parser.add_argument("-o", "--output", dest = "gbk_out", metavar = "FILE", help = "output file", default = "mygbk.gbk") args = parser.parse_args() main(genome_fasta=args.genome_fasta, prot_fasta=args.prot_fasta, annot_table=args.annot_table, gff_file_folder=args.gff_file_folder, species_name=args.species_name, gbk_out=args.gbk_out) if __name__ == '__main__': run()
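# A programmatic sketch of the same pipeline the CLI drives; every path and the
# species name below are placeholders, and the species name must be resolvable
# by the EBI taxonomy lookup described in the module docstring.
#
#     main(genome_fasta='genome.fasta',
#          prot_fasta='proteins.fasta',
#          annot_table='annotations.tsv',
#          gff_file_folder='genes.gff',
#          species_name='Arabidopsis thaliana',
#          gbk_out='mygbk.gbk')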
py
1a39f11c5db39744e265d89e2a0b7a7754a16662
#! /usr/bin/env python # -*- coding: utf-8 -*- from django.conf.urls import url, include from config import views urlpatterns = [ url(r'^$', views.index, name='config'), url(r'^config_save/$', views.config_save, name='config_save'), url(r'^token/', views.get_token, name='token'), ]
py
1a39f14776c3795f540806eef713f13dd691ec99
from functools import partial import pandas as pd from cellphonedb.src.core.core_logger import core_logger from cellphonedb.src.core.exceptions.AllCountsFilteredException import AllCountsFilteredException from cellphonedb.src.core.exceptions.NoInteractionsFound import NoInteractionsFound from cellphonedb.src.core.methods import cpdb_statistical_analysis_helper def call(meta: pd.DataFrame, counts: pd.DataFrame, counts_data: str, interactions: pd.DataFrame, genes: pd.DataFrame, complexes: pd.DataFrame, complex_compositions: pd.DataFrame, pvalue: float, separator: str, iterations: int = 1000, threshold: float = 0.1, threads: int = 4, debug_seed: int = -1, result_precision: int = 3, ) -> (pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame): core_logger.info( '[Cluster Statistical Analysis] ' 'Threshold:{} Iterations:{} Debug-seed:{} Threads:{} Precision:{}'.format(threshold, iterations, debug_seed, threads, result_precision)) if debug_seed >= 0: pd.np.random.seed(debug_seed) core_logger.warning('Debug random seed enabled. Setted to {}'.format(debug_seed)) cells_names = sorted(counts.columns) interactions.set_index('id_interaction', drop=True, inplace=True) interactions_reduced = interactions[['multidata_1_id', 'multidata_2_id']].drop_duplicates() complex_compositions.set_index('id_complex_composition', inplace=True, drop=True) # Add id multidata to counts input counts: pd.DataFrame = counts.merge(genes[['id_multidata', 'ensembl', 'gene_name', 'hgnc_symbol']], left_index=True, right_on=counts_data) counts_relations = counts[['id_multidata', 'ensembl', 'gene_name', 'hgnc_symbol']].copy() counts.set_index('id_multidata', inplace=True, drop=True) counts = counts[cells_names] counts = counts.astype('float32') counts = counts.groupby(counts.index).mean() if counts.empty: raise AllCountsFilteredException(hint='Are you using human data?') # End add id multidata interactions_filtered, counts_filtered, complex_composition_filtered = \ cpdb_statistical_analysis_helper.prefilters(interactions_reduced, counts, complexes, complex_compositions) if interactions_filtered.empty: raise NoInteractionsFound() clusters = cpdb_statistical_analysis_helper.build_clusters(meta, counts_filtered, complex_composition_filtered) core_logger.info('Running Real Analysis') cluster_interactions = cpdb_statistical_analysis_helper.get_cluster_combinations(clusters['names']) base_result = cpdb_statistical_analysis_helper.build_result_matrix(interactions_filtered, cluster_interactions, separator) real_mean_analysis = cpdb_statistical_analysis_helper.mean_analysis(interactions_filtered, clusters, cluster_interactions, base_result, separator) real_percents_analysis = cpdb_statistical_analysis_helper.percent_analysis(clusters, threshold, interactions_filtered, cluster_interactions, base_result, separator) core_logger.info('Running Statistical Analysis') statistical_mean_analysis = cpdb_statistical_analysis_helper.shuffled_analysis(iterations, meta, counts_filtered, interactions_filtered, cluster_interactions, complex_composition_filtered, base_result, threads, separator) result_percent = cpdb_statistical_analysis_helper.build_percent_result(real_mean_analysis, real_percents_analysis, statistical_mean_analysis, interactions_filtered, cluster_interactions, base_result, separator) pvalues_result, means_result, significant_means, deconvoluted_result = build_results( interactions_filtered, interactions, counts_relations, real_mean_analysis, result_percent, clusters['means'], complex_composition_filtered, counts, genes, 
result_precision, pvalue, counts_data ) return pvalues_result, means_result, significant_means, deconvoluted_result def build_results(interactions: pd.DataFrame, interactions_original: pd.DataFrame, counts_relations: pd.DataFrame, real_mean_analysis: pd.DataFrame, result_percent: pd.DataFrame, clusters_means: pd.DataFrame, complex_compositions: pd.DataFrame, counts: pd.DataFrame, genes: pd.DataFrame, result_precision: int, pvalue: float, counts_data: str ) -> (pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame): """ Sets the results data structure from method generated data. Results documents are defined by specs. """ core_logger.info('Building results') interactions: pd.DataFrame = interactions_original.loc[interactions.index] interactions['interaction_index'] = interactions.index interactions = interactions.merge(counts_relations, how='left', left_on='multidata_1_id', right_on='id_multidata', ) interactions = interactions.merge(counts_relations, how='left', left_on='multidata_2_id', right_on='id_multidata', suffixes=('_1', '_2')) interactions.set_index('interaction_index', inplace=True, drop=True) interacting_pair = cpdb_statistical_analysis_helper.interacting_pair_build(interactions) def simple_complex_indicator(interaction: pd.Series, suffix: str) -> str: """ Add simple/complex prefixes to interaction components """ if interaction['is_complex{}'.format(suffix)]: return 'complex:{}'.format(interaction['name{}'.format(suffix)]) return 'simple:{}'.format(interaction['name{}'.format(suffix)]) interactions['partner_a'] = interactions.apply(lambda interaction: simple_complex_indicator(interaction, '_1'), axis=1) interactions['partner_b'] = interactions.apply(lambda interaction: simple_complex_indicator(interaction, '_2'), axis=1) significant_mean_rank, significant_means = cpdb_statistical_analysis_helper.build_significant_means( real_mean_analysis, result_percent, pvalue) significant_means = significant_means.round(result_precision) gene_columns = ['{}_{}'.format(counts_data, suffix) for suffix in ('1', '2')] gene_renames = {column: 'gene_{}'.format(suffix) for column, suffix in zip(gene_columns, ['a', 'b'])} # Remove useless columns interactions_data_result = pd.DataFrame( interactions[['id_cp_interaction', 'partner_a', 'partner_b', 'receptor_1', 'receptor_2', *gene_columns, 'annotation_strategy']].copy()) interactions_data_result = pd.concat([interacting_pair, interactions_data_result], axis=1, sort=False) interactions_data_result['secreted'] = (interactions['secreted_1'] | interactions['secreted_2']) interactions_data_result['is_integrin'] = (interactions['integrin_1'] | interactions['integrin_2']) interactions_data_result.rename( columns={**gene_renames, 'receptor_1': 'receptor_a', 'receptor_2': 'receptor_b'}, inplace=True) # Dedupe rows and filter only desired columns interactions_data_result.drop_duplicates(inplace=True) means_columns = ['id_cp_interaction', 'interacting_pair', 'partner_a', 'partner_b', 'gene_a', 'gene_b', 'secreted', 'receptor_a', 'receptor_b', 'annotation_strategy', 'is_integrin'] interactions_data_result = interactions_data_result[means_columns] real_mean_analysis = real_mean_analysis.round(result_precision) significant_means = significant_means.round(result_precision) # Round result decimals for key, cluster_means in clusters_means.items(): clusters_means[key] = cluster_means.round(result_precision) # Document 1 pvalues_result = pd.concat([interactions_data_result, result_percent], axis=1, join='inner', sort=False) # Document 2 means_result = 
pd.concat([interactions_data_result, real_mean_analysis], axis=1, join='inner', sort=False) # Document 3 significant_means_result = pd.concat([interactions_data_result, significant_mean_rank, significant_means], axis=1, join='inner', sort=False) # Document 5 deconvoluted_result = deconvoluted_complex_result_build(clusters_means, interactions, complex_compositions, counts, genes, counts_data) return pvalues_result, means_result, significant_means_result, deconvoluted_result def deconvoluted_complex_result_build(clusters_means: pd.DataFrame, interactions: pd.DataFrame, complex_compositions: pd.DataFrame, counts: pd.DataFrame, genes: pd.DataFrame, counts_data: str) -> pd.DataFrame: genes_counts = list(counts.index) genes_filtered = genes[genes['id_multidata'].apply(lambda gene: gene in genes_counts)] deconvoluted_complex_result_1 = deconvolute_complex_interaction_component(complex_compositions, genes_filtered, interactions, '_1', counts_data) deconvoluted_simple_result_1 = deconvolute_interaction_component(interactions, '_1', counts_data) deconvoluted_complex_result_2 = deconvolute_complex_interaction_component(complex_compositions, genes_filtered, interactions, '_2', counts_data) deconvoluted_simple_result_2 = deconvolute_interaction_component(interactions, '_2', counts_data) deconvoluted_result = deconvoluted_complex_result_1.append( [deconvoluted_simple_result_1, deconvoluted_complex_result_2, deconvoluted_simple_result_2], sort=False) deconvoluted_result.set_index('multidata_id', inplace=True, drop=True) deconvoluted_columns = ['gene_name', 'name', 'is_complex', 'protein_name', 'complex_name', 'id_cp_interaction', 'gene'] deconvoluted_result = deconvoluted_result[deconvoluted_columns] deconvoluted_result.rename({'name': 'uniprot'}, axis=1, inplace=True) deconvoluted_result = pd.concat([deconvoluted_result, clusters_means], axis=1, join='inner', sort=False) deconvoluted_result.set_index('gene', inplace=True, drop=True) deconvoluted_result.drop_duplicates(inplace=True) return deconvoluted_result def deconvolute_interaction_component(interactions, suffix, counts_data): interactions = interactions[~interactions['is_complex{}'.format(suffix)]] deconvoluted_result = pd.DataFrame() deconvoluted_result['gene'] = interactions['{}{}'.format(counts_data, suffix)] deconvoluted_result[ ['multidata_id', 'protein_name', 'gene_name', 'name', 'is_complex', 'id_cp_interaction', 'receptor']] = \ interactions[ ['multidata{}_id'.format(suffix), 'protein_name{}'.format(suffix), 'gene_name{}'.format(suffix), 'name{}'.format(suffix), 'is_complex{}'.format(suffix), 'id_cp_interaction', 'receptor{}'.format(suffix)]] deconvoluted_result['complex_name'] = pd.np.nan return deconvoluted_result def deconvolute_complex_interaction_component(complex_compositions, genes_filtered, interactions, suffix, counts_data): return_properties = [counts_data, 'protein_name', 'gene_name', 'name', 'is_complex', 'id_cp_interaction', 'receptor', 'complex_name'] if complex_compositions.empty: return pd.DataFrame( columns=return_properties) deconvoluted_result = pd.DataFrame() component = pd.DataFrame() component[counts_data] = interactions['{}{}'.format(counts_data, suffix)] component[[counts_data, 'protein_name', 'gene_name', 'name', 'is_complex', 'id_cp_interaction', 'id_multidata', 'receptor']] = \ interactions[['{}{}'.format(counts_data, suffix), 'protein_name{}'.format(suffix), 'gene_name{}'.format(suffix), 'name{}'.format(suffix), 'is_complex{}'.format(suffix), 'id_cp_interaction', 'multidata{}_id'.format(suffix), 
'receptor{}'.format(suffix)]] deconvolution_complex = pd.merge(complex_compositions, component, left_on='complex_multidata_id', right_on='id_multidata') deconvolution_complex = pd.merge(deconvolution_complex, genes_filtered, left_on='protein_multidata_id', right_on='protein_multidata_id', suffixes=['_complex', '_simple']) deconvoluted_result['gene'] = deconvolution_complex['{}_simple'.format(counts_data)] deconvoluted_result[ ['multidata_id', 'protein_name', 'gene_name', 'name', 'is_complex', 'id_cp_interaction', 'receptor', 'complex_name']] = \ deconvolution_complex[ ['complex_multidata_id', 'protein_name_simple', 'gene_name_simple', 'name_simple', 'is_complex_complex', 'id_cp_interaction', 'receptor_simple', 'name_complex']] return deconvoluted_result
py
1a39f2f1a3c2d523cf7fe76dc58c70e9b2e269a4
#!/usr/bin/python
# -*- coding:utf-8 -*-

"""
CNN/Convnets/Convolutional neural networks keras tensorflow
"""

from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers import Dense
from keras.constraints import maxnorm
from keras.layers import Dropout
from keras.layers import Flatten
from keras.utils import np_utils
from keras.optimizers import SGD


def train():
    """
    Train the model
    """
    epochs = 10
    lrate = 0.01
    decay = lrate/epochs
    sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
    model = create_model()
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    print(model.summary())
    model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=epochs, batch_size=32)
    return model


def create_model():
    """
    Create the model
    """
    model = Sequential()
    # 52.59% accuracy with input shape (3, 32, 32)
    #model.add(Conv2D(32, (3, 3), activation="relu", input_shape=(3, 32, 32), padding = "same", kernel_constraint=maxnorm(3)))
    #model.add(Conv2D(32, (3, 3), activation="relu", input_shape=(3, 32, 32), padding="same", kernel_constraint=maxnorm(3)))
    # 68.28% accuracy with input shape (32, 32, 3)
    model.add(Conv2D(32, (3, 3), activation="relu", input_shape=(32, 32, 3), padding = "same", kernel_constraint=maxnorm(3)))
    model.add(Conv2D(32, (3, 3), activation="relu", input_shape=(32, 32, 3), padding="same", kernel_constraint=maxnorm(3)))
    # Prevent overfitting
    # model.add(Dropout(0.2))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
    # model.add(Dropout(0.2))
    model.add(Dense(10, activation='softmax'))
    return model


if __name__ == '__main__':
    # Load the dataset, a collection of 32*32*3 images
    # 10 classes
    # 50000 training samples
    # 10000 test samples
    (X_train,Y_train),(X_test,Y_test)=cifar10.load_data()
    print(X_train.shape)
    print(X_test.shape)

    # Convert the data to floats in the range 0-1
    X_train=X_train/255.0
    X_test=X_test/255.0

    # Convert Y to one-hot label matrices:
    # the column of the true class is 1, all others are 0
    Y_train=np_utils.to_categorical(Y_train)
    Y_test=np_utils.to_categorical(Y_test)

    # reshape for tf
    #X_train = X_train.reshape(X_train.shape[0], 3, 32, 32)
    #X_test = X_test.reshape(-1, 3, 32, 32)

    # Train
    model = train()

    # Evaluate the model's accuracy
    # 52.59% accuracy with input shape (3, 32, 32)
    # 68.28% accuracy with input shape (32, 32, 3)
    scores = model.evaluate(X_test, Y_test, verbose=0)
    print("Final Accuracy: %.2f%%" % (scores[1]*100))

    # Save the model and the trained weights
    jsonFile=model.to_json()
    with open('output/cifar10.json','w') as file:
        file.write(jsonFile)
    model.save_weights('output/cifar10.h5')
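# Editor's note: a short companion sketch, not part of the original script.
# It shows one way the files written above ('output/cifar10.json' and
# 'output/cifar10.h5') could be reloaded later for evaluation or inference.
from keras.models import model_from_json


def load_trained_model(json_path='output/cifar10.json', weights_path='output/cifar10.h5'):
    """Rebuild the architecture from the saved JSON and load the trained weights."""
    with open(json_path) as json_file:
        model = model_from_json(json_file.read())
    model.load_weights(weights_path)
    # Compile again so that model.evaluate()/model.predict() can be used directly.
    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
    return model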
py
1a39f300cf3f4f88ec8a92c915557820289101d4
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (c) 2014, Vispy Development Team. All Rights Reserved. # Distributed under the (new) BSD License. See LICENSE.txt for more info. # ----------------------------------------------------------------------------- # Author: Nicolas P .Rougier # Date: 04/03/2014 # ----------------------------------------------------------------------------- from vispy import gloo, app from vispy.gloo import Program vertex = """ uniform float theta; attribute vec4 color; attribute vec2 position; varying vec4 v_color; void main() { float ct = cos(theta); float st = sin(theta); float x = 0.75* (position.x*ct - position.y*st); float y = 0.75* (position.x*st + position.y*ct); gl_Position = vec4(x, y, 0.0, 1.0); v_color = color; } """ fragment = """ varying vec4 v_color; void main() { gl_FragColor = v_color; } """ class Canvas(app.Canvas): def __init__(self): app.Canvas.__init__(self, size=(512, 512), title='Rotating quad', close_keys='escape') self.timer = app.Timer(1./60., self.on_timer) def on_initialize(self, event): # Build program & data self.program = Program(vertex, fragment, count=4) self.program['color'] = [(1, 0, 0, 1), (0, 1, 0, 1), (0, 0, 1, 1), (1, 1, 0, 1)] self.program['position'] = [(-1, -1), (-1, +1), (+1, -1), (+1, +1)] self.clock = 0 self.timer.start() def on_draw(self, event): gloo.set_clear_color('white') gloo.clear(color=True) self.program.draw('triangle_strip') def on_resize(self, event): gloo.set_viewport(0, 0, *event.size) def on_timer(self, event): self.clock += 0.001 * 1000.0 / 60. self.program['theta'] = self.clock self.update() if __name__ == '__main__': c = Canvas() c.show() app.run()
py
1a39f30758f9d0333f038a543d420b026351e722
from sklearn.mixture import GaussianMixture
import operator
import numpy as np
import math


class GMMSet:

    def __init__(self, gmm_order = 32):
        self.gmms = []
        self.gmm_order = gmm_order
        self.y = []

    def fit_new(self, x, label):
        # Fit one GMM per label and remember the label in the same position.
        self.y.append(label)
        gmm = GaussianMixture(self.gmm_order)
        # gmm = GaussianMixture(n_components=8, max_iter=200, covariance_type='diag', n_init=3)
        gmm.fit(x)
        self.gmms.append(gmm)

    def gmm_score(self, gmm, x):
        return np.sum(gmm.score(x))

    @staticmethod
    def softmax(scores):
        scores_sum = sum([math.exp(i) for i in scores])
        score_max = math.exp(max(scores))
        return round(score_max / scores_sum, 3)

    def predict_one(self, x):
        # Length-normalised log-likelihood of x under each fitted GMM.
        scores = [self.gmm_score(gmm, x) / len(x) for gmm in self.gmms]
        # Pick the label whose GMM gives the highest score.
        result = [(self.y[index], value) for (index, value) in enumerate(scores)]
        p = max(result, key=operator.itemgetter(1))
        softmax_score = self.softmax(scores)
        return p[0], softmax_score

    def before_pickle(self):
        pass

    def after_pickle(self):
        pass
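# Editor's note: a minimal usage sketch, not part of the original module.  The
# random arrays are placeholders for real (n_frames, n_features) feature
# matrices (e.g. MFCC frames); only the call pattern is being illustrated.
import numpy as np


def _demo():
    gmmset = GMMSet(gmm_order=4)   # small order so the toy data is enough to fit
    rng = np.random.RandomState(0)
    gmmset.fit_new(rng.randn(200, 13) + 5.0, "label_a")
    gmmset.fit_new(rng.randn(200, 13) - 5.0, "label_b")

    label, score = gmmset.predict_one(rng.randn(50, 13) + 5.0)
    print(label, score)   # should report "label_a" with a softmax score near 1.0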
py
1a39f31396e7884d51334d5506878486168d0235
from icalendar import vCalAddress from app.config import ICAL_VERSION, PRODUCT_ID from app.routers.export import ( create_ical_calendar, create_ical_event, event_to_ical ) class TestExport: def test_create_ical_calendar(self): cal = create_ical_calendar() assert cal.get('version') == ICAL_VERSION assert cal.get('prodid') == PRODUCT_ID def test_create_ical_event(self, event): ical_event = create_ical_event(event) assert event.owner.email in ical_event.get('organizer') assert ical_event.get('summary') == event.title def test_add_attendees(self, event, user): ical_event = create_ical_event(event) ical_event.add( 'attendee', vCalAddress(f'MAILTO:{user.email}'), encode=0 ) attendee = vCalAddress(f'MAILTO:{user.email}') assert attendee == ical_event.get('attendee') def test_event_to_ical(self, user, event): ical_event = event_to_ical(event, [user.email]) def does_contain(item: str) -> bool: """Returns if calendar contains item.""" return bytes(item, encoding='utf8') in bytes(ical_event) assert does_contain(ICAL_VERSION) assert does_contain(PRODUCT_ID) assert does_contain(event.owner.email) assert does_contain(event.title)
py
1a39f404371ab6d21a1008ed4bfd7497324a6e8d
# Generated by Django 3.0.1 on 2019-12-23 03:29 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Question', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('question_text', models.CharField(max_length=200)), ('pub_date', models.DateTimeField(verbose_name='date published')), ], ), migrations.CreateModel( name='Choice', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('choice_text', models.CharField(max_length=200)), ('votes', models.IntegerField(default=0)), ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question')), ], ), ]
py
1a39f5de32c488c1917936547a0035ebafb93869
# step 1. imports from sqlalchemy import (create_engine, MetaData, Table, Column, Integer, String, ForeignKey, Float, DateTime) from sqlalchemy.orm import sessionmaker, mapper, relationship from sqlalchemy.ext.horizontal_shard import ShardedSession from sqlalchemy.sql import operators, visitors import datetime # step 2. databases echo = True db1 = create_engine('sqlite://', echo=echo) db2 = create_engine('sqlite://', echo=echo) db3 = create_engine('sqlite://', echo=echo) db4 = create_engine('sqlite://', echo=echo) # step 3. create session function. this binds the shard ids # to databases within a ShardedSession and returns it. create_session = sessionmaker(class_=ShardedSession) create_session.configure(shards={ 'north_america':db1, 'asia':db2, 'europe':db3, 'south_america':db4 }) # step 4. table setup. meta = MetaData() # we need a way to create identifiers which are unique across all # databases. one easy way would be to just use a composite primary key, where one # value is the shard id. but here, we'll show something more "generic", an # id generation function. we'll use a simplistic "id table" stored in database # #1. Any other method will do just as well; UUID, hilo, application-specific, etc. ids = Table('ids', meta, Column('nextid', Integer, nullable=False)) def id_generator(ctx): # in reality, might want to use a separate transaction for this. c = db1.connect() nextid = c.execute(ids.select(for_update=True)).scalar() c.execute(ids.update(values={ids.c.nextid : ids.c.nextid + 1})) return nextid # table setup. we'll store a lead table of continents/cities, # and a secondary table storing locations. # a particular row will be placed in the database whose shard id corresponds to the # 'continent'. in this setup, secondary rows in 'weather_reports' will # be placed in the same DB as that of the parent, but this can be changed # if you're willing to write more complex sharding functions. weather_locations = Table("weather_locations", meta, Column('id', Integer, primary_key=True, default=id_generator), Column('continent', String(30), nullable=False), Column('city', String(50), nullable=False) ) weather_reports = Table("weather_reports", meta, Column('id', Integer, primary_key=True), Column('location_id', Integer, ForeignKey('weather_locations.id')), Column('temperature', Float), Column('report_time', DateTime, default=datetime.datetime.now), ) # create tables for db in (db1, db2, db3, db4): meta.drop_all(db) meta.create_all(db) # establish initial "id" in db1 db1.execute(ids.insert(), nextid=1) # step 5. define sharding functions. # we'll use a straight mapping of a particular set of "country" # attributes to shard id. shard_lookup = { 'North America':'north_america', 'Asia':'asia', 'Europe':'europe', 'South America':'south_america' } def shard_chooser(mapper, instance, clause=None): """shard chooser. looks at the given instance and returns a shard id note that we need to define conditions for the WeatherLocation class, as well as our secondary Report class which will point back to its WeatherLocation via its 'location' attribute. """ if isinstance(instance, WeatherLocation): return shard_lookup[instance.continent] else: return shard_chooser(mapper, instance.location) def id_chooser(query, ident): """id chooser. given a primary key, returns a list of shards to search. here, we don't have any particular information from a pk so we just return all shard ids. often, youd want to do some kind of round-robin strategy here so that requests are evenly distributed among DBs. 
""" return ['north_america', 'asia', 'europe', 'south_america'] def query_chooser(query): """query chooser. this also returns a list of shard ids, which can just be all of them. but here we'll search into the Query in order to try to narrow down the list of shards to query. """ ids = [] # we'll grab continent names as we find them # and convert to shard ids for column, operator, value in _get_query_comparisons(query): # "shares_lineage()" returns True if both columns refer to the same # statement column, adjusting for any annotations present. # (an annotation is an internal clone of a Column object # and occur when using ORM-mapped attributes like # "WeatherLocation.continent"). A simpler comparison, though less accurate, # would be "column.key == 'continent'". if column.shares_lineage(weather_locations.c.continent): if operator == operators.eq: ids.append(shard_lookup[value]) elif operator == operators.in_op: ids.extend(shard_lookup[v] for v in value) if len(ids) == 0: return ['north_america', 'asia', 'europe', 'south_america'] else: return ids def _get_query_comparisons(query): """Search an orm.Query object for binary expressions. Returns expressions which match a Column against one or more literal values as a list of tuples of the form (column, operator, values). "values" is a single value or tuple of values depending on the operator. """ binds = {} clauses = set() comparisons = [] def visit_bindparam(bind): # visit a bind parameter. Below we ensure # that we get the value whether it was specified # as part of query.params(), or is directly embedded # in the bind's "value" attribute. value = query._params.get(bind.key, bind.value) # some ORM functions place the bind's value as a # callable for deferred evaulation. Get that # actual value here. if callable(value): value = value() binds[bind] = value def visit_column(column): clauses.add(column) def visit_binary(binary): # special handling for "col IN (params)" if binary.left in clauses and \ binary.operator == operators.in_op and \ hasattr(binary.right, 'clauses'): comparisons.append( (binary.left, binary.operator, tuple(binds[bind] for bind in binary.right.clauses) ) ) elif binary.left in clauses and binary.right in binds: comparisons.append( (binary.left, binary.operator,binds[binary.right]) ) elif binary.left in binds and binary.right in clauses: comparisons.append( (binary.right, binary.operator,binds[binary.left]) ) # here we will traverse through the query's criterion, searching # for SQL constructs. We will place simple column comparisons # into a list. if query._criterion is not None: visitors.traverse_depthfirst(query._criterion, {}, {'bindparam':visit_bindparam, 'binary':visit_binary, 'column':visit_column } ) return comparisons # further configure create_session to use these functions create_session.configure( shard_chooser=shard_chooser, id_chooser=id_chooser, query_chooser=query_chooser ) # step 6. mapped classes. class WeatherLocation(object): def __init__(self, continent, city): self.continent = continent self.city = city class Report(object): def __init__(self, temperature): self.temperature = temperature # step 7. mappers mapper(WeatherLocation, weather_locations, properties={ 'reports':relationship(Report, backref='location') }) mapper(Report, weather_reports) # save and load objects! 
tokyo = WeatherLocation('Asia', 'Tokyo') newyork = WeatherLocation('North America', 'New York') toronto = WeatherLocation('North America', 'Toronto') london = WeatherLocation('Europe', 'London') dublin = WeatherLocation('Europe', 'Dublin') brasilia = WeatherLocation('South America', 'Brasila') quito = WeatherLocation('South America', 'Quito') tokyo.reports.append(Report(80.0)) newyork.reports.append(Report(75)) quito.reports.append(Report(85)) sess = create_session() for c in [tokyo, newyork, toronto, london, dublin, brasilia, quito]: sess.add(c) sess.flush() sess.expunge_all() t = sess.query(WeatherLocation).get(tokyo.id) assert t.city == tokyo.city assert t.reports[0].temperature == 80.0 north_american_cities = sess.query(WeatherLocation).filter(WeatherLocation.continent == 'North America') assert [c.city for c in north_american_cities] == ['New York', 'Toronto'] asia_and_europe = sess.query(WeatherLocation).filter(WeatherLocation.continent.in_(['Europe', 'Asia'])) assert set([c.city for c in asia_and_europe]) == set(['Tokyo', 'London', 'Dublin'])
py
1a39f652c1e7d8b0d52c47514921783be6094100
from setuptools import setup import os VERSION = "2.8.3" def get_long_description(): with open( os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md"), encoding="utf8", ) as fp: return fp.read() setup( name="github-to-sqlite", description="Save data from GitHub to a SQLite database", long_description=get_long_description(), long_description_content_type="text/markdown", author="Simon Willison", url="https://github.com/dogsheep/github-to-sqlite", license="Apache License, Version 2.0", version=VERSION, packages=["github_to_sqlite"], entry_points=""" [console_scripts] github-to-sqlite=github_to_sqlite.cli:cli """, install_requires=["sqlite-utils>=2.7.2", "requests", "PyYAML"], extras_require={"test": ["pytest", "requests-mock", "bs4"]}, tests_require=["github-to-sqlite[test]"], )
py
1a39f684a948ad4e3627d47937c19e8ca3ab0aee
class Team:

    def __init__(self, NO):
        self.NO = NO
        self.fighter_list = None
        self.order = None      # fighting order, stored as a list of fighter NOs
        self.fight_cnt = 0     # index into the order of the next fighter to send out

    @property
    def fighter_list(self):
        return self._fighter_list

    @fighter_list.setter
    def fighter_list(self, fighter_list):
        self._fighter_list = fighter_list

    def set_order(self, order):
        # Accept the order as any iterable of numbers or numeric strings.
        self.order = []
        for a_order in order:
            self.order.append(int(a_order))
        self.fight_cnt = 0

    def get_next_fighter(self):
        # Every fighter in the order has already fought.
        if self.fight_cnt >= len(self.order):
            return None
        next_fighter_no = self.order[self.fight_cnt]
        fighter = None
        for _fighter in self.fighter_list:
            if _fighter.properties["NO"] == next_fighter_no:
                fighter = _fighter
                break
        self.fight_cnt += 1
        return fighter
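# Editor's note: a hypothetical usage sketch, not part of the original module.
# The _Fighter class below is a stand-in: Team only requires that each fighter
# exposes properties["NO"], as used by get_next_fighter() above.
class _Fighter:
    def __init__(self, no):
        self.properties = {"NO": no}


if __name__ == "__main__":
    team = Team(1)
    team.fighter_list = [_Fighter(0), _Fighter(1), _Fighter(2)]
    team.set_order(["2", "0", "1"])   # the order may arrive as strings; set_order casts to int

    first = team.get_next_fighter()
    print(first.properties["NO"])                     # -> 2
    print(team.get_next_fighter().properties["NO"])   # -> 0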
py
1a39f6fe966ac65d75ba23506478fa045cf2dbef
import asyncio import logging import signal import sys from functools import partial from typing import Union, List, Callable, Tuple import serial from bleak import BleakClient from serial_asyncio import open_serial_connection from genki_wave.callbacks import WaveCallback from genki_wave.constants import API_CHAR_UUID, BAUDRATE from genki_wave.data.writing import get_start_api_package from genki_wave.protocols import ProtocolAsyncio, ProtocolThread, CommunicateCancel from genki_wave.utils import get_serial_port, get_or_create_event_loop logging.basicConfig(format="%(levelname).4s:%(asctime)s [%(filename)s:%(lineno)d] - %(message)s ") logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def prepare_protocol_as_bleak_callback_asyncio(protocol: ProtocolAsyncio) -> Callable: async def _inner(sender: str, data: bytearray) -> None: # NOTE: `bleak` expects a function with this signature await protocol.data_received(data) return _inner def prepare_protocol_as_bleak_callback(protocol: ProtocolThread) -> Callable: def _inner(sender: str, data: bytearray) -> None: # NOTE: `bleak` expects a function with this signature protocol.data_received(data) return _inner def bleak_callback(protocol: ProtocolAsyncio) -> Callable: """Wraps our protocol as a callback with the correct signature bleak expects NOTE: 1) Bleak checks if a function is a co-routine so we need to wrap the class method into an `async` function and 2) we need to take care that `asyncio.Queue` is correctly handled so we have 2 different wrappers, one for a regular `queue.Queue` and one for `asyncio.Queue`. """ if isinstance(protocol, ProtocolAsyncio): callback = prepare_protocol_as_bleak_callback_asyncio(protocol) elif isinstance(protocol, ProtocolThread): callback = prepare_protocol_as_bleak_callback(protocol) else: raise ValueError(f"Unknown protocol type {type(protocol)}") return callback def make_disconnect_callback(comm: CommunicateCancel): def cb(client): if not comm.cancel: print(f"Client {client.address} disconnected unexpectedly, exiting") sys.exit(1) return cb async def producer_bluetooth( protocol: Union[ProtocolAsyncio, ProtocolThread], comm: CommunicateCancel, ble_address: str, ) -> None: """Receives data from a serially connected wave ring and passes it to the `protocol` Args: protocol: An object that knows how to process the raw data sent from the Wave ring into a structured format and passes it along between `producer` and `consumer`. comm: An object that allows `producer` and `consumer` to communicate when to cancel the process ble_address: Address of the bluetooth device to connect to. E.g. 'D5:73:DB:85:B4:A1' Note: The producer doesn't return a value, but the data gets added to the `protocol` that can be accessed from other parts of the program i.e. some `consumer` """ print(f"Connecting to wave at address {ble_address}") callback = bleak_callback(protocol) async with BleakClient(ble_address, disconnected_callback=make_disconnect_callback(comm)) as client: await client.start_notify(API_CHAR_UUID, callback) await client.write_gatt_char(API_CHAR_UUID, get_start_api_package(), False) print("Connected to Wave") while True: # This `while` loop and `asyncio.sleep` statement is some magic that is required to continually fetch # the data from the bluetooth device. 
await asyncio.sleep(0.1) if comm.cancel: print("Recieved a cancel signal, stopping ble client") break await client.stop_notify(API_CHAR_UUID) async def producer_serial(protocol: ProtocolAsyncio, comm: CommunicateCancel, serial_port: str): """Receives data from a serially connected wave ring and passes it to the `protocol` Args: protocol: An object that knows how to process the raw data sent from the Wave ring into a structured format and passes it along between `producer` and `consumer`. comm: An object that allows `producer` and `consumer` to communicate when to cancel the process serial_port: The serial port to read from Note: The producer doesn't return a value, but the data gets added to the `protocol` that can be accessed from other parts of the program i.e. some `consumer` """ reader, writer = await open_serial_connection(url=serial_port, baudrate=BAUDRATE, parity=serial.PARITY_EVEN) writer.write(get_start_api_package()) while True: # The number of bytes read here is an arbitrary power of 2 on the order of a size of a single package packet = await reader.read(n=128) await protocol.data_received(packet) if comm.cancel: print("Recieved a cancel signal, stopping serial connection") break async def consumer( protocol: ProtocolAsyncio, comm: CommunicateCancel, callbacks: Union[List[WaveCallback], Tuple[WaveCallback]], ) -> None: """Consumes the data from a producer via a protocol Args: protocol: An object that knows how to process the raw data sent from the Wave ring into a structured format and passes it along between `producer` and `consumer`. comm: An object that allows `producer` and `consumer` to communicate when to cancel the process callbacks: A list/tuple of callbacks that handle the data passed from the wave ring when available """ while True: package = await protocol.queue.get() if comm.is_cancel(package) or comm.cancel: print("Got a cancel message. Exiting consumer loop...") comm.cancel = True break for callback in callbacks: callback(package) def make_sigint_handler(comm: CommunicateCancel): """Create a signal handler to cancel an asyncio loop using signals.""" def handler(*args): comm.cancel = True return handler def _run_asyncio( callbacks: List[WaveCallback], producer: Union[producer_bluetooth, producer_serial], protocol: ProtocolAsyncio ) -> None: """Runs a producer and a consumer, hooking into the data using the supplied callbacks Args: callbacks: See docs for `consumer` producer: A callable that takes 2 arguments, a protocol and a communication object protocol: An object that knows how to process the raw data sent from the Wave ring into a structured format and passes it along between `producer` and `consumer`. """ # A singleton that sends messages about whether the data transfer has been canceled. comm = CommunicateCancel() loop = get_or_create_event_loop() loop.add_signal_handler(signal.SIGINT, make_sigint_handler(comm)) # Note: The consumer and the producer send the data via the instance of `protocol` tasks = asyncio.gather(producer(protocol, comm), consumer(protocol, comm, callbacks)) loop.run_until_complete(tasks) def run_asyncio_bluetooth(callbacks: List[WaveCallback], ble_address) -> None: """Runs an async `consumer-producer` loop using user supplied callbacks for a bluetooth device Args: callbacks: A list/tuple of callbacks that handle the data passed from the wave ring ble_address: Address of the bluetooth device to connect to. E.g. 
'D5:73:DB:85:B4:A1' """ _run_asyncio(callbacks, partial(producer_bluetooth, ble_address=ble_address), ProtocolAsyncio()) def run_asyncio_serial(callbacks: List[WaveCallback], serial_port: str = None) -> None: """Runs an async `consumer-producer` loop using user supplied callbacks for a serial device Args: callbacks: A list/tuple of callbacks that handle the data passed from the wave ring serial_port: The serial port to read from. If `None` will try to determine it automatically based on the operating system the script is running on """ serial_port = get_serial_port() if serial_port is None else serial_port _run_asyncio(callbacks, partial(producer_serial, serial_port=serial_port), ProtocolAsyncio())
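# Editor's note: a minimal usage sketch, not part of the original module.  It
# assumes a plain callable is acceptable wherever a WaveCallback is expected,
# since consumer() above simply calls callback(package) for each package.  (If
# used outside this module, import run_asyncio_serial from wherever this file
# lives in the genki_wave package -- the exact path is not shown here.)
def print_package(package):
    # Print every package pushed from the Wave ring.
    print(package)


if __name__ == "__main__":
    # Reads from an auto-detected serial port (see get_serial_port) until the
    # process receives SIGINT (Ctrl-C).
    run_asyncio_serial([print_package])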
py
1a39f746e55ce11f555b45f2ad1634750df6bbd9
# Copyright 2018-2020 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module contains the mixin interface class for creating differentiable quantum tapes with TensorFlow. """ # pylint: disable=protected-access, attribute-defined-outside-init import numpy as np import tensorflow as tf try: from tensorflow.python.eager.tape import should_record_backprop except ImportError: from tensorflow.python.eager.tape import should_record as should_record_backprop from pennylane.tape.queuing import AnnotatedQueue class TFInterface(AnnotatedQueue): """Mixin class for applying an TensorFlow interface to a :class:`~.JacobianTape`. TensorFlow-compatible quantum tape classes can be created via subclassing: .. code-block:: python class MyTFQuantumTape(TFInterface, JacobianTape): Alternatively, the TensorFlow interface can be dynamically applied to existing quantum tapes via the :meth:`~.apply` class method. This modifies the tape **in place**. Once created, the TensorFlow interface can be used to perform quantum-classical differentiable programming. .. note:: If using a device that supports native TensorFlow computation and backpropagation, such as :class:`~.DefaultQubitTF`, the TensorFlow interface **does not need to be applied**. It is only applied to tapes executed on non-TensorFlow compatible devices. **Example** Once a TensorFlow quantum tape has been created, it can be differentiated using the gradient tape: .. code-block:: python dev = qml.device("default.qubit", wires=1) p = tf.Variable([0.1, 0.2, 0.3], dtype=tf.float64) with tf.GradientTape() as tape: with TFInterface.apply(JacobianTape()) as qtape: qml.Rot(p[0], p[1] ** 2 + p[0] * p[2], p[1] * tf.sin(p[2]), wires=0) expval(qml.PauliX(0)) result = qtape.execute(dev) >>> print(result) tf.Tensor([0.06982072], shape=(1,), dtype=float64) >>> grad = tape.gradient(result, p) >>> print(grad) tf.Tensor([0.29874274 0.39710271 0.09958091], shape=(3,), dtype=float64) The TensorFlow interface defaults to ``tf.float64`` output. This can be modified by providing the ``dtype`` argument when applying the interface: >>> p = tf.Variable([0.1, 0.2, 0.3], dtype=tf.float32) >>> with tf.GradientTape() as tape: ... TFInterface.apply(qtape, dtype=tf.float32) # reusing the previous qtape ... result = qtape.execute(dev) >>> print(result) tf.Tensor([0.06982072], shape=(1,), dtype=float32) >>> grad = tape.gradient(result, p) >>> print(grad) tf.Tensor([0.2895088 0.38464668 0.09645163], shape=(3,), dtype=float32) """ dtype = tf.float64 @property def interface(self): # pylint: disable=missing-function-docstring return "tf" def _update_trainable_params(self): params = self.get_parameters(trainable_only=False) trainable_params = set() for idx, p in enumerate(params): # Determine which input tensors/Variables are being recorded for backpropagation. 
# The function should_record_backprop, documented here: # https://github.com/tensorflow/tensorflow/tree/master/tensorflow/python/eager/tape.py#L167 # accepts lists of *Tensors* (not Variables), returning True if all are being watched by one or more # existing gradient tapes, False if not. if isinstance(p, (tf.Variable, tf.Tensor)) and should_record_backprop( # we need to convert any Variable objects to Tensors here, otherwise # should_record_backprop will raise an error [tf.convert_to_tensor(p)] ): trainable_params.add(idx) self.trainable_params = trainable_params @staticmethod def convert_to_numpy(tensors): """Converts any TensorFlow tensors in a sequence to NumPy arrays. Args: tensors (Sequence[Any, tf.Variable, tf.Tensor]): input sequence Returns: list[Any, array]: list with all tensors converted to NumPy arrays """ return [i.numpy() if isinstance(i, (tf.Variable, tf.Tensor)) else i for i in tensors] @tf.custom_gradient def _execute(self, params, **input_kwargs): # unwrap free parameters args = self.convert_to_numpy(params) # unwrap constant parameters all_params = self.get_parameters(trainable_only=False) all_params_unwrapped = self.convert_to_numpy(all_params) self.set_parameters(all_params_unwrapped, trainable_only=False) res = self.execute_device(args, input_kwargs["device"]) self.set_parameters(all_params, trainable_only=False) def grad(grad_output, **tfkwargs): variables = tfkwargs.get("variables", None) self.set_parameters(all_params_unwrapped, trainable_only=False) jacobian = self.jacobian(input_kwargs["device"], params=args, **self.jacobian_options) self.set_parameters(all_params, trainable_only=False) jacobian = tf.constant(jacobian, dtype=self.dtype) # Reshape gradient output array as a 2D row-vector. grad_output_row = tf.reshape(grad_output, [1, -1]) # Calculate the vector-Jacobian matrix product, and unstack the output. grad_input = tf.matmul(grad_output_row, jacobian) grad_input = tf.unstack(tf.reshape(grad_input, [-1])) if variables is not None: return grad_input, variables return grad_input if res.dtype == np.dtype("object"): res = np.hstack(res) return tf.convert_to_tensor(res, dtype=self.dtype), grad @classmethod def apply(cls, tape, dtype=tf.float64): """Apply the TensorFlow interface to an existing tape in-place. Args: tape (.JacobianTape): a quantum tape to apply the TF interface to dtype (tf.dtype): the dtype that the returned quantum tape should output **Example** >>> with JacobianTape() as tape: ... qml.RX(0.5, wires=0) ... expval(qml.PauliZ(0)) >>> TFInterface.apply(tape) >>> tape <TFQuantumTape: wires=<Wires = [0]>, params=1> """ tape_class = getattr(tape, "__bare__", tape.__class__) tape.__bare__ = tape_class tape.__class__ = type("TFQuantumTape", (cls, tape_class), {"dtype": dtype}) tape._update_trainable_params() return tape
py
1a39f814072364036e6675fc649669af76abf042
# Generated by Django 3.0.8 on 2020-07-23 02:57 import django.contrib.gis.db.models.fields import django.core.validators from django.db import migrations, models import django.db.models.deletion import nmmis.utils.generators class Migration(migrations.Migration): initial = True dependencies = [ ('municipal', '0001_initial'), ] operations = [ migrations.CreateModel( name='Building', fields=[ ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('id', models.CharField(default=nmmis.utils.generators.aphnum_random2, editable=False, max_length=12, primary_key=True, serialize=False)), ('name', models.CharField(max_length=80)), ('catg', models.CharField(max_length=100)), ('sub_catg', models.CharField(max_length=100)), ('building_no', models.CharField(max_length=100, unique=True)), ('land_area', models.IntegerField()), ('build_area', models.IntegerField()), ('build_date', models.DateField()), ('floor', models.IntegerField()), ('toilet', models.IntegerField()), ('roof_type', models.CharField(max_length=100)), ('road_access', models.BooleanField(default=False)), ('elect_access', models.BooleanField(default=False)), ('image', models.ImageField(blank=True, null=True, upload_to='places_images/%Y/%m/%d', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['jpg', 'jpeg'])])), ('geom', django.contrib.gis.db.models.fields.PointField(srid=4326)), ('ward', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='municipal.Ward')), ], options={ 'verbose_name': 'Building', 'verbose_name_plural': 'Buildings', 'db_table': 'building', 'ordering': ['id'], }, ), ]
py
1a39f875e529e0478699cfaec5920033ddfd0011
# Copyright (c) Microsoft Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import pytest from playwright.async_api import Error, Page from tests.server import Server async def test_evaluate_handle(page, server): await page.goto(server.EMPTY_PAGE) main_frame = page.main_frame assert main_frame.page == page window_handle = await main_frame.evaluate_handle("window") assert window_handle async def test_frame_element(page, server, utils): await page.goto(server.EMPTY_PAGE) frame1 = await utils.attach_frame(page, "frame1", server.EMPTY_PAGE) await utils.attach_frame(page, "frame2", server.EMPTY_PAGE) frame3 = await utils.attach_frame(page, "frame3", server.EMPTY_PAGE) frame1handle1 = await page.query_selector("#frame1") frame1handle2 = await frame1.frame_element() frame3handle1 = await page.query_selector("#frame3") frame3handle2 = await frame3.frame_element() assert await frame1handle1.evaluate("(a, b) => a === b", frame1handle2) assert await frame3handle1.evaluate("(a, b) => a === b", frame3handle2) assert await frame1handle1.evaluate("(a, b) => a === b", frame3handle1) is False async def test_frame_element_with_content_frame(page, server, utils): await page.goto(server.EMPTY_PAGE) frame = await utils.attach_frame(page, "frame1", server.EMPTY_PAGE) handle = await frame.frame_element() content_frame = await handle.content_frame() assert content_frame == frame async def test_frame_element_throw_when_detached(page, server, utils): await page.goto(server.EMPTY_PAGE) frame1 = await utils.attach_frame(page, "frame1", server.EMPTY_PAGE) await page.eval_on_selector("#frame1", "e => e.remove()") error = None try: await frame1.frame_element() except Error as e: error = e assert error.message == "Frame has been detached." 
async def test_evaluate_throw_for_detached_frames(page, server, utils): frame1 = await utils.attach_frame(page, "frame1", server.EMPTY_PAGE) await utils.detach_frame(page, "frame1") error = None try: await frame1.evaluate("7 * 8") except Error as e: error = e assert "Execution Context is not available in detached frame" in error.message async def test_evaluate_isolated_between_frames(page, server, utils): await page.goto(server.EMPTY_PAGE) await utils.attach_frame(page, "frame1", server.EMPTY_PAGE) assert len(page.frames) == 2 [frame1, frame2] = page.frames assert frame1 != frame2 await asyncio.gather( frame1.evaluate("window.a = 1"), frame2.evaluate("window.a = 2") ) [a1, a2] = await asyncio.gather( frame1.evaluate("window.a"), frame2.evaluate("window.a") ) assert a1 == 1 assert a2 == 2 async def test_should_handle_nested_frames(page, server, utils): await page.goto(server.PREFIX + "/frames/nested-frames.html") assert utils.dump_frames(page.main_frame) == [ "http://localhost:<PORT>/frames/nested-frames.html", " http://localhost:<PORT>/frames/frame.html (aframe)", " http://localhost:<PORT>/frames/two-frames.html (2frames)", " http://localhost:<PORT>/frames/frame.html (dos)", " http://localhost:<PORT>/frames/frame.html (uno)", ] async def test_should_send_events_when_frames_are_manipulated_dynamically( page, server, utils ): await page.goto(server.EMPTY_PAGE) # validate frameattached events attached_frames = [] page.on("frameattached", lambda frame: attached_frames.append(frame)) await utils.attach_frame(page, "frame1", "./assets/frame.html") assert len(attached_frames) == 1 assert "/assets/frame.html" in attached_frames[0].url # validate framenavigated events navigated_frames = [] page.on("framenavigated", lambda frame: navigated_frames.append(frame)) await page.evaluate( """() => { frame = document.getElementById('frame1') frame.src = './empty.html' return new Promise(x => frame.onload = x) }""" ) assert len(navigated_frames) == 1 assert navigated_frames[0].url == server.EMPTY_PAGE # validate framedetached events detached_frames = [] page.on("framedetached", lambda frame: detached_frames.append(frame)) await utils.detach_frame(page, "frame1") assert len(detached_frames) == 1 assert detached_frames[0].is_detached() async def test_framenavigated_when_navigating_on_anchor_urls(page, server): await page.goto(server.EMPTY_PAGE) async with page.expect_event("framenavigated"): await page.goto(server.EMPTY_PAGE + "#foo") assert page.url == server.EMPTY_PAGE + "#foo" async def test_persist_main_frame_on_cross_process_navigation(page, server): await page.goto(server.EMPTY_PAGE) main_frame = page.main_frame await page.goto(server.CROSS_PROCESS_PREFIX + "/empty.html") assert page.main_frame == main_frame async def test_should_not_send_attach_detach_events_for_main_frame(page, server): has_events = [] page.on("frameattached", lambda frame: has_events.append(True)) page.on("framedetached", lambda frame: has_events.append(True)) await page.goto(server.EMPTY_PAGE) assert has_events == [] async def test_detach_child_frames_on_navigation(page, server): attached_frames = [] detached_frames = [] navigated_frames = [] page.on("frameattached", lambda frame: attached_frames.append(frame)) page.on("framedetached", lambda frame: detached_frames.append(frame)) page.on("framenavigated", lambda frame: navigated_frames.append(frame)) await page.goto(server.PREFIX + "/frames/nested-frames.html") assert len(attached_frames) == 4 assert len(detached_frames) == 0 assert len(navigated_frames) == 5 attached_frames = [] 
detached_frames = [] navigated_frames = [] await page.goto(server.EMPTY_PAGE) assert len(attached_frames) == 0 assert len(detached_frames) == 4 assert len(navigated_frames) == 1 async def test_framesets(page, server): attached_frames = [] detached_frames = [] navigated_frames = [] page.on("frameattached", lambda frame: attached_frames.append(frame)) page.on("framedetached", lambda frame: detached_frames.append(frame)) page.on("framenavigated", lambda frame: navigated_frames.append(frame)) await page.goto(server.PREFIX + "/frames/frameset.html") assert len(attached_frames) == 4 assert len(detached_frames) == 0 assert len(navigated_frames) == 5 attached_frames = [] detached_frames = [] navigated_frames = [] await page.goto(server.EMPTY_PAGE) assert len(attached_frames) == 0 assert len(detached_frames) == 4 assert len(navigated_frames) == 1 async def test_frame_from_inside_shadow_dom(page, server): await page.goto(server.PREFIX + "/shadow.html") await page.evaluate( """async url => { frame = document.createElement('iframe'); frame.src = url; document.body.shadowRoot.appendChild(frame); await new Promise(x => frame.onload = x); }""", server.EMPTY_PAGE, ) assert len(page.frames) == 2 assert page.frames[1].url == server.EMPTY_PAGE async def test_frame_name(page, server, utils): await utils.attach_frame(page, "theFrameId", server.EMPTY_PAGE) await page.evaluate( """url => { frame = document.createElement('iframe'); frame.name = 'theFrameName'; frame.src = url; document.body.appendChild(frame); return new Promise(x => frame.onload = x); }""", server.EMPTY_PAGE, ) assert page.frames[0].name == "" assert page.frames[1].name == "theFrameId" assert page.frames[2].name == "theFrameName" async def test_frame_parent(page, server, utils): await utils.attach_frame(page, "frame1", server.EMPTY_PAGE) await utils.attach_frame(page, "frame2", server.EMPTY_PAGE) assert page.frames[0].parent_frame is None assert page.frames[1].parent_frame == page.main_frame assert page.frames[2].parent_frame == page.main_frame async def test_should_report_different_frame_instance_when_frame_re_attaches( page, server, utils ): frame1 = await utils.attach_frame(page, "frame1", server.EMPTY_PAGE) await page.evaluate( """() => { window.frame = document.querySelector('#frame1') window.frame.remove() }""" ) assert frame1.is_detached() async with page.expect_event("frameattached") as frame2_info: await page.evaluate("() => document.body.appendChild(window.frame)") frame2 = await frame2_info.value assert frame2.is_detached() is False assert frame1 != frame2 async def test_strict_mode(page: Page, server: Server): await page.goto(server.EMPTY_PAGE) await page.set_content( """ <button>Hello</button> <button>Hello</button> """ ) with pytest.raises(Error): await page.text_content("button", strict=True) with pytest.raises(Error): await page.query_selector("button", strict=True)
py
1a39f9d5227143b35f2e86fa91489fe7b52298da
# -*- coding: utf-8 -*- """ werkzeug ~~~~~~~~ Werkzeug is the Swiss Army knife of Python web development. It provides useful classes and functions for any WSGI application to make the life of a python web developer much easier. All of the provided classes are independent from each other so you can mix it with any other library. :copyright: 2007 Pallets :license: BSD-3-Clause """ import sys from types import ModuleType __version__ = "0.15.4" # This import magic raises concerns quite often which is why the implementation # and motivation is explained here in detail now. # # The majority of the functions and classes provided by Werkzeug work on the # HTTP and WSGI layer. There is no useful grouping for those which is why # they are all importable from "werkzeug" instead of the modules where they are # implemented. The downside of that is, that now everything would be loaded at # once, even if unused. # # The implementation of a lazy-loading module in this file replaces the # werkzeug package when imported from within. Attribute access to the werkzeug # module will then lazily import from the modules that implement the objects. # import mapping to objects in other modules all_by_module = { "werkzeug.debug": ["DebuggedApplication"], "werkzeug.local": [ "Local", "LocalManager", "LocalProxy", "LocalStack", "release_local", ], "werkzeug.serving": ["run_simple"], "werkzeug.test": ["Client", "EnvironBuilder", "create_environ", "run_wsgi_app"], "werkzeug.testapp": ["test_app"], "werkzeug.exceptions": ["abort", "Aborter"], "werkzeug.urls": [ "url_decode", "url_encode", "url_quote", "url_quote_plus", "url_unquote", "url_unquote_plus", "url_fix", "Href", "iri_to_uri", "uri_to_iri", ], "werkzeug.formparser": ["parse_form_data"], "werkzeug.utils": [ "escape", "environ_property", "append_slash_redirect", "redirect", "cached_property", "import_string", "dump_cookie", "parse_cookie", "unescape", "format_string", "find_modules", "header_property", "html", "xhtml", "HTMLBuilder", "validate_arguments", "ArgumentValidationError", "bind_arguments", "secure_filename", ], "werkzeug.wsgi": [ "get_current_url", "get_host", "pop_path_info", "peek_path_info", "ClosingIterator", "FileWrapper", "make_line_iter", "LimitedStream", "responder", "wrap_file", "extract_path_info", ], "werkzeug.datastructures": [ "MultiDict", "CombinedMultiDict", "Headers", "EnvironHeaders", "ImmutableList", "ImmutableDict", "ImmutableMultiDict", "TypeConversionDict", "ImmutableTypeConversionDict", "Accept", "MIMEAccept", "CharsetAccept", "LanguageAccept", "RequestCacheControl", "ResponseCacheControl", "ETags", "HeaderSet", "WWWAuthenticate", "Authorization", "FileMultiDict", "CallbackDict", "FileStorage", "OrderedMultiDict", "ImmutableOrderedMultiDict", ], "werkzeug.useragents": ["UserAgent"], "werkzeug.http": [ "parse_etags", "parse_date", "http_date", "cookie_date", "parse_cache_control_header", "is_resource_modified", "parse_accept_header", "parse_set_header", "quote_etag", "unquote_etag", "generate_etag", "dump_header", "parse_list_header", "parse_dict_header", "parse_authorization_header", "parse_www_authenticate_header", "remove_entity_headers", "is_entity_header", "remove_hop_by_hop_headers", "parse_options_header", "dump_options_header", "is_hop_by_hop_header", "unquote_header_value", "quote_header_value", "HTTP_STATUS_CODES", ], "werkzeug.wrappers": [ "BaseResponse", "BaseRequest", "Request", "Response", "AcceptMixin", "ETagRequestMixin", "ETagResponseMixin", "ResponseStreamMixin", "CommonResponseDescriptorsMixin", "UserAgentMixin", 
"AuthorizationMixin", "WWWAuthenticateMixin", "CommonRequestDescriptorsMixin", ], "werkzeug.middleware.dispatcher": ["DispatcherMiddleware"], "werkzeug.middleware.shared_data": ["SharedDataMiddleware"], "werkzeug.security": ["generate_password_hash", "check_password_hash"], # the undocumented easteregg ;-) "werkzeug._internal": ["_easteregg"], } # modules that should be imported when accessed as attributes of werkzeug attribute_modules = frozenset(["exceptions", "routing"]) object_origins = {} for module, items in all_by_module.items(): for item in items: object_origins[item] = module class module(ModuleType): """Automatically import objects from the modules.""" def __getattr__(self, name): if name in object_origins: module = __import__(object_origins[name], None, None, [name]) for extra_name in all_by_module[module.__name__]: setattr(self, extra_name, getattr(module, extra_name)) return getattr(module, name) elif name in attribute_modules: __import__("werkzeug." + name) return ModuleType.__getattribute__(self, name) def __dir__(self): """Just show what we want to show.""" result = list(new_module.__all__) result.extend( ( "__file__", "__doc__", "__all__", "__docformat__", "__name__", "__path__", "__package__", "__version__", ) ) return result # keep a reference to this module so that it's not garbage collected old_module = sys.modules["werkzeug"] # setup the new module and patch it into the dict of loaded modules new_module = sys.modules["werkzeug"] = module("werkzeug") new_module.__dict__.update( { "__file__": __file__, "__package__": "werkzeug", "__path__": __path__, "__doc__": __doc__, "__version__": __version__, "__all__": tuple(object_origins) + tuple(attribute_modules), "__docformat__": "restructuredtext en", } ) # Due to bootstrapping issues we need to import exceptions here. # Don't ask :-( __import__("werkzeug.exceptions")
py
1a39fae4d0c26bf3c3172066f6b4f0ae1e255f31
# # Autogenerated by Thrift Compiler (0.13.0) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # # options string: py:new_style # from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException from thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive import fix_spec import sys import beeswaxd.BeeswaxService import logging from .ttypes import * from thrift.Thrift import TProcessor from thrift.transport import TTransport all_structs = [] class Iface(beeswaxd.BeeswaxService.Iface): def Cancel(self, query_id): """ Parameters: - query_id """ pass def ResetCatalog(self): pass def ResetTable(self, request): """ Parameters: - request """ pass def GetRuntimeProfile(self, query_id): """ Parameters: - query_id """ pass def CloseInsert(self, handle): """ Parameters: - handle """ pass def PingImpalaService(self): pass def GetExecSummary(self, handle): """ Parameters: - handle """ pass class Client(beeswaxd.BeeswaxService.Client, Iface): def __init__(self, iprot, oprot=None): beeswaxd.BeeswaxService.Client.__init__(self, iprot, oprot) def Cancel(self, query_id): """ Parameters: - query_id """ self.send_Cancel(query_id) return self.recv_Cancel() def send_Cancel(self, query_id): self._oprot.writeMessageBegin('Cancel', TMessageType.CALL, self._seqid) args = Cancel_args() args.query_id = query_id args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_Cancel(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = Cancel_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.error is not None: raise result.error raise TApplicationException(TApplicationException.MISSING_RESULT, "Cancel failed: unknown result") def ResetCatalog(self): self.send_ResetCatalog() return self.recv_ResetCatalog() def send_ResetCatalog(self): self._oprot.writeMessageBegin('ResetCatalog', TMessageType.CALL, self._seqid) args = ResetCatalog_args() args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_ResetCatalog(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = ResetCatalog_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "ResetCatalog failed: unknown result") def ResetTable(self, request): """ Parameters: - request """ self.send_ResetTable(request) return self.recv_ResetTable() def send_ResetTable(self, request): self._oprot.writeMessageBegin('ResetTable', TMessageType.CALL, self._seqid) args = ResetTable_args() args.request = request args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_ResetTable(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = ResetTable_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "ResetTable failed: unknown result") def GetRuntimeProfile(self, query_id): """ Parameters: - query_id """ 
self.send_GetRuntimeProfile(query_id) return self.recv_GetRuntimeProfile() def send_GetRuntimeProfile(self, query_id): self._oprot.writeMessageBegin('GetRuntimeProfile', TMessageType.CALL, self._seqid) args = GetRuntimeProfile_args() args.query_id = query_id args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_GetRuntimeProfile(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = GetRuntimeProfile_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.error is not None: raise result.error raise TApplicationException(TApplicationException.MISSING_RESULT, "GetRuntimeProfile failed: unknown result") def CloseInsert(self, handle): """ Parameters: - handle """ self.send_CloseInsert(handle) return self.recv_CloseInsert() def send_CloseInsert(self, handle): self._oprot.writeMessageBegin('CloseInsert', TMessageType.CALL, self._seqid) args = CloseInsert_args() args.handle = handle args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_CloseInsert(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = CloseInsert_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.error is not None: raise result.error if result.error2 is not None: raise result.error2 raise TApplicationException(TApplicationException.MISSING_RESULT, "CloseInsert failed: unknown result") def PingImpalaService(self): self.send_PingImpalaService() return self.recv_PingImpalaService() def send_PingImpalaService(self): self._oprot.writeMessageBegin('PingImpalaService', TMessageType.CALL, self._seqid) args = PingImpalaService_args() args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_PingImpalaService(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = PingImpalaService_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "PingImpalaService failed: unknown result") def GetExecSummary(self, handle): """ Parameters: - handle """ self.send_GetExecSummary(handle) return self.recv_GetExecSummary() def send_GetExecSummary(self, handle): self._oprot.writeMessageBegin('GetExecSummary', TMessageType.CALL, self._seqid) args = GetExecSummary_args() args.handle = handle args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_GetExecSummary(self): iprot = self._iprot (fname, mtype, rseqid) = iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(iprot) iprot.readMessageEnd() raise x result = GetExecSummary_result() result.read(iprot) iprot.readMessageEnd() if result.success is not None: return result.success if result.error is not None: raise result.error if result.error2 is not None: raise result.error2 raise TApplicationException(TApplicationException.MISSING_RESULT, "GetExecSummary failed: unknown result") class Processor(beeswaxd.BeeswaxService.Processor, Iface, TProcessor): def __init__(self, handler): 
beeswaxd.BeeswaxService.Processor.__init__(self, handler) self._processMap["Cancel"] = Processor.process_Cancel self._processMap["ResetCatalog"] = Processor.process_ResetCatalog self._processMap["ResetTable"] = Processor.process_ResetTable self._processMap["GetRuntimeProfile"] = Processor.process_GetRuntimeProfile self._processMap["CloseInsert"] = Processor.process_CloseInsert self._processMap["PingImpalaService"] = Processor.process_PingImpalaService self._processMap["GetExecSummary"] = Processor.process_GetExecSummary self._on_message_begin = None def on_message_begin(self, func): self._on_message_begin = func def process(self, iprot, oprot): (name, type, seqid) = iprot.readMessageBegin() if self._on_message_begin: self._on_message_begin(name, type, seqid) if name not in self._processMap: iprot.skip(TType.STRUCT) iprot.readMessageEnd() x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name)) oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid) x.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() return else: self._processMap[name](self, seqid, iprot, oprot) return True def process_Cancel(self, seqid, iprot, oprot): args = Cancel_args() args.read(iprot) iprot.readMessageEnd() result = Cancel_result() try: result.success = self._handler.Cancel(args.query_id) msg_type = TMessageType.REPLY except TTransport.TTransportException: raise except beeswaxd.ttypes.BeeswaxException as error: msg_type = TMessageType.REPLY result.error = error except TApplicationException as ex: logging.exception('TApplication exception in handler') msg_type = TMessageType.EXCEPTION result = ex except Exception: logging.exception('Unexpected exception in handler') msg_type = TMessageType.EXCEPTION result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("Cancel", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_ResetCatalog(self, seqid, iprot, oprot): args = ResetCatalog_args() args.read(iprot) iprot.readMessageEnd() result = ResetCatalog_result() try: result.success = self._handler.ResetCatalog() msg_type = TMessageType.REPLY except TTransport.TTransportException: raise except TApplicationException as ex: logging.exception('TApplication exception in handler') msg_type = TMessageType.EXCEPTION result = ex except Exception: logging.exception('Unexpected exception in handler') msg_type = TMessageType.EXCEPTION result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("ResetCatalog", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_ResetTable(self, seqid, iprot, oprot): args = ResetTable_args() args.read(iprot) iprot.readMessageEnd() result = ResetTable_result() try: result.success = self._handler.ResetTable(args.request) msg_type = TMessageType.REPLY except TTransport.TTransportException: raise except TApplicationException as ex: logging.exception('TApplication exception in handler') msg_type = TMessageType.EXCEPTION result = ex except Exception: logging.exception('Unexpected exception in handler') msg_type = TMessageType.EXCEPTION result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("ResetTable", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_GetRuntimeProfile(self, seqid, iprot, oprot): args = GetRuntimeProfile_args() args.read(iprot) iprot.readMessageEnd() result = 
GetRuntimeProfile_result() try: result.success = self._handler.GetRuntimeProfile(args.query_id) msg_type = TMessageType.REPLY except TTransport.TTransportException: raise except beeswaxd.ttypes.BeeswaxException as error: msg_type = TMessageType.REPLY result.error = error except TApplicationException as ex: logging.exception('TApplication exception in handler') msg_type = TMessageType.EXCEPTION result = ex except Exception: logging.exception('Unexpected exception in handler') msg_type = TMessageType.EXCEPTION result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("GetRuntimeProfile", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_CloseInsert(self, seqid, iprot, oprot): args = CloseInsert_args() args.read(iprot) iprot.readMessageEnd() result = CloseInsert_result() try: result.success = self._handler.CloseInsert(args.handle) msg_type = TMessageType.REPLY except TTransport.TTransportException: raise except beeswaxd.ttypes.QueryNotFoundException as error: msg_type = TMessageType.REPLY result.error = error except beeswaxd.ttypes.BeeswaxException as error2: msg_type = TMessageType.REPLY result.error2 = error2 except TApplicationException as ex: logging.exception('TApplication exception in handler') msg_type = TMessageType.EXCEPTION result = ex except Exception: logging.exception('Unexpected exception in handler') msg_type = TMessageType.EXCEPTION result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("CloseInsert", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_PingImpalaService(self, seqid, iprot, oprot): args = PingImpalaService_args() args.read(iprot) iprot.readMessageEnd() result = PingImpalaService_result() try: result.success = self._handler.PingImpalaService() msg_type = TMessageType.REPLY except TTransport.TTransportException: raise except TApplicationException as ex: logging.exception('TApplication exception in handler') msg_type = TMessageType.EXCEPTION result = ex except Exception: logging.exception('Unexpected exception in handler') msg_type = TMessageType.EXCEPTION result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("PingImpalaService", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_GetExecSummary(self, seqid, iprot, oprot): args = GetExecSummary_args() args.read(iprot) iprot.readMessageEnd() result = GetExecSummary_result() try: result.success = self._handler.GetExecSummary(args.handle) msg_type = TMessageType.REPLY except TTransport.TTransportException: raise except beeswaxd.ttypes.QueryNotFoundException as error: msg_type = TMessageType.REPLY result.error = error except beeswaxd.ttypes.BeeswaxException as error2: msg_type = TMessageType.REPLY result.error2 = error2 except TApplicationException as ex: logging.exception('TApplication exception in handler') msg_type = TMessageType.EXCEPTION result = ex except Exception: logging.exception('Unexpected exception in handler') msg_type = TMessageType.EXCEPTION result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') oprot.writeMessageBegin("GetExecSummary", msg_type, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() # HELPER FUNCTIONS AND STRUCTURES class Cancel_args(object): """ Attributes: - query_id """ def __init__(self, query_id=None,): self.query_id = query_id def read(self, 
iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.query_id = beeswaxd.ttypes.QueryHandle() self.query_id.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Cancel_args') if self.query_id is not None: oprot.writeFieldBegin('query_id', TType.STRUCT, 1) self.query_id.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(Cancel_args) Cancel_args.thrift_spec = ( None, # 0 (1, TType.STRUCT, 'query_id', [beeswaxd.ttypes.QueryHandle, None], None, ), # 1 ) class Cancel_result(object): """ Attributes: - success - error """ def __init__(self, success=None, error=None,): self.success = success self.error = error def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.STRUCT: self.success = Status.ttypes.TStatus() self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.error = beeswaxd.ttypes.BeeswaxException() self.error.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Cancel_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() if self.error is not None: oprot.writeFieldBegin('error', TType.STRUCT, 1) self.error.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(Cancel_result) Cancel_result.thrift_spec = ( (0, TType.STRUCT, 'success', [Status.ttypes.TStatus, None], None, ), # 0 (1, TType.STRUCT, 'error', [beeswaxd.ttypes.BeeswaxException, None], None, ), # 1 ) class ResetCatalog_args(object): def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return 
iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('ResetCatalog_args') oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(ResetCatalog_args) ResetCatalog_args.thrift_spec = ( ) class ResetCatalog_result(object): """ Attributes: - success """ def __init__(self, success=None,): self.success = success def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.STRUCT: self.success = Status.ttypes.TStatus() self.success.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('ResetCatalog_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(ResetCatalog_result) ResetCatalog_result.thrift_spec = ( (0, TType.STRUCT, 'success', [Status.ttypes.TStatus, None], None, ), # 0 ) class ResetTable_args(object): """ Attributes: - request """ def __init__(self, request=None,): self.request = request def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.request = TResetTableReq() self.request.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('ResetTable_args') if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 1) self.request.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % 
(self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(ResetTable_args) ResetTable_args.thrift_spec = ( None, # 0 (1, TType.STRUCT, 'request', [TResetTableReq, None], None, ), # 1 ) class ResetTable_result(object): """ Attributes: - success """ def __init__(self, success=None,): self.success = success def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.STRUCT: self.success = Status.ttypes.TStatus() self.success.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('ResetTable_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(ResetTable_result) ResetTable_result.thrift_spec = ( (0, TType.STRUCT, 'success', [Status.ttypes.TStatus, None], None, ), # 0 ) class GetRuntimeProfile_args(object): """ Attributes: - query_id """ def __init__(self, query_id=None,): self.query_id = query_id def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.query_id = beeswaxd.ttypes.QueryHandle() self.query_id.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('GetRuntimeProfile_args') if self.query_id is not None: oprot.writeFieldBegin('query_id', TType.STRUCT, 1) self.query_id.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(GetRuntimeProfile_args) GetRuntimeProfile_args.thrift_spec = ( None, # 0 (1, TType.STRUCT, 'query_id', [beeswaxd.ttypes.QueryHandle, None], None, ), # 1 ) class GetRuntimeProfile_result(object): """ Attributes: - success - error """ def __init__(self, success=None, 
error=None,): self.success = success self.error = error def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.STRING: self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.error = beeswaxd.ttypes.BeeswaxException() self.error.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('GetRuntimeProfile_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRING, 0) oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success) oprot.writeFieldEnd() if self.error is not None: oprot.writeFieldBegin('error', TType.STRUCT, 1) self.error.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(GetRuntimeProfile_result) GetRuntimeProfile_result.thrift_spec = ( (0, TType.STRING, 'success', 'UTF8', None, ), # 0 (1, TType.STRUCT, 'error', [beeswaxd.ttypes.BeeswaxException, None], None, ), # 1 ) class CloseInsert_args(object): """ Attributes: - handle """ def __init__(self, handle=None,): self.handle = handle def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.handle = beeswaxd.ttypes.QueryHandle() self.handle.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('CloseInsert_args') if self.handle is not None: oprot.writeFieldBegin('handle', TType.STRUCT, 1) self.handle.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(CloseInsert_args) CloseInsert_args.thrift_spec = ( None, # 0 (1, TType.STRUCT, 'handle', [beeswaxd.ttypes.QueryHandle, None], None, ), # 1 ) class CloseInsert_result(object): """ Attributes: - success - error - error2 """ def __init__(self, success=None, 
error=None, error2=None,): self.success = success self.error = error self.error2 = error2 def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.STRUCT: self.success = TInsertResult() self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.error = beeswaxd.ttypes.QueryNotFoundException() self.error.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.error2 = beeswaxd.ttypes.BeeswaxException() self.error2.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('CloseInsert_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() if self.error is not None: oprot.writeFieldBegin('error', TType.STRUCT, 1) self.error.write(oprot) oprot.writeFieldEnd() if self.error2 is not None: oprot.writeFieldBegin('error2', TType.STRUCT, 2) self.error2.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(CloseInsert_result) CloseInsert_result.thrift_spec = ( (0, TType.STRUCT, 'success', [TInsertResult, None], None, ), # 0 (1, TType.STRUCT, 'error', [beeswaxd.ttypes.QueryNotFoundException, None], None, ), # 1 (2, TType.STRUCT, 'error2', [beeswaxd.ttypes.BeeswaxException, None], None, ), # 2 ) class PingImpalaService_args(object): def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('PingImpalaService_args') oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(PingImpalaService_args) PingImpalaService_args.thrift_spec = ( ) class PingImpalaService_result(object): """ Attributes: - success """ def __init__(self, success=None,): self.success = success def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and 
self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.STRUCT: self.success = TPingImpalaServiceResp() self.success.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('PingImpalaService_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(PingImpalaService_result) PingImpalaService_result.thrift_spec = ( (0, TType.STRUCT, 'success', [TPingImpalaServiceResp, None], None, ), # 0 ) class GetExecSummary_args(object): """ Attributes: - handle """ def __init__(self, handle=None,): self.handle = handle def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.handle = beeswaxd.ttypes.QueryHandle() self.handle.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('GetExecSummary_args') if self.handle is not None: oprot.writeFieldBegin('handle', TType.STRUCT, 1) self.handle.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(GetExecSummary_args) GetExecSummary_args.thrift_spec = ( None, # 0 (1, TType.STRUCT, 'handle', [beeswaxd.ttypes.QueryHandle, None], None, ), # 1 ) class GetExecSummary_result(object): """ Attributes: - success - error - error2 """ def __init__(self, success=None, error=None, error2=None,): self.success = success self.error = error self.error2 = error2 def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.STRUCT: self.success = ExecStats.ttypes.TExecSummary() self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: if ftype == 
TType.STRUCT: self.error = beeswaxd.ttypes.QueryNotFoundException() self.error.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.error2 = beeswaxd.ttypes.BeeswaxException() self.error2.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('GetExecSummary_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() if self.error is not None: oprot.writeFieldBegin('error', TType.STRUCT, 1) self.error.write(oprot) oprot.writeFieldEnd() if self.error2 is not None: oprot.writeFieldBegin('error2', TType.STRUCT, 2) self.error2.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(GetExecSummary_result) GetExecSummary_result.thrift_spec = ( (0, TType.STRUCT, 'success', [ExecStats.ttypes.TExecSummary, None], None, ), # 0 (1, TType.STRUCT, 'error', [beeswaxd.ttypes.QueryNotFoundException, None], None, ), # 1 (2, TType.STRUCT, 'error2', [beeswaxd.ttypes.BeeswaxException, None], None, ), # 2 ) fix_spec(all_structs) del all_structs
py
1a39fae54697b0ea622631a47c55625441b90e2e
# global variables
NORMAL = 0
FAST = 1

# global imports
from dataclasses import dataclass, field
from random import shuffle

# local imports
from .point import Point
from .rooms import getRooms, getRandomRoom
from .floor import Floor


@dataclass(frozen = True, order = False)
class Plotter:
    width: int = field()
    height: int = field()
    mode: int = field(default = NORMAL)

    def makePlan(self, mode, floors, maxFloors = 1):
        '''Uses the mode to construct floors, storing them in the floors input,
        then returns a tuple of how many basement floors were created and how
        many non-basement floors were created.'''
        # Get the number of floors to make
        # TODO: Allow multiple floors to be made
        floors.append(Floor(self.width, self.height, level = 0, mode = self.mode))

        # Setup the room list and insertion points for the floor
        insertion_points = [Point(0, 0)]
        rooms = getRooms(mode)

        # Put rooms into the floor randomly
        # TODO: Allow multiple floors to be made
        cur_floor = floors[0]
        while len(insertion_points) > 0:
            #print(insertion_points)
            # Get room if we have inserted all the required ones
            if len(rooms) == 0:
                rooms.append(getRandomRoom())
            # Insert room and check if it was inserted
            if cur_floor.insert(insertion_points[0], rooms[0]):
                insertion_points.append(insertion_points[0] + Point(rooms[0].width, 0))
                insertion_points.append(insertion_points[0] + Point(0, rooms[0].height))
                del rooms[0]
            del insertion_points[0]
            shuffle(insertion_points)

        # return the number of floors on the basement and upper levels
        # TODO: Allow multiple floors to be made
        return (0, 1)
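# A minimal usage sketch (illustrative, not part of the original module; it
# assumes the sibling point/rooms/floor modules imported above are available):
#
#   floors = []
#   plotter = Plotter(width = 10, height = 10, mode = NORMAL)
#   basements, levels = plotter.makePlan(NORMAL, floors)
#   # floors[0] now holds the randomly filled ground floor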
py
1a39fda421ccfd02b1ced1c4f21af1e399acad80
from .gui import main

main()
py
1a39feba06dde173619443f29774e916786e479e
import itertools
import json  # used by loadconfig below
import sys

import requests

from commands import *


def loadconfig(filename):
    with open(filename) as file:
        content = file.read()
    config = json.loads(content)
    return config


option_has_value = {
    'u': True,
    'p': True,
    'c': True,
    'i': False
}


def getoptionvalue(opt, param):
    if len(param) != 0 and option_has_value.get(opt):
        return param.pop(0)


def getoptions(param):
    if len(param) == 0:
        return {}
    opt = param.pop(0)
    if opt[0] != '-':
        return getoptions(param)
    rs = {}
    idx = 0
    for idx in range(1, len(opt) - 1):
        if idx == 1 and opt[idx] == '-':
            break
        rs.setdefault(opt[idx])
    idx = idx + 1
    opt = opt[idx:]
    rs.setdefault(opt, getoptionvalue(opt, param))
    rrs = getoptions(param)
    return {**rs, **rrs}


def authenticate(url, sess, opts):
    username = opts.get('u') or opts.get('username')
    password = opts.get('p') or opts.get('password')
    if username is not None and password is not None:
        params = {'username': username, 'password': password}
        valid = check_credentials_validity(url, sess, params)
        if valid:
            sess.auth = username, password
            return
        else:
            print('You\'re not authenticated')
    sess.auth = None, None


def sessionconfig(sess, config, opts):
    cert = config['cert']
    sess.verify = cert
    authenticate(opts['url'], sess, opts)


def commandshelp():
    print('Commands available:')
    print(' ', 'register username=<username> password=<password> name=<user\'s name>')
    print(' ', 'Register the user <user\'s name> with username <username> and password <password>')
    print(' ', 'login username=<username> password=<password>')
    print(' ', 'Login with username <username> and password <password>')
    print(' ', 'ls')
    print(' ', 'List files')
    print(' ', 'create name=<server_file_name>')
    print(' ', 'Create a remote file with name <server_file_name>')
    print(' ', 'change id=<file_id> [content=<new_file_content>] [name=<new_file_name>]')
    print(' ', 'Change content and/or name of the remote file <file_id>')
    print(' ', 'download id=<file_id> [file=<file_name>] [location=<dir_path>]')
    print(' ', 'Download remote file <file_id> and save as <file_name> in the directory <dir_path>')
    print(' ', 'Default <dir_path> is application folder downloads')
    print(' ', 'Default <file_name> is the remote file name')
    print(' ', 'delete id=<file_id>')
    print(' ', 'Delete remote file <file_id>')
    print(' ', 'upload name=<server_file_name> file=<file_name>')
    print(' ', 'Upload local file <file_name> as <server_file_name>')
    print(' ', 'check id=<file_id>')
    print(' ', 'Check user permissions over the file <file_id>')
    print(' ', 'manage fileId=<file_id> userId=<user_id> read=[True|False] write=[True|False]')
    print(' ', 'Change the user <user_id> permissions over the file <file_id>')
    print(' ', 'The parameters "read" and "write" are the permissions to be set')
    print(' ', 'users')
    print(' ', 'List all the users')
    print(' ', 'read id=<file_id> [numbered=[TRUE|FALSE]]')
    print(' ', 'Show file <file_id> content')
    print(' ', 'Parameter "numbered" sets on/off the number lines')
    print(' ', 'write id=<file_id> content=<content_to_insert> after=<line_number>')
    print(' ', 'Writes <content> in the file <file_id> after line <line_number>')
    print(' ', 'append id=<file_id> content=<content_to_append>')
    print(' ', 'Appends <content> in end of the file <file_id>')
    print(' ', 'replace id=<file_id> line=<line_number> content=<new_line_content>')
    print(' ', 'Replaces the line <line_number>\'s content by <content> in the file <file_id>')
    print(' ', 'erase id=<file_id> line=<line_number>')
    print(' ', 'Erases the line <line_number> in the file <file_id>')


def help(*params):
    print('Usage:')
    print(' ', 'python main.py [-c <command>] [-i] [-u <username>] [-p <password>]')
    print('Options:')
    print(' ', '-c <command>')
    print(' ', 'Executes the command <command>')
    print(' ', '-i')
    print(' ', 'Enters in interactive mode to execute commands')
    print(' ', '-u, --username <username>')
    print(' ', 'Configs the username <username> to be used to login')
    print(' ', 'Should be used together with --password option')
    print(' ', '-p, --password <password>')
    print(' ', 'Configs the password <password> to be used to login')
    print(' ', 'Should be used together with --username option')
    commandshelp()


cmdprocessors = {
    'help': help,
    'ls': ls,
    'upload': upload,
    'download': download,
    'register': register_user,
    'create': create,
    'change': change,
    'delete': delete,
    'login': authenticate,
    'check': check_permissions,
    'manage': manage_permissions,
    'users': list_users,
    'read': read,
    'write': write,
    'append': append,
    'replace': replace,
    'erase': erase
}


def process(url, sess, cmd, flags):
    processor = cmdprocessors.get(cmd)
    if processor is None:
        print('Invalid Command')
        return
    try:
        processor(url, sess, flags)
    except requests.exceptions.ConnectionError as ce:
        print(ce, file=sys.stderr)


def splitter(string):
    sep = ' '
    initIdx = idx = 0
    insideQuote = False
    parts = []
    for idx in range(len(string)):
        if string[idx] == '"':
            insideQuote = not insideQuote
            continue
        if not insideQuote and string[idx] == sep:
            if initIdx != idx:
                parts.append(string[initIdx: idx])
            initIdx = idx + 1
    idx = idx + 1
    if initIdx != idx:
        parts.append(string[initIdx: idx])
    return [*map(lambda e: e.replace('"', ''), parts)]


def parsecmd(strcmd):
    cmd, *listparams = splitter(strcmd)
    listparams = list(itertools.filterfalse(lambda p: '=' not in p, listparams))
    params = {k: v for k, v in (p.split('=', 1) for p in listparams)}
    return cmd, params


def process_cmd(val, url, session):
    if val:
        cmd, params = parsecmd(val)
        process(url, session, cmd, params)
    else:
        print('Argument expected for the -c option ')


def interactive(url, session):
    while True:
        i = input('>>')
        cmd, flags = parsecmd(i)
        if cmd == 'exit':
            break
        if not cmd:
            continue
        process(url, session, cmd, flags)


def main(args):
    config = loadconfig('config.json')
    url = "%s:%d" % (config['address'], config['port'])
    with requests.Session() as session:
        options = getoptions(args[1:])
        options['url'] = url
        sessionconfig(session, config, options)
        execcmd = 'c' in options
        execinter = 'i' in options
        if execcmd:
            process_cmd(options['c'], url, session)
        if execinter or not execcmd:
            interactive(url, session)


if __name__ == '__main__':
    main(sys.argv)
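# Example invocations (illustrative; they assume a config.json with "address",
# "port" and "cert" entries and a commands module providing the handlers above):
#
#   python main.py -i
#   python main.py -u alice -p secret -c "ls"
#   python main.py --username alice --password secret -c "download id=3 file=report.txt"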
py
1a39feddf739a5a164666f807e1e29488987af4b
import re import json from ..base_request import BaseRequest from .device import Device from ..settings import Settings from .. import exceptions from .service import Service from .service_install import ServiceInstall def _is_valid_env_var_name(env_var_name): return re.match('^[a-zA-Z_]+[a-zA-Z0-9_]*$', env_var_name) class EnvironmentVariable(object): """ This class is a wrapper for environment variable models. """ def __init__(self): self.application = ApplicationEnvVariable() self.service_environment_variable = ServiceEnvVariable() self.device = DeviceEnvVariable() self.device_service_environment_variable = DeviceServiceEnvVariable() class DeviceEnvVariable(object): """ This class implements device environment variable model for balena python SDK. """ def __init__(self): self.base_request = BaseRequest() self.device = Device() self.settings = Settings() def _fix_device_env_var_name_key(self, env_var): """ Internal method to workaround the fact that applications environment variables contain a `name` property while device environment variables contain an `env_var_name` property instead. """ if 'env_var_name' in env_var: env_var['name'] = env_var['env_var_name'] env_var.pop('env_var_name', None) return env_var def get_all(self, uuid): """ Get all device environment variables. Args: uuid (str): device uuid. Returns: list: device environment variables. Examples: >>> balena.models.environment_variables.device.get_all('8deb12a58e3b6d3920db1c2b6303d1ff32f23d5ab99781ce1dde6876e8d143') [{u'device': {u'__deferred': {u'uri': u'/ewa/device(122950)'}, u'__id': 122950}, u'__metadata': {u'type': u'', u'uri': u'/ewa/device_environment_variable(2173)'}, u'id': 2173, u'value': u'1322944771964103', u'env_var_name': u'BALENA_DEVICE_RESTART'}] """ device = self.device.get(uuid) params = { 'filter': 'device', 'eq': device['id'] } return self.base_request.request( 'device_environment_variable', 'GET', params=params, endpoint=self.settings.get('pine_endpoint') )['d'] def create(self, uuid, env_var_name, value): """ Create a device environment variable. Args: uuid (str): device uuid. env_var_name (str): environment variable name. value (str): environment variable value. Returns: dict: new device environment variable info. Examples: >>> balena.models.environment_variables.device.create('8deb12a58e3b6d3920db1c2b6303d1ff32f23d5ab99781ce1dde6876e8d143','test_env4', 'testing1') {'name': u'test_env4', u'__metadata': {u'type': u'', u'uri': u'/balena/device_environment_variable(42166)'}, u'value': u'testing1', u'device': {u'__deferred': {u'uri': u'/balena/device(115792)'}, u'__id': 115792}, u'id': 42166} """ if not _is_valid_env_var_name(env_var_name): raise exceptions.InvalidParameter('env_var_name', env_var_name) device = self.device.get(uuid) data = { 'device': device['id'], 'env_var_name': env_var_name, 'value': value } new_env_var = json.loads(self.base_request.request( 'device_environment_variable', 'POST', data=data, endpoint=self.settings.get('pine_endpoint') ).decode('utf-8')) return self._fix_device_env_var_name_key(new_env_var) def update(self, var_id, value): """ Update a device environment variable. Args: var_id (str): environment variable id. value (str): new environment variable value. 
Examples: >>> balena.models.environment_variables.device.update(2184, 'new value') 'OK' """ params = { 'filter': 'id', 'eq': var_id } data = { 'value': value } return self.base_request.request( 'device_environment_variable', 'PATCH', params=params, data=data, endpoint=self.settings.get('pine_endpoint') ) def remove(self, var_id): """ Remove a device environment variable. Args: var_id (str): environment variable id. Examples: >>> balena.models.environment_variables.device.remove(2184) 'OK' """ params = { 'filter': 'id', 'eq': var_id } return self.base_request.request( 'device_environment_variable', 'DELETE', params=params, endpoint=self.settings.get('pine_endpoint') ) def get_all_by_application(self, app_id): """ Get all device environment variables for an application. Args: app_id (str): application id. Returns: list: list of device environment variables. Examples: >>> balena.models.environment_variables.device.get_all_by_application('5780') [{'name': u'device1', u'__metadata': {u'type': u'', u'uri': u'/balena/device_environment_variable(40794)'}, u'value': u'test', u'device': {u'__deferred': {u'uri': u'/balena/device(115792)'}, u'__id': 115792}, u'id': 40794}, {'name': u'BALENA_DEVICE_RESTART', u'__metadata': {u'type': u'', u'uri': u'/balena/device_environment_variable(1524)'}, u'value': u'961506585823372', u'device': {u'__deferred': {u'uri': u'/balena/device(121794)'}, u'__id': 121794}, u'id': 1524}] """ params = { 'filter': 'device/belongs_to__application', 'eq': app_id } env_list = self.base_request.request( 'device_environment_variable', 'GET', params=params, endpoint=self.settings.get('pine_endpoint')) return list(map(self._fix_device_env_var_name_key, env_list['d'])) class DeviceServiceEnvVariable(object): """ This class implements device service variable model for balena python SDK. """ def __init__(self): self.base_request = BaseRequest() self.device = Device() self.settings = Settings() self.service = Service() self.service_install = ServiceInstall() def get_all(self, uuid): """ Get all device service environment variables belong to a device. Args: uuid (str): device uuid. Returns: list: device service environment variables. 
Examples: >>> balena.models.environment_variables.device_service_environment_variable.get_all('f5213eac0d63ac47721b037a7406d306') [{u'name': u'dev_proxy', u'created_at': u'2018-03-16T19:23:21.727Z', u'__metadata': {u'type': u'', u'uri': u'/balena/device_service_environment_variable(28888)'}, u'value': u'value', u'service_install': [{u'__metadata': {u'type': u'', u'uri': u'/balena/service_install(30788)'}, u'id': 30788, u'service': [{u'service_name': u'proxy', u'__metadata': {u'type': u'', u'uri': u'/balena/service(NaN)'}}]}], u'id': 28888}, {u'name': u'dev_data', u'created_at': u'2018-03-16T19:23:11.614Z', u'__metadata': {u'type': u'', u'uri': u'/balena/device_service_environment_variable(28887)'}, u'value': u'dev_data_value', u'service_install': [{u'__metadata': {u'type': u'', u'uri': u'/balena/service_install(30789)'}, u'id': 30789, u'service': [{u'service_name': u'data', u'__metadata': {u'type': u'', u'uri': u'/balena/service(NaN)'}}]}], u'id': 28887}, {u'name': u'dev_data1', u'created_at': u'2018-03-17T05:53:19.257Z', u'__metadata': {u'type': u'', u'uri': u'/balena/device_service_environment_variable(28964)'}, u'value': u'aaaa', u'service_install': [{u'__metadata': {u'type': u'', u'uri': u'/balena/service_install(30789)'}, u'id': 30789, u'service': [{u'service_name': u'data', u'__metadata': {u'type': u'', u'uri': u'/balena/service(NaN)'}}]}], u'id': 28964}] """ # TODO: pine client for python device = self.device.get(uuid) query = '$expand=service_install($select=id&$expand=service($select=service_name))&$filter=service_install/any(d:d/device%20eq%20{device_id})'.format(device_id=device['id']) return self.base_request.request( 'device_service_environment_variable', 'GET', raw_query=query, endpoint=self.settings.get('pine_endpoint') )['d'] def create(self, uuid, service_name, env_var_name, value): """ Create a device service environment variable. Args: uuid (str): device uuid. service_name (str): service name. env_var_name (str): device service environment variable name. value (str): device service environment variable value. Returns: dict: new device service environment variable info. Examples: >>> balena.models.environment_variables.device_service_environment_variable.create('f5213eac0d63ac47721b037a7406d306', 'data', 'dev_data_sdk', 'test1') {"id":28970,"created_at":"2018-03-17T10:13:14.184Z","service_install":{"__deferred":{"uri":"/balena/service_install(30789)"},"__id":30789},"value":"test1","name":"dev_data_sdk","__metadata":{"uri":"/balena/device_service_environment_variable(28970)","type":""}} """ if not _is_valid_env_var_name(env_var_name): raise exceptions.InvalidParameter('env_var_name', env_var_name) device = self.device.get(uuid) services = self.service.get_all_by_application(device['belongs_to__application']['__id']) service_id = [i['id'] for i in services if i['service_name'] == service_name] if service_id: service_installs = self.service_install.get_all_by_device(device['id']) service_install_id = [i['id'] for i in service_installs if i['installs__service']['__id'] == service_id[0]] data = { 'service_install': service_install_id[0], 'name': env_var_name, 'value': value } return json.loads(self.base_request.request( 'device_service_environment_variable', 'POST', data=data, endpoint=self.settings.get('pine_endpoint') ).decode('utf-8')) else: raise exceptions.ServiceNotFound(service_name) def update(self, var_id, value): """ Update a device service environment variable. Args: var_id (str): device environment variable id. value (str): new device environment variable value. 
Examples: >>> balena.models.environment_variables.device_service_environment_variable.update('28970', 'test1 new value') 'OK' """ params = { 'filter': 'id', 'eq': var_id } data = { 'value': value } return self.base_request.request( 'device_service_environment_variable', 'PATCH', params=params, data=data, endpoint=self.settings.get('pine_endpoint') ) def remove(self, var_id): """ Remove a device service environment variable. Args: var_id (str): device service environment variable id. Examples: >>> balena.models.environment_variables.device_service_environment_variable.remove('28970') 'OK' """ params = { 'filter': 'id', 'eq': var_id } return self.base_request.request( 'device_service_environment_variable', 'DELETE', params=params, endpoint=self.settings.get('pine_endpoint') ) def get_all_by_application(self, app_id): """ Get all device service environment variables belong to an application. Args: app_id (int): application id. Returns: list: list of device service environment variables. Examples: >>> balena.models.environment_variables.device_service_environment_variable.get_all_by_application(1043050) [{'name': u'device1', u'__metadata': {u'type': u'', u'uri': u'/balena/device_environment_variable(40794)'}, u'value': u'test', u'device': {u'__deferred': {u'uri': u'/balena/device(115792)'}, u'__id': 115792}, u'id': 40794}, {'name': u'BALENA_DEVICE_RESTART', u'__metadata': {u'type': u'', u'uri': u'/balena/device_environment_variable(1524)'}, u'value': u'961506585823372', u'device': {u'__deferred': {u'uri': u'/balena/device(121794)'}, u'__id': 121794}, u'id': 1524}] """ raw_query = '$filter=service_install/any(si:si/device/any(d:d/belongs_to__application%20eq%20{0}))'.format(app_id) return self.base_request.request( 'device_service_environment_variable', 'GET', raw_query=raw_query, endpoint=self.settings.get('pine_endpoint') )['d'] class ApplicationEnvVariable(object): """ This class implements application environment variable model for balena python SDK. Attributes: SYSTEM_VARIABLE_RESERVED_NAMES (list): list of reserved system variable names. OTHER_RESERVED_NAMES_START (list): list of prefix for system variable. """ SYSTEM_VARIABLE_RESERVED_NAMES = ['BALENA', 'RESIN', 'USER'] OTHER_RESERVED_NAMES_START = ['BALENA_', 'RESIN_'] def __init__(self): self.base_request = BaseRequest() self.settings = Settings() def get_all(self, app_id): """ Get all environment variables by application. Args: app_id (str): application id. Returns: list: application environment variables. Examples: >>> balena.models.environment_variables.application.get_all(9020) [{u'application': {u'__deferred': {u'uri': u'/ewa/application(9020)'}, u'__id': 9020}, u'__metadata': {u'type': u'', u'uri': u'/ewa/environment_variable(5650)'}, u'id': 5650, u'value': u'7330634368117899', u'name': u'BALENA_RESTART'}] """ params = { 'filter': 'application', 'eq': app_id } return self.base_request.request( 'application_environment_variable', 'GET', params=params, endpoint=self.settings.get('pine_endpoint') )['d'] def create(self, app_id, env_var_name, value): """ Create an environment variable for application. Args: app_id (str): application id. env_var_name (str): environment variable name. value (str): environment variable value. Returns: dict: new application environment info. 
Examples: >>> balena.models.environment_variables.application.create('978062', 'test2', '123') {'id': 91138, 'application': {'__deferred': {'uri': '/balena/application(978062)'}, '__id': 978062}, 'name': 'test2', 'value': '123', '__metadata': {'uri': '/balena/environment_variable(91138)', 'type': ''}} """ if not _is_valid_env_var_name(env_var_name): raise exceptions.InvalidParameter('env_var_name', env_var_name) data = { 'name': env_var_name, 'value': value, 'application': app_id } return json.loads(self.base_request.request( 'application_environment_variable', 'POST', data=data, endpoint=self.settings.get('pine_endpoint') ).decode('utf-8')) def update(self, var_id, value): """ Update an environment variable value for application. Args: var_id (str): environment variable id. value (str): new environment variable value. Examples: >>> balena.models.environment_variables.application.update(5652, 'new value') 'OK' """ params = { 'filter': 'id', 'eq': var_id } data = { 'value': value } return self.base_request.request( 'application_environment_variable', 'PATCH', params=params, data=data, endpoint=self.settings.get('pine_endpoint') ) def remove(self, var_id): """ Remove application environment variable. Args: var_id (str): environment variable id. Examples: >>> balena.models.environment_variables.application.remove(5652) 'OK' """ params = { 'filter': 'id', 'eq': var_id } return self.base_request.request( 'application_environment_variable', 'DELETE', params=params, endpoint=self.settings.get('pine_endpoint') ) def is_system_variable(self, variable): """ Check if a variable is system specific. Args: variable (str): environment variable name. Returns: bool: True if system variable, False otherwise. Examples: >>> balena.models.environment_variables.application.is_system_variable('BALENA_API_KEY') True >>> balena.models.environment_variables.application.is_system_variable('APPLICATION_API_KEY') False """ if variable in self.SYSTEM_VARIABLE_RESERVED_NAMES: return True return any( true for prefix in self.OTHER_RESERVED_NAMES_START if variable.startswith(prefix) ) class ServiceEnvVariable(object): """ This class implements service environment variable model for balena python SDK. """ def __init__(self): self.base_request = BaseRequest() self.settings = Settings() self.service = Service() def get_all_by_application(self, app_id): """ Get all service environment variables by application. Args: app_id (str): application id. Returns: list: service application environment variables. 
Examples: >>> balena.models.environment_variables.service_environment_variable.get_all_by_application('1005160') [{u'name': u'app_data', u'service': {u'__deferred': {u'uri': u'/balena/service(21667)'}, u'__id': 21667}, u'created_at': u'2018-03-16T19:21:21.087Z', u'__metadata': {u'type': u'', u'uri': u'/balena/service_environment_variable(12365)'}, u'value': u'app_data_value', u'id': 12365}, {u'name': u'app_data1', u'service': {u'__deferred': {u'uri': u'/balena/service(21667)'}, u'__id': 21667}, u'created_at': u'2018-03-16T19:21:49.662Z', u'__metadata': {u'type': u'', u'uri': u'/balena/service_environment_variable(12366)'}, u'value': u'app_data_value', u'id': 12366}, {u'name': u'app_front', u'service': {u'__deferred': {u'uri': u'/balena/service(21669)'}, u'__id': 21669}, u'created_at': u'2018-03-16T19:22:06.955Z', u'__metadata': {u'type': u'', u'uri': u'/balena/service_environment_variable(12367)'}, u'value': u'front_value', u'id': 12367}] """ # TODO: pine client for python raw_query = '$filter=service/any(s:s/application%20eq%20{app_id})'.format(app_id=app_id) return self.base_request.request( 'service_environment_variable', 'GET', raw_query=raw_query, endpoint=self.settings.get('pine_endpoint') )['d'] def create(self, app_id, service_name, env_var_name, value): """ Create a service environment variable for application. Args: app_id (str): application id. service_name(str): service name. env_var_name (str): environment variable name. value (str): environment variable value. Returns: str: new service environment variable info. Examples: >>> balena.models.environment_variables.service_environment_variable.create('1005160', 'proxy', 'app_proxy', 'test value') {"id":12444,"created_at":"2018-03-18T09:34:09.144Z","service":{"__deferred":{"uri":"/balena/service(21668)"},"__id":21668},"name":"app_proxy","value":"test value","__metadata":{"uri":"/balena/service_environment_variable(12444)","type":""}} """ if not _is_valid_env_var_name(env_var_name): raise exceptions.InvalidParameter('env_var_name', env_var_name) services = self.service.get_all_by_application(app_id) service_id = [i['id'] for i in services if i['service_name'] == service_name] data = { 'name': env_var_name, 'value': value, 'service': service_id } return json.loads(self.base_request.request( 'service_environment_variable', 'POST', data=data, endpoint=self.settings.get('pine_endpoint') ).decode('utf-8')) def update(self, var_id, value): """ Update a service environment variable value for application. Args: var_id (str): service environment variable id. value (str): new service environment variable value. Examples: >>> balena.models.environment_variables.service_environment_variable.update('12444', 'new test value') 'OK' """ params = { 'filter': 'id', 'eq': var_id } data = { 'value': value } return self.base_request.request( 'service_environment_variable', 'PATCH', params=params, data=data, endpoint=self.settings.get('pine_endpoint') ) def remove(self, var_id): """ Remove service environment variable. Args: var_id (str): service environment variable id. Examples: >>> balena.models.environment_variables.service_environment_variable.remove('12444') 'OK' """ params = { 'filter': 'id', 'eq': var_id } return self.base_request.request( 'service_environment_variable', 'DELETE', params=params, endpoint=self.settings.get('pine_endpoint') )
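# --- Illustrative sketch (not part of the SDK) --------------------------------
# The GET helpers above assemble raw OData queries by hand; this small helper
# only shows the shape of the $filter string used by the device service
# variable get_all() above. The helper name and the device id in the doctest
# are made up for illustration.
def _example_device_service_var_filter(device_id):
    """Return the raw $filter scoping variables to one device's service installs.

    >>> _example_device_service_var_filter(115792)
    '$filter=service_install/any(d:d/device%20eq%20115792)'
    """
    return '$filter=service_install/any(d:d/device%20eq%20{0})'.format(device_id)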
py
1a3a00fbec16de609a5235e9e675dda55c60175a
from django.contrib.auth.models import AnonymousUser from core.models.group import get_user_group from core.models.project import Project from rest_framework import serializers class ProjectsField(serializers.Field): def to_representation(self, project_mgr): request_user = self.parent.request_user if isinstance(request_user, AnonymousUser): return None try: group = get_user_group(request_user.username) projects = project_mgr.filter(owner=group) # Modifications to how 'project' should be displayed here: return [p.uuid for p in projects] except Project.DoesNotExist: return None def to_internal_value(self, data, files, field_name, into): value = data.get(field_name) if value is None: return related_obj = self.parent.instance user = self.parent.request_user group = get_user_group(user.username) # Retrieve the New Project(s) if isinstance(value, list): project_id = value[0] else: project_id = value new_project = Project.objects.get(id=project_id, owner=group) related_obj.project = new_project related_obj.save() # Modifications to how 'project' should be displayed here: into[field_name] = project_id
py
1a3a0144d498abd8f68e30cd59021031caf80a40
"""Config flow for Came Eti Domo integration.""" import logging import voluptuous as vol from homeassistant import config_entries, core, exceptions from .const import DOMAIN, CONF_HOST, CONF_USERNAME, CONF_PASSWORD # pylint:disable=unused-import from eti_domo import Domo, ServerNotFound _LOGGER = logging.getLogger(__name__) # TODO adjust the data schema to the data that you need DATA_SCHEMA = vol.Schema({CONF_HOST: str, CONF_USERNAME: str, CONF_PASSWORD: str}) async def validate_input(hass: core.HomeAssistant, data): """Validate the user input allows us to connect. Data has the keys from DATA_SCHEMA with values provided by the user. """ # TODO validate the data can be used to set up a connection. # If your PyPI package is not built with async, pass your methods # to the executor: # await hass.async_add_executor_job( # your_validate_func, data["username"], data["password"] # ) # Create an object representing the eti/domo with the host ip hub = None try: hub = Domo(data["host"]) except ServerNotFound: raise CannotConnect # login to the server if not hub.login(data["username"], data['password']): raise InvalidAuth # save the Domo object containing the client id session hass.data[DOMAIN] = {} hass.data[DOMAIN]["hub"] = hub # search for the unique id of the server server_info = hub.list_request(Domo.available_commands['features']) serial = server_info['serial'] #_LOGGER.error("Server host %s", hub.host, exc_info=1) # Return info that you want to store in the config entry. return {"title": serial} class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Handle a config flow for Came Eti Domo.""" VERSION = 1 # TODO pick one of the available connection classes in homeassistant/config_entries.py #CONN_CLASS_UNKNOWN oppure CONN_CLASS_LOCAL_PUSH oppure CONN_CLASS_LOCAL_POLL CONNECTION_CLASS = config_entries.CONN_CLASS_UNKNOWN def __init__(self): """Initialize the Config flow.""" self.config = None async def async_step_user(self, user_input=None): """Handle the initial step.""" errors = {} if user_input is not None: try: # save user_input into the config parameter self.config = { CONF_HOST: user_input[CONF_HOST], CONF_USERNAME: user_input[CONF_USERNAME], CONF_PASSWORD: user_input[CONF_PASSWORD] } # validate user input and login info = await validate_input(self.hass, user_input) # set unique id await self.async_set_unique_id(info['title']) self._abort_if_unique_id_configured() return self.async_create_entry(title=info['title'], data=user_input) except CannotConnect: errors["base"] = "cannot_connect" except InvalidAuth: errors["base"] = "invalid_auth" except Exception: # pylint: disable=broad-except _LOGGER.exception("Unexpected exception") errors["base"] = "unknown" return self.async_show_form( step_id="user", data_schema=DATA_SCHEMA, errors=errors ) class CannotConnect(exceptions.HomeAssistantError): """Error to indicate we cannot connect.""" class InvalidAuth(exceptions.HomeAssistantError): """Error to indicate there is invalid auth."""
py
1a3a01d3322185997c48ce9f3da766c079b01fd8
from pytest_factoryboy import register from chemreg.compound.tests.factories import DefinedCompoundFactory from chemreg.substance.tests.factories import ( QCLevelsTypeFactory, RelationshipTypeFactory, SourceFactory, SubstanceFactory, SubstanceRelationshipFactory, SubstanceTypeFactory, SynonymFactory, SynonymQualityFactory, SynonymTypeFactory, ) register(DefinedCompoundFactory) register(QCLevelsTypeFactory) register(RelationshipTypeFactory) register(SourceFactory) register(SubstanceFactory) register(SubstanceRelationshipFactory) register(SubstanceTypeFactory) register(SynonymFactory) register(SynonymQualityFactory) register(SynonymTypeFactory)
py
1a3a04cdc12748151286dc5d6b3600c82e734c52
from __future__ import absolute_import import urlparse import boto3 class S3DirectoryGenerator(object): def __init__(self, s3_url): parsed_s3_url = urlparse.urlparse(s3_url) if parsed_s3_url.scheme != 's3': raise SyntaxError('Invalid S3 scheme') self.bucket_name = parsed_s3_url.netloc self.bucket_path = parsed_s3_url.path[1:] if parsed_s3_url.path.startswith('/') else parsed_s3_url.path bucket_path_split = self.bucket_path.split('/') try: client = boto3.client('s3') region = client.get_bucket_location(Bucket=self.bucket_name)['LocationConstraint'] except: region=None s3_connection = boto3.resource('s3', region_name=region) self.bucket = s3_connection.Bucket(self.bucket_name) if bucket_path_split[-1] == '': # directory listing self.strip_length = len(self.bucket_path) else: # prefix listing self.strip_length = len('/'.join(bucket_path_split[:-1])) def __iter__(self): return self.generator() def generator(self): for o in self.bucket.objects.filter(Prefix=self.bucket_path): key = o.key[self.strip_length:] # S3 doesn't really have a concept of dirs. The convention is '/' is path separator, we do the same path = key.split('/') if path[0] == '' and not self.bucket_path.endswith('/'): # we assume the S3 prefix is a directory that wasn't terminated with a '/' path.pop(0) yield (path, o.size)
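# --- Illustrative usage (the bucket URL below is a placeholder) ----------------
# S3DirectoryGenerator yields (path_components, size) tuples for every object
# under the given S3 prefix; this sketch just prints a flat listing of them.
if __name__ == '__main__':
    for path, size in S3DirectoryGenerator('s3://example-bucket/some/prefix/'):
        print('{0} {1} bytes'.format('/'.join(path), size))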
py
1a3a04ce26fe17505174aab30b24e6085f3624cc
# qubit number=4 # total number=40 import cirq import qiskit from qiskit import IBMQ from qiskit.providers.ibmq import least_busy from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister from qiskit import BasicAer, execute, transpile from pprint import pprint from qiskit.test.mock import FakeVigo from math import log2 import numpy as np import networkx as nx def bitwise_xor(s: str, t: str) -> str: length = len(s) res = [] for i in range(length): res.append(str(int(s[i]) ^ int(t[i]))) return ''.join(res[::-1]) def bitwise_dot(s: str, t: str) -> str: length = len(s) res = 0 for i in range(length): res += int(s[i]) * int(t[i]) return str(res % 2) def build_oracle(n: int, f) -> QuantumCircuit: # implement the oracle O_f # NOTE: use multi_control_toffoli_gate ('noancilla' mode) # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate controls = QuantumRegister(n, "ofc") target = QuantumRegister(1, "oft") oracle = QuantumCircuit(controls, target, name="Of") for i in range(2 ** n): rep = np.binary_repr(i, n) if f(rep) == "1": for j in range(n): if rep[j] == "0": oracle.x(controls[j]) oracle.mct(controls, target[0], None, mode='noancilla') for j in range(n): if rep[j] == "0": oracle.x(controls[j]) # oracle.barrier() return oracle def make_circuit(n:int,f) -> QuantumCircuit: # circuit begin input_qubit = QuantumRegister(n,"qc") classical = ClassicalRegister(n, "qm") prog = QuantumCircuit(input_qubit, classical) prog.h(input_qubit[3]) # number=19 prog.cz(input_qubit[0],input_qubit[3]) # number=20 prog.h(input_qubit[3]) # number=21 prog.cx(input_qubit[0],input_qubit[3]) # number=23 prog.x(input_qubit[3]) # number=24 prog.cx(input_qubit[0],input_qubit[3]) # number=25 prog.cx(input_qubit[0],input_qubit[3]) # number=17 prog.rx(-0.48380526865282825,input_qubit[3]) # number=26 prog.h(input_qubit[1]) # number=2 prog.y(input_qubit[3]) # number=18 prog.h(input_qubit[2]) # number=3 prog.h(input_qubit[3]) # number=4 prog.y(input_qubit[3]) # number=12 prog.h(input_qubit[0]) # number=5 oracle = build_oracle(n-1, f) prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]]) prog.h(input_qubit[1]) # number=6 prog.h(input_qubit[2]) # number=7 prog.h(input_qubit[1]) # number=34 prog.cz(input_qubit[0],input_qubit[1]) # number=35 prog.h(input_qubit[1]) # number=36 prog.cx(input_qubit[0],input_qubit[1]) # number=31 prog.cx(input_qubit[0],input_qubit[1]) # number=37 prog.x(input_qubit[1]) # number=38 prog.cx(input_qubit[0],input_qubit[1]) # number=39 prog.cx(input_qubit[0],input_qubit[1]) # number=33 prog.cx(input_qubit[0],input_qubit[1]) # number=30 prog.h(input_qubit[3]) # number=8 prog.h(input_qubit[0]) # number=9 prog.y(input_qubit[2]) # number=10 prog.x(input_qubit[2]) # number=22 prog.y(input_qubit[2]) # number=11 prog.x(input_qubit[0]) # number=13 prog.x(input_qubit[0]) # number=14 # circuit end for i in range(n): prog.measure(input_qubit[i], classical[i]) return prog if __name__ == '__main__': a = "111" b = "0" f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b) prog = make_circuit(4,f) IBMQ.load_account() provider = IBMQ.get_provider(hub='ibm-q') provider.backends() backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator 
and x.status().operational == True)) sample_shot =8000 info = execute(prog, backend=backend, shots=sample_shot).result().get_counts() backend = FakeVigo() circuit1 = transpile(prog,backend,optimization_level=2) writefile = open("../data/startQiskit_QC2762.csv","w") print(info,file=writefile) print("results end", file=writefile) print(circuit1.__len__(),file=writefile) print(circuit1,file=writefile) writefile.close()
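# --- Optional local check (illustration only; not part of the original run) ----
# The submission above targets real IBMQ hardware and needs an account; as a
# quick sanity check the same circuit can be run on the local BasicAer
# simulator instead. The shot count below is an arbitrary choice.
def run_on_local_simulator(shots=1024):
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    circuit = make_circuit(4, f)
    simulator = BasicAer.get_backend('qasm_simulator')
    return execute(circuit, simulator, shots=shots).result().get_counts()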
py
1a3a05673bf823d9f7e33dc08e1c4e62e2e43568
import json import random import sys from allennlp_reasoning_explainqa.common.constants import CORRECT_OPTION_TAG from allennlp_reasoning_explainqa.training.metrics.confusion_matrix import ( F1MeasureCustomRetrievalEval, ) from allennlp_reasoning_explainqa.training.metrics.explanation_eval import ( ExplanationEval, ) # Sets random seed to a nothing-up-my-sleeve number so that we have # deterministic evaluation scores. random.seed(12345) # Sets random seed to a nothing-up-my-sleeve number so that we have # deterministic evaluation scores. random.seed(12345) def evaluate(prediction_filename, label_filename): chainid_to_label = json.load(open(label_filename, "r")) chain_count = len(chainid_to_label) predictions_lines = open(prediction_filename, "r").readlines() predictions = [json.loads(row) for row in predictions_lines] prediction_count = len(predictions) if chain_count != prediction_count: print( f"Label file {label_filename} has {chain_count} chains, but prediction file {prediction_filename} has {prediction_count} predictions. These must be equal." ) sys.exit(1) f1eval = F1MeasureCustomRetrievalEval(pos_label=1) explanation_eval = ExplanationEval() chain_ids_covered = [] cnt = 0 for row in predictions: assert "score" in row, "Prediction should contain field score" assert "chain_id" in row, "Prediction should contain field chain_id" score = row["score"] chain_id = row["chain_id"] qid = chain_id.strip().split("_")[0] print("qid,chain_id,score = ", qid, chain_id, score) gtlabel = chainid_to_label[chain_id] f1eval(int(gtlabel), score) explanation_eval(qid, CORRECT_OPTION_TAG, int(gtlabel), score) chain_ids_covered.append(chain_id) cnt += 1 assert len(chain_ids_covered) == len( chainid_to_label ), "Found {} chains but expected {} chains".format( len(chain_ids_covered), len(chainid_to_label) ) binclf_performance = f1eval.get_metric(reset=True) print("f1.get_metric() = ", binclf_performance) explanation_performance = explanation_eval.get_metric(reset=True) print("explanation_eval.get_metric() = ", explanation_performance) final_metrics = { "auc_roc": binclf_performance["auc_roc"], "explainP1": explanation_performance["explainP1"], "explainNDCG": explanation_performance["explainNDCG"], } print("=" * 32) print(": auc_roc = ", binclf_performance["auc_roc"]) print(": P1 = ", explanation_performance["explainP1"]) print(": explainNDCG = ", explanation_performance["explainNDCG"]) print("=" * 32) return final_metrics if __name__ == "__main__": prediction_filename = sys.argv[1] label_filename = sys.argv[2] metrics_filename = sys.argv[3] print( f"Evaluating prediction file {prediction_filename} with label file {label_filename}" ) metrics = evaluate(prediction_filename, label_filename) print(f"Writing final metrics to file: {metrics_filename}") json.dump(metrics, open(metrics_filename, "w"))
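# --- Illustrative input format (file names and contents below are made up) -----
# evaluate() expects a JSONL predictions file with one {"chain_id", "score"}
# object per line and a JSON label file mapping every chain id to its gold
# label, with the question id encoded before the first underscore:
#
#   predictions.jsonl:
#     {"chain_id": "q1_chain0", "score": 0.87}
#     {"chain_id": "q1_chain1", "score": 0.12}
#
#   labels.json:
#     {"q1_chain0": 1, "q1_chain1": 0}
#
# The script is then invoked as:
#   python evaluate.py predictions.jsonl labels.json metrics.json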
py
1a3a0567e4d47dc591998a824a7b54e8551c06f1
import matplotlib.pyplot as plt import pandas as pd from rich import pretty, print from rich.progress import BarColumn, Progress from sklearn.metrics import ( accuracy_score, auc, classification_report, f1_score, plot_confusion_matrix, roc_auc_score, roc_curve, ) from sklearn.neural_network import MLPClassifier from sklearn.preprocessing import LabelBinarizer, LabelEncoder import utils def draw_roc(y_test, y_pred): lb = LabelBinarizer() lb.fit(y_test) lb.classes_.tolist() fpr = dict() tpr = dict() roc_auc = dict() by_test = lb.transform(y_test) by_pred = lb.transform(y_pred) for i in range(4): fpr[i], tpr[i], _ = roc_curve(by_test[:, i], by_pred[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) roc_auc = roc_auc_score(by_test, by_pred, average=None) plt.figure(figsize=(8, 5)) for i in range(4): plt.plot( fpr[i], tpr[i], label="%s ROC curve (area = %0.2f)" % (lb.classes_.tolist()[i], roc_auc[i]), ) plt.plot([0, 1], [0, 1], "k--") plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.title("Single Hidden Layer Neural Network Roc-Curve") plt.xlabel("False Positive Rate", fontsize=10) plt.ylabel("True Positive Rate", fontsize=10) plt.tick_params(axis="both", which="major", labelsize=12) plt.legend(loc="lower right", fontsize=7, frameon=False) plt.show() def draw_confusion_matrix(Clf, X, y): titles_options = [ ("Confusion matrix, without normalization", None), ("Neural network confusion matrix", "true"), ] # colors: Wistia too yellow for title, normalize in titles_options: disp = plot_confusion_matrix(Clf, X, y, cmap="PuBuGn", normalize=normalize) disp.ax_.set_title(title) plt.show() def execute_and_report(learn_rate, acti, current_params): clf = MLPClassifier( activation=acti, learning_rate_init=learn_rate, random_state=5213890, hidden_layer_sizes=current_params, ) clf.fit(train_x, train_y) # Apply on the training set print("Training set:") y_pred = clf.predict(train_x) print(classification_report(train_y, y_pred)) # Apply on the test set and evaluate the performance y_pred = clf.predict(test_x) print("Test set:") print(classification_report(test_y, y_pred)) acc = accuracy_score(test_y, y_pred) * 100 f1 = f1_score(test_y, y_pred, average="weighted") * 100 # draw draw draw_confusion_matrix(clf, test_x, test_y) draw_roc(test_y, y_pred) # plt.plot(clf.loss_curve_) # plt.show() # report return { "Params": f"{acti}, {learn_rate}, {current_params}", "accuracy %": round(acc, 2), "F1 weighted %": round(f1, 2), } pretty.install() pd.set_option("display.max_rows", None) # DATASET train_x, train_y, test_x, test_y = utils.load_tracks_xyz( buckets="discrete", extractclass=("album", "type"), splits=2 ).values() # feature to reshape label_encoders = dict() column2encode = [ ("track", "language_code"), ("album", "listens"), ("track", "license"), ("album", "comments"), ("album", "date_created"), ("album", "favorites"), ("artist", "comments"), ("artist", "date_created"), ("artist", "favorites"), ("track", "comments"), ("track", "date_created"), ("track", "duration"), ("track", "favorites"), ("track", "interest"), ("track", "listens"), ] for col in column2encode: le = LabelEncoder() le.fit(test_x[col]) train_x[col] = le.fit_transform(train_x[col]) test_x[col] = le.fit_transform(test_x[col]) label_encoders[col] = le le = LabelEncoder() le.fit(train_y) test_y = le.fit_transform(test_y) train_y = le.fit_transform(train_y) class_name = ("album", "type") # Preparation count = 0 reports = pd.DataFrame(columns=["Params", "accuracy %", "F1 weighted %"]) params = [ { "activations": "identity", "learning_rate_inits": 0.001, 
"hidden_layer_sizes": (40, 40), }, { "activations": "identity", "learning_rate_inits": 0.001, "hidden_layer_sizes": (40, 20, 8), # old single layer "learning_rate_inits": 0.02, # old single layer "hidden_layer_sizes": (40,), }, ] testing_params = [params[-1]] activations = ["identity", "logistic", "tanh", "relu"] learning_rate_inits = [0.01, 0.001, 0.02] # progress reporting init progress = Progress( "[progress.description]{task.description}", BarColumn(), "[progress.percentage]{task.percentage:>3.0f}%", "{task.completed} of {task.total}", ) with progress: # adjust len if needed task_layers = progress.add_task("[red]Building…", total=len(params) * 2) for best_params in params: learn_rate = best_params["learning_rate_inits"] acti = best_params["activations"] hidd = best_params["hidden_layer_sizes"] row = execute_and_report(learn_rate, acti, hidd) reports = reports.append(row, ignore_index=True) count += 1 progress.advance(task_layers) # ------- switch up datasets: put in the 10-feature dataframe train_x, train_y, test_x, test_y = utils.load_tracks_xyz( buckets="discrete", extractclass=("album", "type"), splits=2, small=True ).values() # feature to reshape label_encoders = dict() column2encode = [ ("track", "duration"), ("track", "interest"), ("track", "listens"), ] for col in column2encode: le = LabelEncoder() le.fit(test_x[col]) train_x[col] = le.fit_transform(train_x[col]) test_x[col] = le.fit_transform(test_x[col]) label_encoders[col] = le le = LabelEncoder() le.fit(train_y) test_y = le.fit_transform(test_y) train_y = le.fit_transform(train_y) class_name = ("album", "type") # rerun neural networks for best_params in params: learn_rate = best_params["learning_rate_inits"] acti = best_params["activations"] hidd = best_params["hidden_layer_sizes"] row = execute_and_report(learn_rate, acti, hidd) reports = reports.append(row, ignore_index=True) count += 1 progress.advance(task_layers) # end switching up datasets ------- # results print(reports.sort_values(by=["accuracy %", "F1 weighted %"], ascending=False)) print(f"I have built {count} neural networks")
py
1a3a06e77f806129d4103ad520258ddb7da0dc3f
import secrets

from flask import render_template, request, redirect, flash

from app import app
from .rvp import pvr
from .algo import final


@app.route("/", methods=["GET", "POST"])
def index():
    secret_key = secrets.token_hex(16)
    app.config["SECRET_KEY"] = secret_key

    if request.method == "POST":
        req = request.form
        percentile = req["percentile"]
        rank = req["rank"]
        state = req["state"]
        pwd = req["pwd"]
        gender = req["gender"]
        category = req["category"]
        sortby = str(req["sortby"])

        if percentile == "" and rank == "":
            flash("Please enter either your Rank or your Percentile", 'error')
            return redirect(request.url)

        if rank == "":
            ranks = pvr(float(percentile), pwd, category)
            ranks = int(ranks)
            if ranks <= 0:
                ranks = 2
            result = final(ranks, float(percentile), category, state, gender, pwd, sortby)
        if rank:
            result = final(int(rank), percentile, category, state, gender, pwd, sortby)
            ranks = rank

        return render_template("public/result.html", ranks=ranks, category=category,
                               tables=[result.to_html(classes='data')],
                               titles=result.columns.values)

    return render_template("public/index.html")
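# --- Illustrative request (field values below are made up) ---------------------
# The POST branch of index() reads these form fields; a quick local check with
# the Flask test client could look like this (the helper is not invoked here).
def _example_post():
    client = app.test_client()
    return client.post("/", data={
        "percentile": "97.5",
        "rank": "",
        "state": "Delhi",
        "pwd": "No",
        "gender": "Male",
        "category": "OPEN",
        "sortby": "rank",
    })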
py
1a3a0858c77be5b83504b7273113941a5b485606
from __future__ import print_function from builtins import object from pyethapp.eth_protocol import ETHProtocol, TransientBlockBody from devp2p.service import WiredService from devp2p.protocol import BaseProtocol from devp2p.app import BaseApp from ethereum.tools import tester import rlp class PeerMock(object): packets = [] config = dict() def send_packet(self, packet): self.packets.append(packet) def setup(): peer = PeerMock() proto = ETHProtocol(peer, WiredService(BaseApp())) proto.service.app.config['eth'] = dict(network_id=1337) chain = tester.Chain() cb_data = [] def cb(proto, **data): cb_data.append((proto, data)) return peer, proto, chain, cb_data, cb def test_basics(): peer, proto, chain, cb_data, cb = setup() assert isinstance(proto, BaseProtocol) d = dict() d[proto] = 1 assert proto in d assert d[proto] == 1 assert not proto proto.start() assert proto def test_status(): peer, proto, chain, cb_data, cb = setup() genesis = head = chain.chain.get_descendants(chain.chain.get_block_by_number(0))[-1] # test status proto.send_status( chain_difficulty=chain.chain.get_score(head), chain_head_hash=head.hash, genesis_hash=genesis.hash ) packet = peer.packets.pop() proto.receive_status_callbacks.append(cb) proto._receive_status(packet) _p, _d = cb_data.pop() assert _p == proto assert isinstance(_d, dict) assert _d['chain_difficulty'] == chain.chain.get_score(head) print(_d) assert _d['chain_head_hash'] == head.hash assert _d['genesis_hash'] == genesis.hash assert 'eth_version' in _d assert 'network_id' in _d def test_blocks(): peer, proto, chain, cb_data, cb = setup() # test blocks chain.mine(number_of_blocks=2) assert chain.block.number == 3 # monkey patch to make "blocks" attribute available chain.blocks = chain.chain.get_descendants(chain.chain.get_block_by_number(0)) proto.send_blockbodies(*chain.blocks) packet = peer.packets.pop() assert len(rlp.decode(packet.payload)) == 3 def list_cb(proto, blocks): # different cb, as we expect a list of blocks cb_data.append((proto, blocks)) proto.receive_blockbodies_callbacks.append(list_cb) proto._receive_blockbodies(packet) _p, blocks = cb_data.pop() assert isinstance(blocks, tuple) for block in blocks: assert isinstance(block, TransientBlockBody) assert isinstance(block.transactions, tuple) assert isinstance(block.uncles, tuple) # assert that transactions and uncles have not been decoded assert len(block.transactions) == 0 assert len(block.uncles) == 0 # newblock approximate_difficulty = chain.blocks[-1].difficulty * 3 proto.send_newblock(block=chain.blocks[-1], chain_difficulty=approximate_difficulty) packet = peer.packets.pop() proto.receive_newblock_callbacks.append(cb) proto._receive_newblock(packet) _p, _d = cb_data.pop() assert 'block' in _d assert 'chain_difficulty' in _d assert _d['chain_difficulty'] == approximate_difficulty assert _d['block'].header == chain.blocks[-1].header assert isinstance(_d['block'].transactions, tuple) assert isinstance(_d['block'].uncles, tuple) # assert that transactions and uncles have not been decoded assert len(_d['block'].transactions) == 0 assert len(_d['block'].uncles) == 0
py
1a3a08fd0e1ac78775f68f08333c8b70ed10c7bb
"""Provide the Message class.""" from typing import TYPE_CHECKING, Any, Dict from ...const import API_PATH from .base import RedditBase from .mixins import FullnameMixin, InboxableMixin, ReplyableMixin from .redditor import Redditor from .subreddit import Subreddit if TYPE_CHECKING: # pragma: no cover from ... import Reddit class Message(InboxableMixin, ReplyableMixin, FullnameMixin, RedditBase): """A class for private messages. **Typical Attributes** This table describes attributes that typically belong to objects of this class. Since attributes are dynamically provided (see :ref:`determine-available-attributes-of-an-object`), there is not a guarantee that these attributes will always be present, nor is this list necessarily complete. ======================= ============================================================ Attribute Description ======================= ============================================================ ``author`` Provides an instance of :class:`.Redditor`. ``body`` The body of the message, as Markdown. ``body_html`` The body of the message, as HTML. ``created_utc`` Time the message was created, represented in `Unix Time`_. ``dest`` Provides an instance of :class:`.Redditor`. The recipient of the message. ``id`` The ID of the message. ``name`` The full ID of the message, prefixed with ``t4_``. ``subject`` The subject of the message. ``was_comment`` Whether or not the message was a comment reply. ======================= ============================================================ .. _Unix Time: https://en.wikipedia.org/wiki/Unix_time """ STR_FIELD = "id" @classmethod def parse(cls, data: Dict[str, Any], reddit: "Reddit"): """Return an instance of Message or SubredditMessage from ``data``. :param data: The structured data. :param reddit: An instance of :class:`.Reddit`. """ if data["author"]: data["author"] = Redditor(reddit, data["author"]) if data["dest"].startswith("#"): data["dest"] = Subreddit(reddit, data["dest"][1:]) else: data["dest"] = Redditor(reddit, data["dest"]) if data["replies"]: replies = data["replies"] data["replies"] = reddit._objector.objectify(replies["data"]["children"]) else: data["replies"] = [] if data["subreddit"]: data["subreddit"] = Subreddit(reddit, data["subreddit"]) return SubredditMessage(reddit, _data=data) return cls(reddit, _data=data) @property def _kind(self) -> str: """Return the class's kind.""" return self._reddit.config.kinds["message"] def __init__(self, reddit: "Reddit", _data: Dict[str, Any]): """Construct an instance of the Message object.""" super().__init__(reddit, _data=_data) self._fetched = True def delete(self): """Delete the message. .. note:: Reddit does not return an indication of whether or not the message was successfully deleted. For example, to delete the most recent message in your inbox: .. code-block:: python next(reddit.inbox.all()).delete() """ self._reddit.post(API_PATH["delete_message"], data={"id": self.fullname}) class SubredditMessage(Message): """A class for messages to a subreddit. **Typical Attributes** This table describes attributes that typically belong to objects of this class. Since attributes are dynamically provided (see :ref:`determine-available-attributes-of-an-object`), there is not a guarantee that these attributes will always be present, nor is this list necessarily complete. 
======================= ============================================================ Attribute Description ======================= ============================================================ ``author`` Provides an instance of :class:`.Redditor`. ``body`` The body of the message, as Markdown. ``body_html`` The body of the message, as HTML. ``created_utc`` Time the message was created, represented in `Unix Time`_. ``dest`` Provides an instance of :class:`.Redditor`. The recipient of the message. ``id`` The ID of the message. ``name`` The full ID of the message, prefixed with ``t4_``. ``subject`` The subject of the message. ``subreddit`` If the message was sent from a subreddit, provides an instance of :class:`.Subreddit`. ``was_comment`` Whether or not the message was a comment reply. ======================= ============================================================ .. _Unix Time: https://en.wikipedia.org/wiki/Unix_time """ def mute(self): """Mute the sender of this SubredditMessage. For example, to mute the sender of the first SubredditMessage in the authenticated users' inbox: .. code-block:: python from praw.models import SubredditMessage msg = next( message for message in reddit.inbox.all() if isinstance(message, SubredditMessage) ) msg.mute() """ self._reddit.post(API_PATH["mute_sender"], data={"id": self.fullname}) def unmute(self): """Unmute the sender of this SubredditMessage. For example, to unmute the sender of the first SubredditMessage in the authenticated users' inbox: .. code-block:: python from praw.models import SubredditMessage msg = next( message for message in reddit.inbox.all() if isinstance(message, SubredditMessage) ) msg.unmute() """ self._reddit.post(API_PATH["unmute_sender"], data={"id": self.fullname})
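# --- Illustrative sketch (not part of PRAW itself) ------------------------------
# The docstrings above operate on single messages; this helper just sorts an
# inbox stream into plain messages and subreddit messages. `reddit` is assumed
# to be an authenticated praw.Reddit instance created elsewhere, and the limit
# of 25 is arbitrary.
def split_inbox(reddit):
    plain, from_subreddits = [], []
    for item in reddit.inbox.all(limit=25):
        if isinstance(item, SubredditMessage):
            from_subreddits.append(item)
        elif isinstance(item, Message):
            plain.append(item)
    return plain, from_subreddits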
py
1a3a0910ec1fde5878856e9d3e272b88e786da8f
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Cmor(AutotoolsPackage): """Climate Model Output Rewriter is used to produce CF-compliant netCDF files. The structure of the files created by the library and the metadata they contain fulfill the requirements of many of the climate community's standard model experiments.""" homepage = "http://cmor.llnl.gov" url = "https://github.com/PCMDI/cmor/archive/3.1.2.tar.gz" version('3.3.0', 'cfdeeddab1aedb823e26ec38723bd67e') version('3.2.0', 'b48105105d4261012c19cd65e89ff7a6') version('3.1.2', '72f7227159c901e4bcf80d2c73a8ce77') variant('fortran', default=True, description='Enable Fortran API') variant('python', default=False, description='Enable PYTHON support') depends_on('uuid') depends_on('netcdf') depends_on('udunits2') depends_on('hdf5@:1.8.19') extends('python', when='+python') depends_on('python@:2.8', when='+python') depends_on('py-numpy', type=('build', 'run'), when='+python') @run_before('configure') def validate(self): if '+fortran' in self.spec and not self.compiler.fc: msg = 'cannot build a fortran variant without a fortran compiler' raise RuntimeError(msg) def configure_args(self): extra_args = ['--disable-debug'] if '+fortran' in self.spec: extra_args.append('--enable-fortran') else: extra_args.append('--disable-fortran') return extra_args def install(self, spec, prefix): make('install') if '+python' in spec: setup_py('install', '--prefix=' + prefix)
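# --- Illustrative command lines (examples only, not part of the recipe) --------
# The variants declared above are chosen on the Spack command line, e.g.:
#
#   spack install cmor              # default: +fortran, without python bindings
#   spack install cmor ~fortran     # C library only
#   spack install cmor +python      # also build and install the python bindings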
py
1a3a09e87ab9d7312dbdf73408d35f225ae82c21
'''
https://blog.csdn.net/xuzhexing/article/details/90729390
https://blog.csdn.net/weixin_44580210/article/details/90314878
Particle filter localization can be more accurate than relying on the raw observations alone.
Steps:
1. Initialization: represent the motion state with a large number of particles spread uniformly over the workspace.
2. Prediction: push every particle through the state-transition (motion) equation to obtain predicted particles;
   this covers the particle's velocity and yaw rate as well as its x/y position, i.e. a high-dimensional prediction.
3. Correction: score every predicted particle, here by the distance between the (noisy) observation at the next
   time step and the particle's prediction. The smaller the distance error, the larger the particle's weight;
   a Gaussian can be used to map the distance error to the weight.
4. Resampling: normalize all particle weights and reselect the particles, keeping the high-weight ones while still
   retaining a small share of low-weight ones. Common schemes:
   1. multinomial resampling  2. residual resampling  3. stratified resampling  4. systematic resampling
   The new problem resampling brings is that high-weight particles produce many offspring while low-weight ones
   produce few or none, so the resampled particle set loses diversity and no longer approximates the posterior
   density well. There are several remedies; the simplest is to add many more particles, which however inflates
   the computation sharply. Other methods can be found in the literature and are not covered here.
5. Update: replace the old particles with the resampled ones, use their mean position as the particle-filter
   estimate, and repeat from step 2.
'''
import numpy as np
import math
import matplotlib.pyplot as plt

'''
Assumptions:
measurement noise (std dev) of the velocity: 0.5
measurement noise (std dev) of the yaw rate: 5 degrees ~ 0.087 rad
range error of the sensor observing the RF markers: 0.5 m
'''


def guassian_noise(sigma):  # sigma is used as the standard deviation here
    y = np.random.randn() * sigma
    return y


v_error = guassian_noise(0.5)
w_error = guassian_noise(0.087)
dist_error = guassian_noise(0.5)

RANGE_DITECT = 10  # maximum detection range
NP = 200
NTh = NP / 2.0  # Number of particle for re-sampling
T_max = 100.0
dt = 0.1
L = 2.5  # vehicle length


# Simplest motion model: from the current state [x y yaw]' and the velocity / yaw rate [v, w],
# compute the state at the next time step.
def motion_model(x, u):
    # F = np.array([[1.0, 0, 0],
    #               [0, 1.0, 0],
    #               [0, 0, 1.0]])  # identity state matrix, kept only for reference
    # print(x.shape)
    B = np.array([[dt * math.cos(x[2, 0]), 0],
                  [dt * math.sin(x[2, 0]), 0],
                  [0.0, dt]])
    x = x + B @ u
    return x


def dead_reckoning(x_true, u):
    u[0] += v_error  # add Gaussian noise to the velocity and yaw rate
    u[1] += w_error
    return motion_model(x_true, u)


def gauss_likelihood(x, sigma):
    p = 1.0 / math.sqrt(2.0 * math.pi * sigma ** 2) * \
        math.exp(-x ** 2 / (2 * sigma ** 2))
    return p


# From the previous true state and the measured velocity / yaw rate, produce the next true state,
# the RF landmark observations and the (noisy) velocity / yaw-rate measurement.
def observation(x_true, u, rf_id):
    x_true_real = motion_model(x_true, u)  # true state at this step from the constant-turn motion model
    z = np.zeros((0, 3))  # 0x3 array so observation rows can simply be stacked on
    for i in range(len(rf_id[:, 0])):
        dx = x_true[0, 0] - rf_id[i, 0]
        dy = x_true[1, 0] - rf_id[i, 1]
        dist = math.hypot(dx, dy)
        if dist < RANGE_DITECT:
            dist += dist_error  # add noise to the range to model the sensor error
            zi = np.array([[dist, rf_id[i, 0], rf_id[i, 1]]])
            z = np.vstack((z, zi))
    ud = np.array([[0.0, 0.0]]).T
    ud[0, 0] = u[0, 0] + v_error
    ud[1, 0] = u[1, 0] + w_error
    return x_true_real, z, ud


def re_sampling(px, pw):
    N_eff = 1.0 / (pw.dot(pw.T))[0, 0]  # effective particle number: 1 / (sum of squared weights)
    if N_eff < NTh:  # resample only when the effective particle number is too small
        w_cum = np.cumsum(pw)  # cumulative weights, e.g. [1,3,5] -> [1,4,9]; roulette-wheel style sampling
        base = np.arange(0.0, 1.0, 1 / NP)  # evenly spaced values in [0, 1) with step 1/NP
        re_sample_id = base + np.random.uniform(0, 1 / NP)  # add a uniform offset to get the sampling positions
        indexes = []
        ind = 0
        for ip in range(NP):
            while re_sample_id[ip] > w_cum[ind]:
                ind += 1
            indexes.append(ind)  # store the resampled indices
        px = px[:, indexes]
        pw = np.zeros((1, NP)) + 1.0 / NP  # init weight
    return px, pw


# Inputs: particles, weights, RF landmark observations and the (noisy) measured velocity / yaw rate.
def pf_localization(px, pw, z, ud):
    # Prediction: push every particle through the motion model using the measured velocity / yaw rate.
    for i in range(NP):
        x_pf_tmp = px[:, i:i + 1]  # state of this particle as a (3,1) column: x y yaw
        w_tmp = pw[0, i]  # weight of this particle
        x_pf_tmp = motion_model(x_pf_tmp, ud)  # predict the particle's next pose
        # Update the particle weight from the predicted vs. measured RF ranges
        # (the true RF range itself is not available).
        for j in range(len(z[:, 0])):
            dx = x_pf_tmp[0, 0] - z[j, 1]
            dy = x_pf_tmp[1, 0] - z[j, 2]
            pre_dist = math.hypot(dx, dy)
            dz = pre_dist - z[j, 0]
            w_tmp *= gauss_likelihood(dz, math.sqrt(0.2 * 0.2))  # Gaussian of the range residual as weight factor
        px[:, i] = x_pf_tmp[:, 0]  # update particle state
        pw[0, i] = w_tmp  # update particle weight
    # normalize the weights
    pw = pw / pw.sum()
    px, pw = re_sampling(px, pw)  # resampling
    x_pf = px @ pw.T  # particle-filter estimate; note: resampling is done before this step, unlike the reference
    return px, pw, x_pf


def main():
    print(__file__ + " start!!")
    time = 0.0

    # RF_ID positions [x, y], standing in for a set of landmarks at known positions
    rf_id = np.array([[10.0, 0.0],
                      [10.0, 10.0],
                      [0.0, 15.0],
                      [-5.0, 20.0]])

    # State Vector [x y yaw]'
    x_pf = np.zeros((3, 1))
    x_true = np.zeros((3, 1))  # 3x1

    # initialize the particles and their weights
    px = np.zeros((3, NP))  # particle store: one row each for x, y, yaw
    pw = np.zeros((1, NP)) + 1.0 / NP  # particle weights, initially uniform
    # x_dr = x_true  # Dead reckoning

    # history
    h_true = np.array([[x_true[0, 0], x_true[1, 0]]])  # history of the true x, y
    # h_dead_reckoning = np.array([[x_true[0, 0], x_true[1, 0]]])  # history of the dead-reckoning estimate
    h_pf = np.array([[x_pf[0, 0], x_pf[1, 0]]])  # history of the particle-filter estimate

    v = 1.0  # m/s
    yaw_rate = 0.1  # rad/s
    while time < T_max:
        time += dt
        u = np.array([[v, yaw_rate]]).T  # control input as a (2,1) column vector
        x_true_real, z, ud = observation(x_true, u, rf_id)
        x_true = x_true_real
        # x_dr = dead_reckoning(x_true)  # dead reckoning from the previous true state (unused here)
        px, pw, x_pf = pf_localization(px, pw, z, ud)
        h_true = np.vstack((h_true, np.array([[x_true[0, 0], x_true[1, 0]]])))  # log the true pose
        h_pf = np.vstack((h_pf, np.array([[x_pf[0, 0], x_pf[1, 0]]])))

        plt.cla()
        # for stopping simulation with the esc key.
        plt.gcf().canvas.mpl_connect(
            'key_release_event',
            lambda event: [exit(0) if event.key == 'escape' else None])
        plt.plot(h_true[:, 0], h_true[:, 1], '-g')
        plt.plot(h_pf[:, 0], h_pf[:, 1], '-b')
        plt.plot(rf_id[:, 0], rf_id[:, 1], '*r')
        for i in range(len(z[:, 0])):
            plt.plot([x_true[0, 0], z[i, 1]], [x_true[1, 0], z[i, 2]], "-k")
        plt.axis("equal")
        plt.grid(True)
        plt.pause(0.001)


if __name__ == '__main__':
    main()
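# --- Worked example (illustration only) -----------------------------------------
# gauss_likelihood() turns a range residual into a particle weight factor. With
# the 0.2 m measurement sigma used in pf_localization(), a particle whose
# predicted landmark range matches the measurement (residual 0.0) is weighted
# about 2.0, while one that is 0.5 m off gets roughly 0.09, i.e. more than
# twenty times less; the helper below just evaluates those two cases.
def _weighting_example():
    sigma = math.sqrt(0.2 * 0.2)
    return gauss_likelihood(0.0, sigma), gauss_likelihood(0.5, sigma)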
py
1a3a0a215b1d4e51524d517b1f165dac0e11024a
# coding: utf-8 """ Cisco Intersight OpenAPI specification. The Cisco Intersight OpenAPI specification. OpenAPI spec version: 1.0.9-1461 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class HyperflexProxySettingPolicyApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def hyperflex_proxy_setting_policies_get(self, **kwargs): """ Read a 'hyperflex.ProxySettingPolicy' resource. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.hyperflex_proxy_setting_policies_get(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param bool count: The $count query option allows clients to request a count of the matching resources. :param str inlinecount: The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response :param int top: The max number of documents to return. :param int skip: The number of documents to skip. :param str filter: Filter criteria for documents to return. A URI with a $filter System Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in $filter operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: $filter=Name eq 'Bob' $filter=Tags/any(t: t/Key eq 'Site') $filter=Tags/any(t: t/Key eq 'Site' and t/Value eq 'London') :param str select: Specifies a subset of properties to return. :param str orderby: Determines what values are used to order a collection of documents. :param str expand: Specify additional attributes or related documents to return. Supports only 'DisplayNames' attribute now. Query examples: $expand=DisplayNames :param str apply: Specify one or more transformation operations to perform aggregation on documents. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. Query examples: $apply=groupby((Model), aggregate($count as Total)) $apply=groupby((Model), aggregate(AvailableMemory with average as AverageAvailableMemory)) :param str at: Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for documents to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. 
The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: at=VersionType eq 'Configured' at=InterestedMos.Moid eq '5b5877e56c6730367acf46cd' :return: HyperflexProxySettingPolicyList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.hyperflex_proxy_setting_policies_get_with_http_info(**kwargs) else: (data) = self.hyperflex_proxy_setting_policies_get_with_http_info(**kwargs) return data def hyperflex_proxy_setting_policies_get_with_http_info(self, **kwargs): """ Read a 'hyperflex.ProxySettingPolicy' resource. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.hyperflex_proxy_setting_policies_get_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param bool count: The $count query option allows clients to request a count of the matching resources. :param str inlinecount: The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response :param int top: The max number of documents to return. :param int skip: The number of documents to skip. :param str filter: Filter criteria for documents to return. A URI with a $filter System Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in $filter operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: $filter=Name eq 'Bob' $filter=Tags/any(t: t/Key eq 'Site') $filter=Tags/any(t: t/Key eq 'Site' and t/Value eq 'London') :param str select: Specifies a subset of properties to return. :param str orderby: Determines what values are used to order a collection of documents. :param str expand: Specify additional attributes or related documents to return. Supports only 'DisplayNames' attribute now. Query examples: $expand=DisplayNames :param str apply: Specify one or more transformation operations to perform aggregation on documents. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. Query examples: $apply=groupby((Model), aggregate($count as Total)) $apply=groupby((Model), aggregate(AvailableMemory with average as AverageAvailableMemory)) :param str at: Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for documents to return. 
A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: at=VersionType eq 'Configured' at=InterestedMos.Moid eq '5b5877e56c6730367acf46cd' :return: HyperflexProxySettingPolicyList If the method is called asynchronously, returns the request thread. """ all_params = ['count', 'inlinecount', 'top', 'skip', 'filter', 'select', 'orderby', 'expand', 'apply', 'at'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method hyperflex_proxy_setting_policies_get" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'count' in params: query_params.append(('$count', params['count'])) if 'inlinecount' in params: query_params.append(('$inlinecount', params['inlinecount'])) if 'top' in params: query_params.append(('$top', params['top'])) if 'skip' in params: query_params.append(('$skip', params['skip'])) if 'filter' in params: query_params.append(('$filter', params['filter'])) if 'select' in params: query_params.append(('$select', params['select'])) if 'orderby' in params: query_params.append(('$orderby', params['orderby'])) if 'expand' in params: query_params.append(('$expand', params['expand'])) if 'apply' in params: query_params.append(('$apply', params['apply'])) if 'at' in params: query_params.append(('at', params['at'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api('/hyperflex/ProxySettingPolicies', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='HyperflexProxySettingPolicyList', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def hyperflex_proxy_setting_policies_moid_delete(self, moid, **kwargs): """ Delete a 'hyperflex.ProxySettingPolicy' resource. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.hyperflex_proxy_setting_policies_moid_delete(moid, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str moid: The Moid of the hyperflexProxySettingPolicy instance. 
(required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.hyperflex_proxy_setting_policies_moid_delete_with_http_info(moid, **kwargs) else: (data) = self.hyperflex_proxy_setting_policies_moid_delete_with_http_info(moid, **kwargs) return data def hyperflex_proxy_setting_policies_moid_delete_with_http_info(self, moid, **kwargs): """ Delete a 'hyperflex.ProxySettingPolicy' resource. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.hyperflex_proxy_setting_policies_moid_delete_with_http_info(moid, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str moid: The Moid of the hyperflexProxySettingPolicy instance. (required) :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['moid'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method hyperflex_proxy_setting_policies_moid_delete" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'moid' is set if ('moid' not in params) or (params['moid'] is None): raise ValueError("Missing the required parameter `moid` when calling `hyperflex_proxy_setting_policies_moid_delete`") collection_formats = {} path_params = {} if 'moid' in params: path_params['Moid'] = params['moid'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api('/hyperflex/ProxySettingPolicies/{Moid}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def hyperflex_proxy_setting_policies_moid_get(self, moid, **kwargs): """ Read a 'hyperflex.ProxySettingPolicy' resource. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.hyperflex_proxy_setting_policies_moid_get(moid, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str moid: The Moid of the hyperflexProxySettingPolicy instance. (required) :return: HyperflexProxySettingPolicy If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.hyperflex_proxy_setting_policies_moid_get_with_http_info(moid, **kwargs) else: (data) = self.hyperflex_proxy_setting_policies_moid_get_with_http_info(moid, **kwargs) return data def hyperflex_proxy_setting_policies_moid_get_with_http_info(self, moid, **kwargs): """ Read a 'hyperflex.ProxySettingPolicy' resource. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.hyperflex_proxy_setting_policies_moid_get_with_http_info(moid, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str moid: The Moid of the hyperflexProxySettingPolicy instance. (required) :return: HyperflexProxySettingPolicy If the method is called asynchronously, returns the request thread. """ all_params = ['moid'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method hyperflex_proxy_setting_policies_moid_get" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'moid' is set if ('moid' not in params) or (params['moid'] is None): raise ValueError("Missing the required parameter `moid` when calling `hyperflex_proxy_setting_policies_moid_get`") collection_formats = {} path_params = {} if 'moid' in params: path_params['Moid'] = params['moid'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api('/hyperflex/ProxySettingPolicies/{Moid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='HyperflexProxySettingPolicy', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def hyperflex_proxy_setting_policies_moid_patch(self, moid, body, **kwargs): """ Update a 'hyperflex.ProxySettingPolicy' resource. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.hyperflex_proxy_setting_policies_moid_patch(moid, body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str moid: The Moid of the hyperflexProxySettingPolicy instance. (required) :param HyperflexProxySettingPolicy body: hyperflexProxySettingPolicy to update (required) :return: None If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.hyperflex_proxy_setting_policies_moid_patch_with_http_info(moid, body, **kwargs) else: (data) = self.hyperflex_proxy_setting_policies_moid_patch_with_http_info(moid, body, **kwargs) return data def hyperflex_proxy_setting_policies_moid_patch_with_http_info(self, moid, body, **kwargs): """ Update a 'hyperflex.ProxySettingPolicy' resource. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.hyperflex_proxy_setting_policies_moid_patch_with_http_info(moid, body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str moid: The Moid of the hyperflexProxySettingPolicy instance. (required) :param HyperflexProxySettingPolicy body: hyperflexProxySettingPolicy to update (required) :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['moid', 'body'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method hyperflex_proxy_setting_policies_moid_patch" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'moid' is set if ('moid' not in params) or (params['moid'] is None): raise ValueError("Missing the required parameter `moid` when calling `hyperflex_proxy_setting_policies_moid_patch`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `hyperflex_proxy_setting_policies_moid_patch`") collection_formats = {} path_params = {} if 'moid' in params: path_params['Moid'] = params['moid'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api('/hyperflex/ProxySettingPolicies/{Moid}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def hyperflex_proxy_setting_policies_moid_post(self, moid, body, **kwargs): """ Update a 'hyperflex.ProxySettingPolicy' resource. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.hyperflex_proxy_setting_policies_moid_post(moid, body, callback=callback_function) :param callback function: The callback function for asynchronous request. 
(optional) :param str moid: The Moid of the hyperflexProxySettingPolicy instance. (required) :param HyperflexProxySettingPolicy body: hyperflexProxySettingPolicy to update (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.hyperflex_proxy_setting_policies_moid_post_with_http_info(moid, body, **kwargs) else: (data) = self.hyperflex_proxy_setting_policies_moid_post_with_http_info(moid, body, **kwargs) return data def hyperflex_proxy_setting_policies_moid_post_with_http_info(self, moid, body, **kwargs): """ Update a 'hyperflex.ProxySettingPolicy' resource. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.hyperflex_proxy_setting_policies_moid_post_with_http_info(moid, body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str moid: The Moid of the hyperflexProxySettingPolicy instance. (required) :param HyperflexProxySettingPolicy body: hyperflexProxySettingPolicy to update (required) :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['moid', 'body'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method hyperflex_proxy_setting_policies_moid_post" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'moid' is set if ('moid' not in params) or (params['moid'] is None): raise ValueError("Missing the required parameter `moid` when calling `hyperflex_proxy_setting_policies_moid_post`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `hyperflex_proxy_setting_policies_moid_post`") collection_formats = {} path_params = {} if 'moid' in params: path_params['Moid'] = params['moid'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api('/hyperflex/ProxySettingPolicies/{Moid}', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def hyperflex_proxy_setting_policies_post(self, body, **kwargs): """ Create a 'hyperflex.ProxySettingPolicy' resource. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. 
>>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.hyperflex_proxy_setting_policies_post(body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param HyperflexProxySettingPolicy body: hyperflexProxySettingPolicy to add (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.hyperflex_proxy_setting_policies_post_with_http_info(body, **kwargs) else: (data) = self.hyperflex_proxy_setting_policies_post_with_http_info(body, **kwargs) return data def hyperflex_proxy_setting_policies_post_with_http_info(self, body, **kwargs): """ Create a 'hyperflex.ProxySettingPolicy' resource. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.hyperflex_proxy_setting_policies_post_with_http_info(body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param HyperflexProxySettingPolicy body: hyperflexProxySettingPolicy to add (required) :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['body'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method hyperflex_proxy_setting_policies_post" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `hyperflex_proxy_setting_policies_post`") collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api('/hyperflex/ProxySettingPolicies', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
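

# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated code). It assumes an already
# configured `api_client` (authentication is set up elsewhere), a known
# `moid`, and that the surrounding generated class is named
# HyperflexProxySettingPolicyApi; all three names are placeholders here.
#
#     api = HyperflexProxySettingPolicyApi(api_client)
#     policy = api.hyperflex_proxy_setting_policies_moid_get(moid)  # blocking call
#     api.hyperflex_proxy_setting_policies_moid_delete(moid)        # delete by Moid
#
# Passing `callback=...` to any of these methods (as shown in the docstrings
# above) makes the call asynchronous and returns the request thread instead.
# ---------------------------------------------------------------------------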
py
1a3a0a7e6bacaae60572ed55b6077aaf41b60180
# -*- coding: utf-8 -*- # Generated by Django 1.11.4 on 2017-11-20 16:18 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('configurations', '0002_auto_20171120_1016'), ] operations = [ migrations.AddField( model_name='appconfiguration', name='app_domain', field=models.CharField(choices=[('D3M_DOMAIN', 'D3M_DOMAIN'), ('DATAVERSE_DOMAIN', 'DATAVERSE_DOMAIN'), ('EVENTDATA_DOMAIN', 'EVENTDATA_DOMAIN')], default='D3M_DOMAIN', max_length=70, verbose_name='App domain'), preserve_default=False, ), ]
py
1a3a0ab77a6d24e39dd9fc3626410141969f1318
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and estimators that enable explicit kernel methods in TensorFlow.

@@KernelLinearClassifier
@@RandomFourierFeatureMapper
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.kernel_methods.python.kernel_estimators import KernelLinearClassifier
from tensorflow.contrib.kernel_methods.python.mappers import dense_kernel_mapper
from tensorflow.contrib.kernel_methods.python.mappers.random_fourier_features import RandomFourierFeatureMapper

from tensorflow.python.util.all_util import remove_undocumented

remove_undocumented(__name__)
py
1a3a0acfecc32da6feeac242cd589a6b705c5126
#!/usr/bin/env python

"""Package: mininet
   Test creation and pings for topologies with link and/or CPU options."""

import unittest
import sys
from functools import partial

from mininet.net import Mininet
from mininet.node import OVSSwitch, UserSwitch, IVSSwitch
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.topo import Topo
from mininet.log import setLogLevel
from mininet.util import quietRun
from mininet.clean import cleanup

# Number of hosts for each test
N = 2


class SingleSwitchOptionsTopo(Topo):
    "Single switch connected to n hosts."
    def __init__(self, n=2, hopts=None, lopts=None):
        if not hopts:
            hopts = {}
        if not lopts:
            lopts = {}
        Topo.__init__(self, hopts=hopts, lopts=lopts)
        switch = self.addSwitch('s1')
        for h in range(n):
            host = self.addHost('h%s' % (h + 1))
            self.addLink(host, switch)


# Tell pylint not to complain about calls to other class
# pylint: disable=E1101

class testOptionsTopoCommon( object ):
    """Verify ability to create networks with host and link options
       (common code)."""

    switchClass = None  # overridden in subclasses

    @staticmethod
    def tearDown():
        "Clean up if necessary"
        if sys.exc_info() != ( None, None, None ):
            cleanup()

    def runOptionsTopoTest( self, n, msg, hopts=None, lopts=None ):
        "Generic topology-with-options test runner."
        mn = Mininet( topo=SingleSwitchOptionsTopo( n=n, hopts=hopts,
                                                    lopts=lopts ),
                      host=CPULimitedHost, link=TCLink,
                      switch=self.switchClass, waitConnected=True )
        dropped = mn.run( mn.ping )
        hoptsStr = ', '.join( '%s: %s' % ( opt, value )
                              for opt, value in hopts.items() )
        loptsStr = ', '.join( '%s: %s' % ( opt, value )
                              for opt, value in lopts.items() )
        msg += ( '%s%% of pings were dropped during mininet.ping().\n'
                 'Topo = SingleSwitchTopo, %s hosts\n'
                 'hopts = %s\n'
                 'lopts = %s\n'
                 'host = CPULimitedHost\n'
                 'link = TCLink\n'
                 'Switch = %s\n'
                 % ( dropped, n, hoptsStr, loptsStr, self.switchClass ) )
        self.assertEqual( dropped, 0, msg=msg )

    def assertWithinTolerance( self, measured, expected, tolerance_frac, msg ):
        """Check that a given value is within a tolerance of expected
        tolerance_frac: less-than-1.0 value; 0.8 would yield 20% tolerance.
        """
        upperBound = ( float( expected ) + ( 1 - tolerance_frac ) *
                       float( expected ) )
        lowerBound = float( expected ) * tolerance_frac
        info = ( 'measured value is out of bounds\n'
                 'expected value: %s\n'
                 'measured value: %s\n'
                 'failure tolerance: %s\n'
                 'upper bound: %s\n'
                 'lower bound: %s\n'
                 % ( expected, measured, tolerance_frac,
                     upperBound, lowerBound ) )
        msg += info
        self.assertGreaterEqual( float( measured ), lowerBound, msg=msg )
        self.assertLessEqual( float( measured ), upperBound, msg=msg )

    def testCPULimits( self ):
        "Verify topology creation with CPU limits set for both schedulers."
CPU_FRACTION = 0.1 CPU_TOLERANCE = 0.8 # CPU fraction below which test should fail hopts = { 'cpu': CPU_FRACTION } #self.runOptionsTopoTest( N, hopts=hopts ) mn = Mininet( SingleSwitchOptionsTopo( n=N, hopts=hopts ), host=CPULimitedHost, switch=self.switchClass, waitConnected=True ) mn.start() results = mn.runCpuLimitTest( cpu=CPU_FRACTION ) mn.stop() hostUsage = '\n'.join( 'h%s: %s' % ( n + 1, results[ (n - 1) * 5 : (n * 5) - 1 ] ) for n in range( N ) ) hoptsStr = ', '.join( '%s: %s' % ( opt, value ) for opt, value in hopts.items() ) msg = ( '\nTesting cpu limited to %d%% of cpu per host\n' 'cpu usage percent per host:\n%s\n' 'Topo = SingleSwitchTopo, %s hosts\n' 'hopts = %s\n' 'host = CPULimitedHost\n' 'Switch = %s\n' % ( CPU_FRACTION * 100, hostUsage, N, hoptsStr, self.switchClass ) ) for pct in results: #divide cpu by 100 to convert from percentage to fraction self.assertWithinTolerance( pct/100, CPU_FRACTION, CPU_TOLERANCE, msg ) def testLinkBandwidth( self ): "Verify that link bandwidths are accurate within a bound." if self.switchClass is UserSwitch: self.skipTest( 'UserSwitch has very poor performance -' ' skipping for now' ) BW = 5 # Mbps BW_TOLERANCE = 0.8 # BW fraction below which test should fail # Verify ability to create limited-link topo first; lopts = { 'bw': BW, 'use_htb': True } # Also verify correctness of limit limitng within a bound. mn = Mininet( SingleSwitchOptionsTopo( n=N, lopts=lopts ), link=TCLink, switch=self.switchClass, waitConnected=True ) bw_strs = mn.run( mn.iperf, fmt='m' ) loptsStr = ', '.join( '%s: %s' % ( opt, value ) for opt, value in lopts.items() ) msg = ( '\nTesting link bandwidth limited to %d Mbps per link\n' 'iperf results[ client, server ]: %s\n' 'Topo = SingleSwitchTopo, %s hosts\n' 'Link = TCLink\n' 'lopts = %s\n' 'host = default\n' 'switch = %s\n' % ( BW, bw_strs, N, loptsStr, self.switchClass ) ) # On the client side, iperf doesn't wait for ACKs - it simply # reports how long it took to fill up the TCP send buffer. # As long as the kernel doesn't wait a long time before # delivering bytes to the iperf server, its reported data rate # should be close to the actual receive rate. serverRate, _clientRate = bw_strs bw = float( serverRate.split(' ')[0] ) self.assertWithinTolerance( bw, BW, BW_TOLERANCE, msg ) def testLinkDelay( self ): "Verify that link delays are accurate within a bound." 
DELAY_MS = 15 DELAY_TOLERANCE = 0.8 # Delay fraction below which test should fail REPS = 3 lopts = { 'delay': '%sms' % DELAY_MS, 'use_htb': True } mn = Mininet( SingleSwitchOptionsTopo( n=N, lopts=lopts ), link=TCLink, switch=self.switchClass, autoStaticArp=True, waitConnected=True ) mn.start() for _ in range( REPS ): ping_delays = mn.pingFull() mn.stop() test_outputs = ping_delays[0] # Ignore unused variables below # pylint: disable=W0612 node, dest, ping_outputs = test_outputs sent, received, rttmin, rttavg, rttmax, rttdev = ping_outputs pingFailMsg = 'sent %s pings, only received %s' % ( sent, received ) self.assertEqual( sent, received, msg=pingFailMsg ) # pylint: enable=W0612 loptsStr = ', '.join( '%s: %s' % ( opt, value ) for opt, value in lopts.items() ) msg = ( '\nTesting Link Delay of %s ms\n' 'ping results across 4 links:\n' '(Sent, Received, rttmin, rttavg, rttmax, rttdev)\n' '%s\n' 'Topo = SingleSwitchTopo, %s hosts\n' 'Link = TCLink\n' 'lopts = %s\n' 'host = default' 'switch = %s\n' % ( DELAY_MS, ping_outputs, N, loptsStr, self.switchClass ) ) for rttval in [rttmin, rttavg, rttmax]: # Multiply delay by 4 to cover there & back on two links self.assertWithinTolerance( rttval, DELAY_MS * 4.0, DELAY_TOLERANCE, msg ) def testLinkLoss( self ): "Verify that we see packet drops with a high configured loss rate." LOSS_PERCENT = 99 REPS = 1 lopts = { 'loss': LOSS_PERCENT, 'use_htb': True } mn = Mininet( topo=SingleSwitchOptionsTopo( n=N, lopts=lopts ), host=CPULimitedHost, link=TCLink, switch=self.switchClass, waitConnected=True ) # Drops are probabilistic, but the chance of no dropped packets is # 1 in 100 million with 4 hops for a link w/99% loss. dropped_total = 0 mn.start() for _ in range(REPS): dropped_total += mn.ping(timeout='1') mn.stop() loptsStr = ', '.join( '%s: %s' % ( opt, value ) for opt, value in lopts.items() ) msg = ( '\nTesting packet loss with %d%% loss rate\n' 'number of dropped pings during mininet.ping(): %s\n' 'expected number of dropped packets: 1\n' 'Topo = SingleSwitchTopo, %s hosts\n' 'Link = TCLink\n' 'lopts = %s\n' 'host = default\n' 'switch = %s\n' % ( LOSS_PERCENT, dropped_total, N, loptsStr, self.switchClass ) ) self.assertGreater( dropped_total, 0, msg ) def testMostOptions( self ): "Verify topology creation with most link options and CPU limits." lopts = { 'bw': 10, 'delay': '5ms', 'use_htb': True } hopts = { 'cpu': 0.5 / N } msg = '\nTesting many cpu and link options\n' self.runOptionsTopoTest( N, msg, hopts=hopts, lopts=lopts ) # pylint: enable=E1101 class testOptionsTopoOVSKernel( testOptionsTopoCommon, unittest.TestCase ): """Verify ability to create networks with host and link options (OVS kernel switch).""" longMessage = True switchClass = OVSSwitch @unittest.skip( 'Skipping OVS user switch test for now' ) class testOptionsTopoOVSUser( testOptionsTopoCommon, unittest.TestCase ): """Verify ability to create networks with host and link options (OVS user switch).""" longMessage = True switchClass = partial( OVSSwitch, datapath='user' ) @unittest.skipUnless( quietRun( 'which ivs-ctl' ), 'IVS is not installed' ) class testOptionsTopoIVS( testOptionsTopoCommon, unittest.TestCase ): "Verify ability to create networks with host and link options (IVS)." 
longMessage = True switchClass = IVSSwitch @unittest.skipUnless( quietRun( 'which ofprotocol' ), 'Reference user switch is not installed' ) class testOptionsTopoUserspace( testOptionsTopoCommon, unittest.TestCase ): """Verify ability to create networks with host and link options (UserSwitch).""" longMessage = True switchClass = UserSwitch if __name__ == '__main__': setLogLevel( 'warning' ) unittest.main()
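

# Worked example of the tolerance check used by assertWithinTolerance above
# (illustrative only, not executed by the tests): with expected=100 and
# tolerance_frac=0.8,
#
#     lowerBound = 100 * 0.8             -> 80.0
#     upperBound = 100 + (1 - 0.8) * 100 -> 120.0
#
# so any measured value in [80, 120] passes; i.e. a tolerance_frac of 0.8
# corresponds to a +/-20% band around the expected CPU share, bandwidth or
# delay.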
py
1a3a0ce5fc4c19519487f593a211e01a562f9d90
import datetime import posixpath from django import forms from django.core import checks from django.core.files.base import File from django.core.files.images import ImageFile from django.core.files.storage import Storage, default_storage from django.core.files.utils import validate_file_name from django.db.models import signals from django.db.models.fields import Field from django.db.models.query_utils import DeferredAttribute from django.utils.translation import gettext_lazy as _ class FieldFile(File): def __init__(self, instance, field, name): super().__init__(None, name) self.instance = instance self.field = field self.storage = field.storage self._committed = True def __eq__(self, other): # Older code may be expecting FileField values to be simple strings. # By overriding the == operator, it can remain backwards compatibility. if hasattr(other, "name"): return self.name == other.name return self.name == other def __hash__(self): return hash(self.name) # The standard File contains most of the necessary properties, but # FieldFiles can be instantiated without a name, so that needs to # be checked for here. def _require_file(self): if not self: raise ValueError( "The '%s' attribute has no file associated with it." % self.field.name ) def _get_file(self): self._require_file() if getattr(self, "_file", None) is None: self._file = self.storage.open(self.name, "rb") return self._file def _set_file(self, file): self._file = file def _del_file(self): del self._file file = property(_get_file, _set_file, _del_file) @property def path(self): self._require_file() return self.storage.path(self.name) @property def url(self): self._require_file() return self.storage.url(self.name) @property def size(self): self._require_file() if not self._committed: return self.file.size return self.storage.size(self.name) def open(self, mode="rb"): self._require_file() if getattr(self, "_file", None) is None: self.file = self.storage.open(self.name, mode) else: self.file.open(mode) return self # open() doesn't alter the file's contents, but it does reset the pointer open.alters_data = True # In addition to the standard File API, FieldFiles have extra methods # to further manipulate the underlying file, as well as update the # associated model instance. def save(self, name, content, save=True): name = self.field.generate_filename(self.instance, name) self.name = self.storage.save(name, content, max_length=self.field.max_length) setattr(self.instance, self.field.attname, self.name) self._committed = True # Save the object because it has changed, unless save is False if save: self.instance.save() save.alters_data = True def delete(self, save=True): if not self: return # Only close the file if it's already open, which we know by the # presence of self._file if hasattr(self, "_file"): self.close() del self.file self.storage.delete(self.name) self.name = None setattr(self.instance, self.field.attname, self.name) self._committed = False if save: self.instance.save() delete.alters_data = True @property def closed(self): file = getattr(self, "_file", None) return file is None or file.closed def close(self): file = getattr(self, "_file", None) if file is not None: file.close() def __getstate__(self): # FieldFile needs access to its associated model field, an instance and # the file's name. Everything else will be restored later, by # FileDescriptor below. 
return { "name": self.name, "closed": False, "_committed": True, "_file": None, "instance": self.instance, "field": self.field, } def __setstate__(self, state): self.__dict__.update(state) self.storage = self.field.storage class FileDescriptor(DeferredAttribute): """ The descriptor for the file attribute on the model instance. Return a FieldFile when accessed so you can write code like:: >>> from myapp.models import MyModel >>> instance = MyModel.objects.get(pk=1) >>> instance.file.size Assign a file object on assignment so you can do:: >>> with open('/path/to/hello.world') as f: ... instance.file = File(f) """ def __get__(self, instance, cls=None): if instance is None: return self # This is slightly complicated, so worth an explanation. # instance.file`needs to ultimately return some instance of `File`, # probably a subclass. Additionally, this returned object needs to have # the FieldFile API so that users can easily do things like # instance.file.path and have that delegated to the file storage engine. # Easy enough if we're strict about assignment in __set__, but if you # peek below you can see that we're not. So depending on the current # value of the field we have to dynamically construct some sort of # "thing" to return. # The instance dict contains whatever was originally assigned # in __set__. file = super().__get__(instance, cls) # If this value is a string (instance.file = "path/to/file") or None # then we simply wrap it with the appropriate attribute class according # to the file field. [This is FieldFile for FileFields and # ImageFieldFile for ImageFields; it's also conceivable that user # subclasses might also want to subclass the attribute class]. This # object understands how to convert a path to a file, and also how to # handle None. if isinstance(file, str) or file is None: attr = self.field.attr_class(instance, self.field, file) instance.__dict__[self.field.attname] = attr # Other types of files may be assigned as well, but they need to have # the FieldFile interface added to them. Thus, we wrap any other type of # File inside a FieldFile (well, the field's attr_class, which is # usually FieldFile). elif isinstance(file, File) and not isinstance(file, FieldFile): file_copy = self.field.attr_class(instance, self.field, file.name) file_copy.file = file file_copy._committed = False instance.__dict__[self.field.attname] = file_copy # Finally, because of the (some would say boneheaded) way pickle works, # the underlying FieldFile might not actually itself have an associated # file. So we need to reset the details of the FieldFile in those cases. elif isinstance(file, FieldFile) and not hasattr(file, "field"): file.instance = instance file.field = self.field file.storage = self.field.storage # Make sure that the instance is correct. elif isinstance(file, FieldFile) and instance is not file.instance: file.instance = instance # That was fun, wasn't it? return instance.__dict__[self.field.attname] def __set__(self, instance, value): instance.__dict__[self.field.attname] = value class FileField(Field): # The class to wrap instance attributes in. Accessing the file object off # the instance will always return an instance of attr_class. attr_class = FieldFile # The descriptor to use for accessing the attribute off of the class. 
descriptor_class = FileDescriptor description = _("File") def __init__( self, verbose_name=None, name=None, upload_to="", storage=None, **kwargs ): self._primary_key_set_explicitly = "primary_key" in kwargs self.storage = storage or default_storage if callable(self.storage): # Hold a reference to the callable for deconstruct(). self._storage_callable = self.storage self.storage = self.storage() if not isinstance(self.storage, Storage): raise TypeError( "%s.storage must be a subclass/instance of %s.%s" % ( self.__class__.__qualname__, Storage.__module__, Storage.__qualname__, ) ) self.upload_to = upload_to kwargs.setdefault("max_length", 100) super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_primary_key(), *self._check_upload_to(), ] def _check_primary_key(self): if self._primary_key_set_explicitly: return [ checks.Error( "'primary_key' is not a valid argument for a %s." % self.__class__.__name__, obj=self, id="fields.E201", ) ] else: return [] def _check_upload_to(self): if isinstance(self.upload_to, str) and self.upload_to.startswith("/"): return [ checks.Error( "%s's 'upload_to' argument must be a relative path, not an " "absolute path." % self.__class__.__name__, obj=self, id="fields.E202", hint="Remove the leading slash.", ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 100: del kwargs["max_length"] kwargs["upload_to"] = self.upload_to if self.storage is not default_storage: kwargs["storage"] = getattr(self, "_storage_callable", self.storage) return name, path, args, kwargs def get_internal_type(self): return "FileField" def get_prep_value(self, value): value = super().get_prep_value(value) # Need to convert File objects provided via a form to string for # database insertion. if value is None: return None return str(value) def pre_save(self, model_instance, add): file = super().pre_save(model_instance, add) if file and not file._committed: # Commit the file to storage prior to saving the model file.save(file.name, file.file, save=False) return file def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) setattr(cls, self.attname, self.descriptor_class(self)) def generate_filename(self, instance, filename): """ Apply (if callable) or prepend (if a string) upload_to to the filename, then delegate further processing of the name to the storage backend. Until the storage layer, all file paths are expected to be Unix style (with forward slashes). """ if callable(self.upload_to): filename = self.upload_to(instance, filename) else: dirname = datetime.datetime.now().strftime(str(self.upload_to)) filename = posixpath.join(dirname, filename) filename = validate_file_name(filename, allow_relative_path=True) return self.storage.generate_filename(filename) def save_form_data(self, instance, data): # Important: None means "no change", other false value means "clear" # This subtle distinction (rather than a more explicit marker) is # needed because we need to consume values that are also sane for a # regular (non Model-) Form to find in its cleaned_data dictionary. if data is not None: # This value will be converted to str and stored in the # database, so leaving False as-is is not acceptable. 
setattr(instance, self.name, data or "") def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.FileField, "max_length": self.max_length, **kwargs, } ) class ImageFileDescriptor(FileDescriptor): """ Just like the FileDescriptor, but for ImageFields. The only difference is assigning the width/height to the width_field/height_field, if appropriate. """ def __set__(self, instance, value): previous_file = instance.__dict__.get(self.field.attname) super().__set__(instance, value) # To prevent recalculating image dimensions when we are instantiating # an object from the database (bug #11084), only update dimensions if # the field had a value before this assignment. Since the default # value for FileField subclasses is an instance of field.attr_class, # previous_file will only be None when we are called from # Model.__init__(). The ImageField.update_dimension_fields method # hooked up to the post_init signal handles the Model.__init__() cases. # Assignment happening outside of Model.__init__() will trigger the # update right here. if previous_file is not None: self.field.update_dimension_fields(instance, force=True) class ImageFieldFile(ImageFile, FieldFile): def delete(self, save=True): # Clear the image dimensions cache if hasattr(self, "_dimensions_cache"): del self._dimensions_cache super().delete(save) class ImageField(FileField): attr_class = ImageFieldFile descriptor_class = ImageFileDescriptor description = _("Image") def __init__( self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs, ): self.width_field, self.height_field = width_field, height_field super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_image_library_installed(), ] def _check_image_library_installed(self): try: from PIL import Image # NOQA except ImportError: return [ checks.Error( "Cannot use ImageField because Pillow is not installed.", hint=( "Get Pillow at https://pypi.org/project/Pillow/ " 'or run command "python -m pip install Pillow".' ), obj=self, id="fields.E210", ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.width_field: kwargs["width_field"] = self.width_field if self.height_field: kwargs["height_field"] = self.height_field return name, path, args, kwargs def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) # Attach update_dimension_fields so that dimension fields declared # after their corresponding image field don't stay cleared by # Model.__init__, see bug #11196. # Only run post-initialization dimension update on non-abstract models if not cls._meta.abstract: signals.post_init.connect(self.update_dimension_fields, sender=cls) def update_dimension_fields(self, instance, force=False, *args, **kwargs): """ Update field's width and height fields, if defined. This method is hooked up to model's post_init signal to update dimensions after instantiating a model instance. However, dimensions won't be updated if the dimensions fields are already populated. This avoids unnecessary recalculation when loading an object from the database. Dimensions can be forced to update with force=True, which is how ImageFileDescriptor.__set__ calls this method. """ # Nothing to update if the field doesn't have dimension fields or if # the field is deferred. 
has_dimension_fields = self.width_field or self.height_field if not has_dimension_fields or self.attname not in instance.__dict__: return # getattr will call the ImageFileDescriptor's __get__ method, which # coerces the assigned value into an instance of self.attr_class # (ImageFieldFile in this case). file = getattr(instance, self.attname) # Nothing to update if we have no file and not being forced to update. if not file and not force: return dimension_fields_filled = not ( (self.width_field and not getattr(instance, self.width_field)) or (self.height_field and not getattr(instance, self.height_field)) ) # When both dimension fields have values, we are most likely loading # data from the database or updating an image field that already had # an image stored. In the first case, we don't want to update the # dimension fields because we are already getting their values from the # database. In the second case, we do want to update the dimensions # fields and will skip this return because force will be True since we # were called from ImageFileDescriptor.__set__. if dimension_fields_filled and not force: return # file should be an instance of ImageFieldFile or should be None. if file: width = file.width height = file.height else: # No file, so clear dimensions fields. width = None height = None # Update the width and height fields. if self.width_field: setattr(instance, self.width_field, width) if self.height_field: setattr(instance, self.height_field, height) def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.ImageField, **kwargs, } )
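

# Illustrative usage (a minimal sketch, not part of this module): how these
# fields are typically declared on a model. `Document`, `Photo` and the
# upload paths below are made-up example names.
#
#     from django.db import models
#
#     def photo_path(instance, filename):
#         # A callable upload_to receives the model instance and the original
#         # filename, and returns the storage-relative path.
#         return 'photos/user_%s/%s' % (instance.owner_id, filename)
#
#     class Document(models.Model):
#         file = models.FileField(upload_to='documents/%Y/%m/%d/')
#
#     class Photo(models.Model):
#         owner_id = models.IntegerField()
#         image = models.ImageField(upload_to=photo_path,
#                                   width_field='width', height_field='height')
#         width = models.IntegerField(null=True)
#         height = models.IntegerField(null=True)
#
# Accessing `document.file` goes through FileDescriptor and returns a
# FieldFile, so `document.file.url`, `document.file.size` and
# `document.file.open()` all delegate to the configured storage backend.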
py
1a3a0cf69ce6c43c4c2544d995e5627dddcc4605
import mock import unittest from mock import patch from pyramid.testing import setUp, tearDown from swiftclient.exceptions import ClientException from .. api.swift import * class SwiftTests(unittest.TestCase): def setUp(self): self.config = setUp() def tearDown(self): tearDown() @mock.patch('swiftclient.client.Connection') @mock.patch('localapi.api.swift.settings') def test_connection(self, settings_mock, mock_connection): auth_token = "HPAUTH_9787665544434434" fake_connection = mock.Mock() mock_connection.return_value = fake_connection result = connection(auth_token) self.assertEqual(result, fake_connection) @mock.patch('swiftclient.client.Connection') @mock.patch('localapi.api.swift.settings') def test_connection_client_exception(self, settings_mock, mock_connection): auth_token = "HPAUTH_9787665544434434" error_message = 'SWIFT CLIENT ERROR' mock_connection.side_effect = ClientException(error_message) result = connection(auth_token) self.assertRaises(ClientException, mock_connection, error_message) @mock.patch('swiftclient.client.Connection') @mock.patch('localapi.api.swift.settings') def test_connection_exception(self, settings_mock, mock_connection): auth_token = "HPAUTH_9787665544434434" error_message = 'CONNECTION ERROR' mock_connection.side_effect = Exception(error_message) result = connection(auth_token) self.assertRaises(Exception, error_message) def test_verify_container_missing_when_false(self): account = ([{}], [{'name': 'name1'}]) connection = mock.Mock() connection.get_account.return_value = account test_container_name = 'name1' result = verify_container_missing(connection, test_container_name) self.assertFalse(result) def test_verify_container_missing_when_true(self): account = ([{}], [{'name': 'name1'}]) connection = mock.Mock() connection.get_account.return_value = account test_container_name = 'dummy' result = verify_container_missing(connection, test_container_name) self.assertTrue(result) def test_verify_container_missing_client_exception(self): name = 'foo' connection = mock.Mock() error_message = 'SWIFT CLIENT ERROR' connection.get_account.side_effect = ClientException(error_message) result = verify_container_missing(connection, name) self.assertRaises( ClientException, connection.get_account, error_message) self.assertFalse(result) def test_verify_container_missing_exception(self): name = 'foo' connection = mock.Mock() error_message = 'PUT CONTAINER ERROR' connection.get_account.side_effect = Exception(error_message) result = verify_container_missing(connection, name) self.assertRaises(Exception, error_message) self.assertFalse(result) @mock.patch('swiftclient.client.Connection') @mock.patch('localapi.api.swift.verify_container_missing') def test_ensure_addins_container_exists( self, mock_check_container, mock_swift_connection): success = {'status': 200} def mock_put_success(container_name, response_dict): response_dict['status'] = success['status'] container_name = "dummy_container_name" with mock.patch.object( mock_swift_connection, 'put_container', side_effect=mock_put_success) as mocked_put: ensure_addins_container_exists( mock_swift_connection, container_name) mocked_put.assert_called_with(container_name, response_dict=success) @mock.patch('swiftclient.client.Connection') @mock.patch('localapi.api.swift.verify_container_missing') def test_ensure_addins_container_exists_client_exception( self, mock_check_container, mock_swift_connection): container_name = "dummy_container_name" error_message = 'SWIFT CLIENT ERROR' with mock.patch.object( mock_swift_connection, 
'put_container', side_effect=ClientException(error_message)) as mocked_put: ensure_addins_container_exists( mock_swift_connection, container_name) self.assertRaises(ClientException, mocked_put, error_message) @mock.patch('swiftclient.client.Connection') @mock.patch('localapi.api.swift.verify_container_missing') def test_ensure_addins_container_exists_exception( self, mock_check_container, mock_swift_connection): container_name = "dummy_container_name" error_message = 'PUT CONTAINER ERROR' with mock.patch.object( mock_swift_connection, 'put_container', side_effect=Exception(error_message)) as mocked_put: ensure_addins_container_exists( mock_swift_connection, container_name) self.assertRaises(Exception, error_message) @patch('mimetypes.guess_type') @patch('swiftclient.client.Connection') @patch('sys.getsizeof') def test_put_object( self, mock_getsizeof, mock_swift_connection, mock_guess_type): success = {'status': 200} def mock_put_success( container_name, file_name, contents, content_length, content_type, response_dict): response_dict['status'] = success['status'] file_name = 'filename' container_name = "dummy_container_name" contents = mock.MagicMock() mock_getsizeof.return_value = 999 mock_guess_type.return_value = ['filetype'] with mock.patch.object( mock_swift_connection, 'put_object', side_effect=mock_put_success) as mocked_put: put_object( mock_swift_connection, container_name, file_name, contents) mocked_put.assert_called_with( container_name, file_name, contents, content_length=999, content_type='filetype', response_dict=success) @patch('mimetypes.guess_type') @patch('swiftclient.client.Connection') @patch('sys.getsizeof') def test_put_object_returns_client_exception( self, mock_getsizeof, mock_swift_connection, mock_guess_type): file_name = 'filename' container_name = "dummy_container_name" contents = mock.MagicMock() mock_getsizeof.return_value = 999 mock_guess_type.return_value = ['application/json'] error_message = "ERROR" mock_swift_connection.put_object.side_effect = ClientException( error_message) put_object( mock_swift_connection, container_name, file_name, contents) self.assertRaises( ClientException, mock_swift_connection.put_object, error_message) @patch('mimetypes.guess_type') @patch('swiftclient.client.Connection') @patch('sys.getsizeof') def test_put_object_returns_exception( self, mock_getsizeof, mock_swift_connection, mock_guess_type): file_name = 'filename' container_name = "dummy_container_name" contents = mock.MagicMock() # mock_getsizeof.return_value = 999 # mock_guess_type.return_value = ['filetype'] error_message = "ERROR" mock_swift_connection.put_object.side_effect = Exception(error_message) put_object( mock_swift_connection, container_name, file_name, contents) self.assertRaises(Exception, error_message) @mock.patch('localapi.api.keystone.get_auth_token') @mock.patch('localapi.api.swift.connection') @mock.patch('localapi.api.swift.extract_manifest_from_package') @mock.patch('localapi.api.swift.ensure_addins_container_exists') @mock.patch('localapi.api.swift.put_object') @mock.patch('localapi.api.swift.settings') def test_write_package( self, mock_settings, mock_put_object, mock_ensure_container, mock_extract_manifest, mock_connection, mock_get_auth_token): name = "thepackage/manifest.json" filedata = mock.Mock() mock_put_object.return_value = {'status': 200} result = write_package(name, filedata) self.assertEqual(result, 200) @mock.patch('localapi.api.keystone.get_auth_token') @mock.patch('localapi.api.swift.connection') 
    @mock.patch('localapi.api.swift.extract_manifest_from_package')
    @mock.patch('localapi.api.swift.put_object')
    @mock.patch('localapi.api.swift.settings')
    def test_write_package_client_exception(
            self, mock_settings, mock_put_object, mock_extract_manifest,
            mock_connection, mock_get_auth_token):
        name = "thepackage/manifest.json"
        filedata = mock.Mock()
        error_message = 'SAVE TO SWIFT ERROR'
        mock_connection.side_effect = ClientException(error_message)
        result = write_package(name, filedata)
        self.assertRaises(ClientException, mock_connection, error_message)

    @mock.patch('localapi.api.swift.settings')
    def test_write_package_exception(self, mock_settings):
        name = "thepackage/manifest.json"
        filedata = mock.Mock()
        error_message = 'SAVE TO SWIFT ERROR'
        # It doesn't matter what raises the exception; just make the first
        # thing write_package touches (settings) blow up.
        mock_settings.side_effect = Exception(error_message)
        result = write_package(name, filedata)
        self.assertRaises(Exception, error_message)
py
1a3a0eb1d97d22e75238b9dc1a01a6192a6a38f4
#!/usr/bin/env python3 import argparse import ctypes import os import readline import socket import subprocess import sys import threading readline.get_history_length() # throw this away because we import readline for prompt stuff parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('-l', '--listen', type=str, dest='listen', default='0.0.0.0', help='address to bind and sniff packets') parser.description = """\ This is a Python program to scan a network for live hosts by spraying UDP traffic and inspecting responses. """ args = parser.parse_args() def main(): # make this work on windows too if os.name == 'nt': socket_protocol = socket.IPPROTO_IP else: socket_protocol = socket.IPPROTO_ICMP # set up raw socket and bind sniffer = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket_protocol) sniffer.bind((args.listen,0)) # we want to include headers sniffer.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1) # if windows explicitly set promiscuous mode if os.name == 'nt': sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON) # read a single packet data = sniffer.recv(65536) print(data) hexdump(data) # if windows explicitly set promiscuous mode if os.name == 'nt': sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF) def hexdump(src, length=16, sep='.'): """ https://gist.github.com/1mm0rt41PC/c340564823f283fe530b """ result = [] for i in range(0, len(src), length): subSrc = src[i:i + length] hexa = '' for h in range(0, len(subSrc)): if h == length / 2: hexa += ' ' h = subSrc[h] if not isinstance(h, int): h = ord(h) h = hex(h).replace('0x', '') if len(h) == 1: h = '0' + h hexa += h + ' ' hexa = hexa.strip(' ') text = '' for c in subSrc: if not isinstance(c, int): c = ord(c) if 0x20 <= c < 0x7F: text += chr(c) else: text += sep result.append(('%08X: %-' + str(length * (2 + 1) + 1) + 's |%s|') % (i, hexa, text)) print('\n'.join(result)) if __name__ == '__main__': main()
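

# Quick sketch of the hexdump() helper on its own (independent of the raw
# socket); the exact column layout follows the format string above:
#
#     hexdump(b'GET / HTTP/1.1\r\n')
#     # 00000000: 47 45 54 20 2f 20 48 54  54 50 2f 31 2e 31 0d 0a |GET / HTTP/1.1..|
#
# Note that creating the raw sniffing socket in main() requires root (or
# Administrator on Windows) privileges.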
py
1a3a11a0c94761b0d38b9a7aaef4222c9545d4e6
""" Single-subject data (two sessions) in native space ================================================== The example shows the analysis of an SPM dataset studying face perception. The analysis is performed in native space. Realignment parameters are provided with the input images, but those have not been resampled to a common space. The experimental paradigm is simple, with two conditions; viewing a face image or a scrambled face image, supposedly with the same low-level statistical properties, to find face-specific responses. For details on the data, please see: Henson, R.N., Goshen-Gottstein, Y., Ganel, T., Otten, L.J., Quayle, A., Rugg, M.D. Electrophysiological and haemodynamic correlates of face perception, recognition and priming. Cereb Cortex. 2003 Jul;13(7):793-805. http://www.dx.doi.org/10.1093/cercor/13.7.793 This example takes a lot of time because the input are lists of 3D images sampled in different positions (encoded by different affine functions). """ print(__doc__) ######################################################################### # Fetch the SPM multimodal_faces data. from nilearn.datasets import fetch_spm_multimodal_fmri subject_data = fetch_spm_multimodal_fmri() ######################################################################### # Specfiy timing and design matrix parameters. tr = 2. # repetition time, in seconds slice_time_ref = 0. # Sample at the beginning of each acquisition. drift_model = 'Cosine' # We use a discrete cosine transform to model signal drifts. high_pass = .01 # The cutoff for the drift model is 0.01 Hz. hrf_model = 'spm + derivative' # The hemodynamic response function is the SPM canonical one. ######################################################################### # Resample the images. # # This is achieved by the concat_imgs function of Nilearn. from nilearn.image import concat_imgs, resample_img, mean_img fmri_img = [concat_imgs(subject_data.func1, auto_resample=True), concat_imgs(subject_data.func2, auto_resample=True)] affine, shape = fmri_img[0].affine, fmri_img[0].shape print('Resampling the second image (this takes time)...') fmri_img[1] = resample_img(fmri_img[1], affine, shape[:3]) ######################################################################### # Let's create mean image for display purposes. mean_image = mean_img(fmri_img) ######################################################################### # Make the design matrices. import numpy as np import pandas as pd from nilearn.glm.first_level import make_first_level_design_matrix design_matrices = [] ######################################################################### # Loop over the two sessions. for idx, img in enumerate(fmri_img, start=1): # Build experimental paradigm n_scans = img.shape[-1] events = pd.read_table(subject_data['events{}'.format(idx)]) # Define the sampling times for the design matrix frame_times = np.arange(n_scans) * tr # Build design matrix with the reviously defined parameters design_matrix = make_first_level_design_matrix( frame_times, events, hrf_model=hrf_model, drift_model=drift_model, high_pass=high_pass, ) # put the design matrices in a list design_matrices.append(design_matrix) ######################################################################### # We can specify basic contrasts (to get beta maps). # We start by specifying canonical contrast that isolate design matrix columns. 
contrast_matrix = np.eye(design_matrix.shape[1]) basic_contrasts = dict([(column, contrast_matrix[i]) for i, column in enumerate(design_matrix.columns)]) ######################################################################### # We actually want more interesting contrasts. The simplest contrast # just makes the difference between the two main conditions. We # define the two opposite versions to run one-tailed t-tests. We also # define the effects of interest contrast, a 2-dimensional contrasts # spanning the two conditions. contrasts = { 'faces-scrambled': basic_contrasts['faces'] - basic_contrasts['scrambled'], 'scrambled-faces': -basic_contrasts['faces'] + basic_contrasts['scrambled'], 'effects_of_interest': np.vstack((basic_contrasts['faces'], basic_contrasts['scrambled'])) } ######################################################################### # Fit the GLM for the 2 sessions by specifying a FirstLevelModel and then # fitting it. from nilearn.glm.first_level import FirstLevelModel print('Fitting a GLM') fmri_glm = FirstLevelModel() fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices) ######################################################################### # Now we can compute contrast-related statistical maps (in z-scale), and plot # them. print('Computing contrasts') from nilearn import plotting # Iterate on contrasts for contrast_id, contrast_val in contrasts.items(): print("\tcontrast id: %s" % contrast_id) # compute the contrasts z_map = fmri_glm.compute_contrast( contrast_val, output_type='z_score') # plot the contrasts as soon as they're generated # the display is overlaid on the mean fMRI image # a threshold of 3.0 is used, more sophisticated choices are possible plotting.plot_stat_map( z_map, bg_img=mean_image, threshold=3.0, display_mode='z', cut_coords=3, black_bg=True, title=contrast_id) plotting.show() ######################################################################### # Based on the resulting maps we observe that the analysis results in # wide activity for the 'effects of interest' contrast, showing the # implications of large portions of the visual cortex in the # conditions. By contrast, the differential effect between "faces" and # "scrambled" involves sparser, more anterior and lateral regions. It # also displays some responses in the frontal lobe.
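
#########################################################################
# As an optional last step, the z-maps can be written to disk for later
# inspection. This is a minimal sketch; the output directory name is an
# arbitrary choice.
import os

output_dir = 'results'
os.makedirs(output_dir, exist_ok=True)
for contrast_id, contrast_val in contrasts.items():
    z_map = fmri_glm.compute_contrast(contrast_val, output_type='z_score')
    z_map.to_filename(os.path.join(output_dir, '%s_z_map.nii.gz' % contrast_id))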
py
1a3a11da7273ac252e1354db4aa2a318145a1f93
from thingsboard_gateway.things.meter import MeterDataDefine
from thingsboard_gateway.things.meter.nan_suo import NsDataDefineName
from thingsboard_gateway.things.meter.gb_meter_protocol import CJT188Protocol


class NasReadDataRequest(CJT188Protocol):
    """
    Read meter data.
    """

    def __init__(self, address, device_type, seq):
        super().__init__()
        self.address = bytes.fromhex(address)
        self.device_type = device_type
        self.control_code = 0x01
        self.data_defines.append(MeterDataDefine(NsDataDefineName.Default, 2, data=bytes([0x1F, 0x90])))
        self.data_defines.append(MeterDataDefine(NsDataDefineName.Seq, 1, data=bytes([seq])))


class NsoReadAddressRequest(CJT188Protocol):
    """
    Read the meter address (broadcast request, address field is all 0xAA).
    """

    def __init__(self, device_type, seq):
        super().__init__()
        self.address = bytes([0xAA] * 7)
        self.device_type = device_type
        self.control_code = 0x03
        self.data_defines.append(MeterDataDefine(NsDataDefineName.Default, 2, data=bytes([0x0A, 0x81])))
        self.data_defines.append(MeterDataDefine(NsDataDefineName.Seq, 1, data=bytes([seq])))


class NsReadUser1Request(CJT188Protocol):
    """
    Read user parameters 1 - terminal user number and meter number.
    """

    def __init__(self, address, device_type, seq):
        super().__init__()
        self.address = bytes.fromhex(address)
        self.device_type = device_type
        self.control_code = 0x03
        self.data_defines.append(MeterDataDefine(NsDataDefineName.Default, 2, data=bytes([0xAA, 0x81])))
        self.data_defines.append(MeterDataDefine(NsDataDefineName.Seq, 1, data=bytes([seq])))


class NsReadUser2Request(CJT188Protocol):
    """
    Read user parameters 2 - terminal over-capacity limit, overdraft amount and alarm amount.
    """

    def __init__(self, address, device_type, seq):
        super().__init__()
        self.address = bytes.fromhex(address)
        self.device_type = device_type
        self.control_code = 0x03
        self.data_defines.append(MeterDataDefine(NsDataDefineName.Default, 2, data=bytes([0xB0, 0x81])))
        self.data_defines.append(MeterDataDefine(NsDataDefineName.Seq, 1, data=bytes([seq])))
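

# Usage sketch (not part of the original module): each request is built from a
# meter address given as a hex string (decoded with bytes.fromhex, 7 bytes in
# CJ/T 188 framing), a device type byte and a frame sequence number. The
# concrete values below are placeholders.
#
#     read_data = NasReadDataRequest(address='12345678901234', device_type=0x10, seq=1)
#     read_addr = NsoReadAddressRequest(device_type=0x10, seq=2)  # broadcast address AA..AA
#
# Serializing the frame onto the wire is the job of the CJT188Protocol base
# class, which lives outside this file.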
py
1a3a11fee002212f4d53dc08b3d8f4dc16113f56
""" Exercício Python 113: Reescreva a função leiaInt() que fizemos no desafio 104, incluindo agora a possibilidade da digitação de um número de tipo inválido. Aproveite e crie também uma função leiaFloat() com a mesma funcionalidade. """ def leiaInt(mensagem): value = 0 while True: try: value = int(input(mensagem)) except KeyboardInterrupt: print('O usuario decidiu nao informar o numero') break except: print('Ocorreu um erro, certifique-se de que digitou um numero inteiro...') else: break return value def leiaFloat(mensagem): value = 0 while True: try: value = float(input(mensagem)) except KeyboardInterrupt: print('\n\nO usuario decidiu nao informar o numero') break except: print('Ocorreu um erro, certifique-se de que digitou um numero inteiro...') else: break return value # MainProgram try: number1 = leiaInt('insira um numero inteiro: ') number2 = leiaFloat('insira um numero real:') except: print('Erros localizados, da proxima insira os dados corretamente') finally: print(f'o numero int {number1} e o real {number2}')
py
1a3a135dc6e70838b0669daeed96145c387d8898
#!/usr/bin/env python2.5 # # Copyright 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper classes which help converting a url to a list of SB expressions.""" import array import logging import re import string import urllib import urlparse import util class UrlParseError(Exception): pass def GenerateSafeChars(): """ Return a string containing all 'safe' characters that shouldn't be escaped for url encoding. This includes all printable characters except '#%' and whitespace characters. """ unfiltered_chars = string.digits + string.ascii_letters + string.punctuation filtered_list = [c for c in unfiltered_chars if c not in '%#'] return array.array('c', filtered_list).tostring() class ExpressionGenerator(object): """Class does the conversion url -> list of SafeBrowsing expressions. This class converts a given url into the list of all SafeBrowsing host-suffix, path-prefix expressions for that url. These are expressions that are on the SafeBrowsing lists. """ HEX = re.compile(r'^0x([a-fA-F0-9]+)$') OCT = re.compile(r'^0([0-7]+)$') DEC = re.compile(r'^(\d+)$') IP_WITH_TRAILING_SPACE = re.compile(r'^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) ') POSSIBLE_IP = re.compile(r'^(?i)((?:0x[0-9a-f]+|[0-9\\.])+)$') FIND_BAD_OCTAL_REGEXP = re.compile(r'(^|\.)0\d*[89]') # This regular expression parses the host and port from a hostname. Note: any # user and password are removed from the hostname. HOST_PORT_REGEXP = re.compile(r'^(?:.*@)?(?P<host>[^:]*)(:(?P<port>\d+))?$') SAFE_CHARS = GenerateSafeChars() # Dict that maps supported schemes to their default port number. DEFAULT_PORTS = {'http': '80', 'https': '443', 'ftp': '21'} def __init__(self, url): parse_exception = UrlParseError('failed to parse URL "%s"' % (url,)) canonical_url = ExpressionGenerator.CanonicalizeUrl(url) if not canonical_url: raise parse_exception # Each element is a list of host components used to build expressions. self._host_lists = [] # A list of paths used to build expressions. self._path_exprs = [] url_split = urlparse.urlsplit(canonical_url) canonical_host, canonical_path = url_split[1], url_split[2] self._MakeHostLists(canonical_host, parse_exception) if url_split[3]: # Include canonicalized path with query arguments self._path_exprs.append(canonical_path + '?' + url_split[3]) self._path_exprs.append(canonical_path) # Get the first three directory path components and create the 4 path # expressions starting at the root (/) and successively appending directory # path components, including the trailing slash. E.g.: # /a/b/c/d.html -> [/, /a/, /a/b/, /a/b/c/] path_parts = canonical_path.rstrip('/').lstrip('/').split('/')[:3] if canonical_path.count('/') < 4: # If the last component in not a directory we remove it. path_parts.pop() while path_parts: self._path_exprs.append('/' + '/'.join(path_parts) + '/') path_parts.pop() if canonical_path != '/': self._path_exprs.append('/') @staticmethod def CanonicalizeUrl(url): """Canonicalize the given URL for the SafeBrowsing protocol. Args: url: URL to canonicalize. 
Returns: A canonical URL or None if the URL could not be canonicalized. """ # Start by stripping off the fragment identifier. tmp_pos = url.find('#') if tmp_pos >= 0: url = url[0:tmp_pos] # Stripping off leading and trailing white spaces. url = url.lstrip().rstrip() # Remove any embedded tabs and CR/LF characters which aren't escaped. url = url.replace('\t', '').replace('\r', '').replace('\n', '') # Un-escape and re-escpae the URL just in case there are some encoded # characters in the url scheme for example. url = ExpressionGenerator._Escape(url) url_split = urlparse.urlsplit(url) if not url_split[0]: # URL had no scheme. In this case we assume it is http://. url = 'http://' + url url_split = urlparse.urlsplit(url) url_scheme = url_split[0].lower() if url_scheme not in ExpressionGenerator.DEFAULT_PORTS: return None # Unsupported scheme. # Note: applying HOST_PORT_REGEXP also removes any user and password. m = ExpressionGenerator.HOST_PORT_REGEXP.match(url_split[1]) if not m: return None host, port = m.group('host'), m.group('port') canonical_host = ExpressionGenerator.CanonicalizeHost(host) if not canonical_host: return None # Now that the host is canonicalized we add the port back if it's not the # default port for that url scheme. if port and port != ExpressionGenerator.DEFAULT_PORTS[url_scheme]: canonical_host += ':' + port canonical_path = ExpressionGenerator.CanonicalizePath(url_split[2]) # If the URL ends with ? we want to keep the ?. canonical_url = url_split[0] + '://' + canonical_host + canonical_path if url_split[3] != '' or url.endswith('?'): canonical_url += '?' + url_split[3] return canonical_url @staticmethod def CanonicalizePath(path): """Canonicalize the given path.""" if not path: return '/' # There are some cases where the path will not start with '/'. Example: # "ftp://host.com?q" -- the hostname is 'host.com' and the path '%3Fq'. # Browsers typically do prepend a leading slash to the path in this case, # we'll do the same. if path[0] != '/': path = '/' + path path = ExpressionGenerator._Escape(path) path_components = [] for path_component in path.split('/'): # If the path component is '..' we skip it and remove the preceding path # component if there are any. if path_component == '..': if len(path_components) > 0: path_components.pop() # We skip empty path components to remove successive slashes (i.e., # // -> /). Note: this means that the leading and trailing slash will # also be removed and need to be re-added afterwards. # # If the path component is '.' we also skip it (i.e., /./ -> /). elif path_component != '.' and path_component != '': path_components.append(path_component) # Put the path components back together and re-add the leading slash which # got stipped by removing empty path components. canonical_path = '/' + '/'.join(path_components) # If necessary we also re-add the trailing slash. if path.endswith('/') and not canonical_path.endswith('/'): canonical_path += '/' return canonical_path @staticmethod def CanonicalizeHost(host): """Canonicalize the given host. Returns None in case of an error.""" if not host: return None host = ExpressionGenerator._Escape(host.lower()) ip = ExpressionGenerator.CanonicalizeIp(host) if ip: # Host is an IP address. host = ip else: # Host is a normal hostname. # Skip trailing, leading and consecutive dots. 
host_split = [part for part in host.split('.') if part] if len(host_split) < 2: return None host = '.'.join(host_split) return host @staticmethod def CanonicalizeIp(host): """ Return a canonicalized IP if host can represent an IP and None otherwise. """ if len(host) <= 15: # The Windows resolver allows a 4-part dotted decimal IP address to have a # space followed by any old rubbish, so long as the total length of the # string doesn't get above 15 characters. So, "10.192.95.89 xy" is # resolved to 10.192.95.89. # If the string length is greater than 15 characters, # e.g. "10.192.95.89 xy.wildcard.example.com", it will be resolved through # DNS. m = ExpressionGenerator.IP_WITH_TRAILING_SPACE.match(host) if m: host = m.group(1) if not ExpressionGenerator.POSSIBLE_IP.match(host): return None # Basically we should parse octal if we can, but if there are illegal octal # numbers, i.e. 08 or 09, then we should just look at decimal and hex. allow_octal = not ExpressionGenerator.FIND_BAD_OCTAL_REGEXP.search(host) # Skip trailing, leading and consecutive dots. host_split = [part for part in host.split('.') if part] if len(host_split) > 4: return None ip = [] for i in xrange(len(host_split)): m = ExpressionGenerator.HEX.match(host_split[i]) if m: base = 16 else: m = ExpressionGenerator.OCT.match(host_split[i]) if m and allow_octal: base = 8 else: m = ExpressionGenerator.DEC.match(host_split[i]) if m: base = 10 else: return None n = long(m.group(1), base) if n > 255: if i < len(host_split) - 1: n &= 0xff ip.append(n) else: bytes = [] shift = 0 while n > 0 and len(bytes) < 4: bytes.append(n & 0xff) n >>= 8 if len(ip) + len(bytes) > 4: return None bytes.reverse() ip.extend(bytes) else: ip.append(n) while len(ip) < 4: ip.append(0) return '%u.%u.%u.%u' % tuple(ip) def Expressions(self): """ A generator of the possible expressions. """ for host_parts in self._host_lists: host = '.'.join(host_parts) for p in self._path_exprs: yield Expression(host, p) @staticmethod def _Escape(unescaped_str): """Fully unescape the given string, then re-escape once. Args: unescaped_str: string that should be escaped. Returns: Escaped string according to the SafeBrowsing protocol. """ unquoted = urllib.unquote(unescaped_str) while unquoted != unescaped_str: unescaped_str = unquoted unquoted = urllib.unquote(unquoted) return urllib.quote(unquoted, ExpressionGenerator.SAFE_CHARS) def _MakeHostLists(self, host, parse_exception): """ Canonicalize host and build self._host_lists. """ ip = ExpressionGenerator.CanonicalizeIp(host) if ip is not None: # Is an IP. self._host_lists.append([ip]) return # Is a hostname. # Skip trailing, leading and consecutive dots. host_split = [part for part in host.split('.') if part] if len(host_split) < 2: raise parse_exception start = len(host_split) - 5 stop = len(host_split) - 1 if start <= 0: start = 1 self._host_lists.append(host_split) for i in xrange(start, stop): self._host_lists.append(host_split[i:]) class Expression(object): """Class which represents a host-suffix, path-prefix expression.""" def __init__(self, host, path): self._host = host self._path = path self._value = host + path self._hash_value = util.GetHash256(self._value) def __str__(self): return self.Value() def __repr__(self): """ Not really a good repr. This is for debugging. """ return self.Value() def Value(self): return self._value def HashValue(self): return self._hash_value
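# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module). It only drives the
# public pieces defined above -- ExpressionGenerator, Expressions() and
# Expression.Value() -- on a made-up URL. The URL is an illustrative
# assumption, and running it assumes the sibling `util` module (used by
# Expression for hashing) is importable, as the import at the top requires.
if __name__ == '__main__':
  generator = ExpressionGenerator('http://a.b.example.com/1/2.html?param=1')
  for expression in generator.Expressions():
    # Each item is one host-suffix/path-prefix expression, e.g.
    # "a.b.example.com/1/2.html?param=1" or "example.com/".
    print expression.Value()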
py
1a3a13cebd97c6893624281bf55d73954aff4bc9
#from redthread import ReductionT from reduction import Reduction from polynomial import Polynomial #import xlsxwriter import os from collections import defaultdict import sys, getopt def recoverfile(saved, readed): if not os.path.exists(saved): return True, [] f = open(saved,'r') if(not os.stat(saved).st_size==0): pols = [] pols_done = [] for line in readed: pol = Polynomial(line) pols.append(pol) for line in f: line = line.replace("[","") line = line.replace("]","") spl = line.split(',') p = "" for i in xrange(0,len(spl)-1): p = p + " + x^" + str(spl[i].replace(" ","")) p = p + " + 1" p = p.replace("+","",1) #print p pol_ = Polynomial(p) pols_done.append(pol_) pols_set = set(pols) pols_set_done = set(pols_done) result = pols_set - pols_set_done return False, list(result) else: return True, [] def main(argv): inputfile = '' outputfile = '' debug = False try: opts, args = getopt.getopt(argv,"hi:o:d",["ifile=","ofile="]) except getopt.GetoptError: print 'single.py -i <inputfile> -o <outputfile>' sys.exit(2) for opt, arg in opts: if opt == '-h': print 'single.py -i <inputfile> -o <outputfile> -d for debug' sys.exit() elif opt in ("-d", "--debug"): debug = True elif opt in ("-i", "--ifile"): inputfile = arg elif opt in ("-o", "--ofile"): outputfile = arg try: fi = open(inputfile,"r") fl = open(outputfile,"a") except IOError: print 'main.py -i <inputfile> -o <outputfile>' sys.exit(2) l = [] pols = [] files = [inputfile] for fileName in files: save = outputfile f = open(fileName,'r') #read, pols = recoverfile(save, f) if True: for line in f: try: pol = Polynomial(line) pols.append(pol) except Exception as e: print line sys.exit(2) result = defaultdict(list) print len(pols) for pol in pols: if len(pol.coefs()) > 1: red = Reduction(debug) count = red.reduction(pol.coefs()) result = str(pol.coefs()) + ":" + str(count) print result fl.write(result + "\n") if __name__ == '__main__': main(sys.argv[1:])
py
1a3a13e796d25738a6d04b9f1877eb4dee412798
from .model import Model
from radar.models.geofence import Geofence
from radar.models.region import Region
from radar.models.place import Place


class RadarContext(Model):
    """Location context

    Parameters:
        live (bool)
        geofences (`list` of :class:`~radar.models.geofence.Geofence`)
        place (:class:`~radar.models.place.Place`, optional)
        country (:class:`~radar.models.region.Region`, optional)
        state (:class:`~radar.models.region.Region`, optional)
        dma (:class:`~radar.models.region.Region`, optional)
        postalCode (:class:`~radar.models.region.Region`, optional)
        fraud (FraudObject, optional)
    """

    OBJECT_NAME = "Context"
    _DISPLAY_ATTRIBUTES = (
        "live",
        "geofences",
        "place",
        "country",
        "state",
        "dma",
        "postalCode",
    )

    def __init__(self, radar, data={}):
        """Initialize a Radar Model instance

        Args:
            radar (:class:`~radar.RadarClient`): RadarClient for instance CRUD actions
            data (dict): raw data to initialize the model with
        """
        self._radar = radar
        self.raw_json = data

        for attribute, value in data.items():
            if attribute == "geofences":
                geofences = [Geofence(radar, geofence) for geofence in data[attribute]]
                setattr(self, attribute, geofences)
            elif attribute == "place":
                place = Place(radar, data[attribute])
                setattr(self, attribute, place)
            elif attribute in ["country", "state", "dma", "postalCode"]:
                region = Region(radar, data[attribute])
                setattr(self, attribute, region)
            else:
                setattr(self, attribute, value)
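# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module). The payload below
# is a guess at the raw context shape -- only keys handled in __init__ above
# are used -- and both passing radar=None and the Geofence/Region constructor
# signature (client, raw dict) are assumptions made for illustration only.
if __name__ == "__main__":
    sample_payload = {
        "live": False,
        "geofences": [{"description": "Main store", "tag": "store"}],
        "country": {"code": "US", "name": "United States"},
    }
    context = RadarContext(None, sample_payload)
    print(context.live)                # False, set via the fallback branch
    print(type(context.geofences[0]))  # Geofence, built from the raw dict
    print(type(context.country))       # Region, built from the raw dict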
py
1a3a13ee862309618505a271f6a9beecc20cd63d
"""Random variable generators. integers -------- uniform within range sequences --------- pick random element pick random sample generate random permutation distributions on the real line: ------------------------------ uniform triangular normal (Gaussian) lognormal negative exponential gamma beta pareto Weibull distributions on the circle (angles 0 to 2pi) --------------------------------------------- circular uniform von Mises General notes on the underlying Mersenne Twister core generator: * The period is 2**19937-1. * It is one of the most extensively tested generators in existence. * Without a direct way to compute N steps forward, the semantics of jumpahead(n) are weakened to simply jump to another distant state and rely on the large period to avoid overlapping sequences. * The random() method is implemented in C, executes in a single Python step, and is, therefore, threadsafe. """ from __future__ import division from warnings import warn as _warn from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin from os import urandom as _urandom from binascii import hexlify as _hexlify import hashlib as _hashlib __all__ = ["Random","seed","random","uniform","randint","choice","sample", "randrange","shuffle","normalvariate","lognormvariate", "expovariate","vonmisesvariate","gammavariate","triangular", "gauss","betavariate","paretovariate","weibullvariate", "getstate","setstate","jumpahead", "WichmannHill", "getrandbits", "SystemRandom"] NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0) TWOPI = 2.0*_pi LOG4 = _log(4.0) SG_MAGICCONST = 1.0 + _log(4.5) BPF = 53 # Number of bits in a float RECIP_BPF = 2**-BPF # Translated by Guido van Rossum from C source provided by # Adrian Baddeley. Adapted by Raymond Hettinger for use with # the Mersenne Twister and os.urandom() core generators. import _random class Random(_random.Random): """Random number generator base class used by bound module functions. Used to instantiate instances of Random to get generators that don't share state. Especially useful for multi-threaded programs, creating a different instance of Random for each thread, and using the jumpahead() method to ensure that the generated sequences seen by each thread don't overlap. Class Random can also be subclassed if you want to use a different basic generator of your own devising: in that case, override the following methods: random(), seed(), getstate(), setstate() and jumpahead(). Optionally, implement a getrandbits() method so that randrange() can cover arbitrarily large ranges. """ VERSION = 3 # used by getstate/setstate def __init__(self, x=None): """Initialize an instance. Optional argument x controls seeding, as for Random.seed(). """ self.seed(x) self.gauss_next = None def seed(self, a=None): """Initialize internal state from hashable object. None or no argument seeds from current time or from an operating system specific randomness source if available. If a is not None or an int or long, hash(a) is used instead. 
""" if a is None: try: a = long(_hexlify(_urandom(16)), 16) except NotImplementedError: import time a = long(time.time() * 256) # use fractional seconds super(Random, self).seed(a) self.gauss_next = None def getstate(self): """Return internal state; can be passed to setstate() later.""" return self.VERSION, super(Random, self).getstate(), self.gauss_next def setstate(self, state): """Restore internal state from object returned by getstate().""" version = state[0] if version == 3: version, internalstate, self.gauss_next = state super(Random, self).setstate(internalstate) elif version == 2: version, internalstate, self.gauss_next = state # In version 2, the state was saved as signed ints, which causes # inconsistencies between 32/64-bit systems. The state is # really unsigned 32-bit ints, so we convert negative ints from # version 2 to positive longs for version 3. try: internalstate = tuple( long(x) % (2**32) for x in internalstate ) except ValueError, e: raise TypeError, e super(Random, self).setstate(internalstate) else: raise ValueError("state with version %s passed to " "Random.setstate() of version %s" % (version, self.VERSION)) def jumpahead(self, n): """Change the internal state to one that is likely far away from the current state. This method will not be in Py3.x, so it is better to simply reseed. """ # The super.jumpahead() method uses shuffling to change state, # so it needs a large and "interesting" n to work with. Here, # we use hashing to create a large n for the shuffle. s = repr(n) + repr(self.getstate()) n = int(_hashlib.new('sha512', s).hexdigest(), 16) super(Random, self).jumpahead(n) ## ---- Methods below this point do not need to be overridden when ## ---- subclassing for the purpose of using a different core generator. ## -------------------- pickle support ------------------- def __getstate__(self): # for pickle return self.getstate() def __setstate__(self, state): # for pickle self.setstate(state) def __reduce__(self): return self.__class__, (), self.getstate() ## -------------------- integer methods ------------------- def randrange(self, start, stop=None, step=1, _int=int, _maxwidth=1L<<BPF): """Choose a random item from range(start, stop[, step]). This fixes the problem with randint() which includes the endpoint; in Python this is usually not what you want. """ # This code is a bit messy to make it fast for the # common case while still doing adequate error checking. istart = _int(start) if istart != start: raise ValueError, "non-integer arg 1 for randrange()" if stop is None: if istart > 0: if istart >= _maxwidth: return self._randbelow(istart) return _int(self.random() * istart) raise ValueError, "empty range for randrange()" # stop argument supplied. istop = _int(stop) if istop != stop: raise ValueError, "non-integer stop for randrange()" width = istop - istart if step == 1 and width > 0: # Note that # int(istart + self.random()*width) # instead would be incorrect. For example, consider istart # = -2 and istop = 0. Then the guts would be in # -2.0 to 0.0 exclusive on both ends (ignoring that random() # might return 0.0), and because int() truncates toward 0, the # final result would be -1 or 0 (instead of -2 or -1). # istart + int(self.random()*width) # would also be incorrect, for a subtler reason: the RHS # can return a long, and then randrange() would also return # a long, but we're supposed to return an int (for backward # compatibility). 
if width >= _maxwidth: return _int(istart + self._randbelow(width)) return _int(istart + _int(self.random()*width)) if step == 1: raise ValueError, "empty range for randrange() (%d,%d, %d)" % (istart, istop, width) # Non-unit step argument supplied. istep = _int(step) if istep != step: raise ValueError, "non-integer step for randrange()" if istep > 0: n = (width + istep - 1) // istep elif istep < 0: n = (width + istep + 1) // istep else: raise ValueError, "zero step for randrange()" if n <= 0: raise ValueError, "empty range for randrange()" if n >= _maxwidth: return istart + istep*self._randbelow(n) return istart + istep*_int(self.random() * n) def randint(self, a, b): """Return random integer in range [a, b], including both end points. """ return self.randrange(a, b+1) def _randbelow(self, n, _log=_log, _int=int, _maxwidth=1L<<BPF, _Method=_MethodType, _BuiltinMethod=_BuiltinMethodType): """Return a random int in the range [0,n) Handles the case where n has more bits than returned by a single call to the underlying generator. """ try: getrandbits = self.getrandbits except AttributeError: pass else: # Only call self.getrandbits if the original random() builtin method # has not been overridden or if a new getrandbits() was supplied. # This assures that the two methods correspond. if type(self.random) is _BuiltinMethod or type(getrandbits) is _Method: k = _int(1.00001 + _log(n-1, 2.0)) # 2**k > n-1 > 2**(k-2) r = getrandbits(k) while r >= n: r = getrandbits(k) return r if n >= _maxwidth: _warn("Underlying random() generator does not supply \n" "enough bits to choose from a population range this large") return _int(self.random() * n) ## -------------------- sequence methods ------------------- def choice(self, seq): """Choose a random element from a non-empty sequence.""" return seq[int(self.random() * len(seq))] # raises IndexError if seq is empty def shuffle(self, x, random=None): """x, random=random.random -> shuffle list x in place; return None. Optional arg random is a 0-argument function returning a random float in [0.0, 1.0); by default, the standard random.random. """ if random is None: random = self.random _int = int for i in reversed(xrange(1, len(x))): # pick an element in x[:i+1] with which to exchange x[i] j = _int(random() * (i+1)) x[i], x[j] = x[j], x[i] def sample(self, population, k): """Chooses k unique random elements from a population sequence. Returns a new list containing elements from the population while leaving the original population unchanged. The resulting list is in selection order so that all sub-slices will also be valid random samples. This allows raffle winners (the sample) to be partitioned into grand prize and second place winners (the subslices). Members of the population need not be hashable or unique. If the population contains repeats, then each occurrence is a possible selection in the sample. To choose a sample in a range of integers, use xrange as an argument. This is especially fast and space efficient for sampling from a large population: sample(xrange(10000000), 60) """ # Sampling without replacement entails tracking either potential # selections (the pool) in a list or previous selections in a set. # When the number of selections is small compared to the # population, then tracking selections is efficient, requiring # only a small set and an occasional reselection. For # a larger number of selections, the pool tracking method is # preferred since the list takes less space than the # set and it doesn't suffer from frequent reselections. 
n = len(population) if not 0 <= k <= n: raise ValueError("sample larger than population") random = self.random _int = int result = [None] * k setsize = 21 # size of a small set minus size of an empty list if k > 5: setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets if n <= setsize or hasattr(population, "keys"): # An n-length list is smaller than a k-length set, or this is a # mapping type so the other algorithm wouldn't work. pool = list(population) for i in xrange(k): # invariant: non-selected at [0,n-i) j = _int(random() * (n-i)) result[i] = pool[j] pool[j] = pool[n-i-1] # move non-selected item into vacancy else: try: selected = set() selected_add = selected.add for i in xrange(k): j = _int(random() * n) while j in selected: j = _int(random() * n) selected_add(j) result[i] = population[j] except (TypeError, KeyError): # handle (at least) sets if isinstance(population, list): raise return self.sample(tuple(population), k) return result ## -------------------- real-valued distributions ------------------- ## -------------------- uniform distribution ------------------- def uniform(self, a, b): "Get a random number in the range [a, b) or [a, b] depending on rounding." return a + (b-a) * self.random() ## -------------------- triangular -------------------- def triangular(self, low=0.0, high=1.0, mode=None): """Triangular distribution. Continuous distribution bounded by given lower and upper limits, and having a given mode value in-between. http://en.wikipedia.org/wiki/Triangular_distribution """ u = self.random() c = 0.5 if mode is None else (mode - low) / (high - low) if u > c: u = 1.0 - u c = 1.0 - c low, high = high, low return low + (high - low) * (u * c) ** 0.5 ## -------------------- normal distribution -------------------- def normalvariate(self, mu, sigma): """Normal distribution. mu is the mean, and sigma is the standard deviation. """ # mu = mean, sigma = standard deviation # Uses Kinderman and Monahan method. Reference: Kinderman, # A.J. and Monahan, J.F., "Computer generation of random # variables using the ratio of uniform deviates", ACM Trans # Math Software, 3, (1977), pp257-260. random = self.random while 1: u1 = random() u2 = 1.0 - random() z = NV_MAGICCONST*(u1-0.5)/u2 zz = z*z/4.0 if zz <= -_log(u2): break return mu + z*sigma ## -------------------- lognormal distribution -------------------- def lognormvariate(self, mu, sigma): """Log normal distribution. If you take the natural logarithm of this distribution, you'll get a normal distribution with mean mu and standard deviation sigma. mu can have any value, and sigma must be greater than zero. """ return _exp(self.normalvariate(mu, sigma)) ## -------------------- exponential distribution -------------------- def expovariate(self, lambd): """Exponential distribution. lambd is 1.0 divided by the desired mean. It should be nonzero. (The parameter would be called "lambda", but that is a reserved word in Python.) Returned values range from 0 to positive infinity if lambd is positive, and from negative infinity to 0 if lambd is negative. """ # lambd: rate lambd = 1/mean # ('lambda' is a Python reserved word) # we use 1-random() instead of random() to preclude the # possibility of taking the log of zero. return -_log(1.0 - self.random())/lambd ## -------------------- von Mises distribution -------------------- def vonmisesvariate(self, mu, kappa): """Circular data distribution. 
mu is the mean angle, expressed in radians between 0 and 2*pi, and kappa is the concentration parameter, which must be greater than or equal to zero. If kappa is equal to zero, this distribution reduces to a uniform random angle over the range 0 to 2*pi. """ # mu: mean angle (in radians between 0 and 2*pi) # kappa: concentration parameter kappa (>= 0) # if kappa = 0 generate uniform random angle # Based upon an algorithm published in: Fisher, N.I., # "Statistical Analysis of Circular Data", Cambridge # University Press, 1993. # Thanks to Magnus Kessler for a correction to the # implementation of step 4. random = self.random if kappa <= 1e-6: return TWOPI * random() s = 0.5 / kappa r = s + _sqrt(1.0 + s * s) while 1: u1 = random() z = _cos(_pi * u1) d = z / (r + z) u2 = random() if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d): break q = 1.0 / r f = (q + z) / (1.0 + q * z) u3 = random() if u3 > 0.5: theta = (mu + _acos(f)) % TWOPI else: theta = (mu - _acos(f)) % TWOPI return theta ## -------------------- gamma distribution -------------------- def gammavariate(self, alpha, beta): """Gamma distribution. Not the gamma function! Conditions on the parameters are alpha > 0 and beta > 0. The probability distribution function is: x ** (alpha - 1) * math.exp(-x / beta) pdf(x) = -------------------------------------- math.gamma(alpha) * beta ** alpha """ # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2 # Warning: a few older sources define the gamma distribution in terms # of alpha > -1.0 if alpha <= 0.0 or beta <= 0.0: raise ValueError, 'gammavariate: alpha and beta must be > 0.0' random = self.random if alpha > 1.0: # Uses R.C.H. Cheng, "The generation of Gamma # variables with non-integral shape parameters", # Applied Statistics, (1977), 26, No. 1, p71-74 ainv = _sqrt(2.0 * alpha - 1.0) bbb = alpha - LOG4 ccc = alpha + ainv while 1: u1 = random() if not 1e-7 < u1 < .9999999: continue u2 = 1.0 - random() v = _log(u1/(1.0-u1))/ainv x = alpha*_exp(v) z = u1*u1*u2 r = bbb+ccc*v-x if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z): return x * beta elif alpha == 1.0: # expovariate(1) u = random() while u <= 1e-7: u = random() return -_log(u) * beta else: # alpha is between 0 and 1 (exclusive) # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle while 1: u = random() b = (_e + alpha)/_e p = b*u if p <= 1.0: x = p ** (1.0/alpha) else: x = -_log((b-p)/alpha) u1 = random() if p > 1.0: if u1 <= x ** (alpha - 1.0): break elif u1 <= _exp(-x): break return x * beta ## -------------------- Gauss (faster alternative) -------------------- def gauss(self, mu, sigma): """Gaussian distribution. mu is the mean, and sigma is the standard deviation. This is slightly faster than the normalvariate() function. Not thread-safe without a lock around calls. """ # When x and y are two variables from [0, 1), uniformly # distributed, then # # cos(2*pi*x)*sqrt(-2*log(1-y)) # sin(2*pi*x)*sqrt(-2*log(1-y)) # # are two *independent* variables with normal distribution # (mu = 0, sigma = 1). # (Lambert Meertens) # (corrected version; bug discovered by Mike Miller, fixed by LM) # Multithreading note: When two threads call this function # simultaneously, it is possible that they will receive the # same return value. The window is very small though. To # avoid this, you have to use a lock around all calls. (I # didn't want to slow this down in the serial case by using a # lock here.) 
random = self.random z = self.gauss_next self.gauss_next = None if z is None: x2pi = random() * TWOPI g2rad = _sqrt(-2.0 * _log(1.0 - random())) z = _cos(x2pi) * g2rad self.gauss_next = _sin(x2pi) * g2rad return mu + z*sigma ## -------------------- beta -------------------- ## See ## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html ## for Ivan Frohne's insightful analysis of why the original implementation: ## ## def betavariate(self, alpha, beta): ## # Discrete Event Simulation in C, pp 87-88. ## ## y = self.expovariate(alpha) ## z = self.expovariate(1.0/beta) ## return z/(y+z) ## ## was dead wrong, and how it probably got that way. def betavariate(self, alpha, beta): """Beta distribution. Conditions on the parameters are alpha > 0 and beta > 0. Returned values range between 0 and 1. """ # This version due to Janne Sinkkonen, and matches all the std # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution"). y = self.gammavariate(alpha, 1.) if y == 0: return 0.0 else: return y / (y + self.gammavariate(beta, 1.)) ## -------------------- Pareto -------------------- def paretovariate(self, alpha): """Pareto distribution. alpha is the shape parameter.""" # Jain, pg. 495 u = 1.0 - self.random() return 1.0 / pow(u, 1.0/alpha) ## -------------------- Weibull -------------------- def weibullvariate(self, alpha, beta): """Weibull distribution. alpha is the scale parameter and beta is the shape parameter. """ # Jain, pg. 499; bug fix courtesy Bill Arms u = 1.0 - self.random() return alpha * pow(-_log(u), 1.0/beta) ## -------------------- Wichmann-Hill ------------------- class WichmannHill(Random): VERSION = 1 # used by getstate/setstate def seed(self, a=None): """Initialize internal state from hashable object. None or no argument seeds from current time or from an operating system specific randomness source if available. If a is not None or an int or long, hash(a) is used instead. If a is an int or long, a is used directly. Distinct values between 0 and 27814431486575L inclusive are guaranteed to yield distinct internal states (this guarantee is specific to the default Wichmann-Hill generator). """ if a is None: try: a = long(_hexlify(_urandom(16)), 16) except NotImplementedError: import time a = long(time.time() * 256) # use fractional seconds if not isinstance(a, (int, long)): a = hash(a) a, x = divmod(a, 30268) a, y = divmod(a, 30306) a, z = divmod(a, 30322) self._seed = int(x)+1, int(y)+1, int(z)+1 self.gauss_next = None def random(self): """Get the next random number in the range [0.0, 1.0).""" # Wichman-Hill random number generator. # # Wichmann, B. A. & Hill, I. D. (1982) # Algorithm AS 183: # An efficient and portable pseudo-random number generator # Applied Statistics 31 (1982) 188-190 # # see also: # Correction to Algorithm AS 183 # Applied Statistics 33 (1984) 123 # # McLeod, A. I. (1985) # A remark on Algorithm AS 183 # Applied Statistics 34 (1985),198-200 # This part is thread-unsafe: # BEGIN CRITICAL SECTION x, y, z = self._seed x = (171 * x) % 30269 y = (172 * y) % 30307 z = (170 * z) % 30323 self._seed = x, y, z # END CRITICAL SECTION # Note: on a platform using IEEE-754 double arithmetic, this can # never return 0.0 (asserted by Tim; proof too long for a comment). 
return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0 def getstate(self): """Return internal state; can be passed to setstate() later.""" return self.VERSION, self._seed, self.gauss_next def setstate(self, state): """Restore internal state from object returned by getstate().""" version = state[0] if version == 1: version, self._seed, self.gauss_next = state else: raise ValueError("state with version %s passed to " "Random.setstate() of version %s" % (version, self.VERSION)) def jumpahead(self, n): """Act as if n calls to random() were made, but quickly. n is an int, greater than or equal to 0. Example use: If you have 2 threads and know that each will consume no more than a million random numbers, create two Random objects r1 and r2, then do r2.setstate(r1.getstate()) r2.jumpahead(1000000) Then r1 and r2 will use guaranteed-disjoint segments of the full period. """ if not n >= 0: raise ValueError("n must be >= 0") x, y, z = self._seed x = int(x * pow(171, n, 30269)) % 30269 y = int(y * pow(172, n, 30307)) % 30307 z = int(z * pow(170, n, 30323)) % 30323 self._seed = x, y, z def __whseed(self, x=0, y=0, z=0): """Set the Wichmann-Hill seed from (x, y, z). These must be integers in the range [0, 256). """ if not type(x) == type(y) == type(z) == int: raise TypeError('seeds must be integers') if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256): raise ValueError('seeds must be in range(0, 256)') if 0 == x == y == z: # Initialize from current time import time t = long(time.time() * 256) t = int((t&0xffffff) ^ (t>>24)) t, x = divmod(t, 256) t, y = divmod(t, 256) t, z = divmod(t, 256) # Zero is a poor seed, so substitute 1 self._seed = (x or 1, y or 1, z or 1) self.gauss_next = None def whseed(self, a=None): """Seed from hashable object's hash code. None or no argument seeds from current time. It is not guaranteed that objects with distinct hash codes lead to distinct internal states. This is obsolete, provided for compatibility with the seed routine used prior to Python 2.1. Use the .seed() method instead. """ if a is None: self.__whseed() return a = hash(a) a, x = divmod(a, 256) a, y = divmod(a, 256) a, z = divmod(a, 256) x = (x + a) % 256 or 1 y = (y + a) % 256 or 1 z = (z + a) % 256 or 1 self.__whseed(x, y, z) ## --------------- Operating System Random Source ------------------ class SystemRandom(Random): """Alternate random number generator using sources provided by the operating system (such as /dev/urandom on Unix or CryptGenRandom on Windows). Not available on all systems (see os.urandom() for details). """ def random(self): """Get the next random number in the range [0.0, 1.0).""" return (long(_hexlify(_urandom(7)), 16) >> 3) * RECIP_BPF def getrandbits(self, k): """getrandbits(k) -> x. Generates a long int with k random bits.""" if k <= 0: raise ValueError('number of bits must be greater than zero') if k != int(k): raise TypeError('number of bits should be an integer') bytes = (k + 7) // 8 # bits / 8 and rounded up x = long(_hexlify(_urandom(bytes)), 16) return x >> (bytes * 8 - k) # trim excess bits def _stub(self, *args, **kwds): "Stub method. Not used for a system random number generator." return None seed = jumpahead = _stub def _notimplemented(self, *args, **kwds): "Method should not be called for a system random number generator." 
raise NotImplementedError('System entropy source does not have state.') getstate = setstate = _notimplemented ## -------------------- test program -------------------- def _test_generator(n, func, args): import time print n, 'times', func.__name__ total = 0.0 sqsum = 0.0 smallest = 1e10 largest = -1e10 t0 = time.time() for i in range(n): x = func(*args) total += x sqsum = sqsum + x*x smallest = min(x, smallest) largest = max(x, largest) t1 = time.time() print round(t1-t0, 3), 'sec,', avg = total/n stddev = _sqrt(sqsum/n - avg*avg) print 'avg %g, stddev %g, min %g, max %g' % \ (avg, stddev, smallest, largest) def _test(N=2000): _test_generator(N, random, ()) _test_generator(N, normalvariate, (0.0, 1.0)) _test_generator(N, lognormvariate, (0.0, 1.0)) _test_generator(N, vonmisesvariate, (0.0, 1.0)) _test_generator(N, gammavariate, (0.01, 1.0)) _test_generator(N, gammavariate, (0.1, 1.0)) _test_generator(N, gammavariate, (0.1, 2.0)) _test_generator(N, gammavariate, (0.5, 1.0)) _test_generator(N, gammavariate, (0.9, 1.0)) _test_generator(N, gammavariate, (1.0, 1.0)) _test_generator(N, gammavariate, (2.0, 1.0)) _test_generator(N, gammavariate, (20.0, 1.0)) _test_generator(N, gammavariate, (200.0, 1.0)) _test_generator(N, gauss, (0.0, 1.0)) _test_generator(N, betavariate, (3.0, 3.0)) _test_generator(N, triangular, (0.0, 1.0, 1.0/3.0)) # Create one instance, seeded from current time, and export its methods # as module-level functions. The functions share state across all uses #(both in the user's code and in the Python libraries), but that's fine # for most programs and is easier for the casual user than making them # instantiate their own Random() instance. _inst = Random() seed = _inst.seed random = _inst.random uniform = _inst.uniform triangular = _inst.triangular randint = _inst.randint choice = _inst.choice randrange = _inst.randrange sample = _inst.sample shuffle = _inst.shuffle normalvariate = _inst.normalvariate lognormvariate = _inst.lognormvariate expovariate = _inst.expovariate vonmisesvariate = _inst.vonmisesvariate gammavariate = _inst.gammavariate gauss = _inst.gauss betavariate = _inst.betavariate paretovariate = _inst.paretovariate weibullvariate = _inst.weibullvariate getstate = _inst.getstate setstate = _inst.setstate jumpahead = _inst.jumpahead getrandbits = _inst.getrandbits if __name__ == '__main__': _test()
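## -------------------- editorial usage sketch --------------------
# Not part of the module above; a minimal illustration of the pattern the
# Random class docstring recommends for multi-threaded programs: give each
# thread its own Random instance and use jumpahead() so the two streams stay
# disjoint. The seed value and jump distance are arbitrary example choices.
# (Python 2 syntax, matching the module.)
def _example_disjoint_streams():
    r1 = Random(12345)
    r2 = Random()
    r2.setstate(r1.getstate())    # start both generators from the same state
    r2.jumpahead(1000000)         # then move r2 far away from r1
    return ([r1.random() for _ in xrange(3)],
            [r2.random() for _ in xrange(3)])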
py
1a3a1458a00589935c723a267dbcd67389a5d65c
from pprint import pprint

import yaml
from tabulate import tabulate
from funcy import project

from wws.commands import utils


class Rm:
    def __init__(self):
        super().__init__()

    def process(self, args):
        """ removes matching entries from the warp database """
        if args['debug']:
            pprint(args)

        with open(args['workspace_warp_database'], 'r+') as f:
            data = yaml.load(f, Loader=yaml.FullLoader)
            if not data:
                data = []

            # Split the database into the entries to drop and the ones to keep.
            remove_entries = [
                d for d in data
                if any([a for a in args['alias'] if a.upper() in d['alias'].upper()])
            ]
            keep_entries = [
                d for d in data
                if not any([a for a in args['alias'] if a.upper() in d['alias'].upper()])
            ]

            if not args['verbose']:
                data = [project(d, ['alias', 'local', 'remote']) for d in remove_entries]
            else:
                data = remove_entries

            if not data:
                print("Nothing to remove.")
                exit()

            print("Entries to remove:")
            print(tabulate(data, headers="keys", tablefmt="psql"))

            rm = utils._confirm("Are you sure you want to remove these entries?")
            if rm:
                f.seek(0)
                f.truncate()
                yaml.dump(keep_entries, f, default_flow_style=False)
                print("Aliases were removed but the data remains untouched. Please remove the listed source directories.")
py
1a3a1480ee129890b005cabf8d314e7f7cf5ac3d
# Generated by Django 2.0.7 on 2018-07-29 22:16 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ibantools', '0001_initial'), ] operations = [ migrations.AlterField( model_name='bankcodede', name='indicator_changed', field=models.CharField(choices=[('A', 'New'), ('D', 'Deleted'), ('M', 'Changed'), ('U', 'Unchanged')], default='A', help_text='Änderungskennzeichen „A“ (Addition) für neue, „D“ (Deletion) für gelöschte, „U“(Unchanged) für unveränderte und „M“ (Modified) für veränderte Datensätze', max_length=1, verbose_name='Indicator changed'), ), migrations.AlterField( model_name='bankcodede', name='indicator_deleted', field=models.CharField(choices=[('0', 'No declaration'), ('1', 'Declarted for deletion')], default='0', help_text='Hinweis auf eine beabsichtigte Bankleitzahllöschung', max_length=1, verbose_name='Indicator deleted'), ), migrations.AlterField( model_name='bankcodede', name='payment_service_provider', field=models.CharField(choices=[('1', 'Yes'), ('2', 'No')], default='2', help_text='Merkmal, ob bankleitzahlführender Zahlungsdienstleister („1“) oder nicht („2“)', max_length=1, verbose_name='Payment service provider'), ), migrations.AlterField( model_name='bankcodede', name='short_description', field=models.CharField(help_text='Kurzbezeichnung des Zahlungsdienstleisters mit Ort (ohne Rechtsform)', max_length=27, verbose_name='Short description'), ), ]
py
1a3a152a6d6e2658ec8d8a922a2d6a92fbea7f3b
#!/usr/bin/env python3 import asyncio import time from psnawp_api import psnawp from pypresence import Presence from asset_updater import add_game_icon from playstationpresence.lib.files import load_config, load_game_data, load_game_icons from playstationpresence.lib.notifiable import Notifiable from playstationpresence.lib.rpc_retry import rpc_retry from requests.exceptions import * from threading import Event class PlaystationPresence: def __init__(self): self.notifier = None self.rpc = None self.exit_event = Event() self.old_info: dict = {'onlineStatus': None, 'titleId': None} self.config: dict = load_config() self.supported_games: set[str] = load_game_data() self.game_icons: set[str] = load_game_icons() self.psapi = psnawp.PSNAWP(self.config['npsso']) self.psnid = self.config['PSNID'] self.initRpc() def initRpc(self): self.rpc = Presence(self.config['discordClientId'], pipe=0, loop=asyncio.new_event_loop()) self.rpc.connect() def quit(self): self.exit_event.set() if self.notifier is not None: self.notifier.visible = False self.notifier.stop() def notify(self, message): print(message) if self.notifier is not None: self.notifier.title = message self.notifier.notify(message, "playstationpresence") @rpc_retry def clearStatus(self): self.rpc.clear() self.notify(f"Status changed to Offline") @rpc_retry def updateStatus(self, show_time: bool, state: str, large_image: str, details: str): if show_time: start_time = int(time.time()) self.rpc.update(state=state, start=start_time, small_image="ps5_main", small_text=self.psnid, large_image=large_image, large_text=state, details=details) else: self.rpc.update(state=state, small_image="ps5_main", small_text=self.psnid, large_image=large_image, large_text=state, details=details) self.notify(f"Status changed to {state}") def processPresenceInfo(self, mainpresence: dict): if mainpresence is None: return # Read PSN API data onlineStatus: str = mainpresence['primaryPlatformInfo']['onlineStatus'] onlinePlatform: str = mainpresence['primaryPlatformInfo']['platform'] game_info: list[dict] = mainpresence.get('gameTitleInfoList', None) # Check online status if onlineStatus == "offline": if self.old_info['onlineStatus'] != onlineStatus: self.clearStatus() self.old_info = {'onlineStatus': onlineStatus, 'titleId': None} elif game_info == None: # Set home menu state if self.old_info['onlineStatus'] != "online" or self.old_info['titleId'] != None: self.updateStatus(False, "Home Menu", "ps5_main", f"Online on {onlinePlatform}") self.old_info = {'onlineStatus': onlineStatus, 'titleId': None} elif self.old_info['titleId'] != game_info[0]['npTitleId']: # New title id is different -> update # Read game data game: dict[str, str] = game_info[0] # large_icon logic if game['npTitleId'] in self.supported_games: large_icon = game['npTitleId'].lower() else: # Game not known self.notify("Game not in library, checking for icon") # Check if icon exists if game['npTitleId'] in self.game_icons: self.notify("Game icon found\CONSIDER PUSHING NEW DISCORD ASSETS") else: # Get icon add_game_icon(game['npTitleId'], game['npTitleIconUrl']) self.notify("Reloading icons") self.game_icons = load_game_icons() large_icon = "ps5_main" # Update status self.updateStatus(True, game['titleName'], large_icon, f"Playing on {game['launchPlatform']}") self.old_info = {'onlineStatus': onlineStatus, 'titleId': game['npTitleId']} def mainloop(self, notifier: Notifiable): if notifier is not None: self.notifier = notifier self.notifier.visible = True while not self.exit_event.is_set(): mainpresence: dict = 
None user_online_id = None try: user_online_id = self.psapi.user(online_id=self.psnid) mainpresence = user_online_id.get_presence() # Uncomment for debug info about currently running game #print(mainpresence) except (ConnectionError, HTTPError) as e: print("Error when trying to read presence") print(e) self.processPresenceInfo(mainpresence) # Adjust this to be higher if you get ratelimited self.exit_event.wait(30) self.clearStatus() self.rpc.close()
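# ---------------------------------------------------------------------------
# Editorial note (not part of the original file): processPresenceInfo() only
# reads the keys accessed above, so a payload shaped like the dict below would
# exercise the "playing a game" branch. All values are made up for
# illustration; the title id and icon URL are hypothetical.
EXAMPLE_PRESENCE = {
    "primaryPlatformInfo": {"onlineStatus": "online", "platform": "PS5"},
    "gameTitleInfoList": [{
        "npTitleId": "PPSA01234_00",
        "titleName": "Some Game",
        "launchPlatform": "PS5",
        "npTitleIconUrl": "https://example.com/icon.png",
    }],
}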
py
1a3a163a3d0a1007672a351e040af508e4ee19f6
#! /usr/bin/env python
"""
based on this quickstart:
from https://developers.google.com/google-apps/calendar/quickstart/python
Don't forget to put CLIENT_SECRET_FILE in ~/.credentials

Note: the above URL redirects to
https://developers.google.com/calendar/quickstart/python
which has a different sequence for get_credentials(). The one in this file
still seems to work...
TODO: test that it really does work and perhaps update to the newer version.

Usage:
The google calendar is the database.
Calendar events are added as they are made known to the calendar owner (me).
Members are encouraged to add this calendar to their calendar viewing apps so
they can see who else will be in the cabin on any given night.
The first word in the event 'summary' (the thing that shows up in your calendar
view) should be the member name. Append something in Camel-case to avoid name
collisions e.g. 'BobB' and 'BobS'.
Guests are indicated by a +N (separated by whitespace; N is the guest count).

Around Thursday of each week, I assign rooms by inserting the room name into
the 'description' in the calendar event. Then I run this script and, if the
output looks OK, paste it into the communication to the members (Slack, email,
whatever).
Often, members have a room preference which I keep in an event in my personal
calendar. I try to honor their preferences and follow other social norms such
as not booking un-related men and women in the same bed/room but on popular
nights, that might be unavoidable.

As members pay their guest fees, I add a '$' (w/ whitespace) to the 'summary'.
The '$' moves them from the 'deadbeat' list to the 'sponsor' list in the weekly
communications.

Customization:
Obviously, you need to use your own google calendar.
Replace ROOMS with the appropriate selection for your situation.
NIGHTS_PEAK, GUEST_FEE_MID, and GUEST_FEE_PEAK may also need your attention.
Member names are extracted from the calendar, so no need to do anything in this
file, but you should probably examine fix_spelling() and add_guest_fee() since
they implement rules that are specific to my cabin.

I don't use f-strings because the raspberry pi that I sometimes run this on
only has python 3.4 and I'm too lazy to install 3.7
"""
import datetime
import os
import json

USE_STR = """
--Show room usage in Lone Clone Ski Cabin--
Note: Enter guests as 'member +N' and, when paid, 'member $ +N'

Usage:
  rooms [--counts] [--debug] [--nights] [--offline] [--peak] [--raw] [--shift=<S>] [--whosup] [--year=<Y>]
  rooms -h | --help
  rooms -v | --version

Options:
  -h --help       Show this screen.
  -c --counts     show how many times each member has used each room
  -d --debug      show stuff
  -n --nights     show who slept where, each night
  -o --offline    don't get the live calendar. Use a test data set
  -p --peak       Show peak nights for this season, excluding Fri and Sat
  -r --raw        show the raw calendar events
  -s --shift <S>  move 'today' by integer number of days
  -v --version    show the version
  -w --whosup     show who's up in the next week
  -y --year <Y>   year season starts [default: 2019]
"""

try:
    import httplib2
    from googleapiclient import discovery
    from oauth2client import client
    from oauth2client import tools
    from oauth2client.file import Storage
    import docopt
except ImportError:
    IMP_ERR_STR = '** Failed import!
Type "workon rooms" and try again, Bob **' print('\n%s\n'%('*'*len(IMP_ERR_STR)), IMP_ERR_STR, '\n%s\n'%('*'*len(IMP_ERR_STR))) # If modifying these scopes, delete your previously saved credentials # at ~/.credentials/calendar-python-quickstart.json SCOPES = 'https://www.googleapis.com/auth/calendar.readonly' APPLICATION_NAME = 'Google Calendar API Python Quickstart' # why do I have 2 different client secret files? TODO CLIENT_SECRET_FILE = 'calendar-python-quickstart.json' CLIENT_SECRET_FILE_ANOTHER = 'client_secret.json' ROOMS = ('in-law', 'master', 'middle', 'bunk', 'loft',) # assignable rooms in the cabin """ DAYS_PEAK is a list of days-of-the-week or dates that guest fee is higher than not. The dates are specific to the Julian calendar of each season. The year index is the season *start* year. Note: Fri and Sat should always be the first 2 entries """ NIGHTS_PEAK = { '2016': ['Fri', 'Sat']+['12/%2d'%x for x in range(18, 32)]+['01/01', '01/02', '02/19',], #pylint: disable=C0326 '2017': ['Fri', 'Sat']+['12/%2d'%x for x in range(17, 32)]+['01/01', '02/18',], #pylint: disable=C0326 '2018': ['Fri', 'Sat']+['12/%2d'%x for x in range(16, 32)]+['01/01', '02/17',], #pylint: disable=C0326 '2019': ['Fri', 'Sat']+['12/%2d'%x for x in range(15, 32)]+['01/01', '02/16',], #pylint: disable=C0326 '2020': ['Fri', 'Sat']+['12/%2d'%x for x in range(20, 32)]+['01/01', '02/14',], #pylint: disable=C0326 } # "mid week" and "weekend/holiday" guest fee in dollars GUEST_FEE_MID = 30 GUEST_FEE_PEAK = 35 def get_credentials(opts): """Gets valid user credentials from storage. If nothing has been stored, or if the stored credentials are invalid, the OAuth2 flow is completed to obtain the new credentials. Returns: Credentials, the obtained credential. """ home_dir = os.path.expanduser('~') credential_dir = os.path.join(home_dir, '.credentials') if not os.path.exists(credential_dir): os.makedirs(credential_dir) credential_path = os.path.join(credential_dir, CLIENT_SECRET_FILE) if opts['--debug']: print('** using credentials at '+credential_path) with open(credential_path) as cred_file: cred_text = cred_file.read() print('\n'.join(cred_text.split(','))) store = Storage(credential_path) credentials = store.get() if not credentials or credentials.invalid: flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE_ANOTHER, SCOPES) flow.user_agent = APPLICATION_NAME # if flags: credentials = tools.run_flow(flow, store) #, flags) # else: # Needed only for compatibility with Python 2.6 # credentials = tools.run(flow, store) print('Storing credentials to ' + credential_path) # except, I'm not storing them? return credentials def get_events(cred, **kwargs): """ Wraps the service.events() call """ http = cred.authorize(httplib2.Http()) service = discovery.build('calendar', 'v3', http=http) # throws Warning and ImportError # print(f'service.events={dir(service.events)}') """ #pylint: disable=E1101 pylint thinks there is no events()... but there is """ #pylint: disable=W0105 cal_events = service.events().list(**kwargs).execute() #pylint: disable=E1101 return cal_events.get('items', []) def get_events_raw(credentials, opts): """ Grab the entire calendar for the season from Nov 29 to May 1 ctor the dicts with night, leave, summary, description keys. 
nightShort is added later by more_dates() """ day0 = datetime.datetime(*opts['season_start']).isoformat()+'Z' day1 = datetime.datetime(*opts['season_end']).isoformat()+'Z' events = get_events( credentials, timeMin=day0, timeMax=day1, singleEvents=True, orderBy='startTime', calendarId="primary") return events def events_to_raw_dates(events, opts): #pylint: disable=W0613 """ make a new list: dates_raw that has only the fields I care about: night, leave, member, and room """ dates_raw = [] for event in events: day_dict = {} day_dict['night'] = event['start'].get('dateTime', event['start'].get('date'))[:10] day_dict['leave'] = event['end'].get('dateTime', event['end'].get('date'))[:10] # summary is the member name, description has room assignment for k in (('summary', 'member',), ('description', 'where', ),): try: day_dict[k[1]] = event[k[0]].strip() except KeyError: day_dict[k[1]] = '' dates_raw += [day_dict] # dates_raw[] is a list of # {'night':'2016-12-15', 'summary':'Logan', 'where':'master', 'leave':'2016-12-16',} return dates_raw def expand_multi_nights(dates_raw): """ expand multi-night stays into individual nights """ dates_multi_night = [] for one_date in dates_raw: # add day of week one_date['date'] = datetime.datetime.strptime(one_date['night'], '%Y-%m-%d') nights = (datetime.datetime.strptime(one_date['leave'], '%Y-%m-%d').date() - one_date['date'].date()).days - 1 for i in range(nights): new_date = one_date.copy() new_date['date'] = datetime.datetime.strptime(one_date['night'], '%Y-%m-%d') \ + datetime.timedelta(days=i+1) dates_multi_night += [new_date] dates_raw += dates_multi_night def add_day_of_week(dates_raw): """ Use 'date of "2016-12-23" to make night_abrev of "Fri 12/23" """ for one_date in dates_raw: one_date['night_abrev'] = one_date['date'].strftime('%a %m/%d') dates_raw = dates_raw.sort(key=lambda x: x['date']) def fix_spelling(dates_raw): """ Common data entry errors: fix the dict and flag it for me to fix the google calendar """ for date in dates_raw: for field, wrong, right in [ ('where', 'inlaw', 'in-law',), ('member', 'Sarah', 'Sara',), ]: if wrong in date[field]: print('** spellcheck:', date) date[field] = date[field].replace(wrong, right) # in-law, not inlaw, e.g. 
if 'Glen ' in date['member']: # special treatment for missing n in Glenn print('** spellcheck:', date) date['member'] = date['summary'].replace('Glen', 'Glenn') # two n in Glenn return dates_raw def select_dates(dates_raw, opts, day0=None, day1=None): """ return a subset of the events from today+day0 to today+day1 None in day0 means begining of current ski season None in day1 means end of current ski season """ dt_today = datetime.datetime.utcnow() if opts['--shift']: dt_today += datetime.timedelta(days=int(opts['--shift'])) season_start = datetime.datetime(*opts['season_start']) season_end = datetime.datetime(*opts['season_end']) date0 = season_start if day0 is None else dt_today + datetime.timedelta(days=day0) date1 = season_end if day1 is None else dt_today + datetime.timedelta(days=day1) if opts['--debug']: print('select', date0.strftime('%a %m/%d'), date1.strftime('%a %m/%d')) return [e for e in dates_raw if bool(date0 <= e['date'] <= date1)] def debug_print_raw(dates_raw): """ Debugging aid formatted to copy into code """ print('** dates_raw') print('{'+ '},\n{'.join([', '.join( ["'%s':'%s'"%(n, e[n]) for n in ('night', 'leave', 'member', 'where')] ) for e in dates_raw]) +'}') def show_raw(dates_raw): """ Debugging aid formatted for humans """ print('') print('%10s %20s %-20s'%('', '', 'Raw Calendar',)+' '.join(['%10s'%r for r in ROOMS])) for date in dates_raw: print('%10s %-20s %-20s'%(date['night'], date['member'], date['where'].strip()) + ' '.join(['%10s'%date[room] for room in ROOMS])) def put_members_in_rooms(dates_raw): """ add ['middle']='Logan', ['bunk']='' etc so that all dates have all rooms as keys, w/ or w/o a member """ for date in dates_raw: for room in ROOMS: if room in date['where'].lower(): date[room] = gevent_to_member_name(date) # just the first name else: date[room] = '' def add_guest_fee(event, opts): """ add 'guest_fee' key to a dates_raw event 0 means no guest, negative means fee is OWED, positive means paid a '+ indicatees guests but not Z+1 (Sam is not charged). Enter "Z +1" to indicate not Sam (chargable) """ if '+' in event['member'] and 'Z+1' not in event['member']: event['guest_fee'] = GUEST_FEE_PEAK if any([x in event['night_abrev'] \ for x in NIGHTS_PEAK[opts['--year']]]) else GUEST_FEE_MID # remove the 'paid' indicator ('$') str_guest_count = event['member'].replace('$','') # look for the guest count after the '+' # we don't get here if 'Z+1' in the event so OK to split on '+' str_guest_count = str_guest_count.split('+')[-1].strip() try: guest_count = int(str_guest_count) except ValueError: print('** FAILED to convert guest count', event['member'], 'on', event['night_abrev']) guest_count = 1 event['guest_fee'] = guest_count * event['guest_fee'] # look for 'paid' indicator to see who's been naughty and who's been nice if '$' not in event['member']: event['guest_fee'] = -event['guest_fee'] # OWED else: event['guest_fee'] = 0 return event def get_deadbeat_sponsors(dates_past): """ return dicts of members and their guest fee accounts. deadbeats owe guest fees sponsors have paid their guest fees. A member may appear in both. 
""" # init the member dicts with {name: []} deadbeats = {gevent_to_member_name(event): [] for event in dates_past} sponsors = {gevent_to_member_name(event): [] for event in dates_past} for event in dates_past: if event['guest_fee'] < 0: deadbeats[gevent_to_member_name(event)] += [(event['night_abrev'], -event['guest_fee'])] if event['guest_fee'] > 0: sponsors[gevent_to_member_name(event)] += [(event['night_abrev'], event['guest_fee'])] return deadbeats, sponsors def show_guest_fees(members): """ members is a dict created by get_deadbeat_sponsors(): member: [(night, fee), (night, fee), (night, fee), ...] for each member, prints $sum, member, dates or ' none' if there are no guest fees. """ out_lst = [] total = 0 for member in members: mem_total = sum([x[1] for x in members[member]]) dates = [x[0].split()[1] for x in members[member]] if mem_total: out_lst += ['$%4d %10s: %s'%(mem_total, member, ", ".join(dates))] total += mem_total if out_lst: print('\n'.join(out_lst)) print('$%4d %10s'%(total, 'total')) else: print(' none') def get_whos_up(dates_selected): """ return members_dict['Bob'] = [0, 'Bob', ('middle','Mon 12/24'), ('middle','Tue 12/25'), ] for use by show_whos_up() """ members_dict = {} p_ord = 0 for event in dates_selected: member = event['member'] try: members_dict[member] += [(event['where'], event['night_abrev']),] except KeyError: members_dict[member] = [p_ord, member, (event['where'], event['night_abrev']),] p_ord += 1 return members_dict def show_whos_up(whos_up_dict): """ This output gets pasted into my periodic emails who room: day date, date, date [, room: date, date] I generate a dict, keyed on the member, with values of a list: [order#, member, (rooms,day),(rooms,day),...)] I repeat the rooms for each day because it can change during a stay. """ # whos_up_dict['Bob'] = [0, 'Bob', ('middle','Mon 12/24'), ('middle','Tue 12/25'), ] # sort by the begining night of stay (the p_ord value, above) # for member_ass in sorted(list(whos_up_dict.items()), key=lambda k_v: k_v[1][0]): for member_ass in list(whos_up_dict.items()): # member_ass = ('Bob', [0, 'Bob', ('middle','Mon 12/24'), ('middle','Tue 12/25'), ]) day_tup = member_ass[1][2:] # [('middle','Mon 12/24'), ('middle','Tue 12/25'),] room = day_tup[0][0] # save the room so we only print it when it changes print('%20s %7s: %s,'%(member_ass[0], day_tup[0][0], day_tup[0][1]), end=' ') for a_day in day_tup[1:]: if a_day[0] == room: print(a_day[1].split()[1]+',', end=' ') else: print('%7s: %s,'%(a_day[0], a_day[1].split()[1]), end=' ') room = a_day[0] # save the room again print('') def show_missing_rooms(dates_raw, opts): """ Flag the data entry error condition: all members in the cabin on a given night must be in a room. Otherwise, the count will be wrong and the priority system breaks down. """ dates_raw = select_dates(dates_raw, opts, None, 0) missing_rooms_str = [] for date in dates_raw: if not date['where']: # catch members in cabin but not assigned to any room missing_rooms_str += \ ['** On %s, where did "%s" sleep?'%(date['night_abrev'], date['member'])] if missing_rooms_str: print('** Missing rooms ! 
**') print('\n'.join(missing_rooms_str)) def show_nights(dates_past, opts): #pylint: disable=W0613 """ colapse the raw calendar to show each night on one line date, inlaw, master, middle, bunk, loft who, who, who, who, who """ if dates_past: dates_combo = [dates_past[0].copy()] for date in dates_past[1:]: if dates_combo[-1]['night_abrev'] not in date['night_abrev']: # new date dates_combo += [date.copy()] else: for room in ROOMS: sep = ',' if date[room] and dates_combo[-1][room] else '' dates_combo[-1][room] = dates_combo[-1][room]+sep+date[room] # dates_combo[] is {'night':'2016-12-15', 'member':'Logan', 'where':'master', # 'master':'Logan', 'in-law':'Bob', 'middle':'Mark', ...} print('\n%10s '%('Nights')+' '.join(['%16s'%room for room in ROOMS])) for date in dates_combo: print('%10s '%(date['night_abrev'])+' '.join(['%16s'%date[room] for room in ROOMS])) else: print('\n** no events found by show_dates()') def count_members_in_rooms(dates_raw, opts): #pylint: disable=W0613 """ Construct the memberCount dict { 'Bob': {'inlaw': count, 'master' count, ...}...} for season up to today. """ # init the member_counts with the first {name: {rooms}} member_counts = {gevent_to_member_name(event): \ {room:0 for room in ROOMS+('total',)} for event in dates_raw} # add ['middle']='Logan' or blank for all rooms for event in dates_raw: # print '*****',gevent_to_member_name(event), # '+++', event['member'], '====', event['where'], '*****' member_counts[gevent_to_member_name(event)]['total'] = \ member_counts[gevent_to_member_name(event)]['total']+1 for room in ROOMS: if room in event['where'].lower(): try: member_counts[event[room]][room] = member_counts[event[room]][room]+1 except KeyError as why: msg = getattr(why, 'message', repr(why)) print("FAILED room=%s\nevent=%r\n%s\n"%(room, event, msg)) print("member_counts=%r\n"%member_counts) return member_counts def show_room_counts(member_counts): """ Room priority is based on which member has used the room the least. display: date, who, where inlaw, master, middle, bunk, loft total who, count, count, count, count, count """ # show how many times each member has slept in each room print('\n%4s%10s'%('', 'Counts')+' '.join(['%8s'%room for room in ROOMS])) for member in member_counts: print('%4d%10s'%(member_counts[member]['total'], member)+ ' '.join(['%8s'%('%d'%member_counts[member][room] if member_counts[member][room] else '') for room in ROOMS])) def gevent_to_member_name(event): """ Each calendar event has only one member name as the first word in the summary. extract the member name ignoring whatever else is in the summary. Should be run *after* fix_spelling() """ member = event['member'].split()[0].replace(',', '') return member def opts_add_season(opts): """ The Lone CLone cabin runs for the first weekend in Dec to the last in April. Sometimes, that includes the end of November ;-) """ opts['season_start'] = (int(opts['--year']), 11, 29,) opts['season_end'] = (int(opts['--year'])+1, 5, 1,) def read_test_dates_raw(file_name): """Read test data from a json encoded file. """ with open(file_name,'r') as fp: dates_raw_test = json.load(fp) return dates_raw_test def write_test_dates_raw(file_name, test_data): """Write test data to a json encoded file. 
""" with open(file_name,'w') as fp: json.dump(test_data, fp) def create_test_dates_raw(): """Todo: make a list of dicts as expected from google calendar """ return [] # yes, lots of branches and statements #pylint: disable=R0912 def main(opts): #pylint: disable=R0915 """ the program """ # ignore line-to-long #pylint: disable=C0301 if opts['--offline']: dates_raw = read_test_dates_raw('test.json') # start in the middle of the test data test_shift = datetime.datetime.strptime(dates_raw[len(dates_raw)//2]['night'], '%Y-%m-%d') opts['--year'] = str(datetime.datetime.strptime(dates_raw[0]['night'], '%Y-%m-%d').year) opts_add_season(opts) test_shift -= datetime.datetime.utcnow() test_shift = test_shift.days if opts['--shift']: opts['--shift'] = str(int(opts['--shift']) + test_shift) else: opts['--shift'] = str(test_shift) else: opts_add_season(opts) credentials = get_credentials(opts) events_raw = get_events_raw(credentials, opts) # print('events', ',\n'.join([repr(x) for x in events_raw])) # translate 'start' and 'end' to 'night' and 'leave' # translate 'summary' and 'description' to 'member' and 'where' dates_raw = events_to_raw_dates(events_raw, opts) # print ',\n'.join([repr(x) for x in dates_raw]) #pylint: enable=C0301 if opts['--debug']: print('opts:\n', '\n'.join(['%s: %r'%(k, opts[k]) for k in opts if '--' in k])) debug_print_raw(dates_raw) # dates_raw is a list of dicts. The dates_raw dicts need a few more fields... expand_multi_nights(dates_raw) # add more date dicts to fill in between night and leaving add_day_of_week(dates_raw) # add 'night_abrev' field to the date dicts dates_raw = fix_spelling(dates_raw) # catch data entry errors put_members_in_rooms(dates_raw) # to each date, add entries for each room if opts['--shift']: dt_today = datetime.datetime.now() + datetime.timedelta(days=int(opts['--shift'])) print('Shifted to ', ('%s'%dt_today)[:16]) # dates_raw[] is now a list of {'night':'2016-12-15', 'member':'Peter', # 'where':'master', 'master':'Peter', 'in-law':'', 'middle':'', ...} # always flag any members I failed to assign to a room show_missing_rooms(select_dates(dates_raw, opts, None, 0), opts) if opts['--whosup']: print("Here's who I've heard from:") dates_coming_up = select_dates(dates_raw, opts, -2, 7) whos_up_dict = get_whos_up(dates_coming_up) if whos_up_dict: show_whos_up(whos_up_dict) else: print(' no one!\n') if opts['--raw']: show_raw(dates_raw) # always show the guest fee accounts # give members 2 days before mentioning guest fees dates_guests = [add_guest_fee(event, opts) for event in select_dates(dates_raw, opts, None, -2)] # dates_guests[] includes a 'guest_fee' key (+ paid, - owed) deadbeats, sponsors = get_deadbeat_sponsors(dates_guests) print('\nMembers who owe guest fees:') show_guest_fees(deadbeats) print('\nMembers who have paid their guest fees: (Yay!)') show_guest_fees(sponsors) dates_past = select_dates(dates_raw, opts, None, 0) if opts['--nights']: show_nights(dates_past, opts) if opts['--counts']: member_counts = count_members_in_rooms(dates_past, opts) # member_counts{} = {'Bob':{'in-law':1, 'master':0, 'middle':0, # 'bunk':1, 'loft':0}, 'Mark:{'master':1,...},...} show_room_counts(member_counts) if opts['--peak']: nights_extra = NIGHTS_PEAK[opts['--year']][2:] # ignore Fri, Sat entries print('\nPeak nights starting %s, excluding Fri & Sat nights:'%opts['--year'], end='') str_peak = ', '.join(['%s%s'%('' if i%8 != 0 else '\n ', x) for i, x in enumerate(nights_extra)]) print(str_peak) if __name__ == '__main__': OPTS = docopt.docopt(USE_STR, 
version='0.9.0') main(OPTS)
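# Added illustration (not part of the original script): get_whos_up() collapses per-night
# calendar events into one entry per member, keyed by member name. The events below are
# invented minimal stand-ins for the calendar dicts the script normally builds.
example_events = [
    {'member': 'Bob', 'where': 'middle', 'night_abrev': 'Mon 12/24'},
    {'member': 'Bob', 'where': 'middle', 'night_abrev': 'Tue 12/25'},
]
print(get_whos_up(example_events))
# -> {'Bob': [0, 'Bob', ('middle', 'Mon 12/24'), ('middle', 'Tue 12/25')]}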
py
1a3a171c9818a34fdc2078add62e4fa45eb19afb
# coding: utf8 """ Implementation of finite DPP MCMC samplers: - `add_exchange_delete_sampler` - `add_delete_sampler` - `basis_exchange_sampler` - `zonotope_sampler` .. seealso: `Documentation on ReadTheDocs <https://dppy.readthedocs.io/en/latest/finite_dpps/mcmc_sampling.html>`_ """ import time import numpy as np import scipy.linalg as la # For zonotope sampler from cvxopt import matrix, spmatrix, solvers solvers.options['show_progress'] = False solvers.options['glpk'] = {'msg_lev': 'GLP_MSG_OFF'} from dppy.utils import det_ST, check_random_state ############################################ # Approximate samplers for projection DPPs # ############################################ def dpp_sampler_mcmc(kernel, mode='AED', **params): """ Interface function with initializations and samplers for MCMC schemes. .. seealso:: - :ref:`finite_dpps_mcmc_sampling_add_exchange_delete` - :func:`add_exchange_delete_sampler <add_exchange_delete_sampler>` - :func:`initialize_AED_sampler <initialize_AED_sampler>` - :func:`add_delete_sampler <add_delete_sampler>` - :func:`basis_exchange_sampler <basis_exchange_sampler>` - :func:`initialize_AD_and_E_sampler <initialize_AD_and_E_sampler>` """ rng = check_random_state(params.get('random_state', None)) s_init = params.get('s_init', None) nb_iter = params.get('nb_iter', 10) T_max = params.get('T_max', None) size = params.get('size', None) # = Tr(K) for projection correlation K if mode == 'AED': # Add-Exchange-Delete S'=S+t, S-t+u, S-t if s_init is None: s_init = initialize_AED_sampler(kernel, random_state=rng) sampl = add_exchange_delete_sampler(kernel, s_init, nb_iter, T_max, random_state=rng) elif mode == 'AD': # Add-Delete S'=S+t, S-t if s_init is None: s_init = initialize_AD_and_E_sampler(kernel, random_state=rng) sampl = add_delete_sampler(kernel, s_init, nb_iter, T_max, random_state=rng) elif mode == 'E': # Exchange S'=S-t+u if s_init is None: s_init = initialize_AD_and_E_sampler(kernel, size, random_state=rng) sampl = basis_exchange_sampler(kernel, s_init, nb_iter, T_max, random_state=rng) return sampl def initialize_AED_sampler(kernel, random_state=None): """ .. seealso:: - :func:`add_delete_sampler <add_delete_sampler>` - :func:`basis_exchange_sampler <basis_exchange_sampler>` - :func:`initialize_AED_sampler <initialize_AED_sampler>` - :func:`add_exchange_delete_sampler <add_exchange_delete_sampler>` """ rng = check_random_state(random_state) N = kernel.shape[0] ground_set = np.arange(N) S0, det_S0 = [], 0.0 nb_iter = 100 tol = 1e-9 for _ in range(nb_iter): if det_S0 > tol: break else: T = rng.choice(2 * N, size=N, replace=False) S0 = np.intersect1d(T, ground_set, assume_unique=True) det_S0 = det_ST(kernel, S0) else: raise ValueError('Initialization problem, you may be using a size `k` > rank of the kernel') return S0.tolist() def initialize_AD_and_E_sampler(kernel, size=None, random_state=None): """ .. 
seealso:: - :func:`add_delete_sampler <add_delete_sampler>` - :func:`basis_exchange_sampler <basis_exchange_sampler>` - :func:`initialize_AED_sampler <initialize_AED_sampler>` - :func:`add_exchange_delete_sampler <add_exchange_delete_sampler>` """ rng = check_random_state(random_state) N = kernel.shape[0] S0, det_S0 = [], 0.0 it_max = 100 tol = 1e-9 for _ in range(it_max): if det_S0 > tol: break else: S0 = rng.choice(N, size=size if size else rng.randint(1, N + 1), replace=False) det_S0 = det_ST(kernel, S0) else: raise ValueError('Initialization problem, you may be using a size `k` > rank of the kernel') return S0.tolist() def add_exchange_delete_sampler(kernel, s_init=None, nb_iter=10, T_max=None, random_state=None): """ MCMC sampler for generic DPPs, it is a mix of add/delete and basis exchange MCMC samplers. :param kernel: Kernel martrix :type kernel: array_like :param s_init: Initial sample. :type s_init: list :param nb_iter: Maximum number of iterations performed by the the algorithm. Default is 10. :type nb_iter: int :param T_max: Maximum running time of the algorithm (in seconds). :type T_max: float :param random_state: :type random_state: None, np.random, int, np.random.RandomState :return: list of `nb_iter` approximate sample of DPP(kernel) :rtype: array_like .. seealso:: Algorithm 3 in :cite:`LiJeSr16c` """ rng = check_random_state(random_state) # Initialization N = kernel.shape[0] ground_set = np.arange(N) S0, det_S0 = s_init, det_ST(kernel, s_init) size_S0 = len(S0) # Size of the current sample chain = [S0] # Initialize the collection (list) of sample # Evaluate running time... t_start = time.time() if T_max else 0 for _ in range(1, nb_iter): S1 = S0.copy() # S1 = S0 # Pick one element s in S_0 by index uniformly at random s_ind = rng.choice(size_S0 if size_S0 else N) # , size=1)[0] # Unif t in [N]-S0 t = rng.choice(np.delete(ground_set, S0)) U = rng.rand() ratio = size_S0 / N # Proportion of items in current sample # Add: S1 = S0 + t if U < 0.5 * (1 - ratio)**2: S1.append(t) # S1 = S0 + t # Accept_reject the move det_S1 = det_ST(kernel, S1) # det K_S1 if rng.rand() < det_S1 / det_S0 * (size_S0 + 1) / (N - size_S0): S0, det_S0 = S1, det_S1 chain.append(S1) size_S0 += 1 else: chain.append(S0) # Exchange: S1 = S0 - s + t elif (0.5 * (1 - ratio)**2 <= U) & (U < 0.5 * (1 - ratio)): del S1[s_ind] # S1 = S0 - s S1.append(t) # S1 = S1 + t = S0 - s + t # Accept_reject the move det_S1 = det_ST(kernel, S1) # det K_S1 if rng.rand() < (det_S1 / det_S0): S0, det_S0 = S1, det_S1 chain.append(S1) # size_S0 stays the same else: chain.append(S0) # Delete: S1 = S0 - s elif (0.5 * (1 - ratio) <= U) & (U < 0.5 * (ratio**2 + (1 - ratio))): del S1[s_ind] # S0 - s # Accept_reject the move det_S1 = det_ST(kernel, S1) # det K_S1 if rng.rand() < det_S1 / det_S0 * size_S0 / (N - (size_S0 - 1)): S0, det_S0 = S1, det_S1 chain.append(S1) size_S0 -= 1 else: chain.append(S0) else: chain.append(S0) if T_max: if time.time() - t_start < T_max: break return chain def add_delete_sampler(kernel, s_init, nb_iter=10, T_max=None, random_state=None): """ MCMC sampler for generic DPP(kernel), it performs local moves by removing/adding one element at a time. :param kernel: Kernel martrix :type kernel: array_like :param s_init: Initial sample. :type s_init: list :param nb_iter: Maximum number of iterations performed by the the algorithm. Default is 10. :type nb_iter: int :param T_max: Maximum running time of the algorithm (in seconds). Default is None. 
:type T_max: float :param random_state: :type random_state: None, np.random, int, np.random.RandomState :return: list of `nb_iter` approximate sample of DPP(kernel) :rtype: array_like .. seealso:: Algorithm 1 in :cite:`LiJeSr16c` """ rng = check_random_state(random_state) # Initialization N = kernel.shape[0] # Number of elements # Initialization S0, det_S0 = s_init, det_ST(kernel, s_init) chain = [S0] # Initialize the collection (list) of sample # Evaluate running time... t_start = time.time() if T_max else 0 for _ in range(1, nb_iter): # With proba 1/2 try to add/delete an element if rng.rand() < 0.5: # Perform the potential add/delete move S1 = S0 +/- s S1 = S0.copy() # S1 = S0 s = rng.choice(N) # Uniform item in [N] if s in S1: S1.remove(s) # S1 = S0 - s else: S1.append(s) # S1 = SO + s # Accept_reject the move det_S1 = det_ST(kernel, S1) # det K_S1 if rng.rand() < det_S1 / det_S0: S0, det_S0 = S1, det_S1 chain.append(S1) else: chain.append(S0) else: chain.append(S0) if T_max: if time.time() - t_start < T_max: break return chain def basis_exchange_sampler(kernel, s_init, nb_iter=10, T_max=None, random_state=None): """ MCMC sampler for projection DPPs, based on the basis exchange property. :param kernel: Feature vector matrix, feature vectors are stacked columnwise. It is assumed to be full row rank. :type kernel: array_like :param s_init: Initial sample. :type s_init: list :param nb_iter: Maximum number of iterations performed by the the algorithm. Default is 10. :type nb_iter: int :param T_max: Maximum running time of the algorithm (in seconds). Default is None. :type T_max: float :param random_state: :type random_state: None, np.random, int, np.random.RandomState :return: MCMC chain of approximate sample (stacked row_wise i.e. nb_iter rows). :rtype: array_like .. seealso:: Algorithm 2 in :cite:`LiJeSr16c` """ rng = check_random_state(random_state) # Initialization N = kernel.shape[0] # Number of elements ground_set = np.arange(N) # Ground set size = len(s_init) # Size of the sample (cardinality is fixed) # Initialization S0, det_S0 = s_init, det_ST(kernel, s_init) chain = np.zeros((nb_iter, size), dtype=int) chain[0] = S0 # Evaluate running time... t_start = time.time() if T_max else 0 for it in range(1, nb_iter): # With proba 1/2 try to swap 2 elements if rng.rand() < 0.5: # Perform the potential exchange move S1 = S0 - s + t S1 = S0.copy() # S1 = S0 # Pick one element s in S0 by index uniformly at random s_ind = rng.choice(size) # Pick one element t in [N]\S0 uniformly at random t = rng.choice(np.delete(ground_set, S0)) S1[s_ind] = t # S_1 = S0 - S0[s_ind] + t det_S1 = det_ST(kernel, S1) # det K_S1 # Accept_reject the move w. proba if rng.rand() < det_S1 / det_S0: S0, det_S0 = S1, det_S1 chain[it] = S1 else: # if reject, stay in the same state chain[it] = S0 else: chain[it] = S0 if T_max: if time.time() - t_start < T_max: break return chain.tolist() ############ # ZONOTOPE # ############ def extract_basis(y_sol, eps=1e-5): """ Subroutine of zono_sampling to extract the tile of the zonotope in which a point lies. It extracts the indices of entries of the solution of LP :eq:`eq:Px` that are in (0,1). :param y_sol: Optimal solution of LP :eq:`eq:Px` :type y_sol: list :param eps: Tolerance :math:`y_i^* \\in (\\epsilon, 1-\\epsilon), \\quad \\epsilon \\geq 0` :eps type: float :return: Indices of the feature vectors spanning the tile in which the point is lies. :math:`B_{x} = \\left\\{ i \\, ; \\, y_i^* \\in (0,1) \\right\\}` :rtype: list .. 
seealso:: Algorithm 3 in :cite:`GaBaVa17` - :func:`zono_sampling <zono_sampling>` """ basis = np.where((eps < y_sol) & (y_sol < 1 - eps))[0] return basis def zonotope_sampler(A_zono, **params): """ MCMC based sampler for projection DPPs. The similarity matrix is the orthogonal projection matrix onto the row span of the feature vector matrix. Samples are of size equal to the ransampl_size of the projection matrix also equal to the rank of the feature matrix (assumed to be full row rank). :param A_zono: Feature vector matrix, feature vectors are stacked columnwise. It is assumed to be full row rank. :type A_zono: array_like :param params: Dictionary containing the parameters - ``'lin_obj'`` (list): Linear objective (:math:`c`) of the linear program used to identify the tile in which a point lies. Default is a random Gaussian vector. - ``'x_0'` (list): Initial point. - ``'nb_iter'`` (int): Number of iterations of the MCMC chain. Default is 10. - ``'T_max'`` (float): Maximum running time of the algorithm (in seconds). Default is None. - ``'random_state`` (default None) :type params: dict :return: MCMC chain of approximate samples (stacked row_wise i.e. nb_iter rows). :rtype: array_like .. seealso:: Algorithm 5 in :cite:`GaBaVa17` - :func:`extract_basis <extract_basis>` - :func:`basis_exchange_sampler <basis_exchange_sampler>` """ rng = check_random_state(params.get('random_state', None)) r, N = A_zono.shape # Sizes of r=samples=rank(A_zono), N=ground set # Linear objective c = matrix(params.get('lin_obj', rng.randn(N))) # Initial point x0 = A*u, u~U[0,1]^n x0 = matrix(params.get('x_0', A_zono.dot(rng.rand(N)))) nb_iter = params.get('nb_iter', 10) T_max = params.get('T_max', None) ################### # Linear problems # ################### # Canonical form # min c.T*x min c.T*x # s.t. G*x <= h <=> s.t. G*x + s = h # A*x = b A*x = b # s >= 0 # CVXOPT # =====> solvers.lp(c, G, h, A, b, solver='glpk') ################################################# # To access the tile Z(B_x) # Solve P_x(A,c) ###################################################### # y^* = # argmin c.T*y argmin c.T*y # s.t. A*y = x <=> s.t. A *y = x # 0 <= y <= 1 [ I_n] *y <= [1^n] # [-I_n] [0^n] ###################################################### # Then B_x = \{ i ; y_i^* \in ]0,1[ \} A = spmatrix(0.0, [], [], (r, N)) A[:, :] = A_zono G = spmatrix(0.0, [], [], (2 * N, N)) G[:N, :] = spmatrix(1.0, range(N), range(N)) G[N:, :] = spmatrix(-1.0, range(N), range(N)) # Endpoints of segment # D_x \cap Z(A) = [x+alpha_m*d, x-alpha_M*d] ########################################################################### # alpha_m/_M = argmin +/-alpha argmin [+/-1 0^N].T * [alpha,lambda] # s.t. x + alpha d = A lambda <=> s.t. [-d A] *[alpha, lambda] = x # 0 <= lambda <= 1 [0^N I_N] *[alpha, lambda] <= [1^N] # [0^N -I_N] [0^N] ########################################################################## c_mM = matrix(0.0, (N + 1, 1)) c_mM[0] = 1.0 A_mM = spmatrix(0.0, [], [], (r, N + 1)) A_mM[:, 1:] = A G_mM = spmatrix(0.0, [], [], (2 * N, N + 1)) G_mM[:, 1:] = G # Common h to both kind of LP # cf. 
0 <= y <= 1 and 0 <= lambda <= 1 h = matrix(0.0, (2 * N, 1)) h[:N, :] = 1.0 ################## # Initialization # ################## B_x0 = [] while len(B_x0) != r: # Initial tile B_x0 # Solve P_x0(A,c) y_star = solvers.lp(c, G, h, A, x0, solver='glpk')['x'] # Get the tile B_x0 = extract_basis(np.asarray(y_star)) # Initialize sequence of sample chain = np.zeros((nb_iter, r), dtype=int) chain[0] = B_x0 # Compute the det of the tile (Vol(B)=abs(det(B))) det_B_x0 = la.det(A_zono[:, B_x0]) t_start = time.time() if T_max else 0 for it in range(1, nb_iter): # Take uniform direction d defining D_x0 d = matrix(rng.randn(r, 1)) # Define D_x0 \cap Z(A) = [x0 + alpha_m*d, x0 - alpha_M*d] # Update the constraint [-d A] * [alpha,lambda] = x A_mM[:, 0] = -d # Find alpha_m/M alpha_m = solvers.lp(c_mM, G_mM, h, A_mM, x0, solver='glpk')['x'][0] alpha_M = solvers.lp(-c_mM, G_mM, h, A_mM, x0, solver='glpk')['x'][0] # Propose x1 ~ U_{[x0+alpha_m*d, x0-alpha_M*d]} x1 = x0 + (alpha_m + (alpha_M - alpha_m) * rng.rand()) * d # Proposed tile B_x1 # Solve P_x1(A,c) y_star = solvers.lp(c, G, h, A, x1, solver='glpk')['x'] # Get the tile B_x1 = extract_basis(np.asarray(y_star)) # Accept/Reject the move with proba Vol(B1)/Vol(B0) if len(B_x1) != r: # if extract_basis returned smtg ill conditioned chain[it] = B_x0 else: det_B_x1 = la.det(A_zono[:, B_x1]) if rng.rand() < abs(det_B_x1 / det_B_x0): x0, B_x0, det_B_x0 = x1, B_x1, det_B_x1 chain[it] = B_x1 else: chain[it] = B_x0 if T_max: if time.time() - t_start < T_max: break return chain
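# Added usage sketch (not part of the original module): drawing approximate samples from a
# projection DPP with the basis-exchange chain defined above. The kernel K below is an
# arbitrary rank-3 projection matrix built only for illustration; it relies on the module's
# own numpy (np) and scipy.linalg (la) imports.
rng = np.random.RandomState(0)
N, r = 10, 3
U, _ = la.qr(rng.randn(N, r), mode='economic')   # N x r matrix with orthonormal columns
K = U.dot(U.T)                                   # projection correlation kernel, trace(K) = r

chain = dpp_sampler_mcmc(K, mode='E', size=r, nb_iter=100, random_state=rng)
print(chain[-1])  # final state of the chain: a list of r item indices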
py
1a3a173c95838109275d55b8bd291b0ed5875405
def checkPangram(s):
    # create a list of 26 flags, one per letter, all initially False
    seen = []
    for i in range(26):
        seen.append(False)

    # convert the sentence to lowercase and iterate over it,
    # skipping anything that is not a letter (spaces, punctuation)
    for c in s.lower():
        if c.isalpha():
            # mark the corresponding letter as seen
            seen[ord(c) - ord('a')] = True

    # if any letter is missing, the sentence is not a pangram
    for ch in seen:
        if not ch:
            return False
    return True


# Driver program to test the function above
sentence = input()
if checkPangram(sentence):
    print("Yes")
else:
    print("No")
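# Added illustration (not part of the original file): the same check can be written as a
# set comparison, which avoids the per-letter bookkeeping entirely.
import string

def checkPangramSet(s):
    return set(string.ascii_lowercase) <= set(s.lower())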
py
1a3a1851544efbf203e7f56c6176d8c4cee1cdf8
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
test_readitbetter
----------------------------------

Tests for `readitbetter` module.
"""

import unittest

from readitbetter import readitbetter


class TestReaditbetter(unittest.TestCase):

    def setUp(self):
        pass

    def test_something(self):
        pass

    def tearDown(self):
        pass


if __name__ == '__main__':
    unittest.main()
py
1a3a18dbf23bb878bbb56ae70be0fd0ce3f226e8
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import os

from lanedetect_helpers import process_image
from moviepy.editor import VideoFileClip
from IPython.display import HTML


def lane_detect_images():
    test_data_dir = "test_images/"
    # Read test images
    test_images = os.listdir(test_data_dir)
    for test_image in test_images:
        image = mpimg.imread(os.path.join(test_data_dir, test_image))
        final_image = process_image(image)


def lane_detect_videos():
    test_data_dir = "test_videos/"
    video_out_dir = "test_videos_output/"
    test_videos = os.listdir(test_data_dir)
    for test_video in test_videos:
        test_video_input = os.path.join(test_data_dir, test_video)
        test_video_output = os.path.join(video_out_dir, test_video)
        video_clip = VideoFileClip(test_video_input)
        video_frame = video_clip.fl_image(process_image)
        video_frame.write_videofile(test_video_output, audio=False)


if __name__ == "__main__":
    lane_detect_images()
    # lane_detect_videos()
py
1a3a1919bd6cedfe308f8886bb8dd7f0d2276f17
import logging import json import os import shutil import subprocess from .base import BaseExporter logger = logging.getLogger(__name__) __all__ = ["JSONExporter"] class JSONExporter(BaseExporter): short_name = "json_file" TESTS_DIR_NAME = "tests" SOLUTION_DIR_NAME = "solutions" VALIDATOR_DIR_NAME = "validators" SUBTASKS_DIR_NAME = "subtasks" CHECKER_DIR_NAME = "checker" GRADER_DIR_NAME = "graders" OTHER_FILES_DIR_NAME = "others" def __init__(self, revision): super().__init__(revision) def _do_export(self): def export_resources_to_path(prefix): for resource in self.revision.resource_set.all(): self.extract_from_storage_to_path( resource.file, os.path.join( prefix, resource.name ) ) def generate_clean_name(name): return name.replace(' ', '_').lower() # Exporting problem global data problem_data = self.revision.problem_data problem_data_dict = { "code": problem_data.code_name, "name": problem_data.title, "time_limit": problem_data.time_limit, "memory_limit": problem_data.memory_limit, "score_precision": problem_data.score_precision, } if problem_data.task_type: problem_data_dict.update({ "task_type": problem_data.task_type, "task_type_params": problem_data.task_type_parameters, }) self.write_to_file( "problem.json".format(problem_code=problem_data.code_name), json.dumps(problem_data_dict) ) self.write_to_file( "statement.md", self.revision.statement_set.get().content ) # Exporting problem files self.create_directory(self.OTHER_FILES_DIR_NAME) for file in self.revision.problem.files.all(): self.extract_from_storage_to_path( file, os.path.join( self.OTHER_FILES_DIR_NAME, file.name ) ) # Exporting testcases self.create_directory(self.TESTS_DIR_NAME) ignored_testcases = [] for testcase in self.revision.testcase_set.all(): if not testcase.input_file_generated() or not testcase.output_file_generated(): ignored_testcases.append(testcase) logger.warning("Testcase {} couldn't be generated. Skipping".format(testcase.name)) continue self.extract_from_storage_to_path( testcase.input_file, os.path.join( self.TESTS_DIR_NAME, "{testcase_name}.in".format(testcase_name=generate_clean_name(testcase.name)) ), ) self.extract_from_storage_to_path( testcase.output_file, os.path.join( "tests", "{testcase_name}.out".format(testcase_name=generate_clean_name(testcase.name)) ) ) # Exporting graders self.create_directory(self.GRADER_DIR_NAME) for grader in self.revision.grader_set.all(): self.extract_from_storage_to_path( grader.code, os.path.join( self.GRADER_DIR_NAME, grader.name, ) ) # Exporting subtasks self.create_directory(self.SUBTASKS_DIR_NAME) for subtask in self.revision.subtasks.all(): self.write_to_file( os.path.join( self.SUBTASKS_DIR_NAME, "{subtask_index:02}-{subtask_name}.json".format( subtask_index=subtask.index, subtask_name=subtask.name, )), json.dumps( { "score": subtask.score, "testcases": [ generate_clean_name(t.name) for t in subtask.testcases.all() ] } ) ) # Exporting solutions self.create_directory(self.SOLUTION_DIR_NAME) for solution in self.revision.solution_set.all(): if solution.verdict: solution_dir = os.path.join(self.SOLUTION_DIR_NAME, generate_clean_name(solution.verdict.name)) else: solution_dir = os.path.join(self.SOLUTION_DIR_NAME, "unknown_verdict") self.create_directory(solution_dir) self.extract_from_storage_to_path(solution.code, os.path.join(solution_dir, solution.name)) # We don't export generators. 
Tests are already generated so there is no use for them # Exporting checker( We only extract main checker) self.create_directory(self.CHECKER_DIR_NAME) for resource in self.revision.checker_set.all(): self.extract_from_storage_to_path( resource.file, os.path.join(self.CHECKER_DIR_NAME, resource.name) ) checker = problem_data.checker if checker is not None: self.extract_from_storage_to_path( checker.file, os.path.join(self.CHECKER_DIR_NAME, "checker{ext}".format( ext=os.path.splitext(checker.name)[1] )) ) export_resources_to_path("checker") # Exporting validators self.create_directory(self.VALIDATOR_DIR_NAME) for validator in self.revision.validator_set.all(): dirs = [] for subtask in validator.subtasks: dirs.append(subtask.name) for dir in dirs: full_dir = os.path.join(self.VALIDATOR_DIR_NAME, dir) self.create_directory(full_dir) self.extract_from_storage_to_path( validator.file, os.path.join( full_dir, validator.name ) ) export_resources_to_path("validators") # Exporting public self.create_directory("repo") os.system('git --git-dir="{repo_dir}" worktree add {work_dir} {commit_id}'.format( repo_dir=self.revision.repository_path, work_dir=self.get_absolute_path("repo"), commit_id=self.revision.commit_id )) tests_dir_in_repo = os.path.join('repo', 'tests') self.create_directory(tests_dir_in_repo) for testcase in self.revision.testcase_set.all(): if not testcase.input_file_generated() or not testcase.output_file_generated(): ignored_testcases.append(testcase) logger.warning("Testcase {} couldn't be generated. Skipping".format(testcase.name)) continue self.extract_from_storage_to_path( testcase.input_file, os.path.join( tests_dir_in_repo, "{testcase_name}.in".format(testcase_name=testcase.name) ), ) self.extract_from_storage_to_path( testcase.output_file, os.path.join( tests_dir_in_repo, "{testcase_name}.out".format(testcase_name=testcase.name) ) ) try: print(subprocess.check_output(['tps', 'make-public'], cwd=self.get_absolute_path("repo"), stderr=subprocess.STDOUT)) except subprocess.CalledProcessError as e: print(e.output) raise e self.create_directory("attachments") try: shutil.move(os.path.join(self.get_absolute_path("repo"), "{}.zip".format(problem_data.code_name)), self.get_absolute_path("attachments")) except OSError: try: shutil.move(os.path.join(self.get_absolute_path("repo"), "{}.zip".format(problem_data.code_name)), self.get_absolute_path("attachments")) except OSError as e: logger.error("Public archive not found") raise e shutil.rmtree(self.get_absolute_path("repo"))
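# Added summary (reconstructed from _do_export() above, for orientation only): a successful
# export produces roughly the following layout under the export root:
#
#   problem.json                     statement.md
#   others/<problem files>           tests/<name>.in, tests/<name>.out
#   graders/<grader sources>         subtasks/NN-<subtask>.json
#   solutions/<verdict>/<source>     checker/checker.<ext> (+ checker resources)
#   validators/<subtask>/<validator> (+ validator resources)
#   attachments/<code_name>.zip      (built by `tps make-public` in the temporary repo/ worktree)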
py
1a3a1abc9f641dbe7103719c1d6b936be031c0d2
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Callable, Dict, Optional, Sequence, Tuple from google.api_core import grpc_helpers # type: ignore from google.api_core import gapic_v1 # type: ignore import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from google.ads.googleads.v7.resources.types import ad_group_criterion_label from google.ads.googleads.v7.services.types import ad_group_criterion_label_service from .base import AdGroupCriterionLabelServiceTransport, DEFAULT_CLIENT_INFO class AdGroupCriterionLabelServiceGrpcTransport(AdGroupCriterionLabelServiceTransport): """gRPC backend transport for AdGroupCriterionLabelService. Service to manage labels on ad group criteria. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ def __init__(self, *, host: str = 'googleads.googleapis.com', credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. channel (Optional[grpc.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or application default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. 
quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ self._ssl_channel_credentials = ssl_channel_credentials if channel: # Sanity check: Ensure that channel and credentials are not both # provided. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning) host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443" if credentials is None: credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id) # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() ssl_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: ssl_credentials = SslCredentials().ssl_credentials # create a new channel. The provided one is ignored. self._grpc_channel = type(self).create_channel( host, credentials=credentials, credentials_file=credentials_file, ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" if credentials is None: credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES) # create a new channel. The provided one is ignored. self._grpc_channel = type(self).create_channel( host, credentials=credentials, ssl_credentials=ssl_channel_credentials, scopes=self.AUTH_SCOPES, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._stubs = {} # type: Dict[str, Callable] # Run the base constructor. super().__init__( host=host, credentials=credentials, client_info=client_info, ) @classmethod def create_channel(cls, host: str = 'googleads.googleapis.com', credentials: ga_credentials.Credentials = None, scopes: Optional[Sequence[str]] = None, **kwargs) -> grpc.Channel: """Create and return a gRPC channel object. Args: address (Optionsl[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: grpc.Channel: A gRPC channel object. """ return grpc_helpers.create_channel( host, credentials=credentials, scopes=scopes or cls.AUTH_SCOPES, **kwargs ) def close(self): self.grpc_channel.close() @property def grpc_channel(self) -> grpc.Channel: """Return the channel designed to connect to this service. 
""" return self._grpc_channel @property def get_ad_group_criterion_label(self) -> Callable[ [ad_group_criterion_label_service.GetAdGroupCriterionLabelRequest], ad_group_criterion_label.AdGroupCriterionLabel]: r"""Return a callable for the get ad group criterion label method over gRPC. Returns the requested ad group criterion label in full detail. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__ Returns: Callable[[~.GetAdGroupCriterionLabelRequest], ~.AdGroupCriterionLabel]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'get_ad_group_criterion_label' not in self._stubs: self._stubs['get_ad_group_criterion_label'] = self.grpc_channel.unary_unary( '/google.ads.googleads.v7.services.AdGroupCriterionLabelService/GetAdGroupCriterionLabel', request_serializer=ad_group_criterion_label_service.GetAdGroupCriterionLabelRequest.serialize, response_deserializer=ad_group_criterion_label.AdGroupCriterionLabel.deserialize, ) return self._stubs['get_ad_group_criterion_label'] @property def mutate_ad_group_criterion_labels(self) -> Callable[ [ad_group_criterion_label_service.MutateAdGroupCriterionLabelsRequest], ad_group_criterion_label_service.MutateAdGroupCriterionLabelsResponse]: r"""Return a callable for the mutate ad group criterion labels method over gRPC. Creates and removes ad group criterion labels. Operation statuses are returned. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `DatabaseError <>`__ `FieldError <>`__ `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__ Returns: Callable[[~.MutateAdGroupCriterionLabelsRequest], ~.MutateAdGroupCriterionLabelsResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'mutate_ad_group_criterion_labels' not in self._stubs: self._stubs['mutate_ad_group_criterion_labels'] = self.grpc_channel.unary_unary( '/google.ads.googleads.v7.services.AdGroupCriterionLabelService/MutateAdGroupCriterionLabels', request_serializer=ad_group_criterion_label_service.MutateAdGroupCriterionLabelsRequest.serialize, response_deserializer=ad_group_criterion_label_service.MutateAdGroupCriterionLabelsResponse.deserialize, ) return self._stubs['mutate_ad_group_criterion_labels'] __all__ = ( 'AdGroupCriterionLabelServiceGrpcTransport', )
py
1a3a1aeac13c10c46875439514c97457879e46e7
from .yolov3 import YOLOV3
py
1a3a1bb03cbf62581cdb694ae0b14ed5661c94a4
# -*- coding: utf-8 -*- import copy import json from freezegun import freeze_time from mantarray_desktop_app import MICRO_TO_BASE_CONVERSION from mantarray_desktop_app import SERIAL_COMM_DEFAULT_DATA_CHANNEL from mantarray_desktop_app import START_MANAGED_ACQUISITION_COMMUNICATION from mantarray_desktop_app import STOP_MANAGED_ACQUISITION_COMMUNICATION import numpy as np from stdlib_utils import drain_queue from stdlib_utils import invoke_process_run_and_check_errors from stdlib_utils import put_object_into_queue_and_raise_error_if_eventually_still_empty from ..fixtures import QUEUE_CHECK_TIMEOUT_SECONDS from ..fixtures_data_analyzer import fixture_four_board_analyzer_process_beta_2_mode from ..fixtures_data_analyzer import set_magnetometer_config from ..fixtures_file_writer import GENERIC_BOARD_MAGNETOMETER_CONFIGURATION from ..helpers import confirm_queue_is_eventually_empty from ..helpers import confirm_queue_is_eventually_of_size from ..parsed_channel_data_packets import SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS __fixtures__ = [ fixture_four_board_analyzer_process_beta_2_mode, ] @freeze_time("2021-06-15 16:39:10.120589") def test_DataAnalyzerProcess__sends_outgoing_data_dict_to_main_as_soon_as_it_retrieves_a_data_packet_from_file_writer__and_sends_data_available_message_to_main( four_board_analyzer_process_beta_2_mode, mocker ): da_process = four_board_analyzer_process_beta_2_mode["da_process"] from_main_queue = four_board_analyzer_process_beta_2_mode["from_main_queue"] to_main_queue = four_board_analyzer_process_beta_2_mode["to_main_queue"] incoming_data_queue = four_board_analyzer_process_beta_2_mode["board_queues"][0][0] outgoing_data_queue = four_board_analyzer_process_beta_2_mode["board_queues"][0][1] # mock so that well metrics don't populate outgoing data queue mocker.patch.object(da_process, "_dump_outgoing_well_metrics", autospec=True) # mock so performance log messages don't populate queue to main mocker.patch.object(da_process, "_handle_performance_logging", autospec=True) da_process.init_streams() # set config arbitrary sampling period test_sampling_period = 1000 set_magnetometer_config( four_board_analyzer_process_beta_2_mode, { "magnetometer_config": GENERIC_BOARD_MAGNETOMETER_CONFIGURATION, "sampling_period": test_sampling_period, }, ) # start managed_acquisition put_object_into_queue_and_raise_error_if_eventually_still_empty( dict(START_MANAGED_ACQUISITION_COMMUNICATION), from_main_queue ) invoke_process_run_and_check_errors(da_process) confirm_queue_is_eventually_of_size(to_main_queue, 1) # remove message to main to_main_queue.get(timeout=QUEUE_CHECK_TIMEOUT_SECONDS) invoke_process_run_and_check_errors(da_process) confirm_queue_is_eventually_empty(outgoing_data_queue) confirm_queue_is_eventually_empty(to_main_queue) test_data_packet = copy.deepcopy(SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS) put_object_into_queue_and_raise_error_if_eventually_still_empty(test_data_packet, incoming_data_queue) invoke_process_run_and_check_errors(da_process) confirm_queue_is_eventually_of_size(outgoing_data_queue, 1) confirm_queue_is_eventually_of_size(to_main_queue, 1) # test data dump waveform_data_points = dict() for well_idx in range(24): default_channel_data = test_data_packet[well_idx][SERIAL_COMM_DEFAULT_DATA_CHANNEL] pipeline = da_process.get_pipeline_template().create_pipeline() pipeline.load_raw_gmr_data( np.array([test_data_packet["time_indices"], default_channel_data], np.int64), np.zeros((2, len(default_channel_data))), ) compressed_data = pipeline.get_force() 
waveform_data_points[well_idx] = { "x_data_points": compressed_data[0].tolist(), "y_data_points": (compressed_data[1] * MICRO_TO_BASE_CONVERSION).tolist(), } expected_outgoing_dict = { "waveform_data": {"basic_data": {"waveform_data_points": waveform_data_points}}, "earliest_timepoint": test_data_packet["time_indices"][0].item(), "latest_timepoint": test_data_packet["time_indices"][-1].item(), "num_data_points": len(test_data_packet["time_indices"]), } outgoing_msg = outgoing_data_queue.get(timeout=QUEUE_CHECK_TIMEOUT_SECONDS) assert outgoing_msg["data_type"] == "waveform_data" assert outgoing_msg["data_json"] == json.dumps(expected_outgoing_dict) # test message sent to main outgoing_msg = to_main_queue.get(timeout=QUEUE_CHECK_TIMEOUT_SECONDS) expected_msg = { "communication_type": "data_available", "timestamp": "2021-06-15 16:39:10.120589", "num_data_points": len(test_data_packet["time_indices"]), "earliest_timepoint": test_data_packet["time_indices"][0], "latest_timepoint": test_data_packet["time_indices"][-1], } assert outgoing_msg == expected_msg def test_DataAnalyzerProcess__does_not_process_data_packets_after_receiving_stop_managed_acquisition_command_until_receiving_first_packet_of_new_stream( four_board_analyzer_process_beta_2_mode, mocker ): da_process = four_board_analyzer_process_beta_2_mode["da_process"] from_main_queue = four_board_analyzer_process_beta_2_mode["from_main_queue"] to_main_queue = four_board_analyzer_process_beta_2_mode["to_main_queue"] incoming_data_queue = four_board_analyzer_process_beta_2_mode["board_queues"][0][0] # mock so these since not using real data mocked_process_data = mocker.patch.object( da_process, "_process_beta_2_data", autospec=True, return_value={} ) invoke_process_run_and_check_errors(da_process, perform_setup_before_loop=True) # set config arbitrary sampling period test_sampling_period = 10000 set_magnetometer_config( four_board_analyzer_process_beta_2_mode, { "magnetometer_config": GENERIC_BOARD_MAGNETOMETER_CONFIGURATION, "sampling_period": test_sampling_period, }, ) # start managed_acquisition put_object_into_queue_and_raise_error_if_eventually_still_empty( dict(START_MANAGED_ACQUISITION_COMMUNICATION), from_main_queue ) invoke_process_run_and_check_errors(da_process) # send first packet of first stream and make sure it is processed test_data_packet = copy.deepcopy(SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS) test_data_packet["is_first_packet_of_stream"] = True put_object_into_queue_and_raise_error_if_eventually_still_empty(test_data_packet, incoming_data_queue) invoke_process_run_and_check_errors(da_process) assert mocked_process_data.call_count == 1 # send another packet of first stream and make sure it is processed test_data_packet = copy.deepcopy(SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS) test_data_packet["is_first_packet_of_stream"] = False put_object_into_queue_and_raise_error_if_eventually_still_empty(test_data_packet, incoming_data_queue) invoke_process_run_and_check_errors(da_process) assert mocked_process_data.call_count == 2 # stop managed acquisition and make sure next data packet in the first stream is not processed put_object_into_queue_and_raise_error_if_eventually_still_empty( dict(STOP_MANAGED_ACQUISITION_COMMUNICATION), from_main_queue ) invoke_process_run_and_check_errors(da_process) test_data_packet = copy.deepcopy(SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS) test_data_packet["is_first_packet_of_stream"] = False put_object_into_queue_and_raise_error_if_eventually_still_empty(test_data_packet, incoming_data_queue) 
invoke_process_run_and_check_errors(da_process) assert mocked_process_data.call_count == 2 # start managed acquisition again and make sure next data packet in the first stream is not processed put_object_into_queue_and_raise_error_if_eventually_still_empty( dict(START_MANAGED_ACQUISITION_COMMUNICATION), from_main_queue ) invoke_process_run_and_check_errors(da_process) test_data_packet = copy.deepcopy(SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS) test_data_packet["is_first_packet_of_stream"] = False put_object_into_queue_and_raise_error_if_eventually_still_empty(test_data_packet, incoming_data_queue) invoke_process_run_and_check_errors(da_process) assert mocked_process_data.call_count == 2 # send first data packet from second stream and make sure it is processed test_data_packet = copy.deepcopy(SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS) test_data_packet["is_first_packet_of_stream"] = True put_object_into_queue_and_raise_error_if_eventually_still_empty(test_data_packet, incoming_data_queue) invoke_process_run_and_check_errors(da_process) assert mocked_process_data.call_count == 3 # prevent BrokenPipeErrors drain_queue(to_main_queue) def test_DataAnalyzerProcess__processes_incoming_stim_packet(four_board_analyzer_process_beta_2_mode, mocker): # TODO Tanner (10/20/21): add to this test when ready to add stim handling da_process = four_board_analyzer_process_beta_2_mode["da_process"] incoming_data_queue = four_board_analyzer_process_beta_2_mode["board_queues"][0][0] # can probably remove this spy and assertion once actual handling is implemented spied_process_stim_packet = mocker.spy(da_process, "_process_stim_packet") test_stim_packet = {"data_type": "stimulation"} put_object_into_queue_and_raise_error_if_eventually_still_empty(test_stim_packet, incoming_data_queue) invoke_process_run_and_check_errors(da_process) spied_process_stim_packet.assert_called_once_with(test_stim_packet)
py
1a3a1bd87909f2cf4867ca6fb47cd67d932771a5
import setuptools

with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="findmylibs",
    version="0.0.1",
    author="The Nomadic Coder",
    author_email="[email protected]",
    description="A package to probe installed libraries",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/atemysemicolon/findMyLibs",
    install_requires=['cmake'],
    packages=["findmylibs"],
    entry_points={
        'console_scripts': [
            'findmylibs = findmylibs.__main__:main',
        ]},
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX :: Linux",
    ],
)
py
1a3a1c717b1aa88212c72e2dcd1d49ef5617c939
class Solution:
    def majorityElement(self, nums: List[int]) -> List[int]:
        a = list(set(nums))
        b = []
        for x in a:
            if nums.count(x) > len(nums) // 3:
                b.append(x)
        return b
py
1a3a1c7aba94f687ee19c3fd90167a161563ca3f
import numpy as np

trials = 1_000_000
dice = int(input("Enter the number of dice: "))
for i in np.arange(1 * dice, dice * 6 + 1):
    found = 0
    for _ in np.arange(trials):
        total = 0
        for _ in np.arange(dice):
            total += np.random.randint(1, 7)
        if total == i:
            found += 1
    print("Sum Value :", i, "probability", np.round((found / trials) * 100, 4), "%")
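# Added cross-check (not part of the original script): the exact distribution of a sum of
# fair dice can be obtained by repeated convolution of the single-die distribution, which is
# handy for sanity-checking the Monte-Carlo estimates printed above.
def exact_sum_probabilities(n_dice):
    single = np.full(6, 1 / 6)            # P(face) for one die, faces 1..6
    dist = single
    for _ in range(n_dice - 1):
        dist = np.convolve(dist, single)  # distribution of the running sum
    # dist[k] is P(sum == n_dice + k), for sums n_dice .. 6*n_dice
    return {s: p for s, p in zip(range(n_dice, 6 * n_dice + 1), dist)}

print(exact_sum_probabilities(2)[7])      # 1/6 for two dice summing to 7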
py
1a3a1cae058c5dcd405dcb0dec51ebcd8786bc80
#!/usr/bin/env python3

############################################################################################
#   Program purpose: Find all the common characters in lexicographical order from
#                    two given lower case strings. If there are no common letters,
#                    print "No common characters".
#   Program Author : Happi Yvan <[email protected]>
#   Creation Date  : October 30, 2019
############################################################################################

from collections import Counter


def find_common_chars(str1: str, str2: str) -> dict:
    data = {'found': False, 'info': ''}
    d1 = Counter(str1)
    d2 = Counter(str2)
    common_dict = d1 & d2
    if len(common_dict) == 0:
        data['info'] = 'No common characters'
        return data
    data['found'] = True
    # list of common elements
    common_chars = list(common_dict.elements())
    common_chars = sorted(common_chars)
    data['data'] = ''.join(common_chars)
    return data


if __name__ == "__main__":
    str1 = 'Python'
    str2 = 'PHP'
    data_info = find_common_chars(str1=str1, str2=str2)
    if data_info['found']:
        print(f"Two strings: '{str1}' and '{str2}': {data_info['data']}")
    else:
        print(f"Two strings: '{str1}' and '{str2}': {data_info['info']}")

    str1 = 'Java'
    str2 = 'PHP'
    data_info = find_common_chars(str1=str1, str2=str2)
    if data_info['found']:
        print(f"Two strings: '{str1}' and '{str2}': {data_info['data']}")
    else:
        print(f"Two strings: '{str1}' and '{str2}': {data_info['info']}")
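# Added illustration (not part of the original program): the core step above is multiset
# intersection, where '&' keeps the element-wise minimum of the two counts.
from collections import Counter
print(Counter('aabbc') & Counter('abbbd'))   # Counter({'b': 2, 'a': 1})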
py
1a3a1d90dfefc8a23568af35ac4106ab01586417
""" Copyright 2021 Inmanta Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Contact: [email protected] """ from setuptools import setup, find_packages from os import path requires = [ 'inmanta-core', 'intervaltree' ] # read the contents of your README file this_directory = path.abspath(path.dirname(__file__)) with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name="inmantals", package_dir={"": "src"}, packages=find_packages("src"), install_requires=requires, version="1.2.0", description="Inmanta Language Server", long_description=long_description, long_description_content_type='text/markdown', author="Inmanta", author_email="[email protected]", license="Apache Software License", url="https://github.com/inmanta/vscode-inmanta", keywords=["ide", "language-server", "vscode", "inmanta"], classifiers=["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Telecommunications Industry", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Topic :: System :: Systems Administration", "Topic :: Utilities"], entry_points={ 'console_scripts': [ 'inmanta-language-server-tcp = inmantals.tcpserver:main', ], }, )
py
1a3a1dd5096381691d086849cb9f68f6641518ba
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import os # import all class inside framework into fluid module from . import framework from .framework import * # import all class inside executor into fluid module from . import executor from .executor import * from . import data_feed_desc from .data_feed_desc import * from . import dataset from .dataset import * from . import trainer_desc from . import inferencer from . import io from . import evaluator from . import initializer from . import layers from . import dygraph from . import contrib from . import nets from . import optimizer from . import backward from .backward import gradients from . import regularizer from . import average from . import metrics from . import transpiler from . import incubate from . import distribute_lookup_table from .param_attr import ParamAttr, WeightNormParamAttr from .data_feeder import DataFeeder from .core import LoDTensor, LoDTensorArray, CPUPlace, CUDAPlace, CUDAPinnedPlace, Scope, _Scope from .incubate import fleet from .incubate import data_generator from .transpiler import DistributeTranspiler, \ memory_optimize, release_memory, DistributeTranspilerConfig from .lod_tensor import create_lod_tensor, create_random_int_lodtensor from . import clip from . import dygraph_grad_clip from . import profiler from . import unique_name from . import recordio_writer from . import parallel_executor from .parallel_executor import * from . import compiler from .compiler import * from paddle.fluid.layers.math_op_patch import monkey_patch_variable from . import install_check from .dygraph.nn import * from .dygraph.layers import * Tensor = LoDTensor __all__ = framework.__all__ + executor.__all__ + \ trainer_desc.__all__ + inferencer.__all__ + transpiler.__all__ + \ parallel_executor.__all__ + lod_tensor.__all__ + \ data_feed_desc.__all__ + compiler.__all__ + backward.__all__ + [ 'io', 'initializer', 'layers', 'contrib', 'dygraph', 'transpiler', 'nets', 'optimizer', 'learning_rate_decay', 'backward', 'regularizer', 'LoDTensor', 'LoDTensorArray', 'CPUPlace', 'CUDAPlace', 'CUDAPinnedPlace', 'Tensor', 'ParamAttr', 'WeightNormParamAttr', 'DataFeeder', 'clip', 'dygraph_grad_clip', 'profiler', 'unique_name', 'recordio_writer', 'Scope', 'install_check', ] def __bootstrap__(): """ Enable reading gflags from environment variables. Returns: None """ import sys import os import platform from . import core in_test = 'unittest' in sys.modules try: num_threads = int(os.getenv('OMP_NUM_THREADS', '1')) except ValueError: num_threads = 1 if num_threads > 1: print( 'WARNING: OMP_NUM_THREADS set to {0}, not 1. The computation ' 'speed will not be optimized if you use data parallel. 
It will ' 'fail if this PaddlePaddle binary is compiled with OpenBlas since' ' OpenBlas does not support multi-threads.'.format(num_threads), file=sys.stderr) print('PLEASE USE OMP_NUM_THREADS WISELY.', file=sys.stderr) os.environ['OMP_NUM_THREADS'] = str(num_threads) sysstr = platform.system() read_env_flags = [ 'check_nan_inf', 'benchmark', 'eager_delete_scope', 'initial_cpu_memory_in_mb', 'init_allocated_mem', 'free_idle_memory', 'paddle_num_threads', "dist_threadpool_size", 'eager_delete_tensor_gb', 'fast_eager_deletion_mode', 'memory_fraction_of_eager_deletion', 'allocator_strategy', 'reader_queue_speed_test_mode', 'print_sub_graph_dir', 'pe_profile_fname', 'inner_op_parallelism', 'enable_parallel_graph', 'fuse_parameter_groups_size', 'multiple_of_cupti_buffer_size', 'fuse_parameter_memory_size', 'tracer_profile_fname', 'dygraph_debug' ] if 'Darwin' not in sysstr: read_env_flags.append('use_pinned_memory') if os.name != 'nt': read_env_flags.append('cpu_deterministic') if core.is_compiled_with_mkldnn(): read_env_flags.append('use_mkldnn') if core.is_compiled_with_ngraph(): read_env_flags.append('use_ngraph') if core.is_compiled_with_dist(): #env for rpc read_env_flags.append('rpc_deadline') read_env_flags.append('rpc_server_profile_path') read_env_flags.append('enable_rpc_profiler') read_env_flags.append('rpc_send_thread_num') read_env_flags.append('rpc_get_thread_num') read_env_flags.append('rpc_prefetch_thread_num') read_env_flags.append('rpc_disable_reuse_port') # env for communicator read_env_flags.append('communicator_independent_recv_thread') read_env_flags.append('communicator_send_queue_size') read_env_flags.append('communicator_min_send_grad_num_before_recv') read_env_flags.append('communicator_thread_pool_size') read_env_flags.append('communicator_max_merge_var_num') read_env_flags.append('communicator_fake_rpc') read_env_flags.append('communicator_send_wait_times') if core.is_compiled_with_brpc(): read_env_flags.append('max_body_size') #set brpc max body size os.environ['FLAGS_max_body_size'] = "2147483647" if core.is_compiled_with_cuda(): read_env_flags += [ 'fraction_of_gpu_memory_to_use', 'initial_gpu_memory_in_mb', 'reallocate_gpu_memory_in_mb', 'cudnn_deterministic', 'enable_cublas_tensor_op_math', 'conv_workspace_size_limit', 'cudnn_exhaustive_search', 'selected_gpus', 'sync_nccl_allreduce', 'limit_of_tmp_allocation', 'times_excess_than_required_tmp_allocation', 'enable_inplace_whitelist', 'cudnn_batchnorm_spatial_persistent' ] core.init_gflags([sys.argv[0]] + ["--tryfromenv=" + ",".join(read_env_flags)]) core.init_glog(sys.argv[0]) # don't init_p2p when in unittest to save time. core.init_devices(not in_test) # TODO(panyx0718): Avoid doing complex initialization logic in __init__.py. # Consider paddle.init(args) or paddle.main(args) monkey_patch_variable() __bootstrap__()
py
1a3a1ebea23549497923ceaca45b0beed11946f4
#!/usr/bin/python3 # -*- coding: utf-8 -*- import logging import sys import time import _ssl from sleekxmpp import ClientXMPP import config import events from common import VERSION class IdleBot(ClientXMPP): def __init__(self, jid, password, rooms, nick): ClientXMPP.__init__(self, jid, password) self.ssl_version = _ssl.PROTOCOL_TLSv1_2 self.rooms = rooms self.nick = nick self.add_event_handler('session_start', self.session_start) self.add_event_handler('groupchat_message', self.muc_message) self.add_event_handler('disconnected', self.disconnected) self.add_event_handler('presence_error', self.disconnected) self.add_event_handler('session_end', self.disconnected) self.priority = 0 self.status = None self.show = None self.logger = logging.getLogger(__name__) for room in self.rooms: self.add_event_handler('muc::%s::got_offline' % room, self.muc_offline) def talked_to_me(self, text): return text[:len(self.nick)].lower() == self.nick.lower() def disconnected(self, _): self.logger.warn("Disconnected! dbg: {}".format(str(_))) self.disconnect(wait=True) def session_start(self, _): self.get_roster() self.send_presence(ppriority=self.priority, pstatus=self.status, pshow=self.show) for room in self.rooms: self.logger.info('%s: joining' % room) ret = self.plugin['xep_0045'].joinMUC( room, self.nick, wait=True ) self.logger.info('%s: joined with code %s' % (room, ret)) def muc_message(self, msg_obj): """ Handle muc messages, return if irrelevant content or die by hangup. :param msg_obj: :return: """ # don't talk to yourself if msg_obj['mucnick'] == self.nick or 'groupchat' != msg_obj['type']: return False elif self.talked_to_me(msg_obj['body']) and 'hangup' in msg_obj['body']: self.logger.warn("got 'hangup' from '%s': '%s'" % ( msg_obj['mucnick'], msg_obj['body'] )) self.hangup() return False # elif msg_obj['mucnick'] in config.runtimeconf_get("other_bots", ()): # self.logger.debug("not talking to the other bot named {}".format( msg_obj['mucnick'])) # return False else: return True def muc_offline(self, msg_obj): if 'muc' in msg_obj.values: room = msg_obj.values['muc']['room'] user = msg_obj.values['muc']['nick'] if user == config.conf_get('bot_nickname'): self.logger.warn("Left my room, rejoin") self.plugin['xep_0045'].joinMUC( room, self.nick, wait=True ) def hangup(self): """ disconnect and exit """ self.disconnect(wait=True) def start(botclass, active=False): logging.basicConfig( level=config.conf_get('loglevel'), format=sys.argv[0] + ' %(asctime)s %(levelname).1s %(funcName)-15s %(message)s' ) logger = logging.getLogger(__name__) logger.info(VERSION) jid = config.conf_get('jid') if '/' not in jid: jid = '%s/%s' % (jid, botclass.__name__) bot = botclass( jid=jid, password=config.conf_get('password'), rooms=config.conf_get('rooms'), nick=config.conf_get('bot_nickname') ) bot.connect() bot.register_plugin('xep_0045') bot.register_plugin('xep_0199', {'keepalive': True}) bot.register_plugin('xep_0308') bot.process() config.runtimeconf_set('start_time', -time.time()) if active: pass events.event_loop.start() if '__main__' == __name__: start(IdleBot)