max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
samples/snippets/feature_store_service/create_entity_type_sample.py | lclc19/python-aiplatform | 180 | 12707330 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Create an entity type so that you can create its related features.
# See https://cloud.google.com/vertex-ai/docs/featurestore/setup before running
# the code snippet
# [START aiplatform_create_entity_type_sample]
from google.cloud import aiplatform
def create_entity_type_sample(
project: str,
featurestore_id: str,
entity_type_id: str,
description: str = "sample entity type",
location: str = "us-central1",
api_endpoint: str = "us-central1-aiplatform.googleapis.com",
timeout: int = 300,
):
    # The AI Platform services require regional API endpoints, which must be in
    # the same region as, or in a multi-region that overlaps with, the
    # Feature Store location.
client_options = {"api_endpoint": api_endpoint}
# Initialize client that will be used to create and send requests.
# This client only needs to be created once, and can be reused for multiple requests.
client = aiplatform.gapic.FeaturestoreServiceClient(client_options=client_options)
parent = f"projects/{project}/locations/{location}/featurestores/{featurestore_id}"
create_entity_type_request = aiplatform.gapic.CreateEntityTypeRequest(
parent=parent,
entity_type_id=entity_type_id,
entity_type=aiplatform.gapic.EntityType(description=description),
)
lro_response = client.create_entity_type(request=create_entity_type_request)
print("Long running operation:", lro_response.operation.name)
create_entity_type_response = lro_response.result(timeout=timeout)
print("create_entity_type_response:", create_entity_type_response)
# [END aiplatform_create_entity_type_sample]
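# A minimal invocation sketch (not part of the upstream sample). The project,
# featurestore and entity type IDs below are hypothetical placeholders, and the
# call will only succeed with valid Google Cloud credentials and an existing
# featurestore:
if __name__ == "__main__":
    create_entity_type_sample(
        project="my-gcp-project",            # hypothetical project ID
        featurestore_id="my_featurestore",   # hypothetical featurestore ID
        entity_type_id="users",              # hypothetical entity type ID
    )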
|
pyramda/relation/min.py | sergiors/pyramda | 124 | 12707335 |
from pyramda.function.curry import curry
import builtins
@curry
def min(xs):
return builtins.min(xs)
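# Usage sketch (illustrative, not part of the original module; assumes pyramda
# is importable). The curried wrapper simply delegates to builtins.min and can
# be passed around like any other unary pyramda function:
if __name__ == "__main__":
    assert min([3, 1, 2]) == 1                         # direct application
    assert list(map(min, [[3, 1], [5, 4]])) == [1, 4]  # used as a plain callable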
|
setup.py | djm/python-scrapyd-api | 233 | 12707350 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
readme = open('README.md').read()
history = open('HISTORY.md').read()
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['tests']
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
if sys.argv[-1] == 'publish':
print("Use `make release` instead.")
sys.exit()
setup(
name='python-scrapyd-api',
version='2.1.2',
description='A Python wrapper for working with the Scrapyd API',
keywords='python-scrapyd-api scrapyd scrapy api wrapper',
long_description=readme + '\n\n' + history,
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/djm/python-scrapyd-api',
packages=[
'scrapyd_api',
],
package_dir={
'scrapyd_api': 'scrapyd_api'
},
include_package_data=True,
setup_requires=['setuptools>=38.6.0'],
install_requires=[
'requests'
],
license="BSD",
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
],
cmdclass={
'test': PyTest
}
)
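# Usage note (not part of the original file): with the PyTest cmdclass wired
# into setup() above, the test suite can be run through the standard setuptools
# entry point,
#
#     python setup.py test
#
# which invokes PyTest.run_tests() and exits with pytest's return code.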
|
rest-service/manager_rest/test/endpoints/test_deployment_update_step_extraction.py | cloudify-cosmo/cloudify-manager | 124 | 12707371 |
import json
import unittest
from manager_rest.test.attribute import attr
from manager_rest.test.base_test import LATEST_API_VERSION
from manager_rest.storage import models
from manager_rest.deployment_update.step_extractor import (
PROPERTY, PROPERTIES, OUTPUT, OUTPUTS, WORKFLOW, WORKFLOWS, NODE,
NODES, OPERATION, OPERATIONS, RELATIONSHIP, RELATIONSHIPS,
SOURCE_OPERATIONS, TARGET_OPERATIONS, TYPE, GROUP, GROUPS, POLICY_TYPE,
POLICY_TYPES, POLICY_TRIGGER, POLICY_TRIGGERS, HOST_ID, PLUGIN,
DEPLOYMENT_PLUGINS_TO_INSTALL, PLUGINS_TO_INSTALL, DESCRIPTION,
extract_steps,
_update_topology_order_of_add_node_steps,
_find_relationship
)
from manager_rest.deployment_update.step_extractor import DeploymentUpdateStep
from manager_rest.test.utils import get_resource
@attr(client_min_version=2.1, client_max_version=LATEST_API_VERSION)
class StepExtractorTestCase(unittest.TestCase):
@staticmethod
def _get_node_scheme(node_id='node1', **params):
node = {
'id': node_id,
OPERATIONS: {},
PROPERTIES: {},
RELATIONSHIPS: [],
TYPE: '',
HOST_ID: '',
PLUGINS_TO_INSTALL: []
}
node.update(params)
return node
@staticmethod
def _get_relationship_scheme():
return {
SOURCE_OPERATIONS: {},
"target_id": "",
TARGET_OPERATIONS: {},
TYPE: "",
PROPERTIES: {}
}
def setUp(self):
super(StepExtractorTestCase, self).setUp()
self.deployment = models.Deployment(id='deployment_id')
self.deployment_plan = {
DESCRIPTION: None,
NODES: {},
OPERATIONS: {},
PROPERTIES: {},
RELATIONSHIPS: [],
TYPE: '',
GROUPS: {},
POLICY_TYPES: {},
POLICY_TRIGGERS: {},
DEPLOYMENT_PLUGINS_TO_INSTALL: {},
OUTPUTS: {},
WORKFLOWS: {}
}
def test_entity_name(self):
step = DeploymentUpdateStep(action='add',
entity_type=NODE,
entity_id='nodes:node1')
self.assertEqual('node1', step.entity_name)
def test_update_topology_order_of_add_node_steps(self):
add_node_a_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='nodes:node_a')
add_node_b_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='nodes:node_b')
add_node_c_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='nodes:node_c')
add_node_d_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='nodes:node_d')
add_node_e_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='nodes:node_e')
add_node_f_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='nodes:node_f')
steps = [add_node_a_step, add_node_b_step, add_node_c_step,
add_node_d_step, add_node_e_step, add_node_f_step]
# Imagine the following relationships between the added nodes:
#
        #        e
        #        ^^
        #        | \
        #        c  d
        #       ^ ^
        #      /   \
        #     a     b       f
topologically_sorted_added_nodes = ['node_f', 'node_a', 'node_b',
'node_c', 'node_d', 'node_e']
_update_topology_order_of_add_node_steps(
steps, topologically_sorted_added_nodes)
self.assertEqual(5, add_node_e_step.topology_order)
self.assertEqual(4, add_node_d_step.topology_order)
self.assertEqual(3, add_node_c_step.topology_order)
self.assertEqual(2, add_node_b_step.topology_order)
self.assertEqual(1, add_node_a_step.topology_order)
self.assertEqual(0, add_node_f_step.topology_order)
def test_create_added_nodes_graph(self):
self.deployment_plan[NODES] = [
self._get_node_scheme('node_a', relationships=[
{"target_id": 'node_c'}
]),
self._get_node_scheme('node_b', relationships=[
{"target_id": 'node_c'}
]),
self._get_node_scheme('node_c', relationships=[
{"target_id": 'node_e'}
]),
self._get_node_scheme('node_d', relationships=[
{"target_id": 'node_e'}
]),
self._get_node_scheme('node_e'),
self._get_node_scheme('node_f'),
]
steps, _ = extract_steps([], self.deployment, self.deployment_plan)
order_by_id = {s.entity_id: s.topology_order for s in steps}
assert order_by_id['nodes:node_c'] > order_by_id['nodes:node_a']
assert order_by_id['nodes:node_c'] > order_by_id['nodes:node_b']
assert order_by_id['nodes:node_e'] > order_by_id['nodes:node_c']
assert order_by_id['nodes:node_e'] > order_by_id['nodes:node_d']
def test_description_no_change(self):
self.deployment.description = 'description'
self.deployment_plan[DESCRIPTION] = 'description'
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == []
def test_description_modify_description(self):
self.deployment.description = 'description_old'
self.deployment_plan[DESCRIPTION] = 'description_new'
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='modify',
entity_type=DESCRIPTION,
entity_id='description')
]
def test_outputs_no_change(self):
self.deployment.outputs = {'output1': 'output1_value'}
self.deployment_plan[OUTPUTS] = self.deployment.outputs
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == []
def test_outputs_add_output(self):
self.deployment_plan[OUTPUTS] = {'output1': 'output1_value'}
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=OUTPUT,
entity_id='outputs:output1')
]
def test_outputs_remove_output(self):
self.deployment.outputs = {'output1': 'output1_value'}
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=OUTPUT,
entity_id='outputs:output1')
]
def test_outputs_modify_output(self):
self.deployment.outputs = {'output1': 'output1_value'}
self.deployment_plan[OUTPUTS] = {'output1': 'output1_modified_value'}
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='modify',
entity_type=OUTPUT,
entity_id='outputs:output1')
]
def test_workflows_no_change(self):
self.deployment.workflows = {
'intact_workflow': {
'operation': 'module_name.foo',
'plugin': 'plugin_for_workflows'
}
}
self.deployment_plan[WORKFLOWS] = self.deployment.workflows
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == []
def test_workflows_add_workflow_of_existing_plugin(self):
self.deployment_plan[WORKFLOWS] = {
'added_workflow': {
'operation': 'module_name.foo',
'plugin': 'plugin_for_workflows'
}
}
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=WORKFLOW,
entity_id='workflows:added_workflow')
]
def test_workflows_add_workflow_script(self):
self.deployment_plan[WORKFLOWS] = {
'new_workflow': {
'plugin': 'script',
}
}
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=WORKFLOW,
entity_id='workflows:new_workflow')
]
def test_workflows_remove_workflow(self):
self.deployment.workflows = {
'removed_workflow': {
'operation': 'module_name.foo',
'plugin': 'plugin_for_workflows'
}
}
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=WORKFLOW,
entity_id='workflows:removed_workflow')
]
def test_workflows_modify_workflow_of_existing_plugin(self):
self.deployment.workflows = {
'added_workflow': {
'operation': 'module_name.foo',
'plugin': 'plugin_for_workflows'
}
}
self.deployment_plan[WORKFLOWS] = {
'added_workflow': {
'operation': 'module_name.bar',
'plugin': 'plugin_for_workflows'
}
}
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='modify',
entity_type=WORKFLOW,
entity_id='workflows:added_workflow')
]
def test_nodes_no_change(self):
nodes = [self._get_node_scheme()]
self.deployment_plan[NODES] = nodes
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == []
def test_nodes_add_node(self):
self.deployment_plan[NODES] = [self._get_node_scheme()]
steps, _ = extract_steps({}, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='nodes:node1')
]
def test_nodes_remove_node(self):
nodes = [self._get_node_scheme()]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=NODE,
entity_id='nodes:node1')
]
def test_nodes_add_and_remove_node_changed_type(self):
nodes = [self._get_node_scheme(type='old_type')]
self.deployment_plan[NODES] = [self._get_node_scheme(type='new_type')]
supported_steps, unsupported_steps = \
extract_steps(nodes, self.deployment, self.deployment_plan)
assert len(supported_steps) == 0
assert unsupported_steps == [
DeploymentUpdateStep(
action='modify',
entity_type=NODE,
entity_id='nodes:node1',
supported=False),
]
def test_nodes_add_and_remove_node_changed_type_and_host_id(self):
nodes = [self._get_node_scheme(host_id='old_host_id')]
self.deployment_plan[NODES] = [
self._get_node_scheme(type='new_host_id')]
supported_steps, unsupported_steps = \
extract_steps(nodes, self.deployment, self.deployment_plan)
assert len(supported_steps) == 0
assert unsupported_steps == [
DeploymentUpdateStep(
action='modify',
entity_type=NODE,
entity_id='nodes:node1',
supported=False),
]
def test_node_properties_no_change(self):
nodes = [self._get_node_scheme(
properties={'property1': 'property1_value'}
)]
self.deployment_plan[NODES] = nodes
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == []
def test_node_properties_add_property(self):
nodes = [self._get_node_scheme()]
self.deployment_plan[NODES] = [
self._get_node_scheme(properties={'property1': 'property1_value'})]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=PROPERTY,
entity_id='nodes:node1:properties:property1')
]
def test_node_properties_remove_property(self):
nodes = [self._get_node_scheme(properties={
'property1': 'property1_value'})]
self.deployment_plan[NODES] = [self._get_node_scheme()]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=PROPERTY,
entity_id='nodes:node1:properties:property1')
]
def test_node_properties_modify_property(self):
nodes = [self._get_node_scheme(properties={
'property1': 'property1_value'})]
self.deployment_plan[NODES] = [self._get_node_scheme(properties={
'property1': 'property1_modified_value'})]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='modify',
entity_type=PROPERTY,
entity_id='nodes:node1:properties:property1')
]
def test_node_operations_no_change(self):
nodes = [self._get_node_scheme(operations={
'full.operation1.name': {
'operation1_field': 'operation1_field_value'
}
})]
self.deployment_plan[NODES] = nodes
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == []
def test_node_operations_add_operation(self):
nodes = [self._get_node_scheme()]
self.deployment_plan[NODES] = [self._get_node_scheme(operations={
'full.operation1.name': {
'operation1_field': 'operation1_field_value'
}
})]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=OPERATION,
entity_id='nodes:node1:operations:full.operation1.name')
]
def test_node_operations_remove_operation(self):
nodes = [self._get_node_scheme(operations={
'full.operation1.name': {
'operation1_field': 'operation1_field_value'
}
})]
self.deployment_plan[NODES] = [self._get_node_scheme()]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=OPERATION,
entity_id='nodes:node1:operations:full.operation1.name')
]
def test_node_operations_modify_operation(self):
nodes = [self._get_node_scheme(operations={
'full.operation1.name': {
'operation1_field': 'operation1_field_value'
}
})]
self.deployment_plan[NODES] = [self._get_node_scheme(operations={
'full.operation1.name': {
'operation1_field': 'operation1_modified_field_value'
}
})]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='modify',
entity_type=OPERATION,
entity_id='nodes:node1:operations:full.operation1.name')
]
def test_relationships_no_change(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target'
}
])]
self.deployment_plan[NODES] = nodes
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == []
def test_relationships_add_relationship(self):
nodes = [self._get_node_scheme()]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target'
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]')
]
def test_relationships_remove_relationship(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target'
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme()]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]')
]
def test_relationships_change_type(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target'
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'different_relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target'
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]'),
DeploymentUpdateStep(
action='add',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]')
]
def test_relationships_change_target_non_contained_in(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'target_id': 'relationship_target',
'type_hierarchy': ['rel_hierarchy']
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'target_id': 'different_relationship_target',
'type_hierarchy': ['rel_hierarchy']
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]'),
DeploymentUpdateStep(
action='add',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]')
]
def test_relationships_change_target_contained_in(self):
nodes = [self._get_node_scheme(relationships=[
{
'target_id': 'relationship_target',
'type_hierarchy': ['rel_hierarchy',
'cloudify.relationships.contained_in']
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'target_id': 'different_relationship_target',
'type_hierarchy': ['rel_hierarchy',
'cloudify.relationships.contained_in']}
])]
_, unsupported_steps = extract_steps(
nodes, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='modify',
entity_type=NODE,
entity_id='nodes:node1',
supported=False),
]
def test_relationships_change_type_and_target(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target'
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'different_relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'different_relationship_target'
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]'),
DeploymentUpdateStep(
action='add',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]')
]
def test_relationships_modify_order(self):
nodes = [self._get_node_scheme(relationships=[
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_1'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_2'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_3'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_4'}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_2'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_4'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_3'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_1'}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
        # we don't care about the order in which the steps were created
assert set(steps) == {
DeploymentUpdateStep(
action='modify',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]:[3]'),
DeploymentUpdateStep(
action='modify',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[1]:[0]'),
DeploymentUpdateStep(
action='modify',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[3]:[1]')
}
def test_relationships_modify_order_with_add_and_remove(self):
nodes = [self._get_node_scheme(relationships=[
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_1'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_2'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_3'},
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_5'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_2'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_4'},
{'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target_1'}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
        # we don't care about the order in which the steps were created
assert set(steps) == {
DeploymentUpdateStep(
action='modify',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]:[3]'),
DeploymentUpdateStep(
action='remove',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[2]'),
DeploymentUpdateStep(
action='add',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[2]'),
DeploymentUpdateStep(
action='add',
entity_type=RELATIONSHIP,
entity_id='nodes:node1:relationships:[0]')
}
def test_relationships_add_source_operation(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
SOURCE_OPERATIONS: {}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
SOURCE_OPERATIONS: {'full.operation1': {}}
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=OPERATION,
entity_id='nodes:node1:relationships:[0]:'
'source_operations:full.operation1')
]
def test_relationships_remove_source_operation(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
SOURCE_OPERATIONS: {'full.operation1': {}}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
SOURCE_OPERATIONS: {}
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=OPERATION,
entity_id='nodes:node1:relationships:[0]:'
'source_operations:full.operation1')
]
def test_duplicate_relationship(self):
rel = {
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
}
nodes = [self._get_node_scheme(relationships=[rel, rel])]
self.deployment_plan[NODES] = [
self._get_node_scheme(relationships=[rel, rel])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == []
def test_relationships_modify_source_operation(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
SOURCE_OPERATIONS: {
'full.operation1': {
'op1_old_field': 'op1_field_value'
}
}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
SOURCE_OPERATIONS: {
'full.operation1': {
'op1_new_field': 'op1_field_value'
}
}
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='modify',
entity_type=OPERATION,
entity_id='nodes:node1:relationships:[0]:'
'source_operations:full.operation1')
]
def test_relationships_add_target_operation(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
TARGET_OPERATIONS: {}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
TARGET_OPERATIONS: {'full.operation1': {}}
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=OPERATION,
entity_id='nodes:node1:relationships:[0]:'
'target_operations:full.operation1')
]
def test_relationships_remove_target_operation(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
TARGET_OPERATIONS: {'full.operation1': {}}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
TARGET_OPERATIONS: {}
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='remove',
entity_type=OPERATION,
entity_id='nodes:node1:relationships:[0]:'
'target_operations:full.operation1')
]
def test_relationships_modify_target_operation(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
TARGET_OPERATIONS: {
'full.operation1': {
'op1_old_field': 'op1_field_value'
}
}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
TARGET_OPERATIONS: {
'full.operation1': {
'op1_new_field': 'op1_field_value'
}
}
}
])]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='modify',
entity_type=OPERATION,
entity_id='nodes:node1:relationships:[0]:'
'target_operations:full.operation1')
]
def test_get_matching_relationship(self):
relationships_with_match = [
{'type': 'typeA', 'target_id': 'id_1', 'field2': 'value2'},
{'type': 'typeB', 'target_id': 'id_1'},
{'type': 'typeB', 'target_id': 'id_2'},
{'type': 'typeA', 'target_id': 'id_2'}
]
relationships_with_no_match = [
{'type': 'typeB', 'target_id': 'id_1'},
{'type': 'typeB', 'target_id': 'id_2'},
{'type': 'typeA', 'target_id': 'id_2'}
]
assert _find_relationship(
relationships_with_match, 'typeA', 'id_1'
) == ({'type': 'typeA', 'target_id': 'id_1', 'field2': 'value2'}, 0)
assert _find_relationship(
relationships_with_no_match, 'typeA', 'id_1'
) == (None, None)
def test_sort_steps_compare_action(self):
add_step = DeploymentUpdateStep(
action='add',
entity_type='',
entity_id='')
remove_step = DeploymentUpdateStep(
action='remove',
entity_type='',
entity_id='')
modify_step = DeploymentUpdateStep(
action='modify',
entity_type='',
entity_id='')
steps = [add_step, remove_step, modify_step]
expected_step_order = [remove_step, add_step, modify_step]
steps.sort()
assert steps == expected_step_order
def test_sort_steps_add_node_before_add_relationship(self):
add_node_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='')
add_relationship_step = DeploymentUpdateStep(
action='add',
entity_type=RELATIONSHIP,
entity_id='')
steps = [add_relationship_step, add_node_step]
expected_step_order = [add_node_step, add_relationship_step]
steps.sort()
assert steps == expected_step_order
def test_sort_steps_remove_relationship_before_remove_node(self):
remove_relationship_step = DeploymentUpdateStep(
action='remove',
entity_type=RELATIONSHIP,
entity_id='')
remove_node_step = DeploymentUpdateStep(
action='remove',
entity_type=NODE,
entity_id='')
steps = [remove_node_step, remove_relationship_step]
expected_step_order = [remove_relationship_step, remove_node_step]
steps.sort()
assert steps == expected_step_order
def test_sort_steps_higher_topology_before_lower_topology(self):
default_topology_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='')
topology_order_1_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='',
topology_order=1)
topology_order_2_step = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='',
topology_order=2)
steps = [topology_order_1_step,
default_topology_step,
topology_order_2_step]
expected_step_order = [
topology_order_2_step,
topology_order_1_step,
default_topology_step]
steps.sort()
assert steps == expected_step_order
def test_sort_steps_all_comparison_considerations(self):
add_node_step_default_topology = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='')
add_node_step_topology_order_1 = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='',
topology_order=1)
add_node_step_topology_order_2 = DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id='',
topology_order=2)
remove_relationship_step = DeploymentUpdateStep(
action='remove',
entity_type=RELATIONSHIP,
entity_id='')
remove_node_step = DeploymentUpdateStep(
action='remove',
entity_type=NODE,
entity_id='')
add_relationship_step = DeploymentUpdateStep(
action='add',
entity_type=RELATIONSHIP,
entity_id='')
modify_property_step = DeploymentUpdateStep(
action='modify',
entity_type=PROPERTY,
entity_id='')
steps = [add_node_step_topology_order_1, remove_node_step,
modify_property_step, add_relationship_step,
add_node_step_default_topology, remove_relationship_step,
add_node_step_topology_order_2]
expected_step_order = [
remove_relationship_step,
remove_node_step,
add_node_step_topology_order_2,
add_node_step_topology_order_1,
add_node_step_default_topology,
add_relationship_step,
modify_property_step]
steps.sort()
assert steps == expected_step_order
def test_relationships_intact_property(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
PROPERTIES: {
'property1': 'property1_value'
}
}
])]
self.deployment_plan[NODES] = nodes
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == []
def test_relationships_add_property(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
'properties': {}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
PROPERTIES: {
'property1': 'property1_different_value'
}
}
])]
_, unsupported_steps = extract_steps(
nodes, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='add',
entity_type=PROPERTY,
entity_id='nodes:node1:relationships:[0]:'
'properties:property1',
supported=False)
]
def test_relationships_remove_property(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
PROPERTIES: {
'property1': 'property1_different_value'
}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
'properties': {}
}
])]
_, unsupported_steps = extract_steps(
nodes, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='remove',
entity_type=PROPERTY,
entity_id='nodes:node1:relationships:[0]:'
'properties:property1',
supported=False)
]
def test_relationships_modify_property(self):
nodes = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
PROPERTIES: {
'property1': 'property1_value'
}
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(relationships=[
{
'type': 'relationship_type',
'type_hierarchy': ['rel_hierarchy'],
'target_id': 'relationship_target',
PROPERTIES: {
'property1': 'property1_different_value'
}
}
])]
_, unsupported_steps = extract_steps(
nodes, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='modify',
entity_type=PROPERTY,
entity_id='nodes:node1:relationships:[0]:'
'properties:property1',
supported=False)
]
def test_extract_steps_policy_types_no_change(self):
policy_types = {'policy_type1': 'policy_type1_value'}
self.deployment.policy_types = policy_types
self.deployment_plan[POLICY_TYPES] = policy_types
steps, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert steps == []
assert unsupported_steps == []
def test_policy_types_add_policy_type(self):
self.deployment_plan[POLICY_TYPES] = {
'policy_type1': 'policy_type1_value'
}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='add',
entity_type=POLICY_TYPE,
entity_id='policy_types:policy_type1',
supported=False)
]
def test_policy_types_remove_policy_type(self):
self.deployment.policy_types = {'policy_type1': 'policy_type1_value'}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='remove',
entity_type=POLICY_TYPE,
entity_id='policy_types:policy_type1',
supported=False)
]
def test_policy_types_modify_policy_type(self):
self.deployment.policy_types = {'policy_type1': 'policy_type1_value'}
self.deployment_plan[POLICY_TYPES] = {
'policy_type1': 'policy_type1_modified_value'
}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='modify',
entity_type=POLICY_TYPE,
entity_id='policy_types:policy_type1',
supported=False)
]
def test_extract_steps_policy_triggers_no_change(self):
policy_triggers = {'policy_trigger1': 'policy_trigger1_value'}
self.deployment.policy_triggers = policy_triggers
self.deployment_plan[POLICY_TRIGGERS] = policy_triggers
steps, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert steps == []
assert unsupported_steps == []
def test_policy_triggers_add_policy_trigger(self):
self.deployment_plan[POLICY_TRIGGERS] = {
'policy_trigger1': 'policy_trigger1_value'
}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='add',
entity_type=POLICY_TRIGGER,
entity_id='policy_triggers:policy_trigger1',
supported=False)
]
def test_policy_triggers_remove_policy_trigger(self):
self.deployment.policy_triggers = {
'policy_trigger1': 'policy_trigger1_value'
}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='remove',
entity_type=POLICY_TRIGGER,
entity_id='policy_triggers:policy_trigger1',
supported=False)
]
def test_policy_triggers_modify_policy_trigger(self):
self.deployment.policy_triggers = {
'policy_trigger1': 'policy_trigger1_value'
}
self.deployment_plan[POLICY_TRIGGERS] = {
'policy_trigger1': 'policy_trigger1_modified_value'
}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='modify',
entity_type=POLICY_TRIGGER,
entity_id='policy_triggers:policy_trigger1',
supported=False)
]
def test_groups_no_change(self):
groups = {'group1': {}}
self.deployment.groups = groups
self.deployment_plan[GROUPS] = groups
steps, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert steps == []
assert unsupported_steps == []
def test_groups_add_group(self):
self.deployment_plan[GROUPS] = {'group1': {}}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='add',
entity_type=GROUP,
entity_id='groups:group1',
supported=False)
]
def test_groups_remove_group(self):
self.deployment.groups = {'group1': {}}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='remove',
entity_type=GROUP,
entity_id='groups:group1',
supported=False)
]
def test_groups_modify_group(self):
self.deployment.groups = {'group1': {'members': []}}
self.deployment_plan[GROUPS] = {'group1': {'members': ['a']}}
_, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert unsupported_steps == [
DeploymentUpdateStep(
action='modify',
entity_type=GROUP,
entity_id='groups:group1',
supported=False)
]
def test_groups_member_order(self):
self.deployment.groups = {'group1': {'members': ['a', 'b']}}
self.deployment_plan[GROUPS] = {'group1': {'members': ['b', 'a']}}
steps, unsupported_steps = extract_steps(
{}, self.deployment, self.deployment_plan)
assert steps == []
assert unsupported_steps == []
def test_ha_plugins_no_install(self):
nodes = [self._get_node_scheme(plugins_to_install=[
{'name': 'old', 'install': True}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(
plugins_to_install=[{'name': 'new', 'install': False}]
)]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
# Although install is set to False on the new plugin, we are still
# creating the step. We won't need to install the plugin (the
# PluginHandler takes care of that), but the value still needs to be
# updated in the node in the DB
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=PLUGIN,
entity_id='plugins_to_install:node1:new'
)
]
def test_ha_plugins_add_ha_plugin(self):
nodes = [self._get_node_scheme(plugins_to_install=[
{'name': 'old', 'install': True}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(
plugins_to_install=[{'name': 'new', 'install': True}]
)]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='add',
entity_type=PLUGIN,
entity_id='plugins_to_install:node1:new',
supported=True)
]
def test_ha_plugins_modify_ha_plugin(self):
nodes = [self._get_node_scheme(plugins_to_install=[
{
'name': 'name',
'executor': 'host_agent',
'install': True,
'source': 'old'
}
])]
self.deployment_plan[NODES] = [self._get_node_scheme(
plugins_to_install=[
{
'name': 'name',
'executor': 'host_agent',
'install': True,
'source': 'new'
}
]
)]
steps, _ = extract_steps(nodes, self.deployment, self.deployment_plan)
assert steps == [
DeploymentUpdateStep(
action='modify',
entity_type=PLUGIN,
entity_id='plugins_to_install:node1:name',
supported=True)
]
def test_all_changes_combined(self):
path_before = get_resource(
'deployment_update/combined_changes_before.json')
path_after = get_resource(
'deployment_update/combined_changes_after.json')
with open(path_before) as fp_before, open(path_after) as fp_after:
plan_before = json.load(fp_before)
plan_after = json.load(fp_after)
nodes = list(plan_before['nodes'].values())
plan_after['nodes'] = list(plan_after['nodes'].values())
self.deployment.groups = plan_before['groups']
self.deployment.workflows = plan_before['workflows']
self.deployment.policy_types = plan_before['policy_types']
self.deployment.policy_triggers = plan_before['policy_triggers']
self.deployment.outputs = plan_before['outputs']
expected_steps = {
'modify_description': DeploymentUpdateStep(
'modify',
DESCRIPTION,
'description'),
'remove_node': DeploymentUpdateStep(
'remove',
NODE,
'nodes:node1'),
'add_node': DeploymentUpdateStep(
'add',
NODE,
'nodes:node2',
topology_order=0),
'modify_node_changed_type': DeploymentUpdateStep(
'modify',
NODE,
'nodes:node3',
supported=False),
'add_property': DeploymentUpdateStep(
'add',
PROPERTY,
'nodes:node4:properties:added_prop'),
'remove_property': DeploymentUpdateStep(
'remove',
PROPERTY,
'nodes:node4:properties:removed_prop'),
'modify_property': DeploymentUpdateStep(
'modify',
PROPERTY,
'nodes:node4:properties:modified_prop'),
'remove_relationship': DeploymentUpdateStep(
'remove',
RELATIONSHIP,
'nodes:node6:relationships:[0]'),
'add_relationship': DeploymentUpdateStep(
'add',
RELATIONSHIP,
'nodes:node7:relationships:[0]'),
'remove_relationship_changed_target': DeploymentUpdateStep(
'remove',
RELATIONSHIP,
'nodes:node9:relationships:[0]'),
'add_relationship_changed_target': DeploymentUpdateStep(
'add',
RELATIONSHIP,
'nodes:node9:relationships:[0]'),
'remove_relationship_changed_type_and_target':
DeploymentUpdateStep(
'remove',
RELATIONSHIP,
'nodes:node10:relationships:[0]'),
'add_relationship_changed_type_and_target':
DeploymentUpdateStep(
'add',
RELATIONSHIP,
'nodes:node10:relationships:[0]'),
'add_operation': DeploymentUpdateStep(
'add',
OPERATION,
'nodes:node11:operations:interface1.added_operation'),
'add_operation_shortened': DeploymentUpdateStep(
'add',
OPERATION,
'nodes:node11:operations:added_operation'),
'remove_operation': DeploymentUpdateStep(
'remove',
OPERATION,
'nodes:node11:operations:interface1.removed_operation'),
'remove_operation_shortened': DeploymentUpdateStep(
'remove',
OPERATION,
'nodes:node11:operations:removed_operation'),
'modify_operation': DeploymentUpdateStep(
'modify',
OPERATION,
'nodes:node11:operations:interface1.modified_operation'),
'modify_operation_shortened': DeploymentUpdateStep(
'modify',
OPERATION,
'nodes:node11:operations:modified_operation'),
'add_relationship_operation': DeploymentUpdateStep(
'add',
OPERATION,
'nodes:node12:relationships:[0]:target_operations:'
'interface_for_modified_and_added.added_operation'),
'add_relationship_operation_shortened':
DeploymentUpdateStep(
'add',
OPERATION,
'nodes:node12:relationships:[0]:target_operations:'
'added_operation'),
'remove_relationship_operation': DeploymentUpdateStep(
'remove',
OPERATION,
'nodes:node12:relationships:[0]:source_operations:'
'interface_for_intact_and_removed.removed_operation'),
'remove_relationship_operation_shortened':
DeploymentUpdateStep(
'remove',
OPERATION,
'nodes:node12:relationships:[0]:source_operations:'
'removed_operation'),
'modify_relationship_operation': DeploymentUpdateStep(
'modify',
OPERATION,
'nodes:node12:relationships:[0]:target_operations:'
'interface_for_modified_and_added.modified_operation'),
'modify_relationship_operation_shortened':
DeploymentUpdateStep(
'modify',
OPERATION,
'nodes:node12:relationships:[0]:target_operations:'
'modified_operation'),
'add_output': DeploymentUpdateStep(
'add',
OUTPUT,
'outputs:added_output'),
'remove_output': DeploymentUpdateStep(
'remove',
OUTPUT,
'outputs:removed_output'),
'modify_output': DeploymentUpdateStep(
'modify',
OUTPUT,
'outputs:modified_output'),
'add_workflow_same_plugin': DeploymentUpdateStep(
'add',
WORKFLOW,
'workflows:added_workflow_same_plugin'),
'add_workflow_new_plugin': DeploymentUpdateStep(
'add',
WORKFLOW,
'workflows:added_workflow_new_plugin'),
'remove_workflow': DeploymentUpdateStep(
'remove',
WORKFLOW,
'workflows:removed_workflow'),
'modify_workflow_same_plugin': DeploymentUpdateStep(
'modify',
WORKFLOW,
'workflows:modified_workflow_same_plugin'),
'modify_workflow_new_plugin': DeploymentUpdateStep(
'modify',
WORKFLOW,
'workflows:modified_workflow_new_plugin'),
'add_policy_type': DeploymentUpdateStep(
'add',
POLICY_TYPE,
'policy_types:added_policy_type',
supported=False),
'remove_policy_type': DeploymentUpdateStep(
'remove',
POLICY_TYPE,
'policy_types:removed_policy_type',
supported=False),
'modify_policy_type': DeploymentUpdateStep(
'modify',
POLICY_TYPE,
'policy_types:modified_policy_type',
supported=False),
'add_policy_trigger': DeploymentUpdateStep(
'add',
POLICY_TRIGGER,
'policy_triggers:added_policy_trigger',
supported=False),
'remove_policy_trigger': DeploymentUpdateStep(
'remove',
POLICY_TRIGGER,
'policy_triggers:removed_policy_trigger',
supported=False),
'modify_policy_trigger': DeploymentUpdateStep(
'modify',
POLICY_TRIGGER,
'policy_triggers:modified_policy_trigger',
supported=False),
'add_group': DeploymentUpdateStep(
'add',
GROUP,
'groups:added_group',
supported=False),
'remove_group': DeploymentUpdateStep(
'remove',
GROUP,
'groups:removed_group',
supported=False),
'modify_group': DeploymentUpdateStep(
'modify',
GROUP,
'groups:modified_group',
supported=False),
'add_relationship_property': DeploymentUpdateStep(
'add',
PROPERTY,
'nodes:node13:relationships:[0]:'
'properties:added_relationship_prop',
supported=False),
'remove_relationship_property': DeploymentUpdateStep(
'remove',
PROPERTY,
'nodes:node13:relationships:[0]:'
'properties:removed_relationship_prop',
supported=False),
'modify_relationship_property': DeploymentUpdateStep(
'modify',
PROPERTY,
'nodes:node13:relationships:[0]:'
'properties:modified_relationship_prop',
supported=False),
'add_ha_plugin_plugins_to_install': DeploymentUpdateStep(
'add',
PLUGIN,
'plugins_to_install:node18:plugin3_name'),
'add_ha_plugin_plugin3_name': DeploymentUpdateStep(
'add',
PLUGIN,
'plugins:node18:plugin3_name'),
'add_cda_plugin_used_by_host': DeploymentUpdateStep(
'add',
PLUGIN,
'plugins:node16:cda_plugin_for_operations2'),
# the steps below are intended just to make the test pass.
# ideally, they should be removed since they are incorrect
'modify_node_add_contained_in_relationship':
DeploymentUpdateStep(
'modify',
NODE,
'nodes:node8',
supported=False),
'add_cda_operation': DeploymentUpdateStep(
'add',
OPERATION,
'nodes:node16:operations:'
'interface_for_plugin_based_operations.'
'added_operation_new_cda_plugin',
supported=True),
'add_cda_operation_shortened': DeploymentUpdateStep(
'add',
OPERATION,
'nodes:node16:operations:added_operation_new_cda_plugin',
supported=True),
'add_ha_operation': DeploymentUpdateStep(
'add',
OPERATION,
'nodes:node17:operations:'
'interface_for_plugin_based_operations.'
'ha_operation_after',
supported=True),
'add_ha_operation_shortened': DeploymentUpdateStep(
'add',
OPERATION,
'nodes:node17:operations:ha_operation_after',
supported=True),
'remove_ha_operation': DeploymentUpdateStep(
'remove',
OPERATION,
'nodes:node17:operations:'
'interface_for_plugin_based_operations.'
'ha_operation_before',
supported=True),
'remove_ha_operation_shortened': DeploymentUpdateStep(
'remove',
OPERATION,
'nodes:node17:operations:ha_operation_before',
supported=True),
'modify_ha_operation': DeploymentUpdateStep(
'modify',
OPERATION,
'nodes:node18:operations:'
'interface_for_plugin_based_operations.'
'ha_operation_before',
supported=True),
'modify_ha_operation_shortened': DeploymentUpdateStep(
'modify',
OPERATION,
'nodes:node18:operations:ha_operation_before',
supported=True)
}
steps, unsupported_steps = extract_steps(
nodes, self.deployment, plan_after)
steps.extend(unsupported_steps)
self.assertEqual(set(expected_steps.values()), set(steps))
|
QT_Py_NeoPixels/code.py | gamblor21/Adafruit_Learning_System_Guides | 665 | 12707377 |
"""Basic NeoPixel LED animations for the QT Py."""
import time
import board
from rainbowio import colorwheel
import neopixel
# Update this to match the pin to which you connected the NeoPixels
pixel_pin = board.A3
# Update this to match the number of NeoPixels connected
num_pixels = 30
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, auto_write=False)
# Set to 0-1 to change the brightness of the NeoPixels
pixels.brightness = 0.2
def blink(color, wait):
"""Blink animation. Blinks all pixels."""
pixels.fill(color)
pixels.show()
time.sleep(wait)
pixels.fill((0, 0, 0))
pixels.show()
time.sleep(wait)
def chase(color, spacing=3, iteration_step=1):
"""Theatre chase animation. Chases across all pixels."""
if spacing < 2:
raise ValueError("Spacing must be greater than 1 to show chase pattern.")
# Use modulo division to create the spacing between pixels.
chase_pixel = iteration_step % spacing
# Loop over pixels and turn on expected pixels to provided color.
for pixel in range(0, len(pixels), spacing):
# If the pixel is outside the total pixel range, break.
if pixel + chase_pixel > len(pixels) - 1:
break
pixels[pixel + chase_pixel] = color
pixels.show()
# Loop over pixels and turn off expected pixels.
for pixel in range(0, len(pixels), spacing):
# If the pixel is outside the total pixel range, break.
if pixel + chase_pixel > len(pixels) - 1:
break
pixels[pixel + chase_pixel] = (0, 0, 0)
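# Illustration of the chase pattern above (explanatory note, not part of the
# original sketch): with spacing=3, successive iteration_step values cycle the
# lit offset within each group of three, so the lit pixels advance from
# (0, 3, 6, ...) to (1, 4, 7, ...) to (2, 5, 8, ...) and then wrap around.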
def color_wipe(color, wait):
"""Color wipe animation. Wipes across all pixels."""
for pixel in range(num_pixels):
pixels[pixel] = color
time.sleep(wait)
pixels.show()
time.sleep(0.5)
def rainbow_cycle(wait):
"""Rainbow cycle animation. Cycles across all pixels."""
for color_index in range(255):
for pixel in range(num_pixels):
pixel_index = (pixel * 256 // num_pixels) + color_index
pixels[pixel] = colorwheel(pixel_index & 255)
pixels.show()
time.sleep(wait)
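# Note on the color math above (explanatory comment, not part of the original):
# (pixel * 256 // num_pixels) spreads one full 0-255 color wheel across the
# strip, adding color_index shifts the whole pattern on each outer-loop pass,
# and masking with 255 wraps the result back into the wheel's 8-bit range.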
RED = (255, 0, 0)
YELLOW = (255, 150, 0)
GREEN = (0, 255, 0)
CYAN = (0, 255, 255)
BLUE = (0, 0, 255)
PURPLE = (180, 0, 255)
while True:
# Blink 5 times. Increase or decrease the range for more or less blinking.
for blinks in range(5):
blink(RED, 0.5) # Increase number to slow down blinking, decrease to speed up.
# Chase. Increase or decrease the range for longer or shorter chase animation.
for step in range(50):
chase(PURPLE, spacing=4, iteration_step=step)
time.sleep(0.05)
# Fill all pixels.
pixels.fill(RED)
pixels.show()
    # Increase or decrease the time in seconds to change how long each solid color is shown.
time.sleep(0.5)
pixels.fill(GREEN)
pixels.show()
time.sleep(0.5)
pixels.fill(BLUE)
pixels.show()
time.sleep(0.5)
# Color wipe.
color_wipe(YELLOW, 0.01) # Increase the number to slow down the color chase.
color_wipe(CYAN, 0.01)
color_wipe(PURPLE, 0.01)
# Rainbow cycle.
rainbow_cycle(0) # Increase the number to slow down the rainbow.
|
maya/Tests/mayaReload_test.py | ryu-sw/alembic | 921 | 12707386 |
##-*****************************************************************************
##
## Copyright (c) 2009-2011,
## <NAME>, Inc. and
## Industrial Light & Magic, a division of Lucasfilm Entertainment Company Ltd.
##
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above
## copyright notice, this list of conditions and the following disclaimer
## in the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Sony Pictures Imageworks, nor
## Industrial Light & Magic nor the names of their contributors may be used
## to endorse or promote products derived from this software without specific
## prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
##-*****************************************************************************
from maya import cmds as MayaCmds
import maya.OpenMaya as OpenMaya
import os
import unittest
import util
class MayaReloadTest(unittest.TestCase):
def setUp(self):
MayaCmds.file(new=True, force=True)
self.__files = []
def tearDown(self):
for f in self.__files:
os.remove(f)
    # this test makes sure that not just the vertex positions but also the
    # mesh connectivity info is correct
def testAnimMeshReload(self):
MayaCmds.polyCube( name = 'mesh')
MayaCmds.setKeyframe('meshShape.vtx[0:7]', time=[1, 24])
MayaCmds.setKeyframe('meshShape.vtx[0:7]')
MayaCmds.currentTime(12, update=True)
MayaCmds.select('meshShape.vtx[0:7]')
MayaCmds.scale(5, 5, 5, r=True)
MayaCmds.setKeyframe('meshShape.vtx[0:7]', time=[12])
self.__files.append(util.expandFileName('testAnimMeshReadWrite.abc'))
MayaCmds.AbcExport(j='-fr 1 24 -root mesh -f ' + self.__files[-1])
# reading test
MayaCmds.AbcImport(self.__files[-1], mode='open')
# save as a maya file
self.__files.append(util.expandFileName('test.mb'))
MayaCmds.file(rename=self.__files[-1])
MayaCmds.file(save=True)
# reload as a maya file
MayaCmds.file(self.__files[-1], open=True)
MayaCmds.AbcImport(self.__files[-2], mode='import')
retVal = True
mesh1 = '|mesh|meshShape'
mesh2 = '|mesh1|meshShape'
for t in range(1, 25):
MayaCmds.currentTime(t, update=True)
if not util.compareMesh( mesh1, mesh2 ):
self.fail('%s and %s were not equal at frame %d' % (mesh1,
mesh2, t))
#-------------------------------------------------------------------------------
    # Each of the following tests creates four animated nodes of the same data
    # type, writes them out to an Abc file, loads the file back and deletes one
    # node. Then the scene is saved as a Maya file and loaded back to check
    # that the reload works as expected.
#-------------------------------------------------------------------------------
def testAnimPolyDeleteReload(self):
# create a poly cube and animate
shapeName = 'pCube'
MayaCmds.polyCube( name=shapeName )
MayaCmds.move(5, 0, 0, r=True)
MayaCmds.setKeyframe( shapeName+'.vtx[2:5]', time=[1, 24] )
MayaCmds.currentTime( 12 )
MayaCmds.select( shapeName+'.vtx[2:5]',replace=True )
MayaCmds.move(0, 4, 0, r=True)
MayaCmds.setKeyframe( shapeName+'.vtx[2:5]', time=[12] )
# create a poly sphere and animate
shapeName = 'pSphere'
MayaCmds.polySphere( name=shapeName )
MayaCmds.move(-5, 0, 0, r=True)
MayaCmds.setKeyframe( shapeName+'.vtx[200:379]',
shapeName+'.vtx[381]', time=[1, 24])
MayaCmds.currentTime(12)
MayaCmds.select( shapeName+'.vtx[200:379]',
shapeName+'.vtx[381]',replace=True)
MayaCmds.scale(0.5, 0.5, 0.5, relative=True)
MayaCmds.setKeyframe( shapeName+'.vtx[200:379]',
shapeName+'.vtx[381]', time=[12])
MayaCmds.currentTime(1)
# create a poly torus and animate
shapeName = 'pTorus'
MayaCmds.polyTorus(name=shapeName)
MayaCmds.setKeyframe(shapeName+'.vtx[200:219]',time=[1, 24])
MayaCmds.currentTime(12)
MayaCmds.select(shapeName+'.vtx[200:219]',replace=True)
MayaCmds.scale(2, 1, 2, relative=True)
MayaCmds.setKeyframe(shapeName+'.vtx[200:219]', time=[12])
# create a poly cone and animate
shapeName = 'pCone'
MayaCmds.polyCone(name=shapeName)
MayaCmds.move(0, 0, -5, r=True)
MayaCmds.setKeyframe(shapeName+'.vtx[20]', time=[1, 24])
MayaCmds.currentTime(12)
MayaCmds.select(shapeName+'.vtx[20]',replace=True)
MayaCmds.move(0, 4, 0, r=True)
MayaCmds.setKeyframe(shapeName+'.vtx[20]', time=[12])
# write it out to Abc file and load back in
self.__files.append(util.expandFileName('testPolyReload.abc'))
MayaCmds.AbcExport(j='-fr 1 24 -root pCube -root pSphere -root pTorus -root pCone -file %s' %
self.__files[-1])
# load back the Abc file, delete the sphere and save to a maya file
MayaCmds.AbcImport( self.__files[-1], mode='open')
MayaCmds.delete('pSphere')
self.__files.append(util.expandFileName('test.mb'))
MayaCmds.file(rename=self.__files[-1])
MayaCmds.file(save=True)
# import the saved maya file to compare with the original scene
MayaCmds.file(self.__files[-1], open=True)
MayaCmds.select('pCube', 'pTorus', 'pCone', replace=True)
MayaCmds.group(name='ReloadGrp')
MayaCmds.AbcImport(self.__files[-2], mode='import')
shapeList = MayaCmds.ls(type='mesh')
self.failUnlessEqual(len(shapeList), 7)
meshes = [('|pCube|pCubeShape', '|ReloadGrp|pCube|pCubeShape'),
('|pTorus|pTorusShape', '|ReloadGrp|pTorus|pTorusShape'),
('|pCone|pConeShape', '|ReloadGrp|pCone|pConeShape')]
for m in meshes:
for t in range(1, 25):
MayaCmds.currentTime(t, update=True)
if not util.compareMesh(m[0], m[1]):
self.fail('%s and %s are not the same at frame %d' %
(m[0], m[1], t))
def testAnimSubDDeleteReload(self):
# create a subD cube and animate
shapeName = 'cube'
MayaCmds.polyCube( name=shapeName )
MayaCmds.select('cubeShape')
MayaCmds.addAttr(longName='SubDivisionMesh', attributeType='bool',
defaultValue=True)
MayaCmds.move(5, 0, 0, r=True)
MayaCmds.setKeyframe(shapeName+'.vtx[2:5]', time=[1, 24])
MayaCmds.currentTime(12)
MayaCmds.select(shapeName+'.vtx[2:5]',replace=True)
MayaCmds.move(0, 4, 0, r=True)
MayaCmds.setKeyframe(shapeName+'.vtx[2:5]', time=[12])
# create a subD sphere and animate
shapeName = 'sphere'
MayaCmds.polySphere(name=shapeName)
MayaCmds.select('sphereShape')
MayaCmds.addAttr(longName='SubDivisionMesh', attributeType='bool',
defaultValue=True)
MayaCmds.move(-5, 0, 0, r=True)
MayaCmds.setKeyframe(shapeName+'.vtx[200:379]', shapeName+'.vtx[381]',
time=[1, 24])
MayaCmds.currentTime(12)
MayaCmds.select(shapeName+'.vtx[200:379]', shapeName+'.vtx[381]',
replace=True)
MayaCmds.scale(0.5, 0.5, 0.5, relative=True)
MayaCmds.setKeyframe(shapeName+'.vtx[200:379]', shapeName+'.vtx[381]',
time=[12])
MayaCmds.currentTime(1)
# create a subD torus and animate
shapeName = 'torus'
MayaCmds.polyTorus(name=shapeName)
MayaCmds.select('torusShape')
MayaCmds.addAttr(longName='SubDivisionMesh', attributeType='bool',
defaultValue=True)
MayaCmds.setKeyframe(shapeName+'.vtx[200:219]',time=[1, 24])
MayaCmds.currentTime(12)
MayaCmds.select(shapeName+'.vtx[200:219]',replace=True)
MayaCmds.scale(2, 1, 2, relative=True)
MayaCmds.setKeyframe(shapeName+'.vtx[200:219]', time=[12])
# create a subD cone and animate
shapeName = 'cone'
MayaCmds.polyCone( name=shapeName )
MayaCmds.select('coneShape')
MayaCmds.addAttr(longName='SubDivisionMesh', attributeType='bool',
defaultValue=True)
MayaCmds.move(0, 0, -5, r=True)
MayaCmds.setKeyframe(shapeName+'.vtx[20]', time=[1, 24])
MayaCmds.currentTime(12)
MayaCmds.select(shapeName+'.vtx[20]',replace=True)
MayaCmds.move(0, 4, 0, r=True)
MayaCmds.setKeyframe(shapeName+'.vtx[20]', time=[12])
self.__files.append(util.expandFileName('testSubDReload.abc'))
# write it out to Abc file and load back in
MayaCmds.AbcExport(j='-fr 1 24 -root cube -root sphere -root torus -root cone -file ' +
self.__files[-1])
# load back the Abc file, delete the sphere and save to a maya file
MayaCmds.AbcImport( self.__files[-1], mode='open' )
MayaCmds.delete('sphere')
self.__files.append(util.expandFileName('test.mb'))
MayaCmds.file(rename=self.__files[-1])
MayaCmds.file(save=True)
# import the saved maya file to compare with the original scene
MayaCmds.file(self.__files[-1], open=True)
MayaCmds.select('cube', 'torus', 'cone', replace=True)
MayaCmds.group(name='ReloadGrp')
MayaCmds.AbcImport(self.__files[-2], mode='import')
shapeList = MayaCmds.ls(type='mesh')
self.failUnlessEqual(len(shapeList), 7)
# test the equality of cubes
meshes = [('|cube|cubeShape', '|ReloadGrp|cube|cubeShape'),
('|torus|torusShape', '|ReloadGrp|torus|torusShape'),
('|cone|coneShape', '|ReloadGrp|cone|coneShape')]
for m in meshes:
for t in range(1, 25):
MayaCmds.currentTime(t, update=True)
if not util.compareMesh(m[0], m[1]):
self.fail('%s and %s are not the same at frame %d' %
(m[0], m[1], t))
def testAnimNSurfaceDeleteReload(self):
# create an animated Nurbs sphere
MayaCmds.sphere(ch=False, name='nSphere')
MayaCmds.move(5, 0, 0, relative=True)
MayaCmds.select('nSphere.cv[0:1][0:7]', 'nSphere.cv[5:6][0:7]',
replace=True)
MayaCmds.setKeyframe(time=[1, 24])
MayaCmds.currentTime(12, update=True)
MayaCmds.scale(1.5, 1, 1, relative=True)
MayaCmds.setKeyframe(time=12)
# create an animated Nurbs torus
MayaCmds.torus(ch=False, name='nTorus')
MayaCmds.move(-5, 0, 0, relative=True)
MayaCmds.select('nTorus.cv[0][0:7]', 'nTorus.cv[2][0:7]',
replace=True)
MayaCmds.setKeyframe(time=[1, 24])
MayaCmds.currentTime(12, update=True)
MayaCmds.scale(1, 2, 2, relative=True)
MayaCmds.setKeyframe(time=12)
# create an animated Nurbs plane
# should add the trim curve test on this surface, will be easier
# than the rest
MayaCmds.nurbsPlane(ch=False, name='nPlane')
MayaCmds.move(-5, 5, 0, relative=True)
MayaCmds.select('nPlane.cv[0:3][0:3]', replace=True)
MayaCmds.setKeyframe(time=1)
MayaCmds.currentTime(12, update=True)
MayaCmds.rotate(0, 0, 90, relative=True)
MayaCmds.setKeyframe(time=12)
MayaCmds.currentTime(24, update=True)
MayaCmds.rotate(0, 0, 90, relative=True)
MayaCmds.setKeyframe(time=24)
# create an animated Nurbs cylinder
MayaCmds.cylinder(ch=False, name='nCylinder')
MayaCmds.select('nCylinder.cv[0][0:7]', replace=True)
MayaCmds.setKeyframe(time=[1, 24])
MayaCmds.currentTime(12, update=True)
MayaCmds.move(-3, 0, 0, relative=True)
MayaCmds.setKeyframe(time=12)
# write it out to Abc file and load back in
self.__files.append(util.expandFileName('testNSurfaceReload.abc'))
MayaCmds.AbcExport(j='-fr 1 24 -root nSphere -root nTorus -root nPlane -root nCylinder -file ' +
self.__files[-1])
# load back the Abc file, delete the torus and save to a maya file
MayaCmds.AbcImport(self.__files[-1], mode='open')
MayaCmds.delete('nTorus')
self.__files.append(util.expandFileName('test.mb'))
MayaCmds.file(rename=self.__files[-1])
MayaCmds.file(save=True)
# import the saved maya file to compare with the original scene
MayaCmds.file(self.__files[-1], open=True)
MayaCmds.select('nSphere', 'nPlane', 'nCylinder', replace=True)
MayaCmds.group(name='ReloadGrp')
MayaCmds.AbcImport(self.__files[-2], mode='import')
surfaceList = MayaCmds.ls(type='nurbsSurface')
self.failUnlessEqual(len(surfaceList), 7)
surfaces = [('|nSphere|nSphereShape',
'|ReloadGrp|nSphere|nSphereShape'),
('|nPlane|nPlaneShape', '|ReloadGrp|nPlane|nPlaneShape'),
('|nCylinder|nCylinderShape',
'|ReloadGrp|nCylinder|nCylinderShape')]
for s in surfaces:
for t in range(1, 25):
MayaCmds.currentTime(t, update=True)
if not util.compareNurbsSurface(s[0], s[1]):
self.fail('%s and %s are not the same at frame %d' %
(s[0], s[1], t))
def testAnimNSurfaceAndPolyDeleteReload(self):
# create a poly cube and animate
shapeName = 'pCube'
MayaCmds.polyCube(name=shapeName)
MayaCmds.move(5, 0, 0, r=True)
MayaCmds.setKeyframe(shapeName+'.vtx[2:5]', time=[1, 24])
MayaCmds.currentTime(12)
MayaCmds.select(shapeName+'.vtx[2:5]',replace=True)
MayaCmds.move(0, 4, 0, r=True)
MayaCmds.setKeyframe(shapeName+'.vtx[2:5]', time=[12])
# create an animated Nurbs plane
MayaCmds.nurbsPlane(ch=False, name='nPlane')
MayaCmds.move(-5, 5, 0, relative=True)
MayaCmds.select('nPlane.cv[0:3][0:3]', replace=True)
MayaCmds.setKeyframe(time=1)
MayaCmds.currentTime(12, update=True)
MayaCmds.rotate(0, 0, 90, relative=True)
MayaCmds.setKeyframe(time=12)
MayaCmds.currentTime(24, update=True)
MayaCmds.rotate(0, 0, 90, relative=True)
MayaCmds.setKeyframe(time=24)
# write it out to Abc file and load back in
self.__files.append(util.expandFileName('testNSurfaceAndPolyReload.abc'))
MayaCmds.AbcExport(j='-fr 1 24 -root pCube -root nPlane -file ' + self.__files[-1])
# load back the Abc file, delete the cube and save to a maya file
MayaCmds.AbcImport(self.__files[-1], mode='open')
MayaCmds.delete('pCube')
self.__files.append(util.expandFileName('test.mb'))
MayaCmds.file(rename=self.__files[-1])
MayaCmds.file(save=True)
# import the saved maya file to compare with the original scene
MayaCmds.file(self.__files[-1], open=True)
MayaCmds.select('nPlane', replace=True)
MayaCmds.group(name='ReloadGrp')
MayaCmds.AbcImport(self.__files[-2], mode='import')
shapeList = MayaCmds.ls(type='mesh')
self.failUnlessEqual(len(shapeList), 1)
surfaceList = MayaCmds.ls(type='nurbsSurface')
self.failUnlessEqual(len(surfaceList), 2)
# test the equality of plane
surface1 = '|nPlane|nPlaneShape'
surface2 = '|ReloadGrp|nPlane|nPlaneShape'
for t in range(1, 25):
MayaCmds.currentTime(t, update=True)
if not util.compareNurbsSurface( surface1, surface2 ):
self.fail('%s and %s are not the same at frame %d' %
(surface1, surface2, t))
def testAnimCameraDeleteReload(self):
# cam1
MayaCmds.camera(name='cam1')
MayaCmds.setAttr('cam1Shape1.horizontalFilmAperture', 0.962)
MayaCmds.setAttr('cam1Shape1.verticalFilmAperture', 0.731)
MayaCmds.setAttr('cam1Shape1.focalLength', 50)
MayaCmds.setAttr('cam1Shape1.focusDistance', 5)
MayaCmds.setAttr('cam1Shape1.shutterAngle', 144)
MayaCmds.setAttr('cam1Shape1.centerOfInterest', 1384.825)
# cam2
MayaCmds.duplicate('cam1', returnRootsOnly=True)
# cam3
MayaCmds.duplicate('cam1', returnRootsOnly=True)
# cam4
MayaCmds.duplicate('cam1', returnRootsOnly=True)
# animate each camera slightly different
MayaCmds.currentTime(1, update=True)
MayaCmds.setKeyframe('cam1Shape1', attribute='horizontalFilmAperture')
MayaCmds.setKeyframe('cam2Shape', attribute='focalLength')
MayaCmds.setKeyframe('cam3Shape', attribute='focusDistance')
MayaCmds.setKeyframe('cam4Shape', attribute='shutterAngle')
MayaCmds.setKeyframe('cam4Shape', attribute='centerOfInterest')
MayaCmds.currentTime(24, update=True)
MayaCmds.setKeyframe('cam1Shape1', attribute='horizontalFilmAperture',
value=0.95)
MayaCmds.setKeyframe('cam2Shape', attribute='focalLength', value=40)
MayaCmds.setKeyframe('cam3Shape', attribute='focusDistance', value=5.4)
MayaCmds.setKeyframe('cam4Shape', attribute='shutterAngle',
value=174.94)
MayaCmds.setKeyframe('cam4Shape', attribute='centerOfInterest',
value=67.418)
# write them out to an Abc file and load back in
self.__files.append(util.expandFileName('testCamReload.abc'))
MayaCmds.AbcExport(j='-fr 1 24 -root cam1 -root cam2 -root cam3 -root cam4 -file ' +
self.__files[-1])
# load back the Abc file, delete the 2nd camera and save to a maya file
MayaCmds.AbcImport(self.__files[-1], mode='open')
MayaCmds.delete('cam2')
self.__files.append(util.expandFileName('test.mb'))
MayaCmds.file(rename=self.__files[-1])
MayaCmds.file(save=True)
# import the saved maya file to compare with the original scene
MayaCmds.file(self.__files[-1], open=True)
MayaCmds.select('cam1', 'cam3', 'cam4', replace=True)
MayaCmds.group(name='ReloadGrp')
MayaCmds.AbcImport(self.__files[-2], mode='import')
camList = MayaCmds.ls(type='camera')
# should be 7, but this query will return the four standard cameras in
# the scene too
self.failUnlessEqual(len(camList), 11)
# test the equality of cameras
cameras = [('|cam1|cam1Shape1', '|ReloadGrp|cam1|cam1Shape1'),
('|cam3|cam3Shape', '|ReloadGrp|cam3|cam3Shape'),
('|cam4|cam4Shape', '|ReloadGrp|cam4|cam4Shape')]
for c in cameras:
for t in range(1, 25):
MayaCmds.currentTime(t, update=True)
if not util.compareCamera(c[0], c[1]):
self.fail('%s and %s are not the same at frame %d' %
(c[0], c[1], t))
def testAnimNCurvesDeleteReload(self):
# create some animated curves
MayaCmds.textCurves(ch=False, t='Maya', name='Curves', font='Courier')
MayaCmds.currentTime(1, update=True)
MayaCmds.select('curve1.cv[0:27]', 'curve2.cv[0:45]',
'curve3.cv[0:15]', 'curve4.cv[0:19]', 'curve5.cv[0:45]',
'curve6.cv[0:15]', replace=True)
MayaCmds.setKeyframe()
MayaCmds.currentTime(24, update=True)
MayaCmds.select('curve1.cv[0:27]', replace=True)
MayaCmds.move(-3, 3, 0, relative=True)
MayaCmds.select('curve2.cv[0:45]', 'curve3.cv[0:15]', replace=True)
MayaCmds.scale(1.5, 1.5, 1.5, relative=True)
MayaCmds.select('curve4.cv[0:19]', replace=True)
MayaCmds.move(1.5, 0, 0, relative=True)
MayaCmds.rotate(0, 90, 0, relative=True)
MayaCmds.select('curve5.cv[0:45]', 'curve6.cv[0:15]', replace=True)
MayaCmds.move(3, 0, 0, relative=True)
MayaCmds.select('curve1.cv[0:27]', 'curve2.cv[0:45]',
'curve3.cv[0:15]', 'curve4.cv[0:19]', 'curve5.cv[0:45]',
'curve6.cv[0:15]', replace=True)
MayaCmds.setKeyframe()
# write them out to an Abc file and load back in
self.__files.append(util.expandFileName('testNCurvesReload.abc'))
MayaCmds.AbcExport(j='-fr 1 24 -root CurvesShape -file ' + self.__files[-1])
# load back the Abc file, delete the 2nd letter and save to a maya file
MayaCmds.AbcImport(self.__files[-1], mode='open')
# delete letter "a" which has two curves
MayaCmds.delete('Char_a_1')
self.__files.append(util.expandFileName('test.mb'))
MayaCmds.file(rename=self.__files[-1])
MayaCmds.file(save=True)
# import the saved maya file to compare with the original scene
MayaCmds.file(self.__files[-1], open=True)
MayaCmds.select('CurvesShape', replace=True)
MayaCmds.group(name='ReloadGrp')
MayaCmds.AbcImport(self.__files[-2], mode='import')
curveList = MayaCmds.ls(type='nurbsCurve')
self.failUnlessEqual(len(curveList), 10)
# test the equality of curves
curves = [('|CurvesShape|Char_M_1|curve1|curveShape1',
'|ReloadGrp|CurvesShape|Char_M_1|curve1|curveShape1'),
('|CurvesShape|Char_y_1|curve4|curveShape4',
'|ReloadGrp|CurvesShape|Char_y_1|curve4|curveShape4'),
('|CurvesShape|Char_a_2|curve5|curveShape5',
'|ReloadGrp|CurvesShape|Char_a_2|curve5|curveShape5'),
('|CurvesShape|Char_a_2|curve6|curveShape6',
'|ReloadGrp|CurvesShape|Char_a_2|curve6|curveShape6')]
for c in curves:
for t in range(1, 25):
MayaCmds.currentTime(t, update=True)
if not util.compareNurbsCurve(c[0], c[1]):
self.fail('%s and %s are not the same at frame %d' %
(c[0], c[1], t))
#-------------------------------------------------------------------------
def testAnimNCurveGrpDeleteReload(self):
# create an animated curves group
MayaCmds.textCurves(ch=False, t='haka', name='Curves', font='Courier')
MayaCmds.addAttr(longName='riCurves', at='bool', dv=True)
MayaCmds.currentTime(1, update=True)
MayaCmds.select('curve1.cv[0:27]', 'curve2.cv[0:45]',
'curve3.cv[0:15]', 'curve4.cv[0:19]', 'curve5.cv[0:45]',
'curve6.cv[0:15]', replace=True)
MayaCmds.setKeyframe()
MayaCmds.currentTime(24, update=True)
MayaCmds.select('curve1.cv[0:27]', replace=True)
MayaCmds.move(-3, 3, 0, relative=True)
MayaCmds.select('curve2.cv[0:45]', 'curve3.cv[0:15]', replace=True)
MayaCmds.scale(1.5, 1.5, 1.5, relative=True)
MayaCmds.select('curve4.cv[0:19]', replace=True)
MayaCmds.move(1.5, 0, 0, relative=True)
MayaCmds.rotate(0, 90, 0, relative=True)
MayaCmds.select('curve5.cv[0:45]', 'curve6.cv[0:15]', replace=True)
MayaCmds.move(3, 0, 0, relative=True)
MayaCmds.select('curve1.cv[0:27]', 'curve2.cv[0:45]',
'curve3.cv[0:15]', 'curve4.cv[0:19]', 'curve5.cv[0:45]',
'curve6.cv[0:15]', replace=True)
MayaCmds.setKeyframe()
# write them out to an Abc file and load back in
self.__files.append(util.expandFileName('testNCurveGrpReload.abc'))
MayaCmds.AbcExport(j='-fr 1 24 -root CurvesShape -file ' + self.__files[-1])
# load back the Abc file, delete the 2nd letter and save to a maya file
MayaCmds.AbcImport(self.__files[-1], mode='open')
# delete letter "a" which has two curves, but as a curve group.
# the curve shapes are renamed under the group node
MayaCmds.delete('CurvesShape1')
MayaCmds.delete('CurvesShape2')
self.__files.append(util.expandFileName('testCurves.mb'))
MayaCmds.file(rename=self.__files[-1])
MayaCmds.file(save=True)
# import the saved maya file to compare with the original scene
MayaCmds.file(self.__files[-1], open=True)
MayaCmds.select('|CurvesShape', replace=True)
MayaCmds.group(name='ReloadGrp')
MayaCmds.AbcImport(self.__files[-2], mode='import')
curveList = MayaCmds.ls(type='nurbsCurve')
self.failUnlessEqual(len(curveList), 10)
curves = [('|CurvesShape|CurvesShape',
'|ReloadGrp|CurvesShape|CurvesShape'),
('|CurvesShape|CurvesShape8',
'|ReloadGrp|CurvesShape|CurvesShape3'),
('|CurvesShape|CurvesShape9',
'|ReloadGrp|CurvesShape|CurvesShape4'),
('|CurvesShape|CurvesShape10',
'|ReloadGrp|CurvesShape|CurvesShape5')]
for c in curves:
for t in range(1, 25):
MayaCmds.currentTime(t, update=True)
if not util.compareNurbsCurve(c[0], c[1]):
self.fail('%s and %s are not the same at frame %d' %
(c[0], c[1], t))
def testAnimPropDeleteReload(self):
# create some animated properties on a transform node ( could be any type )
nodeName = MayaCmds.polyPrism(ch=False, name = 'prism')
MayaCmds.addAttr(longName='SPT_int8', defaultValue=0,
attributeType='byte', keyable=True)
MayaCmds.addAttr(longName='SPT_int16', defaultValue=100,
attributeType='short', keyable=True)
MayaCmds.addAttr(longName='SPT_int32', defaultValue=1000,
attributeType='long', keyable=True)
MayaCmds.addAttr(longName='SPT_float', defaultValue=0.57777777,
attributeType='float', keyable=True)
MayaCmds.addAttr(longName='SPT_double', defaultValue=5.0456435,
attributeType='double', keyable=True)
MayaCmds.currentTime(1, update=True)
MayaCmds.setKeyframe(nodeName, attribute='SPT_int8')
MayaCmds.setKeyframe(nodeName, attribute='SPT_int16')
MayaCmds.setKeyframe(nodeName, attribute='SPT_int32')
MayaCmds.setKeyframe(nodeName, attribute='SPT_float')
MayaCmds.setKeyframe(nodeName, attribute='SPT_double')
MayaCmds.currentTime(24, update=True)
MayaCmds.setKeyframe(nodeName, attribute='SPT_int8', value=8)
MayaCmds.setKeyframe(nodeName, attribute='SPT_int16', value=16)
MayaCmds.setKeyframe(nodeName, attribute='SPT_int32', value=32)
MayaCmds.setKeyframe(nodeName, attribute='SPT_float', value=5.24847)
MayaCmds.setKeyframe(nodeName, attribute='SPT_double', value=3.14154)
# create SPT_HWColor on the shape node
MayaCmds.select('prismShape')
MayaCmds.addAttr(longName='SPT_HwColorR', defaultValue=1.0,
minValue=0.0, maxValue=1.0)
MayaCmds.addAttr(longName='SPT_HwColorG', defaultValue=1.0,
minValue=0.0, maxValue=1.0)
MayaCmds.addAttr(longName='SPT_HwColorB', defaultValue=1.0,
minValue=0.0, maxValue=1.0)
MayaCmds.addAttr( longName='SPT_HwColor', usedAsColor=True,
attributeType='float3')
MayaCmds.currentTime(1, update=True)
MayaCmds.setKeyframe(at='SPT_HwColorR')
MayaCmds.setKeyframe(at='SPT_HwColorG')
MayaCmds.setKeyframe(at='SPT_HwColorB')
MayaCmds.currentTime(24, update=True)
MayaCmds.setKeyframe(at='SPT_HwColorR', value=0.5)
MayaCmds.setKeyframe(at='SPT_HwColorG', value=0.15)
MayaCmds.setKeyframe(at='SPT_HwColorB', value=0.75)
# write them out to an Abc file and load back in
self.__files.append(util.expandFileName('testPropReload.abc'))
MayaCmds.AbcExport(j='-atp SPT_ -fr 1 24 -root prism -file ' + self.__files[-1])
        # load back the Abc file, disconnect some animated props and save to a maya file
abcNode = MayaCmds.AbcImport(
self.__files[-1], mode='open' )
# delete connections to animated props
prop = MayaCmds.listConnections('|prism.SPT_float', p=True)[0]
MayaCmds.disconnectAttr(prop, '|prism.SPT_float')
attr = '|prism|prismShape.SPT_HwColorG'
prop = MayaCmds.listConnections(attr, p=True)[0]
MayaCmds.disconnectAttr(prop, attr)
self.__files.append(util.expandFileName('test.mb'))
MayaCmds.file(rename=self.__files[-1])
MayaCmds.file(save=True)
# import the saved maya file to compare with the original scene
MayaCmds.file(self.__files[-1], open=True)
MayaCmds.select('prism', replace=True)
MayaCmds.group(name='ReloadGrp')
MayaCmds.AbcImport(self.__files[-2], mode='import')
# test the equality of props
for t in range(1, 25):
MayaCmds.currentTime(t, update=True)
self.failUnlessEqual(MayaCmds.getAttr('|prism.SPT_int8'),
MayaCmds.getAttr('|ReloadGrp|prism.SPT_int8'),
'prism.SPT_int8 not equal' )
self.failUnlessEqual(MayaCmds.getAttr('|prism.SPT_int16'),
MayaCmds.getAttr('|ReloadGrp|prism.SPT_int16'),
'prism.SPT_int16 not equal')
self.failUnlessEqual(MayaCmds.getAttr('|prism.SPT_int32'),
MayaCmds.getAttr('|ReloadGrp|prism.SPT_int32'),
'prism.SPT_int32 not equal')
self.failUnlessAlmostEqual(MayaCmds.getAttr('|prism.SPT_double'),
MayaCmds.getAttr('|ReloadGrp|prism.SPT_double'), 4,
'prism.SPT_double not equal')
self.failUnlessAlmostEqual(
MayaCmds.getAttr('|prism|prismShape.SPT_HwColorR'),
MayaCmds.getAttr('|ReloadGrp|prism|prismShape.SPT_HwColorR'),
4, 'prismShape.SPT_HwColorR not equal')
self.failUnlessAlmostEqual(
MayaCmds.getAttr('|prism|prismShape.SPT_HwColorB'),
MayaCmds.getAttr('|ReloadGrp|prism|prismShape.SPT_HwColorB'),
4, 'prismShape.SPT_HwColorB not equal')
|
etcmodel/models/wikihop/wikihop_eval.py | deepneuralmachine/google-research | 23,901 | 12707414 | <filename>etcmodel/models/wikihop/wikihop_eval.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary for WikiHop eval."""
import json
from typing import Any, Dict, List, Text, Tuple
import numpy as np
import tensorflow.compat.v1 as tf
from etcmodel.models import tokenization
from etcmodel.models.wikihop import data_utils
tf.compat.v1.disable_v2_behavior()
flags = tf.flags
FLAGS = flags.FLAGS
# Populate these constants appropriately at the time of submission.
MODEL_PATH = "105/"
SPM_MODEL_VOCAB = "vocab_gpt.model"
class WikiHopInference(object):
"""WikiHop for inference / prediction using SavedModel."""
def __init__(self, model_dir_path: Text, session_target: Text):
"""Loads the WikiHop from an exported `tf.SavedModel`.
Args:
model_dir_path: Path to the exported directory of the model.
session_target: The session target.
"""
self.sess = tf.Session(graph=tf.Graph(), target=session_target)
# Loads the saved model (graph + variables) to the given session.
graph_def = tf.saved_model.load(
self.sess,
tags=[tf.saved_model.tag_constants.SERVING],
export_dir=model_dir_path)
signature = graph_def.signature_def[
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
self.input_tensor_name = signature.inputs["serialized_tf_example"].name
self.logits_tensor_name = signature.outputs["logits"].name
def predict(self,
serialized_tf_examples: List[Text]) -> List[List[List[float]]]:
"""Retrieves logits for the given list of serialized tf examples.
Args:
serialized_tf_examples: Batched input serialized_tf_examples.
Returns:
A List[List[float]] representing the logits. Each entry i in the list
      corresponds to the result of the i-th serialized_tf_example.
"""
feed_dict = {self.input_tensor_name: serialized_tf_examples}
logits = self.sess.run([self.logits_tensor_name], feed_dict=feed_dict)
return logits
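  # Usage sketch (illustrative only; assumes MODEL_PATH points at an exported
  # SavedModel directory and `examples` is a list of serialized tf.Example
  # strings):
  #   inference = WikiHopInference(MODEL_PATH, session_target="")
  #   batch_logits = inference.predict(examples)[0]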
def get_serialized_tf_example(wikihop_example: data_utils.WikiHopExample,
tokenizer: tokenization.FullTokenizer,
long_seq_len: int = 4096,
global_seq_len: int = 430,
max_num_sentences: int = 200) -> Text:
"""Returns serialized TF example from the given json example."""
converter = data_utils.WikiHopTFExampleConverter(
tokenizer=tokenizer,
long_seq_len=long_seq_len,
global_seq_len=global_seq_len,
max_num_sentences=max_num_sentences)
tf_example = converter.convert_single_example(example=wikihop_example)
return tf_example.SerializeToString()
def get_predicted_answer(wikihop_example: data_utils.WikiHopExample,
logits: List[float],
global_seq_len: int = 430) -> Text:
"""Returns prediçted answer for the given example and its logits."""
assert len(logits) == global_seq_len, (
"Mismatch in logits len. Expected: {}, found: {}, logits are: {} ".format(
global_seq_len, len(logits), logits))
logits = logits[0:len(wikihop_example.candidate_answers)]
max_label_index = np.argmax(logits)
assert max_label_index >= 0 and (max_label_index < len(
wikihop_example.candidate_answers))
answer = wikihop_example.candidate_answers[max_label_index]
answer = answer.lower().strip()
return answer
def get_output_single_example(
tokenizer: tokenization.FullTokenizer,
wikihop_inference: WikiHopInference,
json_obj: Dict[Text, Any],
long_seq_len: int = 4096,
global_seq_len: int = 430,
max_num_sentences: int = 200) -> Tuple[Text, Text]:
"""Generates output for a single example."""
wikihop_example = data_utils.WikiHopExample.from_json(single_example=json_obj)
serialized_tf_example = get_serialized_tf_example(
wikihop_example=wikihop_example,
tokenizer=tokenizer,
long_seq_len=long_seq_len,
global_seq_len=global_seq_len,
max_num_sentences=max_num_sentences)
logits = wikihop_inference.predict([serialized_tf_example])[0][0]
assert len(logits) == global_seq_len, (
"Mismatch in0 logits len. Expected: {}, found: {} for example_id: {}. "
"Actual logits are: {}".format(global_seq_len, len(logits),
wikihop_example.example_id, logits))
answer = get_predicted_answer(
wikihop_example=wikihop_example,
logits=logits,
global_seq_len=global_seq_len)
return (wikihop_example.example_id, answer)
def generate_eval_output_bulk(json_examples: List[Dict[Text, Any]],
model_dir_path: Text,
tokenizer: tokenization.FullTokenizer,
long_seq_len: int = 4096,
global_seq_len: int = 430,
max_num_sentences: int = 200,
batch_size: int = 4,
session_target: Text = "") -> Dict[Text, Any]:
"""Bulk mode inference."""
serialized_tf_examples = []
wikihop_examples = []
output = {}
for json_obj in json_examples:
wikihop_example = data_utils.WikiHopExample.from_json(
single_example=json_obj)
wikihop_examples.append(wikihop_example)
serialize_tf_example = get_serialized_tf_example(
wikihop_example=wikihop_example,
tokenizer=tokenizer,
long_seq_len=long_seq_len,
global_seq_len=global_seq_len,
max_num_sentences=max_num_sentences)
serialized_tf_examples.append(serialize_tf_example)
wikihop_inference = WikiHopInference(
model_dir_path=model_dir_path, session_target=session_target)
index = 0
num_examples = len(serialized_tf_examples)
  # Note that we are getting "all" the serialized examples and then "batching"
  # them only for prediction. The bottleneck is almost always going to be the
  # GPU anyway (for both memory and compute).
while index < num_examples:
predict_batch = serialized_tf_examples[index:min(index +
batch_size, num_examples)]
batch_logits = wikihop_inference.predict(predict_batch)[0]
for (offset, logits) in enumerate(batch_logits):
answer = get_predicted_answer(
wikihop_example=wikihop_examples[index + offset],
logits=logits,
global_seq_len=global_seq_len)
output[wikihop_examples[index + offset].example_id] = answer
index += batch_size
return output
def generate_eval_output(json_examples: List[Dict[Text, Any]],
tokenizer: tokenization.FullTokenizer,
model_dir_path: Text,
long_seq_len: int = 4096,
global_seq_len: int = 430,
max_num_sentences: int = 200,
batch_inference: bool = False,
batch_size: int = 4,
session_target: Text = "") -> Dict[Text, Any]:
"""Generates output for the input json.
  Returns the output dict keyed by the example_id, with the value being the
  answer string.
Args:
json_examples: List of examples loaded from json input file.
tokenizer: The BERT or ALBERT tokenizer.
model_dir_path: The path to the directory containing the SavedModel.
long_seq_len: The long input.
global_seq_len: The global input.
max_num_sentences: The max num sentences to be used per example.
batch_inference: If True, we batch together all the examples at once for
faster inference. Given that there are only 1K test examples, we might be
      able to fit everything in memory (500K per example * 1K).
batch_size: Number of examples to be batched in one to predict. Applicable
only when `batch_inference` is set to True.
session_target: The TF session target.
Returns:
    Dict[Text, Text] keyed by the example_id, mapping to the corresponding
    prediction answer.
"""
output = {}
if batch_inference:
return generate_eval_output_bulk(
json_examples=json_examples,
model_dir_path=model_dir_path,
tokenizer=tokenizer,
long_seq_len=long_seq_len,
global_seq_len=global_seq_len,
max_num_sentences=max_num_sentences,
batch_size=batch_size,
session_target=session_target)
wikihop_inference = WikiHopInference(
model_dir_path=model_dir_path, session_target=session_target)
for json_obj in json_examples:
(example_id, label) = get_output_single_example(
tokenizer=tokenizer,
wikihop_inference=wikihop_inference,
json_obj=json_obj,
long_seq_len=long_seq_len,
global_seq_len=global_seq_len,
max_num_sentences=max_num_sentences)
output[example_id] = label
return output
def main(argv):
if len(argv) != 3:
raise tf.app.UsageError("Exactly two arguments expected.")
input_json_filepath = argv[1].strip()
output_json_filepath = argv[2].strip()
tokenizer = tokenization.FullTokenizer(
vocab_file=None, do_lower_case=None, spm_model_file=SPM_MODEL_VOCAB)
with tf.gfile.Open(input_json_filepath, "r") as test_data:
json_examples = json.load(test_data)
predictions = generate_eval_output(
tokenizer=tokenizer,
json_examples=json_examples,
model_dir_path=MODEL_PATH)
with tf.gfile.GFile(output_json_filepath, "w") as output_writer:
json.dump(predictions, output_writer)
if __name__ == "__main__":
tf.app.run()
|
samcli/lib/iac/constants.py | torresxb1/aws-sam-cli | 2,959 | 12707415 | """
General IaC constants
"""
PARAMETER_OVERRIDES = "parameter_overrides"
GLOBAL_PARAMETER_OVERRIDES = "global_parameter_overrides"
|
samples/CSP/sco2_analysis_python_V2/example/mspt_default_generate_sco2_udpc.py | jkelroy/SAM | 219 | 12707420 | ##################################################
## Set relative file paths ##
import csv
import sys
import os
import numpy as np
import json
absFilePath = os.path.abspath(__file__)
fileDir = os.path.dirname(os.path.abspath(__file__))
parentDir = os.path.dirname(fileDir)
newPath = os.path.join(parentDir, 'core')
sys.path.append(newPath)
import sco2_cycle_ssc as sco2_solve
import sco2_plots as cy_plt
##################################################
##################################################
def get_sco2_design_parameters():
des_par = {}
des_par["htf"] = 17 # [-] Solar salt
des_par["T_htf_hot_des"] = 574.0 # [C] HTF design hot temperature (PHX inlet) [cite: sunshot]
des_par["dT_PHX_hot_approach"] = 20.0 # [C/K] default 20. Temperature difference between hot HTF and turbine inlet [cite: neises/turchi]
des_par["dT_PHX_cold_approach"] = 20 # [C/K] default 20. Temperature difference between cold HTF and cold CO2 PHX inlet [enforces CR = 1]
des_par["T_amb_des"] = 40.0 # [C] Ambient temperature at design [cite: neises/turchi]
des_par["dT_mc_approach"] = 6.0 # [C] Use 6 here per [Neises & Turchi 19]. Temperature difference between main compressor CO2 inlet and ambient air
des_par["site_elevation"] = 588 # [m] Elevation of Daggett, CA. Used to size air cooler...
des_par["W_dot_net_des"] = 115.0 # [MWe] Design cycle power output (no cooling parasitics)
des_par["design_method"] = 3 # [-] 1 = specify efficiency, 2 = specify total recup UA, 3 = Specify each recuperator design (see inputs below)
des_par["eta_thermal_des"] = -1 # [-] Power cycle thermal efficiency, not used here
des_par["UA_recup_tot_des"] = -1 # [kW/K]
des_par["cycle_config"] = 1 # [1] = RC, [2] = PC
des_par["is_recomp_ok"] = 1 # [-] Use simple cycle for now. 1 = Yes, 0 = simple cycle only
des_par["is_P_high_fixed"] = 1 # [-] 0 = No, optimize. 1 = Yes
des_par["is_PR_fixed"] = 0 # [-] 0 = No, >0 = Yes
des_par["des_objective"] = 1 # [-] 2 = hit min deltaT then max efficiency, != 2 = max efficiency
des_par["min_phx_deltaT"] = 1000 # [C] Min allowable deltaT across PHX
des_par["rel_tol"] = 3 # [-] Baseline solver and optimization relative tolerance exponent (10^-rel_tol)
# Weiland & Thimsen 2016
# In most studies, 85% is an accepted isentropic efficiency for either the main or recompression compressors, and is the recommended assumption.
des_par["eta_isen_mc"] = 0.85 # [-] Main compressor isentropic efficiency
des_par["eta_isen_rc"] = 0.85 # [-] Recompressor isentropic efficiency
des_par["eta_isen_pc"] = 0.85 # [-] Precompressor isentropic efficiency
# Weiland & Thimsen 2016
# Recommended turbine efficiencies are 90% for axial turbines above 30 MW, and 85% for radial turbines below 30 MW.
des_par["eta_isen_t"] = 0.90 # [-] Turbine isentropic efficiency
des_par["P_high_limit"] = 25 # [MPa] Cycle high pressure limit
# <NAME> 2016
# Multiple literature sources suggest that recuperator cold side (high pressure) pressure drop of
# approximately 140 kPa (20 psid) and a hot side (low pressure) pressure drop of 280 kPa (40 psid) can be reasonably used.
# Note: Unclear what the low pressure assumption is in this study, could be significantly lower for direct combustion cycles
eff_max = 1.0
deltaP_recup_HP = 0.0056 # [-] 0.0056 = 0.14[MPa]/25[MPa]
deltaP_recup_LP = 0.0311 # [-] 0.0311 = 0.28[MPa]/9[MPa]
# LTR
des_par["LTR_design_code"] = 2 # 1 = UA, 2 = min dT, 3 = effectiveness
des_par["LTR_UA_des_in"] = -1 # [kW/K] (required if LTR_design_code == 1 and design_method == 3) not used
des_par[
"LTR_min_dT_des_in"] = 10.0 # [C] (required if LTR_design_code == 2 and design_method == 3) "reasonable value" from Neises/Turchi
des_par["LTR_eff_des_in"] = -1 # [-] (required if LTR_design_code == 3 and design_method == 3)
des_par["LT_recup_eff_max"] = eff_max # [-] Maximum effectiveness low temperature recuperator
des_par["LTR_LP_deltaP_des_in"] = deltaP_recup_LP # [-]
des_par["LTR_HP_deltaP_des_in"] = deltaP_recup_HP # [-]
# HTR
des_par["HTR_design_code"] = 2 # 1 = UA, 2 = min dT, 3 = effectiveness
des_par["HTR_UA_des_in"] = -1 # [kW/K] (required if LTR_design_code == 1 and design_method == 3)
des_par[
"HTR_min_dT_des_in"] = 10.0 # [C] (required if LTR_design_code == 2 and design_method == 3) "reasonable value" from Neises/Turchi
des_par["HTR_eff_des_in"] = -1 # [-] (required if LTR_design_code == 3 and design_method == 3)
des_par["HT_recup_eff_max"] = eff_max # [-] Maximum effectiveness high temperature recuperator
des_par["HTR_LP_deltaP_des_in"] = deltaP_recup_LP # [-]
des_par["HTR_HP_deltaP_des_in"] = deltaP_recup_HP # [-]
# PHX
des_par["PHX_co2_deltaP_des_in"] = deltaP_recup_HP # [-]
# Air Cooler
des_par[
"deltaP_cooler_frac"] = 0.005 # [-] Fraction of CO2 inlet pressure that is design point cooler CO2 pressure drop
des_par[
"fan_power_frac"] = 0.02 # [-] Fraction of net cycle power consumed by air cooler fan. 2% here per Turchi et al.
# Default
des_par[
"deltaP_counterHX_frac"] = -1 # [-] Fraction of CO2 inlet pressure that is design point counterflow HX (recups & PHX) pressure drop
# Off Design
des_par["od_rel_tol"] = 3 # [-] Baseline off-design relative convergence tolerance exponent (10^-od_rel_tol)
return des_par
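# The defaults above are consumed later in this script through
# c_sco2.overwrite_default_design_parameters(get_sco2_design_parameters()).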
def make_udpc_plots_from_json_dict(json_file_name):
udpc_dict = json.load(open(json_file_name))
print("HTF cold design = " + str(udpc_dict["T_htf_cold_des"]) + " C")
T_hot_str = "HTF Hot Temperature (Design page) = " + str(udpc_dict["T_htf_hot_des"]) + " C"
T_cold_str = "HTF Cold Temperature (Design page) = " + str(udpc_dict["T_htf_cold_des"]) + " C"
eta_str = "Cycle Thermal Efficiency (Design page) = " + str(udpc_dict["eta_thermal_calc"]) + " -"
T_amb_str = "Ambient Temperature (Power Cycle page) = " + str(udpc_dict["T_amb_des"]) + " C"
W_dot_cool_str = "Cooling Parasitic (Power Cycle page) = " + str(udpc_dict["fan_power_frac"]) + " -"
od_T_t_in_mode = udpc_dict["od_T_t_in_mode"]
n_T_htf = int(udpc_dict["udpc_n_T_htf"])
n_T_amb = int(udpc_dict["udpc_n_T_amb"])
n_m_dot_htf = int(udpc_dict["udpc_n_m_dot_htf"])
udpc_data = udpc_dict["udpc_table"]
s_cycle_des = T_hot_str + "\n" + T_cold_str + "\n" + eta_str + "\n" + T_amb_str + "\n" + W_dot_cool_str + "\n"
cy_plt.plot_udpc_results(udpc_data, n_T_htf, n_T_amb, n_m_dot_htf, "updc_data_read", s_cycle_des, od_T_t_in_mode)
######################################
######################################
"Generate data for SAM's User Defined Power Cycle Model"
# Instantiate sco2 cycle simulation class
c_sco2 = sco2_solve.C_sco2_sim(1) # Initialize as same cycle config as specified above
# Get default design parameters. These are different than the "baseline" default parameters in "sco2_cycle_ssc.py"
sco2_des_par_default = get_sco2_design_parameters()
c_sco2.overwrite_default_design_parameters(sco2_des_par_default)
# Setup string for naming files
des_sim_label_str = "T_amb_des" + '{:.1f}'.format(sco2_des_par_default["T_amb_des"])
mod_base_dict = {"od_generate_udpc": [1.0]}
mod_base_dict["od_rel_tol"] = 2
c_sco2.overwrite_des_par_base(mod_base_dict) # Overwrite baseline design parameters
c_sco2.solve_sco2_case() # Run design simulation
print(c_sco2.m_solve_dict["eta_thermal_calc"])
print("\nDid the simulation code with "
"modified design parameters solve successfully = ", c_sco2.m_solve_success)
c_sco2.m_also_save_csv = True
c_sco2.save_m_solve_dict("" + des_sim_label_str + "_UDPC_mspt_default") # Save design
solved_dict = c_sco2.m_solve_dict
udpc_data = solved_dict["udpc_table"]
HTF_cold_str = "HTF cold design = " + str(solved_dict["T_htf_cold_des"]) + " C"
T_co2_in_str = "CO2 PHX in Temp design = " + str(solved_dict["T_co2_PHX_in"]) + " C"
P_co2_in_str = "CO2 PHX in Pressure design = " + str(solved_dict["P_co2_PHX_in"]) + " MPa"
T_turb_str = "CO2 Turbine in Temp design = " + str(solved_dict["T_turb_in"]) + " C"
P_turb_str = "CO2 Turbine in Pressure design = " + str(solved_dict["t_P_in_des"]) + " MPa"
eta_str = "Cycle Thermal Efficiency (Design page) = " + str(solved_dict["eta_thermal_calc"]) + " -"
T_amb_str = "Ambient Temperature (Power Cycle page) = " + str(solved_dict["T_amb_des"]) + " C"
W_dot_cool_str = "Cooling Parasitic (Power Cycle page) = " + str(solved_dict["fan_power_frac"]) + " -"
#SSC_OUTPUT, SSC_MATRIX, "udpc_table", "Columns (7): HTF Temp [C], HTF ND mass flow [-], Ambient Temp [C], ND Power, ND Heat In, ND Fan Power, ND Water. Rows = runs"
with open("udpc_outputs" + '.csv', 'w', newline='') as f:
w = csv.writer(f)
w.writerows(solved_dict["udpc_table"])
f.close()
n_T_htf = int(solved_dict["udpc_n_T_htf"])
n_T_amb = int(solved_dict["udpc_n_T_amb"])
n_m_dot_htf = int(solved_dict["udpc_n_m_dot_htf"])
s_cycle_des = HTF_cold_str + "\n" + T_co2_in_str + "\n"\
+ P_co2_in_str+ "\n" + T_turb_str + "\n" + P_turb_str + "\n"\
+ eta_str + "\n" + T_amb_str + "\n" + W_dot_cool_str +"\n"
cy_plt.plot_udpc_results(udpc_data, n_T_htf, n_T_amb, n_m_dot_htf, "updc_mspt_default", s_cycle_des)
|
Paddle_Enterprise_CaseBook/Waybill_Information_Extraction/run_ernie.py | wwhio/awesome-DeepLearning | 1,150 | 12707485 | <gh_stars>1000+
import paddle
import paddle.nn as nn
import paddlenlp
from functools import partial
from paddlenlp.datasets import MapDataset
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.layers import LinearChainCrf, ViterbiDecoder, LinearChainCrfLoss
from paddlenlp.metrics import ChunkEvaluator
from utils import load_dict, evaluate, predict, parse_decodes1, parse_decodes2
from paddlenlp.transformers import ErnieTokenizer, ErnieForTokenClassification, ErnieGramTokenizer, ErnieGramForTokenClassification
from utils import convert_example
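# Each waybill data file is tab-separated: every line holds a character
# sequence and its label sequence, with individual tokens/labels joined by
# the '\002' character (see load_dataset below).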
def load_dataset(datafiles):
def read(data_path):
with open(data_path, 'r', encoding='utf-8') as fp:
next(fp)
for line in fp.readlines():
words, labels = line.strip('\n').split('\t')
words = words.split('\002')
labels = labels.split('\002')
yield words, labels
if isinstance(datafiles, str):
return MapDataset(list(read(datafiles)))
elif isinstance(datafiles, list) or isinstance(datafiles, tuple):
return [MapDataset(list(read(datafile))) for datafile in datafiles]
train_ds, dev_ds, test_ds = load_dataset(datafiles=(
'./waybill_data/train.txt', './waybill_data/dev.txt', './waybill_data/test.txt'))
label_vocab = load_dict('./conf/tag.dic')
# Set the name of the model you want to use
MODEL_NAME = "ernie-1.0"
tokenizer = ErnieTokenizer.from_pretrained(MODEL_NAME)
trans_func = partial(convert_example, tokenizer=tokenizer, label_vocab=label_vocab)
train_ds.map(trans_func)
dev_ds.map(trans_func)
test_ds.map(trans_func)
ignore_label = -1
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input_ids
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type_ids
Stack(), # seq_len
Pad(axis=0, pad_val=ignore_label) # labels
): fn(samples)
train_loader = paddle.io.DataLoader(
dataset=train_ds,
batch_size=200,
return_list=True,
collate_fn=batchify_fn)
dev_loader = paddle.io.DataLoader(
dataset=dev_ds,
batch_size=200,
return_list=True,
collate_fn=batchify_fn)
test_loader = paddle.io.DataLoader(
dataset=test_ds,
batch_size=200,
return_list=True,
collate_fn=batchify_fn)
# Define the model netword and its loss
model = ErnieForTokenClassification.from_pretrained("ernie-1.0", num_classes=len(label_vocab))
metric = ChunkEvaluator(label_list=label_vocab.keys(), suffix=True)
loss_fn = paddle.nn.loss.CrossEntropyLoss(ignore_index=ignore_label)
optimizer = paddle.optimizer.AdamW(learning_rate=2e-5, parameters=model.parameters())
step = 0
for epoch in range(10):
# Switch the model to training mode
model.train()
for idx, (input_ids, token_type_ids, length, labels) in enumerate(train_loader):
logits = model(input_ids, token_type_ids)
loss = paddle.mean(loss_fn(logits, labels))
loss.backward()
optimizer.step()
optimizer.clear_grad()
step += 1
print("epoch:%d - step:%d - loss: %f" % (epoch, step, loss))
evaluate(model, metric, dev_loader)
paddle.save(model.state_dict(),
'./ernie_result/model_%d.pdparams' % step)
# model.save_pretrained('./checkpoint')
# tokenizer.save_pretrained('./checkpoint')
preds = predict(model, test_loader, test_ds, label_vocab)
file_path = "ernie_results.txt"
with open(file_path, "w", encoding="utf8") as fout:
fout.write("\n".join(preds))
# Print some examples
print(
"The results have been saved in the file: %s, some examples are shown below: "
% file_path)
print("\n".join(preds[:10]))
|
excel4lib/lang/excel4_translator.py | aaaddress1/boobsnail | 169 | 12707497 | <gh_stars>100-1000
from excel4lib.config import *
from excel4lib.config.excel4_translator_config import Excel4MissingTranslationLevel
from excel4lib.utils import *
from excel4lib.exception import *
class Excel4Translator(object):
'''
    `Excel4Translator` class allows translating English formulas into another language.
`Excel4Translator` stores translation files in the langs directory in .json format.
Translation files have the following format:
```
{
"arguments_separator": ",",
"name": "LANG_NAME",
"row_character": "ROW_CHARACTER",
"col_character": "COL_CHARACTER",
"translation": {
"ENG_FORMULA":"TRANSLATION_FORMULA",
(...)
}
}
```
- `arguments_separator` - stores character used to separate formula arguments;
    - `name` - stores the name of the language. It should be the same as the file name without the extension, for example pl_PL (then the file name is pl_pl.json);
- `row_character` - stores character used to translate ROW character in RC_STYLE;
- `col_character` - stores character used to translate COLUMN character when RC_STYLE is used;
    - `translation` - stores formula translations in the form KEY:VALUE where KEY is the formula in English and VALUE
      is the translation of this formula in the corresponding language
'''
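    # A minimal, illustrative translation file (the language code and the
    # formula mapping below are examples only, not shipped with the library):
    #
    # langs/xx_XX.json
    # {
    #     "arguments_separator": ";",
    #     "name": "xx_XX",
    #     "row_character": "W",
    #     "col_character": "K",
    #     "translation": {"SUM": "SUMA"}
    # }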
# Reference to configuration
config = Excel4Config.translator
# Current language - the language into which the text is to be translated
language = config.language
# Language from which translation is done
# By default we use formulas in English
# If you want to translate for example from Polish to English, then change Excel4Translator.native_language to pl_PL
# and set Excel4Translator.language variable to en_US. Then create file en_US.json as translations file.
# If Excel4Translator.language is equal to Excel4Translator.native_language then translation is not done
native_language = "en_US"
# Current language translations
translations = {native_language:{}}
# Default arguments separator. Returned when arguments_separator key is not defined in translations
arguments_separator = ","
# Default characters for rows and cols.
row_character = "R"
col_character = "C"
@staticmethod
def init():
'''
Initializes translator and loads `Excel4Translator.language` translation into memory.
'''
Excel4Translator.load_translations()
@staticmethod
def check_translations():
'''
Checks if translations have required keys. If not then `Excel4RequiredKeyMissingException` is raised.
'''
# Do not check if current language is equal to native
if Excel4Translator.is_native():
return
req = ["translation"]
translations_path = join_path(Excel4Translator.config.translations_directory,
Excel4Translator.language + Excel4Translator.config.translations_ext)
for k in req:
if k not in Excel4Translator.translations[Excel4Translator.language]:
raise Excel4RequiredKeyMissingException(
"{} key is missing in translations {}".format(k, translations_path))
@staticmethod
def load_translations(lang=None):
'''
Loads translation defined in `lang` into memory. If `lang` is None then `Excel4Translator.language` is loaded.
        If the translation file does not exist or could not be found then `Excel4PathNotExistException` is raised.
'''
# Do not load if current language is equal to native
if (not lang) and Excel4Translator.is_native():
return
if not lang:
lang = Excel4Translator.language
if lang in Excel4Translator.translations:
return
translations_path = join_path(Excel4Translator.config.translations_directory,
lang + Excel4Translator.config.translations_ext)
# Check if file with translations exists
if not is_path(translations_path):
raise Excel4PathNotExistException("File with translations {} does not exist".format(translations_path))
Excel4Translator.translations[lang] = load_json_file(translations_path)
# Check if translations have all required keys
Excel4Translator.check_translations()
@staticmethod
def set_language(lang):
'''
Sets current language (`Excel4Translator.langauge`) to `lang` and loads translation.
:param lang: name of the language
'''
# Save current language
temp = Excel4Translator.language
Excel4Translator.language = lang
try:
Excel4Translator.load_translations()
except Exception as ex:
# Restore language
Excel4Translator.language = temp
raise ex
@staticmethod
def is_native():
'''
Checks if `Excel4Translator.language` is equal to `Excel4Translator.native_language`
:return: True if yes and False if not
'''
return Excel4Translator.language == Excel4Translator.native_language
@staticmethod
def translate(formula, lang = None):
'''
Translates formula to `lang`. If `lang` is None then current language `Excel4Translator.language` is used.
:param formula: name of formula to translate
:param lang: name of the language
:return: string translated formula
'''
lang_b = None
# Init translations
if not Excel4Translator.translations:
Excel4Translator.init()
# If formula is empty or it contains spaces then do not translate
if (not formula) or (" " in formula):
return formula
if lang and (lang != Excel4Translator.language):
lang_b = Excel4Translator.language
Excel4Translator.set_language(lang)
# Do not translate if current language is equal to native
if Excel4Translator.is_native():
return formula
if not Excel4Translator.get_value("translation"):
return
if formula not in Excel4Translator.translations[Excel4Translator.language]["translation"]:
# Raise exception if translation is missing
if Excel4Translator.config.missing_translation == Excel4MissingTranslationLevel.EXCEPTION:
translations_path = join_path(Excel4Translator.config.translations_directory,
Excel4Translator.language + Excel4Translator.config.translations_ext)
raise Excel4TranslationMissingException(
"Translation of {} formula is missing in translations {} file".format(formula, translations_path))
# Print if translation is missing
elif Excel4Translator.config.missing_translation == Excel4MissingTranslationLevel.LOG:
translations_path = join_path(Excel4Translator.config.translations_directory,
Excel4Translator.language + Excel4Translator.config.translations_ext)
print("[!] Translation of {} formula is missing in translations {} file".format(formula, translations_path))
return formula
translation_f = Excel4Translator.translations[Excel4Translator.language]["translation"][formula]
if lang_b:
Excel4Translator.set_language(lang_b)
return translation_f
@staticmethod
def t(formula, lang=None):
'''
Translates formula to `lang`. If `lang` is None then current language `Excel4Translator.language` is used.
:param formula: name of formula to translate
:param lang: name of the language
:return: string translated formula
'''
return Excel4Translator.translate(formula, lang)
@staticmethod
def translate_address(address):
'''
Translates cell address
:param address: address of cell to translate in RC_STYLE reference style
:return: string translated address
'''
# Init translations
if not Excel4Translator.translations:
Excel4Translator.init()
# Do not translate if current language is equal to native
if Excel4Translator.is_native():
return address
# Do not translate if reference style is set to A1
if not Excel4Config.rc_reference_style:
return address
return address.replace(Excel4Translator.row_character, Excel4Translator.get_row_character()).replace(Excel4Translator.col_character, Excel4Translator.get_col_character())
@staticmethod
def t_a(address):
'''
Translates cell address
:param address: address of cell to translate in RC_STYLE reference style
:return: string translated address
'''
return Excel4Translator.translate_address(address)
@staticmethod
def get_value(key_name):
'''
Returns value stored under `key_name` from `Excel4Translator.translations`.
If key does not exist then `Excel4RequiredKeyMissingException` is raised.
:param key_name:
:return: value stored under `key_name` in `Excel4Translator.translations` object
'''
if key_name not in Excel4Translator.translations[Excel4Translator.language]:
translations_path = join_path(Excel4Translator.config.translations_directory,
Excel4Translator.language + Excel4Translator.config.translations_ext)
raise Excel4RequiredKeyMissingException(
"{} key is missing in translations {}".format(key_name, translations_path))
return Excel4Translator.translations[Excel4Translator.language][key_name]
@staticmethod
def get_arguments_separator(lang=None):
'''
        Returns arguments separator for `lang`. If `lang` is None then the current language is used (`Excel4Translator.language`).
:param lang: name of the language
'''
if (not lang) and Excel4Translator.is_native():
return Excel4Translator.arguments_separator
if not lang:
lang = Excel4Translator.language
if lang not in Excel4Translator.translations:
Excel4Translator.load_translations(lang)
return Excel4Translator.translations[lang].get("arguments_separator", Excel4Translator.arguments_separator)
@staticmethod
def get_row_character(lang=None):
'''
        Returns row character for `lang`. If `lang` is None then the current language is used (`Excel4Translator.language`).
:param lang: name of the language
'''
if (not lang) and Excel4Translator.is_native():
return Excel4Translator.row_character
if not lang:
lang = Excel4Translator.language
if lang not in Excel4Translator.translations:
Excel4Translator.load_translations(lang)
return Excel4Translator.translations[lang].get("row_character", Excel4Translator.row_character)
@staticmethod
def get_col_character(lang=None):
'''
        Returns column character for `lang`. If `lang` is None then the current language is used (`Excel4Translator.language`).
:param lang: name of the language
'''
if (not lang) and Excel4Translator.is_native():
return Excel4Translator.col_character
if not lang:
lang = Excel4Translator.language
if lang not in Excel4Translator.translations:
Excel4Translator.load_translations(lang)
return Excel4Translator.translations[lang].get("col_character", Excel4Translator.col_character)
@staticmethod
def get_languages():
'''
Returns list of available languages.
'''
translations_path = Excel4Translator.config.translations_directory
langs = []
for l in os.listdir(translations_path):
if (Excel4Translator.config.translations_ext == l.lower().split(".")[-1]) or (Excel4Translator.config.translations_ext == "."+l.lower().split(".")[-1]):
langs.append(".".join(l.split(".")[:-1]))
return langs |
mayan/apps/locales/forms.py | nattangwiwat/Mayan-EDMS-recitation | 343 | 12707506 | <gh_stars>100-1000
from django import forms
from mayan.apps.views.forms import DetailForm
from .models import UserLocaleProfile
class LocaleProfileForm(forms.ModelForm):
class Meta:
fields = ('language', 'timezone')
model = UserLocaleProfile
widgets = {
'language': forms.Select(
attrs={
'class': 'select2'
}
),
'timezone': forms.Select(
attrs={
'class': 'select2'
}
)
}
class LocaleProfileForm_view(DetailForm):
class Meta:
fields = ('language', 'timezone')
model = UserLocaleProfile
|
2021_04_28/dojo_test.py | devppjr/dojo | 114 | 12707523 | import unittest
from dojo import criar_fita, main, pinta_fita
class DojoTest(unittest.TestCase):
def test_main(self):
self.assertEqual(main(4, [1]), 3)
def test_main_outro(self):
self.assertEqual(main(13, [2, 3, 6]), 3)
    def test_main_outro_2(self):
        self.assertEqual(main(10, [9, 10]), 8)
def test_criar_fita(self):
fita = [1, 0, 0, 0]
self.assertListEqual(criar_fita(4, [1]), fita)
def test_criar_fita_vazia(self):
self.assertListEqual(criar_fita(0, []), [])
def test_criar_fita_3(self):
fita = [1,1,0]
self.assertListEqual(criar_fita(3, [1,2]), fita)
def test_pinta_fita(self):
fita = [1,1,0]
fita_pintada = [1,1,1]
self.assertListEqual(pinta_fita(fita), fita_pintada)
def test_pinta_fita2(self):
fita = [1, 0, 0, 0]
fita_pintada = [1, 1, 0, 0]
self.assertListEqual(pinta_fita(fita), fita_pintada)
    def test_pinta_fita3(self):
fita = [0, 1, 0]
fita_pintada = [1, 1, 1]
self.assertListEqual(pinta_fita(fita), fita_pintada)
# [0,1,0,0,0,1,0,0,0,0,0,1]
if __name__ == '__main__':
unittest.main()
#Joao - Ingrid - Lara - Juan - Tiago
# n = tape length => [0,0,1,0,1]
# x, y, z = the positions of the ink drops
# expects the number of days it will take for the tape to become completely black
# turn the input into an array
# painting function
# while there is some element = 0 in the array
# keep painting before and after every 1
# 13 3
# 2 6 13
# 10 2
# 9 10 |
plots/chapter5.py | haeseung81/PyTorchStepByStep | 170 | 12707548 | import numpy as np
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
def plot_images(images, targets, n_plot=30):
n_rows = n_plot // 10 + ((n_plot % 10) > 0)
fig, axes = plt.subplots(n_rows, 10, figsize=(15, 1.5 * n_rows))
axes = np.atleast_2d(axes)
for i, (image, target) in enumerate(zip(images[:n_plot], targets[:n_plot])):
row, col = i // 10, i % 10
ax = axes[row, col]
ax.set_title('#{} - Label:{}'.format(i, target), {'size': 12})
# plot filter channel in grayscale
ax.imshow(image.squeeze(), cmap='gray', vmin=0, vmax=1)
for ax in axes.flat:
ax.set_xticks([])
ax.set_yticks([])
ax.label_outer()
plt.tight_layout()
return fig |
tests/test/plugin/test_isolation_update.py | ActorForth/brownie | 1,595 | 12707552 | #!/usr/bin/python3
import json
from pathlib import Path
import pytest
test_source = """
def test_stuff(BrownieTester, accounts):
c = accounts[0].deploy(BrownieTester, True)
c.doNothing({'from': accounts[0]})"""
def test_update_no_isolation(plugintester):
result = plugintester.runpytest()
result.assert_outcomes(passed=1)
result = plugintester.runpytest("-U")
result.assert_outcomes(passed=1)
@pytest.mark.parametrize("arg", ["", "-n 2"])
def test_update_isolation(isolatedtester, arg):
result = isolatedtester.runpytest(arg)
result.assert_outcomes(passed=1)
result = isolatedtester.runpytest("-U", arg)
result.assert_outcomes(skipped=1)
@pytest.mark.parametrize("arg", ["", "-n 2"])
def test_update_isolation_coverage(isolatedtester, arg):
result = isolatedtester.runpytest("-U", arg)
result.assert_outcomes(passed=1)
result = isolatedtester.runpytest("-C", "-U", arg)
result.assert_outcomes(passed=1)
result = isolatedtester.runpytest("-C", "-U", arg)
result.assert_outcomes(skipped=1)
result = isolatedtester.runpytest("-U", arg)
result.assert_outcomes(skipped=1)
isolatedtester.runpytest(arg)
result = isolatedtester.runpytest("-C", "-U", arg)
result.assert_outcomes(skipped=1)
@pytest.mark.parametrize("arg", ["", "-n 2"])
def test_update_isolation_contract_changed(isolatedtester, arg):
isolatedtester.runpytest()
path = Path(isolatedtester.tmpdir).joinpath("contracts/BrownieTester.sol")
with path.open() as fp:
source = fp.read()
source = source.replace("two", "tree fiddy")
with path.open("w") as fp:
fp.write(source)
result = isolatedtester.runpytest("-U", arg)
result.assert_outcomes(passed=1)
@pytest.mark.parametrize("arg", ["", "-n 2"])
def test_update_isolation_testfile_changed(json_path, isolatedtester, arg):
isolatedtester.runpytest()
with json_path.open() as fp:
build = json.load(fp)
build["tests"]["tests/test_0.py"]["sha1"] = "potato"
with json_path.open("w") as fp:
build = json.dump(build, fp)
result = isolatedtester.runpytest("-U", arg)
result.assert_outcomes(passed=1)
|
utils/convert-pokec.py | wangxiaoyunanne/GraphDefense | 140 | 12707555 | import os
import sys
import h5py
import numpy as np
import pandas as pd
import networkx as nx
from convert import make_adjacency, make_sparse_adjacency, save_problem, spadj2edgelist
np.random.seed(123)
def load_ages(path):
ages = pd.read_csv(path, header=None, sep='\t')
ages.columns = ('id', 'age')
ages = ages[ages.age != 'null']
ages.age = ages.age.astype(int)
ages = ages[ages.age > 0]
return ages
max_degree = 128
inpath = '../data/pokec/'
# --
# Load data
ages = load_ages(os.path.join(inpath, 'soc-pokec-ages.tsv'))
edges = pd.read_csv(os.path.join(inpath, 'soc-pokec-relationships.txt'), header=None, sep='\t')
edges.columns = ('src', 'trg')
edges = edges[edges.src.isin(ages.id)]
edges = edges[edges.trg.isin(ages.id)]
ages = ages[ages.id.isin(edges.src) | ages.id.isin(edges.trg)]
ages['uid'] = np.arange(ages.shape[0])
edges = pd.merge(edges, ages, left_on='src', right_on='id')
edges = edges[['uid', 'trg']]
edges.columns = ('src', 'trg')
edges = pd.merge(edges, ages, left_on='trg', right_on='id')
edges = edges[['src', 'uid']]
edges.columns = ('src', 'trg')
ages = ages[['uid', 'age']]
targets = np.array(ages.age).astype(float).reshape(-1, 1)
folds = np.random.choice(['train', 'val'], targets.shape[0], p=[0.5, 0.5])
G = nx.from_edgelist(np.array(edges))
# --
# Dense version
adj = make_adjacency(G, max_degree, sel=None) # Adds dummy node
aug_targets = np.vstack([targets, np.zeros((targets.shape[1],), dtype='float64')])
aug_folds = np.hstack([folds, ['dummy']])
save_problem({
"task" : 'regression_mae',
"n_classes" : None,
"feats" : None,
"adj" : adj,
"train_adj" : adj,
"targets" : aug_targets,
"folds" : aug_folds,
}, '../data/pokec/problem.h5')
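# --
# Sparse version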
spadj = make_sparse_adjacency(G, sel=None)
aug_targets = np.vstack([np.zeros((targets.shape[1],), dtype='float64'), targets])
aug_folds = np.hstack([['dummy'], folds])
save_problem({
"task" : 'regression_mae',
"n_classes" : None,
"feats" : None,
"sparse" : True,
"adj" : spadj2edgelist(spadj),
"train_adj" : spadj2edgelist(spadj),
"targets" : aug_targets,
"folds" : aug_folds,
}, '../data/pokec/sparse-problem.h5')
|
paasta_tools/broadcast_log_to_services.py | sobolevn/paasta | 1,711 | 12707586 | #!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from paasta_tools.kubernetes_tools import get_all_kubernetes_services_running_here
from paasta_tools.marathon_tools import marathon_services_running_here
from paasta_tools.mesos_tools import MesosSlaveConnectionError
from paasta_tools.tron_tools import tron_jobs_running_here
from paasta_tools.utils import _log
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import load_system_paasta_config
def broadcast_log_all_services_running_here(line: str, soa_dir=DEFAULT_SOA_DIR) -> None:
"""Log a line of text to paasta logs of all services running on this host.
:param line: text to log
"""
system_paasta_config = load_system_paasta_config()
cluster = system_paasta_config.get_cluster()
services = get_all_services_running_here(cluster, soa_dir)
for service, instance, _ in services:
_log(
line=line,
service=service,
instance=instance,
component="monitoring",
cluster=cluster,
)
def get_all_services_running_here(cluster, soa_dir):
try:
marathon_services = marathon_services_running_here()
except MesosSlaveConnectionError:
marathon_services = []
try:
tron_services = tron_jobs_running_here()
except MesosSlaveConnectionError:
tron_services = []
try:
kubernetes_services = get_all_kubernetes_services_running_here()
except Exception:
kubernetes_services = []
return marathon_services + tron_services + kubernetes_services
def main() -> None:
broadcast_log_all_services_running_here(sys.stdin.read().strip())
if __name__ == "__main__":
main()
|
android/toolchains/emulator/toolchain.bzl | artem-zinnatullin/rules_android | 140 | 12707641 | # Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the emulator_toolchain rule to allow configuring emulator binaries to use."""
EmulatorInfo = provider(
doc = "Information used to launch a specific version of the emulator.",
fields = {
"emulator": "A label for the emulator launcher executable at stable version.",
"emulator_deps": "Additional files required to launch the stable version of emulator.",
"emulator_head": "A label for the emulator launcher executable at head version.",
"emulator_head_deps": "Additional files required to launch the head version of emulator.",
},
)
def _emulator_toolchain_impl(ctx):
toolchain_info = platform_common.ToolchainInfo(
info = EmulatorInfo(
emulator = ctx.attr.emulator,
emulator_deps = ctx.attr.emulator_deps,
emulator_head = ctx.attr.emulator_head,
emulator_head_deps = ctx.attr.emulator_head_deps,
),
)
return [toolchain_info]
emulator_toolchain = rule(
implementation = _emulator_toolchain_impl,
attrs = {
"emulator": attr.label(
allow_files = True,
cfg = "host",
executable = True,
mandatory = True,
),
"emulator_deps": attr.label_list(
allow_files = True,
cfg = "host",
),
"emulator_head": attr.label(
allow_files = True,
cfg = "host",
executable = True,
),
"emulator_head_deps": attr.label_list(
allow_files = True,
cfg = "host",
),
},
)
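# Hedged usage sketch (illustrative only; the target labels below are placeholders,
# not real targets from this repository):
#
# emulator_toolchain(
#     name = "emulator_stable",
#     emulator = "//tools/emulator:launcher",
#     emulator_deps = ["//tools/emulator:launcher_data"],
# )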
|
tests/data/expected/main/main_openapi_http_refs/output.py | adaamz/datamodel-code-generator | 891 | 12707655 | # generated by datamodel-codegen:
# filename: https://example.com/refs.yaml
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from typing import Optional
from pydantic import AnyUrl, BaseModel, Field, conint
class Problem(BaseModel):
detail: Optional[str] = Field(
None,
description='A human readable explanation specific to this occurrence of the\nproblem. You MUST NOT expose internal informations, personal\ndata or implementation details through this field.\n',
example='Request took too long to complete.',
)
instance: Optional[AnyUrl] = Field(
None,
description='An absolute URI that identifies the specific occurrence of the problem.\nIt may or may not yield further information if dereferenced.\n',
)
status: Optional[conint(ge=100, lt=600)] = Field(
None,
description='The HTTP status code generated by the origin server for this occurrence\nof the problem.\n',
example=503,
)
title: Optional[str] = Field(
None,
description='A short, summary of the problem type. Written in english and readable\nfor engineers (usually not suited for non technical stakeholders and\nnot localized); example: Service Unavailable\n',
)
type: Optional[AnyUrl] = Field(
'about:blank',
description='An absolute URI that identifies the problem type. When dereferenced,\nit SHOULD provide human-readable documentation for the problem type\n(e.g., using HTML).\n',
example='https://tools.ietf.org/html/rfc7231#section-6.6.4',
)
|
slimta/logging/log.py | slimta/python-slimta | 141 | 12707695 | # Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import absolute_import
from slimta.util.pycompat import reprlib
__all__ = ['log_repr', 'logline']
log_repr = reprlib.Repr()
log_repr.maxstring = 100
log_repr.maxother = 100
def logline(log, type, typeid, operation, **data):
if not data:
log('{0}:{1}:{2}'.format(type, typeid, operation))
else:
data_str = ' '.join(['='.join((key, log_repr.repr(val)))
for key, val in sorted(data.items())])
log('{0}:{1}:{2} {3}'.format(type, typeid, operation, data_str))
|
tdc/test/dev_tests/chem_utils_test/test_molconverter.py | Shicheng-Guo/TDC | 577 | 12707696 | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import os
import sys
import unittest
import shutil
# temporary solution for relative imports in case TDC is not installed
# if TDC is installed, no need to use the following line
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../../')))
class TestMolConvert(unittest.TestCase):
def setUp(self):
print(os.getcwd())
pass
def test_MolConvert(self):
from tdc.chem_utils import MolConvert
converter = MolConvert(src = 'SMILES', dst = 'Graph2D')
converter(['Clc1ccccc1C2C(=C(/N/C(=C2/C(=O)OCC)COCCN)C)\C(=O)OC',
'CCCOc1cc2ncnc(Nc3ccc4ncsc4c3)c2cc1S(=O)(=O)C(C)(C)C'])
from tdc.chem_utils import MolConvert
MolConvert.eligible_format()
def tearDown(self):
print(os.getcwd())
if os.path.exists(os.path.join(os.getcwd(), "data")):
shutil.rmtree(os.path.join(os.getcwd(), "data")) |
soundata/jams_utils.py | lucaspbastos/soundata | 177 | 12707699 | """Utilities for converting soundata Annotation classes to jams format.
"""
import logging
import os
from typing import Callable, List
import jams
import librosa
from soundata import annotations
def jams_converter(
audio_path=None, spectrogram_path=None, metadata=None, tags=None, events=None
):
"""Convert annotations from a clip to JAMS format.
Args:
audio_path (str or None):
A path to the corresponding audio file, or None. If provided,
the audio file will be read to compute the duration. If None,
'duration' must be a field in the metadata dictionary, or the
resulting jam object will not validate.
spectrogram_path (str or None):
A path to the corresponding spectrum file, or None.
tags (annotations.Tags or annotations.MultiAnnotator or None):
An instance of annotations.Tags/annotations.MultiAnnotator describing the audio tags.
events (annotations.Events or annotations.MultiAnnotator or None):
An instance of annotations.Events/annotations.MultiAnnotator describing the sound events.
Returns:
jams.JAMS: A JAMS object containing the annotations.
"""
jam = jams.JAMS()
# duration
duration = None
if audio_path is not None:
if os.path.exists(audio_path):
duration = librosa.get_duration(filename=audio_path)
else:
raise OSError(
"jams conversion failed because the audio file "
+ "for this clip cannot be found, and it is required "
+ "to compute duration."
)
if spectrogram_path is not None:
if audio_path is None:
duration = metadata["duration"]
# metadata
if metadata is not None:
for key in metadata:
if (
key == "duration"
and duration is not None
and metadata[key] != duration
and audio_path is not None
):
logging.warning(
"Duration provided in metadata does not"
+ "match the duration computed from the audio file."
+ "Using the duration provided by the metadata."
)
if metadata[key] is None:
continue
if hasattr(jam.file_metadata, key):
setattr(jam.file_metadata, key, metadata[key])
else:
setattr(jam.sandbox, key, metadata[key])
if jam.file_metadata.duration is None:
jam.file_metadata.duration = duration
# soundata tags
if tags is not None:
if isinstance(tags, annotations.Tags):
jam.annotations.append(
tags_to_jams(tags, duration=jam.file_metadata.duration)
)
elif isinstance(tags, annotations.MultiAnnotator):
jam.annotations.extend(multiannotator_to_jams(tags, tags_to_jams))
else:
raise TypeError(
"tags should be of type annotations.Tags or annotations.MultiAnnotator"
)
# soundata events
if events is not None:
if isinstance(events, annotations.Events):
jam.annotations.append(events_to_jams(events))
elif isinstance(events, annotations.MultiAnnotator):
jam.annotations.extend(multiannotator_to_jams(events, events_to_jams))
else:
raise TypeError(
"events should be of type annotations.Events or annotations.MultiAnnotator"
)
return jam
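# Hedged usage sketch (illustrative only; `clip` stands for a hypothetical soundata
# clip object providing `audio_path` and `tags` attributes):
#
#   jam = jams_converter(audio_path=clip.audio_path, tags=clip.tags)
#   jam.save("clip.jams")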
def multiannotator_to_jams(
multiannot: annotations.MultiAnnotator,
converter: Callable[..., annotations.Annotation],
**kwargs,
) -> List[jams.Annotation]:
"""Convert tags annotations into jams format.
Args:
tags (annotations.MultiAnnotator): MultiAnnotator object
converter (Callable[..., annotations.Annotation]): a function that takes an annotation object, its annotator, (and other optional arguments), and return a jams annotation object
Returns:
List[jams.Annotation]: List of jams annotation objects.
"""
jams_annot = []
for annotator, annotation in zip(multiannot.annotators, multiannot.annotations):
jams_annot.append(converter(annotation, annotator=annotator, **kwargs))
return jams_annot
def tags_to_jams(
tags, annotator=None, duration=0, namespace="tag_open", description=None
):
"""Convert tags annotations into jams format.
Args:
tags (annotations.Tags): tags annotation object
annotator (str): annotator id
namespace (str): the jams-compatible tag namespace
description (str): annotation description
Returns:
jams.Annotation: jams annotation object.
"""
ann = jams.Annotation(namespace=namespace)
ann.annotation_metadata = jams.AnnotationMetadata(
data_source="soundata",
annotator={"id": annotator} if annotator is not None else None,
)
for t, c in zip(tags.labels, tags.confidence):
ann.append(time=0.0, duration=duration, value=t, confidence=c)
if description is not None:
ann.sandbox = jams.Sandbox(name=description)
return ann
def events_to_jams(events, annotator=None, description=None):
"""Convert events annotations into jams format.
Args:
events (annotations.Events): events data object
annotator (str): annotator id
description (str): annotation description
Returns:
jams.Annotation: jams annotation object.
"""
jannot_events = jams.Annotation(namespace="segment_open")
jannot_events.annotation_metadata = jams.AnnotationMetadata(
data_source="soundata",
annotator={"id": annotator} if annotator is not None else None,
)
for inter, label, conf in zip(events.intervals, events.labels, events.confidence):
jannot_events.append(
time=inter[0], duration=inter[1] - inter[0], value=label, confidence=conf
)
if description is not None:
jannot_events.sandbox = jams.Sandbox(name=description)
return jannot_events
|
Python3/940.py | rakhi2001/ecom7 | 854 | 12707704 | <gh_stars>100-1000
__________________________________________________________________________________________________
sample 36 ms submission
from collections import defaultdict
class Solution:
def distinctSubseqII(self, S: str) -> int:
total = 1
dic = defaultdict(int)
m = 10 ** 9 + 7
for c in S:
t = total
total += total - dic[c]
total %= m
dic[c] = t
return (total - 1) % m
__________________________________________________________________________________________________
sample 13096 kb submission
class Solution:
def distinctSubseqII(self, S):
N = len(S)
dp, seen = [1] + [0] * N, dict()
for i, c in enumerate(S, start = 1):
dp[i] = (dp[i-1] * 2) % (10**9 + 7)
if c in seen:
dp[i] -= dp[seen[c] - 1]
seen[c] = i
return (dp[N] - 1) % (10**9 + 7)
__________________________________________________________________________________________________
|
tests/test_observable/test_debounce.py | mmpio/RxPY | 4,342 | 12707705 | <gh_stars>1000+
import unittest
from rx import empty, never, throw, operators as _
from rx.testing import TestScheduler, ReactiveTest
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class RxException(Exception):
pass
# Helper function for raising exceptions within lambdas
def _raise(ex):
raise RxException(ex)
class TestDebounce(unittest.TestCase):
def test_debounce_timespan_allpass(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(150, 1), on_next(200, 2), on_next(250, 3), on_next(
300, 4), on_next(350, 5), on_next(400, 6), on_next(450, 7), on_next(500, 8), on_completed(550))
def create():
return xs.pipe(_.debounce(40))
results = scheduler.start(create)
assert results.messages == [on_next(290, 3), on_next(340, 4), on_next(
390, 5), on_next(440, 6), on_next(490, 7), on_next(540, 8), on_completed(550)]
def test_debounce_timespan_allpass_error_end(self):
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(150, 1), on_next(200, 2), on_next(250, 3), on_next(
300, 4), on_next(350, 5), on_next(400, 6), on_next(450, 7), on_next(500, 8), on_error(550, ex))
def create():
return xs.pipe(_.debounce(40))
results = scheduler.start(create)
assert results.messages == [on_next(290, 3), on_next(340, 4), on_next(
390, 5), on_next(440, 6), on_next(490, 7), on_next(540, 8), on_error(550, ex)]
def test_debounce_timespan_alldrop(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(150, 1), on_next(200, 2), on_next(250, 3), on_next(
300, 4), on_next(350, 5), on_next(400, 6), on_next(450, 7), on_next(500, 8), on_completed(550))
def create():
return xs.pipe(_.debounce(60))
results = scheduler.start(create)
assert results.messages == [on_next(550, 8), on_completed(550)]
def test_debounce_timespan_alldrop_error_end(self):
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(150, 1), on_next(200, 2), on_next(250, 3), on_next(
300, 4), on_next(350, 5), on_next(400, 6), on_next(450, 7), on_next(500, 8), on_error(550, ex))
def create():
return xs.pipe(_.debounce(60))
results = scheduler.start(create)
assert results.messages == [on_error(550, ex)]
def test_debounce_timespan_some_drop(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(150, 1), on_next(250, 2), on_next(350, 3), on_next(
370, 4), on_next(421, 5), on_next(480, 6), on_next(490, 7), on_next(500, 8), on_completed(600))
def create():
return xs.pipe(_.debounce(50))
results = scheduler.start(create)
assert results.messages == [on_next(300, 2), on_next(
420, 4), on_next(471, 5), on_next(550, 8), on_completed(600)]
def test_debounce_empty(self):
scheduler = TestScheduler()
def create():
return empty().pipe(_.debounce(10))
results = scheduler.start(create)
assert results.messages == [on_completed(200)]
def test_debounce_error(self):
ex = 'ex'
scheduler = TestScheduler()
def create():
return throw(ex).pipe(_.debounce(10))
results = scheduler.start(create)
assert results.messages == [on_error(200, ex)]
def test_debounce_never(self):
scheduler = TestScheduler()
def create():
return never().pipe(_.debounce(10))
results = scheduler.start(create)
assert results.messages == []
def test_debounce_duration_delay_behavior(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(150, -1),
on_next(250, 0),
on_next(280, 1),
on_next(310, 2),
on_next(350, 3),
on_next(400, 4),
on_completed(550))
ys = [scheduler.create_cold_observable(on_next(20, 42), on_next(25, 99)), scheduler.create_cold_observable(on_next(20, 42), on_next(25, 99)), scheduler.create_cold_observable(
on_next(20, 42), on_next(25, 99)), scheduler.create_cold_observable(on_next(20, 42), on_next(25, 99)), scheduler.create_cold_observable(on_next(20, 42), on_next(25, 99))]
def create():
def mapper(x):
return ys[x]
return xs.pipe(_.throttle_with_mapper(mapper))
results = scheduler.start(create)
assert results.messages == [on_next(250 + 20, 0), on_next(280 + 20, 1), on_next(310 + 20, 2),
on_next(350 + 20, 3), on_next(400 + 20, 4), on_completed(550)]
assert xs.subscriptions == [subscribe(200, 550)]
assert ys[0].subscriptions == [subscribe(250, 250 + 20)]
assert ys[1].subscriptions == [subscribe(280, 280 + 20)]
assert ys[2].subscriptions == [subscribe(310, 310 + 20)]
assert ys[3].subscriptions == [subscribe(350, 350 + 20)]
assert ys[4].subscriptions == [subscribe(400, 400 + 20)]
def test_debounce_duration_throttle_behavior(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(150, -1), on_next(250, 0), on_next(280, 1),
on_next(310, 2), on_next(350, 3), on_next(400, 4), on_completed(550))
ys = [scheduler.create_cold_observable(on_next(20, 42), on_next(25, 99)), scheduler.create_cold_observable(on_next(40, 42), on_next(45, 99)), scheduler.create_cold_observable(
on_next(20, 42), on_next(25, 99)), scheduler.create_cold_observable(on_next(60, 42), on_next(65, 99)), scheduler.create_cold_observable(on_next(20, 42), on_next(25, 99))]
def create():
def mapper(x):
return ys[x]
return xs.pipe(_.throttle_with_mapper(mapper))
results = scheduler.start(create)
assert results.messages == [on_next(250 + 20, 0), on_next(310 + 20, 2), on_next(400 + 20, 4), on_completed(550)]
assert xs.subscriptions == [subscribe(200, 550)]
assert ys[0].subscriptions == [subscribe(250, 250 + 20)]
assert ys[1].subscriptions == [subscribe(280, 310)]
assert ys[2].subscriptions == [subscribe(310, 310 + 20)]
assert ys[3].subscriptions == [subscribe(350, 400)]
assert ys[4].subscriptions == [subscribe(400, 400 + 20)]
def test_debounce_duration_early_completion(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(150, -1),
on_next(250, 0),
on_next(280, 1),
on_next(310, 2),
on_next(350, 3),
on_next(400, 4),
on_completed(410))
ys = [
scheduler.create_cold_observable(
on_next(20, 42),
on_next(25, 99)),
scheduler.create_cold_observable(
on_next(40, 42),
on_next(45, 99)),
scheduler.create_cold_observable(
on_next(20, 42),
on_next(25, 99)),
scheduler.create_cold_observable(
on_next(60, 42),
on_next(65, 99)),
scheduler.create_cold_observable(
on_next(20, 42),
on_next(25, 99))
]
def create():
def mapper(x):
return ys[x]
return xs.pipe(_.throttle_with_mapper(mapper))
results = scheduler.start(create)
assert results.messages == [on_next(250 + 20, 0), on_next(310 + 20, 2), on_next(410, 4), on_completed(410)]
assert xs.subscriptions == [subscribe(200, 410)]
assert ys[0].subscriptions == [subscribe(250, 250 + 20)]
assert ys[1].subscriptions == [subscribe(280, 310)]
assert ys[2].subscriptions == [subscribe(310, 310 + 20)]
assert ys[3].subscriptions == [subscribe(350, 400)]
assert ys[4].subscriptions == [subscribe(400, 410)]
def test_debounce_duration_inner_error(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(150, 1), on_next(
250, 2), on_next(350, 3), on_next(450, 4), on_completed(550))
ex = 'ex'
def create():
def mapper(x):
if x < 4:
return scheduler.create_cold_observable(on_next(x * 10, "Ignore"), on_next(x * 10 + 5, "Aargh!"))
else:
return scheduler.create_cold_observable(on_error(x * 10, ex))
return xs.pipe(_.throttle_with_mapper(mapper))
results = scheduler.start(create)
assert results.messages == [on_next(250 + 2 * 10, 2), on_next(350 + 3 * 10, 3), on_error(450 + 4 * 10, ex)]
assert xs.subscriptions == [subscribe(200, 490)]
def test_debounce_duration_outer_error(self):
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(150, 1), on_next(
250, 2), on_next(350, 3), on_next(450, 4), on_error(460, ex))
def create():
def mapper(x):
return scheduler.create_cold_observable(on_next(x * 10, "Ignore"), on_next(x * 10 + 5, "Aargh!"))
return xs.pipe(_.throttle_with_mapper(mapper))
results = scheduler.start(create)
assert results.messages == [on_next(250 + 2 * 10, 2), on_next(350 + 3 * 10, 3), on_error(460, ex)]
assert xs.subscriptions == [subscribe(200, 460)]
def test_debounce_duration_mapper_throws(self):
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(150, 1), on_next(
250, 2), on_next(350, 3), on_next(450, 4), on_completed(550))
def create():
def mapper(x):
if x < 4:
return scheduler.create_cold_observable(on_next(x * 10, "Ignore"), on_next(x * 10 + 5, "Aargh!"))
else:
_raise(ex)
return xs.pipe(_.throttle_with_mapper(mapper))
results = scheduler.start(create)
assert results.messages == [on_next(250 + 2 * 10, 2), on_next(350 + 3 * 10, 3), on_error(450, ex)]
assert xs.subscriptions == [subscribe(200, 450)]
def test_debounce_duration_inner_done_delay_behavior(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(150, 1), on_next(
250, 2), on_next(350, 3), on_next(450, 4), on_completed(550))
def create():
def mapper(x):
return scheduler.create_cold_observable(on_completed(x * 10))
return xs.pipe(_.throttle_with_mapper(mapper))
results = scheduler.start(create)
assert results.messages == [
on_next(250 + 2 * 10, 2),
on_next(350 + 3 * 10, 3),
on_next(450 + 4 * 10, 4),
on_completed(550)]
assert xs.subscriptions == [subscribe(200, 550)]
def test_debounce_duration_inner_done_throttle_behavior(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(150, 1),
on_next(250, 2),
on_next(280, 3),
on_next(300, 4),
on_next(400, 5),
on_next(410, 6),
on_completed(550))
def create():
def mapper(x):
return scheduler.create_cold_observable(on_completed(x * 10))
return xs.pipe(_.throttle_with_mapper(mapper))
results = scheduler.start(create)
assert results.messages == [on_next(250 + 2 * 10, 2), on_next(300 + 4 * 10, 4),
on_next(410 + 6 * 10, 6), on_completed(550)]
assert xs.subscriptions == [subscribe(200, 550)]
|
curlylint/rules/meta_viewport/meta_viewport.py | adamchainz/curlylint | 155 | 12707709 | from curlylint import ast
from curlylint.check_node import CheckNode, build_tree
from curlylint.issue import Issue
META_VIEWPORT = "meta_viewport"
RULE = {
"id": "meta_viewport",
"type": "accessibility",
"docs": {
"description": "The `viewport` meta tag should not use `user-scalable=no`, and `maximum-scale` should be 2 or above, so end users can zoom",
"url": "https://www.curlylint.org/docs/rules/meta_viewport",
"impact": "Critical",
"tags": ["cat.language", "wcag2aa", "wcag144"],
"resources": [
"[Understanding WCAG SC 1.4.4 Resize Text](http://www.w3.org/TR/UNDERSTANDING-WCAG20/visual-audio-contrast-scale.html)",
"[axe-core, meta-viewport](https://dequeuniversity.com/rules/axe/3.5/meta-viewport)",
],
},
"schema": {
"$schema": "http://json-schema.org/draft/2019-09/schema#",
"oneOf": [
{
"const": True,
"title": "`user-scalable=no` must not be used, and `maximum-scale` should be 2 or above.",
"examples": [True],
},
],
},
}
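# Illustrative examples (assumed, not taken from the rule definition above) of markup
# this rule reports versus accepts:
#   <meta name="viewport" content="width=device-width, user-scalable=no">  -> reported
#   <meta name="viewport" content="width=device-width, maximum-scale=1">   -> reported
#   <meta name="viewport" content="width=device-width, initial-scale=1">   -> accepted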
def find_valid(node, file):
name = getattr(node.value, "name", None)
is_meta = (
isinstance(node.value, ast.Element) and name and name.lower() == "meta"
)
if is_meta:
attributes = []
if getattr(node.value, "opening_tag", None):
attributes = {}
for n in node.value.opening_tag.attributes.nodes:
attributes[str(n.name)] = str(n.value).strip("\"'")
if "name" in attributes and attributes["name"] == "viewport":
if "user-scalable=no" in attributes["content"]:
return [
Issue.from_node(
file,
node,
"Remove `user-scalable=no` from the viewport meta so users can zoom",
"meta_viewport",
)
]
if (
"maximum-scale=1" in attributes["content"]
or "maximum-scale=0" in attributes["content"]
):
return [
Issue.from_node(
file,
node,
"`maximum-scale` should not be less than 2",
"meta_viewport",
)
]
if not node.children:
return []
return sum((find_valid(child, file) for child in node.children), [])
def meta_viewport(file, config):
root = CheckNode(None)
build_tree(root, file.tree)
src = file.source.lower()
if r"user-scalable" in src or r"maximum-scale" in src:
return find_valid(root, file)
return []
|
functest/func_materials.py | Luvideria/lightmetrica-v3 | 101 | 12707728 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Materials
#
# This test showcases rendering with various materials provided by Lightmetrica. We render the images using ``renderer::pt``.
# %load_ext autoreload
# %autoreload 2
import lmenv
env = lmenv.load('.lmenv')
import os
import pickle
import json
import numpy as np
import matplotlib.pyplot as plt
import lightmetrica as lm
# %load_ext lightmetrica_jupyter
import lmscene
lm.init()
lm.log.init('jupyter')
lm.progress.init('jupyter')
lm.info()
lm.comp.load_plugin(os.path.join(env.bin_path, 'accel_embree'))
if not lm.Release:
lm.parallel.init('openmp', num_threads=1)
lm.debug.attach_to_debugger()
# +
def render(scene, name, **kwargs):
w = 854
h = 480
film = lm.load_film('film', 'bitmap', w=w, h=h)
renderer = lm.load_renderer('renderer', name,
scene=scene,
output=film,
max_verts=20,
scheduler='time',
render_time=30,
**kwargs)
renderer.render()
return np.copy(film.buffer())
def display_image(img, fig_size=15, scale=1):
f = plt.figure(figsize=(fig_size,fig_size))
ax = f.add_subplot(111)
ax.imshow(np.clip(np.power(img*scale,1/2.2),0,1), origin='lower')
ax.axis('off')
plt.show()
# -
# ## Scene setup
# Create scene
accel = lm.load_accel('accel', 'embree')
scene = lm.load_scene('scene', 'default', accel=accel)
mat = lm.load_material('mat_ut', 'diffuse', Kd=[1,1,1])
lmscene.bunny_with_area_light(scene, env.scene_path, mat_knob=mat)
scene.build()
# ## Rendering
# ### Diffuse material
#
# `material::diffuse`
lm.load_material('mat_ut', 'diffuse', Kd=[.8,.2,.2])
img = render(scene, 'pt')
display_image(img)
# ### Glossy material
#
# `material::glossy`
lm.load_material('mat_ut', 'glossy', Ks=[.8,.2,.2], ax=0.2, ay=0.2)
img = render(scene, 'pt')
display_image(img)
# ### Perfect specular reflection
#
# `material::mirror`
lm.load_material('mat_ut', 'mirror')
img = render(scene, 'pt')
display_image(img)
# ### Fresnel reflection / refraction
#
# `material::fresnel`
lm.load_material('mat_ut', 'glass', Ni=1.5)
img = render(scene, 'pt')
display_image(img)
# ### Mixture material with constant weights using RR
#
# `material::constant_weight_mixture_rr`
mat_diffuse = lm.load_material('mat_diffuse', 'diffuse', Kd=[.1,.8,.1])
mat_glossy = lm.load_material('mat_glossy', 'glossy', Ks=[.8,.1,.1], ax=0.2, ay=0.2)
mat_mirror = lm.load_material('mat_mirror', 'mirror')
mat = lm.load_material('mat_ut', 'constant_weight_mixture_rr', [
{'material': mat_diffuse.loc(), 'weight': 0.2},
{'material': mat_glossy.loc(), 'weight': 0.4},
{'material': mat_mirror.loc(), 'weight': 0.4}
])
img = render(scene, 'pt')
display_image(img)
# ### Mixture material with constant weights using marginalization
#
# `material::constant_weight_mixture_marginalized`
mat = lm.load_material('mat_ut', 'constant_weight_mixture_marginalized', [
{'material': mat_diffuse.loc(), 'weight': 0.2},
{'material': mat_glossy.loc(), 'weight': 0.4},
{'material': mat_mirror.loc(), 'weight': 0.4}
])
img = render(scene, 'pt')
display_image(img)
# ### Mixture material with alpha texture
#
# `material::mixture_wavefrontobj`
#
# This material is the default material converted from MTL format of Wavefront OBJ.
tex = lm.load_texture('tex', 'bitmap',
path=os.path.join(env.scene_path, 'fireplace_room', 'textures', 'leaf.png'))
lm.load_material('mat_ut', 'mixture_wavefrontobj',
Kd=[.8,.8,.8],
mapKd=tex,
Ks=[0,0,0],
ax=0.2,
ay=0.2,
no_alpha_mask=False)
img = render(scene, 'pt')
display_image(img)
|
spyder/plugins/completion/providers/languageserver/widgets/messagebox.py | Earthman100/spyder | 7,956 | 12707753 | <reponame>Earthman100/spyder
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Language Server Protocol message boxes."""
# Standard library imports
import os
# Third party imports
from qtpy.QtCore import Signal
from qtpy.QtWidgets import QMessageBox
# Local imports
from spyder.config.base import _
from spyder.widgets.helperwidgets import MessageCheckBox
class ServerDisabledMessageBox(MessageCheckBox):
sig_restart_spyder = Signal()
def __init__(self, parent, warn_str, set_conf):
super().__init__(icon=QMessageBox.Warning, parent=parent)
self.set_conf = set_conf
self.setWindowTitle(_("Warning"))
self.set_checkbox_text(_("Don't show again"))
self.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
self.setDefaultButton(QMessageBox.No)
self.set_checked(False)
self.set_check_visible(True)
self.setText(warn_str)
def exec_(self):
answer = super().exec_()
self.set_conf('show_lsp_down_warning', not self.is_checked())
if answer == QMessageBox.Yes:
self.sig_restart_spyder.emit()
@classmethod
def instance(cls, warn_str, set_conf):
def wrapper(parent):
return cls(parent, warn_str, set_conf)
return wrapper
|
esphome/components/lock/__init__.py | OttoWinter/esphomeyaml | 249 | 12707761 | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import automation
from esphome.automation import Condition, maybe_simple_id
from esphome.components import mqtt
from esphome.const import (
CONF_ID,
CONF_ON_LOCK,
CONF_ON_UNLOCK,
CONF_TRIGGER_ID,
CONF_MQTT_ID,
)
from esphome.core import CORE, coroutine_with_priority
from esphome.cpp_helpers import setup_entity
CODEOWNERS = ["@esphome/core"]
IS_PLATFORM_COMPONENT = True
lock_ns = cg.esphome_ns.namespace("lock")
Lock = lock_ns.class_("Lock", cg.EntityBase)
LockPtr = Lock.operator("ptr")
LockCall = lock_ns.class_("LockCall")
UnlockAction = lock_ns.class_("UnlockAction", automation.Action)
LockAction = lock_ns.class_("LockAction", automation.Action)
OpenAction = lock_ns.class_("OpenAction", automation.Action)
LockPublishAction = lock_ns.class_("LockPublishAction", automation.Action)
LockCondition = lock_ns.class_("LockCondition", Condition)
LockLockTrigger = lock_ns.class_("LockLockTrigger", automation.Trigger.template())
LockUnlockTrigger = lock_ns.class_("LockUnlockTrigger", automation.Trigger.template())
LOCK_SCHEMA = cv.ENTITY_BASE_SCHEMA.extend(cv.MQTT_COMMAND_COMPONENT_SCHEMA).extend(
{
cv.OnlyWith(CONF_MQTT_ID, "mqtt"): cv.declare_id(mqtt.MQTTLockComponent),
cv.Optional(CONF_ON_LOCK): automation.validate_automation(
{
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(LockLockTrigger),
}
),
cv.Optional(CONF_ON_UNLOCK): automation.validate_automation(
{
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(LockUnlockTrigger),
}
),
}
)
async def setup_lock_core_(var, config):
await setup_entity(var, config)
for conf in config.get(CONF_ON_LOCK, []):
trigger = cg.new_Pvariable(conf[CONF_TRIGGER_ID], var)
await automation.build_automation(trigger, [], conf)
for conf in config.get(CONF_ON_UNLOCK, []):
trigger = cg.new_Pvariable(conf[CONF_TRIGGER_ID], var)
await automation.build_automation(trigger, [], conf)
if CONF_MQTT_ID in config:
mqtt_ = cg.new_Pvariable(config[CONF_MQTT_ID], var)
await mqtt.register_mqtt_component(mqtt_, config)
async def register_lock(var, config):
if not CORE.has_id(config[CONF_ID]):
var = cg.Pvariable(config[CONF_ID], var)
cg.add(cg.App.register_lock(var))
await setup_lock_core_(var, config)
LOCK_ACTION_SCHEMA = maybe_simple_id(
{
cv.Required(CONF_ID): cv.use_id(Lock),
}
)
@automation.register_action("lock.unlock", UnlockAction, LOCK_ACTION_SCHEMA)
@automation.register_action("lock.lock", LockAction, LOCK_ACTION_SCHEMA)
@automation.register_action("lock.open", OpenAction, LOCK_ACTION_SCHEMA)
async def lock_action_to_code(config, action_id, template_arg, args):
paren = await cg.get_variable(config[CONF_ID])
return cg.new_Pvariable(action_id, template_arg, paren)
@automation.register_condition("lock.is_locked", LockCondition, LOCK_ACTION_SCHEMA)
async def lock_is_on_to_code(config, condition_id, template_arg, args):
paren = await cg.get_variable(config[CONF_ID])
return cg.new_Pvariable(condition_id, template_arg, paren, True)
@automation.register_condition("lock.is_unlocked", LockCondition, LOCK_ACTION_SCHEMA)
async def lock_is_off_to_code(config, condition_id, template_arg, args):
paren = await cg.get_variable(config[CONF_ID])
return cg.new_Pvariable(condition_id, template_arg, paren, False)
@coroutine_with_priority(100.0)
async def to_code(config):
cg.add_global(lock_ns.using)
cg.add_define("USE_LOCK")
|
aiomcache/pool.py | lexdene/aiomcache | 121 | 12707784 | <filename>aiomcache/pool.py
import asyncio
from collections import namedtuple
__all__ = ['MemcachePool']
_connection = namedtuple('connection', ['reader', 'writer'])
class MemcachePool:
def __init__(self, host, port, *, minsize, maxsize, loop=None):
loop = loop if loop is not None else asyncio.get_event_loop()
self._host = host
self._port = port
self._minsize = minsize
self._maxsize = maxsize
self._loop = loop
self._pool = asyncio.Queue(loop=loop)
self._in_use = set()
@asyncio.coroutine
def clear(self):
"""Clear pool connections."""
while not self._pool.empty():
conn = yield from self._pool.get()
self._do_close(conn)
def _do_close(self, conn):
conn.reader.feed_eof()
conn.writer.close()
@asyncio.coroutine
def acquire(self):
"""Acquire connection from the pool, or spawn new one
if pool maxsize permits.
:return: ``tuple`` (reader, writer)
"""
while self.size() == 0 or self.size() < self._minsize:
_conn = yield from self._create_new_conn()
if _conn is None:
break
self._pool.put_nowait(_conn)
conn = None
while not conn:
_conn = yield from self._pool.get()
if _conn.reader.at_eof() or _conn.reader.exception():
self._do_close(_conn)
conn = yield from self._create_new_conn()
else:
conn = _conn
self._in_use.add(conn)
return conn
def release(self, conn):
"""Releases connection back to the pool.
:param conn: ``namedtuple`` (reader, writer)
"""
self._in_use.remove(conn)
if conn.reader.at_eof() or conn.reader.exception():
self._do_close(conn)
else:
self._pool.put_nowait(conn)
@asyncio.coroutine
def _create_new_conn(self):
if self.size() < self._maxsize:
reader, writer = yield from asyncio.open_connection(
self._host, self._port, loop=self._loop)
if self.size() < self._maxsize:
return _connection(reader, writer)
else:
reader.feed_eof()
writer.close()
return None
else:
return None
def size(self):
return self._pool.qsize() + len(self._in_use)
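# Hedged usage sketch (illustrative only, not part of the original module): how a
# caller is expected to pair acquire() and release(), following the docstrings above.
# The command bytes below are just an example memcached request.
@asyncio.coroutine
def _example_roundtrip(pool):
    conn = yield from pool.acquire()
    try:
        conn.writer.write(b'version\r\n')
        line = yield from conn.reader.readline()
        return line
    finally:
        pool.release(conn)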
|
Tests/cu2qu/cli_test.py | odidev/fonttools | 2,705 | 12707790 | <reponame>odidev/fonttools
import os
import pytest
import py
ufoLib2 = pytest.importorskip("ufoLib2")
from fontTools.cu2qu.ufo import CURVE_TYPE_LIB_KEY
from fontTools.cu2qu.cli import main
DATADIR = os.path.join(os.path.dirname(__file__), 'data')
TEST_UFOS = [
py.path.local(DATADIR).join("RobotoSubset-Regular.ufo"),
py.path.local(DATADIR).join("RobotoSubset-Bold.ufo"),
]
@pytest.fixture
def test_paths(tmpdir):
result = []
for path in TEST_UFOS:
new_path = tmpdir / path.basename
path.copy(new_path)
result.append(new_path)
return result
class MainTest(object):
@staticmethod
def run_main(*args):
main([str(p) for p in args if p])
def test_single_input_no_output(self, test_paths):
ufo_path = test_paths[0]
self.run_main(ufo_path)
font = ufoLib2.Font.open(ufo_path)
assert font.lib[CURVE_TYPE_LIB_KEY] == "quadratic"
def test_single_input_output_file(self, tmpdir):
input_path = TEST_UFOS[0]
output_path = tmpdir / input_path.basename
self.run_main('-o', output_path, input_path)
assert output_path.check(dir=1)
def test_multiple_inputs_output_dir(self, tmpdir):
output_dir = tmpdir / "output_dir"
self.run_main('-d', output_dir, *TEST_UFOS)
assert output_dir.check(dir=1)
outputs = set(p.basename for p in output_dir.listdir())
assert "RobotoSubset-Regular.ufo" in outputs
assert "RobotoSubset-Bold.ufo" in outputs
def test_interpolatable_inplace(self, test_paths):
self.run_main('-i', *test_paths)
self.run_main('-i', *test_paths) # idempotent
@pytest.mark.parametrize(
"mode", ["", "-i"], ids=["normal", "interpolatable"])
def test_copytree(self, mode, tmpdir):
output_dir = tmpdir / "output_dir"
self.run_main(mode, '-d', output_dir, *TEST_UFOS)
output_dir_2 = tmpdir / "output_dir_2"
# no conversion when curves are already quadratic, just copy
self.run_main(mode, '-d', output_dir_2, *output_dir.listdir())
# running again overwrites existing with the copy
self.run_main(mode, '-d', output_dir_2, *output_dir.listdir())
def test_multiprocessing(self, tmpdir, test_paths):
self.run_main(*(test_paths + ["-j"]))
def test_keep_direction(self, test_paths):
self.run_main('--keep-direction', *test_paths)
def test_conversion_error(self, test_paths):
self.run_main('--conversion-error', 0.002, *test_paths)
def test_conversion_error_short(self, test_paths):
self.run_main('-e', 0.003, test_paths[0])
|
matchzoo/utils/__init__.py | ChrisRBXiong/MatchZoo-py | 468 | 12707792 | from .one_hot import one_hot
from .tensor_type import TensorType
from .list_recursive_subclasses import list_recursive_concrete_subclasses
from .parse import parse_loss, parse_activation, parse_metric, parse_optimizer
from .average_meter import AverageMeter
from .timer import Timer
from .early_stopping import EarlyStopping
from .get_file import get_file, _hash_file
|
transistor/workers/workgroup.py | awesome-archive/transistor | 232 | 12707800 | <reponame>awesome-archive/transistor
# -*- coding: utf-8 -*-
"""
transistor.workers.workgroup
~~~~~~~~~~~~
This module implements WorkGroup.
See transistor.workers.__init__ for more notes on this module.
:copyright: Copyright (C) 2018 by BOM Quote Limited
:license: The MIT License, see LICENSE for more details.
~~~~~~~~~~~~
"""
from typing import NamedTuple, Type, Union, List, Optional
from transistor.workers.basegroup import BaseGroup
from transistor.persistence.loader import ItemLoader
from transistor.persistence.exporters.base import BaseItemExporter
from transistor.persistence.containers import Item
from transistor.scrapers.splash_scraper_abc import SplashScraper
from transistor.workers.baseworker import BaseWorker
from transistor.schedulers.books.bookstate import StatefulBook
class WorkGroup(NamedTuple):
"""
A container class to use when starting up a WorkGroupManager. Intended use
is, like below:
>>> groups = [
>>> WorkGroup(class_=MouseKeyGroup, workers=2, name='mousekey.cn',
>>> kwargs={'china':True, 'timeout': (3.0, 3.0)}),
>>>
>>> WorkGroup(class_=MouseKeyGroup, workers=2, name='mousekey.com',
>>> kwargs={'timeout':(3.0, 3.0)})
>>> ]
>>> manager = WorkGroupManager('part_number_job_1', book, groups=groups, pool=5)
:param name: name the group
:param url: the starting url for the group of Workers
:param spider: the custom defined Spider, i.e. subclass of SplashScraper
:param worker: the BaseWorker class or else a subclass of it
:param group: the <WorkerGroup> class object
:param items: a subclass of SplashItems, or some subclass of Item
:param loader: the ItemLoader class or else a subclass of it
    :param exporters: the BaseItemExporter class or else a subclass of it
:param kwargs: to use for each <Worker> instance in the group
"""
name: str
url : str
# tasks: Optional[Type[Union[Type[StatefulBook], dict]]]
spider: Type[SplashScraper]
worker: Type[BaseWorker] = BaseWorker
group: Type[BaseGroup] = BaseGroup
items: Type[Item] = Item
loader: Type[ItemLoader] = ItemLoader
exporters: List[Type[Union[Type[BaseItemExporter]]]] = BaseItemExporter
workers: int = 1
kwargs: dict = {}
|
tests/__init__.py | phillips96/similarity | 706 | 12707872 | from pathlib import Path
CURRRENT_DIR = Path(__file__).parent.absolute()
DATA_DIR = CURRRENT_DIR / 'data/'
|
BiBloSA/exp_SST/src/model/template.py | mikimaus78/ml_monorepo | 319 | 12707922 | <gh_stars>100-1000
from configs import cfg
from src.utils.record_log import _logger
import tensorflow as tf
import numpy as np
from abc import ABCMeta, abstractmethod
class ModelTemplate(metaclass=ABCMeta):
def __init__(self, token_emb_mat, glove_emb_mat, tds, cds, tl, scope):
self.scope = scope
self.global_step = tf.get_variable('global_step', shape=[], dtype=tf.int32,
initializer=tf.constant_initializer(0), trainable=False)
self.token_emb_mat, self.glove_emb_mat = token_emb_mat, glove_emb_mat
# ---- place holder -----
self.token_seq = tf.placeholder(tf.int32, [None, None], name='token_seq')
self.char_seq = tf.placeholder(tf.int32, [None, None, tl], name='context_char')
self.op_list = tf.placeholder(tf.int32, [None, None], name='op_lists') # bs,sol
self.reduce_mat = tf.placeholder(tf.int32, [None, None, None], name='reduce_mats') # [bs,sol,mc]
self.sentiment_label = tf.placeholder(tf.int32, [None], name='sentiment_label') # bs
self.is_train = tf.placeholder(tf.bool, [], name='is_train')
# ----------- parameters -------------
self.tds, self.cds = tds, cds
self.tl = tl
self.tel = cfg.word_embedding_length
self.cel = cfg.char_embedding_length
self.cos = cfg.char_out_size
self.ocd = list(map(int, cfg.out_channel_dims.split(',')))
self.fh = list(map(int, cfg.filter_heights.split(',')))
self.hn = cfg.hidden_units_num
self.finetune_emb = cfg.fine_tune
self.output_class = 5 if cfg.fine_grained else 2
self.bs = tf.shape(self.token_seq)[0]
self.sl = tf.shape(self.token_seq)[1]
self.ol = tf.shape(self.op_list)[1]
self.mc = tf.shape(self.reduce_mat)[2]
# ------------ other ---------
self.token_mask = tf.cast(self.token_seq, tf.bool)
self.char_mask = tf.cast(self.char_seq, tf.bool)
self.token_len = tf.reduce_sum(tf.cast(self.token_mask, tf.int32), -1)
self.char_len = tf.reduce_sum(tf.cast(self.char_mask, tf.int32), -1)
self.stack_mask = tf.not_equal(self.op_list, tf.zeros_like(self.op_list))
self.tensor_dict = {}
# ------ start ------
self.logits = None
self.loss = None
self.accuracy = None
self.var_ema = None
self.ema = None
self.summary = None
self.opt = None
self.train_op = None
@abstractmethod
def build_network(self):
pass
def build_loss(self):
# weight_decay
with tf.name_scope("weight_decay"):
for var in set(tf.get_collection('reg_vars', self.scope)):
weight_decay = tf.multiply(tf.nn.l2_loss(var), cfg.wd,
name="{}-wd".format('-'.join(str(var.op.name).split('/'))))
tf.add_to_collection('losses', weight_decay)
reg_vars = tf.get_collection('losses', self.scope)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
_logger.add('regularization var num: %d' % len(reg_vars))
_logger.add('trainable var num: %d' % len(trainable_vars))
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=self.sentiment_label,
logits=self.logits
)
tf.add_to_collection('losses', tf.reduce_mean(losses, name='xentropy_loss_mean'))
loss = tf.add_n(tf.get_collection('losses', self.scope), name='loss')
tf.summary.scalar(loss.op.name, loss)
tf.add_to_collection('ema/scalar', loss)
return loss
def build_accuracy(self):
correct = tf.equal(
tf.cast(tf.argmax(self.logits, -1), tf.int32),
self.sentiment_label
) # [bs]
return tf.cast(correct, tf.float32)
def update_tensor_add_ema_and_opt(self):
self.logits = self.build_network()
self.loss = self.build_loss()
self.accuracy = self.build_accuracy()
# ------------ema-------------
if True:
self.var_ema = tf.train.ExponentialMovingAverage(cfg.var_decay)
self.build_var_ema()
if cfg.mode == 'train':
self.ema = tf.train.ExponentialMovingAverage(cfg.decay)
self.build_ema()
self.summary = tf.summary.merge_all()
# ---------- optimization ---------
if cfg.optimizer.lower() == 'adadelta':
assert cfg.learning_rate > 0.1 and cfg.learning_rate < 1.
self.opt = tf.train.AdadeltaOptimizer(cfg.learning_rate)
elif cfg.optimizer.lower() == 'adam':
assert cfg.learning_rate < 0.1
self.opt = tf.train.AdamOptimizer(cfg.learning_rate)
elif cfg.optimizer.lower() == 'rmsprop':
assert cfg.learning_rate < 0.1
self.opt = tf.train.RMSPropOptimizer(cfg.learning_rate)
else:
raise AttributeError('no optimizer named as \'%s\'' % cfg.optimizer)
self.train_op = self.opt.minimize(self.loss, self.global_step,
var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope))
def build_var_ema(self):
ema_op = self.var_ema.apply(tf.trainable_variables(),)
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def build_ema(self):
tensors = tf.get_collection("ema/scalar", scope=self.scope) + \
tf.get_collection("ema/vector", scope=self.scope)
ema_op = self.ema.apply(tensors)
for var in tf.get_collection("ema/scalar", scope=self.scope):
ema_var = self.ema.average(var)
tf.summary.scalar(ema_var.op.name, ema_var)
for var in tf.get_collection("ema/vector", scope=self.scope):
ema_var = self.ema.average(var)
tf.summary.histogram(ema_var.op.name, ema_var)
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def get_feed_dict(self, sample_batch, data_type='train'):
# max lens
sl, ol, mc = 0, 0, 0
for sample in sample_batch:
sl = max(sl, len(sample['root_node']['token_seq']))
ol = max(ol, len(sample['shift_reduce_info']['op_list']))
for reduce_list in sample['shift_reduce_info']['reduce_mat']:
mc = max(mc, len(reduce_list))
assert mc == 0 or mc == 2, mc
# token and char
token_seq_b = []
char_seq_b = []
for sample in sample_batch:
token_seq = np.zeros([sl], cfg.intX)
char_seq = np.zeros([sl, self.tl], cfg.intX)
for idx_t,(token, char_seq_v) in enumerate(zip(sample['root_node']['token_seq_digital'],
sample['root_node']['char_seq_digital'])):
token_seq[idx_t] = token
for idx_c, char in enumerate(char_seq_v):
if idx_c >= self.tl: break
char_seq[idx_t, idx_c] = char
token_seq_b.append(token_seq)
char_seq_b.append(char_seq)
token_seq_b = np.stack(token_seq_b)
char_seq_b = np.stack(char_seq_b)
# tree
op_list_b = []
reduce_mat_b = []
for sample in sample_batch:
op_list = np.zeros([ol], cfg.intX)
reduce_mat = np.zeros([ol, mc], cfg.intX)
for idx_o, (op, reduce_list) in enumerate(zip(sample['shift_reduce_info']['op_list'],
sample['shift_reduce_info']['reduce_mat'])):
op_list[idx_o] = op
for idx_m, red in enumerate(reduce_list):
reduce_mat[idx_o, idx_m] = red
op_list_b.append(op_list)
reduce_mat_b.append(reduce_mat)
op_list_b = np.stack(op_list_b)
reduce_mat_b = np.stack(reduce_mat_b)
# label
sentiment_label_b = []
for sample in sample_batch:
sentiment_float = sample['root_node']['sentiment_label']
sentiment_int = cfg.sentiment_float_to_int(sentiment_float)
sentiment_label_b.append(sentiment_int)
sentiment_label_b = np.stack(sentiment_label_b).astype(cfg.intX)
feed_dict = {self.token_seq: token_seq_b, self.char_seq: char_seq_b,
self.op_list: op_list_b, self.reduce_mat: reduce_mat_b,
self.sentiment_label: sentiment_label_b,
self.is_train: True if data_type == 'train' else False}
return feed_dict
def step(self, sess, batch_samples, get_summary=False):
assert isinstance(sess, tf.Session)
feed_dict = self.get_feed_dict(batch_samples, 'train')
cfg.time_counter.add_start()
if get_summary:
loss, summary, train_op = sess.run([self.loss, self.summary, self.train_op], feed_dict=feed_dict)
else:
loss, train_op = sess.run([self.loss, self.train_op], feed_dict=feed_dict)
summary = None
cfg.time_counter.add_stop()
return loss, summary, train_op |
mmrotate/core/evaluation/__init__.py | Justice-Eternal/mmrotate | 449 | 12707926 | # Copyright (c) OpenMMLab. All rights reserved.
from .eval_map import eval_rbbox_map
__all__ = ['eval_rbbox_map']
|
Design/146. LRU Cache.py | beckswu/Leetcode | 138 | 12707927 | <reponame>beckswu/Leetcode<gh_stars>100-1000
#python OrderedDict
class LRUCache:
def __init__(self, capacity: int):
self.cache = collections.OrderedDict()
self.capacity = capacity
def get(self, key: int) -> int:
if key not in self.cache:
return -1
        val = self.cache.pop(key)
self.cache[key] = val
return val
def put(self, key: int, value: int) -> None:
if key in self.cache:
del self.cache[key]
elif len(self.cache) == self.capacity:
self.cache.popitem(last = False)
self.cache[key] = value
class LRUCache:
def __init__(self, MSize):
self.size = MSize
self.cache = {}
self.next, self.before = {}, {}
self.head, self.tail = '#', '$'
self.connect(self.head, self.tail)
def connect(self, a, b):
self.next[a], self.before[b] = b, a
def delete(self, key):
self.connect(self.before[key], self.next[key])
del self.before[key], self.next[key], self.cache[key]
def append(self, k, v):
self.cache[k] = v
self.connect(self.before[self.tail], k)
self.connect(k, self.tail)
if len(self.cache) > self.size:
self.delete(self.next[self.head])
def get(self, key):
if key not in self.cache: return -1
val = self.cache[key]
self.delete(key)
self.append(key, val)
return val
def put(self, key, value):
if key in self.cache: self.delete(key)
self.append(key, value)
#Push in tail, delete from head
class ListNode:
def __init__(self,key, val):
self.key = key
self.val = val
self.next = None
self.prev = None
class LinkedList:
def __init__(self,):
self.head = None
self.tail = None
def insert(self, node):
node.next, node.prev = None, None
if self.head:
self.tail.next = node
node.prev = self.tail
else:
self.head = node
self.tail = node
def delete(self,node):
if node.prev:
node.prev.next = node.next
else:
self.head = node.next
if node.next:
node.next.prev = node.prev
else:
self.tail = node.prev
node.next, node.prev = None, None
class LRUCache:
def __init__(self, capacity: int):
self.List = LinkedList()
self.dic = {}
self.capacity = capacity
def __insert(self,key, val):
if key in self.dic:
self.List.delete(self.dic[key])
node = ListNode(key,val)
self.List.insert(node)
self.dic[key] = node
def get(self, key: int) -> int:
if key not in self.dic:
return -1
val = self.dic[key].val
self.__insert(key, val)
return val
def put(self, key: int, value: int) -> None:
if len(self.dic) == self.capacity and key not in self.dic:
#print("del ",self.List.head.key)
del self.dic[self.List.head.key]
self.List.delete(self.List.head)
self.__insert(key,value)
#Push in head, delete from tail
class ListNode:
def __init__(self,key, val):
self.key = key
self.val = val
self.next = None
self.prev = None
class LinkedList:
def __init__(self,):
self.head = None
self.tail = None
def insert(self, node):
node.next, node.prev = None, None
if not self.tail:
self.tail = node
if self.head:
node.next = self.head
self.head.prev = node
self.head = node
def delete(self,node):
if node.prev:
node.prev.next = node.next
else:
self.head = node.next
if node.next:
node.next.prev = node.prev
else:
self.tail = node.prev
node.next, node.prev = None, None
class LRUCache:
def __init__(self, capacity: int):
self.List = LinkedList()
self.dic = {}
self.capacity = capacity
def __insert(self,key, val):
if key in self.dic:
self.List.delete(self.dic[key])
node = ListNode(key,val)
self.List.insert(node)
self.dic[key] = node
def get(self, key: int) -> int:
if key not in self.dic:
return -1
val = self.dic[key].val
self.__insert(key, val)
return val
def put(self, key: int, value: int) -> None:
if len(self.dic) == self.capacity and key not in self.dic:
#print("del ",self.List.tail.key)
del self.dic[self.List.tail.key]
self.List.delete(self.List.tail)
self.__insert(key,value)
|
tests/components/google_travel_time/test_config_flow.py | MrDelik/core | 30,023 | 12707950 | """Test the Google Maps Travel Time config flow."""
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.google_travel_time.const import (
ARRIVAL_TIME,
CONF_ARRIVAL_TIME,
CONF_AVOID,
CONF_DEPARTURE_TIME,
CONF_DESTINATION,
CONF_LANGUAGE,
CONF_ORIGIN,
CONF_TIME,
CONF_TIME_TYPE,
CONF_TRAFFIC_MODEL,
CONF_TRANSIT_MODE,
CONF_TRANSIT_ROUTING_PREFERENCE,
CONF_UNITS,
DEFAULT_NAME,
DEPARTURE_TIME,
DOMAIN,
)
from homeassistant.const import (
CONF_API_KEY,
CONF_MODE,
CONF_NAME,
CONF_UNIT_SYSTEM_IMPERIAL,
)
from tests.components.google_travel_time.const import MOCK_CONFIG
@pytest.mark.usefixtures("validate_config_entry", "bypass_setup")
async def test_minimum_fields(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_CONFIG,
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == DEFAULT_NAME
assert result2["data"] == {
CONF_NAME: DEFAULT_NAME,
CONF_API_KEY: "api_key",
CONF_ORIGIN: "location1",
CONF_DESTINATION: "location2",
}
@pytest.mark.usefixtures("invalidate_config_entry")
async def test_invalid_config_entry(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
MOCK_CONFIG,
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
@pytest.mark.parametrize(
"data,options",
[
(
MOCK_CONFIG,
{
CONF_MODE: "driving",
CONF_ARRIVAL_TIME: "test",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
},
)
],
)
@pytest.mark.usefixtures("validate_config_entry")
async def test_options_flow(hass, mock_config):
"""Test options flow."""
result = await hass.config_entries.options.async_init(
mock_config.entry_id, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_TIME_TYPE: ARRIVAL_TIME,
CONF_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == ""
assert result["data"] == {
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_ARRIVAL_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
}
assert mock_config.options == {
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_ARRIVAL_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
}
@pytest.mark.parametrize(
"data,options",
[(MOCK_CONFIG, {})],
)
@pytest.mark.usefixtures("validate_config_entry")
async def test_options_flow_departure_time(hass, mock_config):
"""Test options flow with departure time."""
result = await hass.config_entries.options.async_init(
mock_config.entry_id, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_TIME_TYPE: DEPARTURE_TIME,
CONF_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == ""
assert result["data"] == {
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_DEPARTURE_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
}
assert mock_config.options == {
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_DEPARTURE_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
}
@pytest.mark.usefixtures("validate_config_entry", "bypass_setup")
async def test_dupe(hass):
"""Test setting up the same entry data twice is OK."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_API_KEY: "test",
CONF_ORIGIN: "location1",
CONF_DESTINATION: "location2",
},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_API_KEY: "test",
CONF_ORIGIN: "location1",
CONF_DESTINATION: "location2",
},
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
|
src/pymap3d/tests/test_time.py | wrlssqi/pymap3d | 116 | 12707964 | <filename>src/pymap3d/tests/test_time.py
import pytest
from pytest import approx
from datetime import datetime
from pymap3d.timeconv import str2dt
import pymap3d.sidereal as pms
t0 = datetime(2014, 4, 6, 8)
def test_juliantime():
assert pms.juliandate(t0) == approx(2.456753833333e6)
def test_types():
np = pytest.importorskip("numpy")
assert str2dt(t0) == t0 # passthrough
assert str2dt("2014-04-06T08:00:00") == t0
ti = [str2dt("2014-04-06T08:00:00"), str2dt("2014-04-06T08:01:02")]
to = [t0, datetime(2014, 4, 6, 8, 1, 2)]
assert ti == to # even though ti is numpy array of datetime and to is list of datetime
t1 = [t0, t0]
assert (np.asarray(str2dt(t1)) == t0).all()
def test_datetime64():
np = pytest.importorskip("numpy")
t1 = np.datetime64(t0)
assert str2dt(t1) == t0
t1 = np.array([np.datetime64(t0), np.datetime64(t0)])
assert (str2dt(t1) == t0).all()
def test_xarray_time():
xarray = pytest.importorskip("xarray")
t = {"time": t0}
ds = xarray.Dataset(t)
assert str2dt(ds["time"]) == t0
t2 = {"time": [t0, t0]}
ds = xarray.Dataset(t2)
assert (str2dt(ds["time"]) == t0).all()
def test_pandas_time():
pandas = pytest.importorskip("pandas")
t = pandas.Series(t0)
assert (str2dt(t) == t0).all()
t = pandas.Series([t0, t0])
assert (str2dt(t) == t0).all()
|
t/o_tables_test.py | rohankumardubey/orioledb | 947 | 12707980 | #!/usr/bin/env python3
# coding: utf-8
import unittest
import testgres
from .base_test import BaseTest
from .base_test import ThreadQueryExecutor
from testgres.enums import NodeStatus
class OTablesTest(BaseTest):
def assertTblCount(self, size):
self.assertEqual(size,
self.node.execute('postgres',
'SELECT count(*) FROM orioledb_table_oids();')[0][0])
def test_o_tables_wal_commit(self):
node = self.node
node.start()
node.safe_psql('postgres',
"CREATE EXTENSION IF NOT EXISTS orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_test(\n"
" id integer NOT NULL,\n"
" val text\n"
") USING orioledb;\n"
"INSERT INTO o_test VALUES(1, 'test');"
)
self.assertTblCount(1)
node.stop(['-m', 'immediate'])
node.start()
self.assertTblCount(1)
self.assertEqual(node.execute('postgres', 'SELECT count(*) FROM o_test;')[0][0], 1)
node.stop()
def test_o_tables_wal_rollback(self):
node = self.node
node.start()
node.safe_psql('postgres',
"CREATE EXTENSION IF NOT EXISTS orioledb;\n")
self.assertTblCount(0)
con1 = node.connect()
con1.begin()
con1.execute("CREATE TABLE IF NOT EXISTS o_test(\n"
" id integer NOT NULL,\n"
" val text\n"
") USING orioledb;\n")
con1.execute("INSERT INTO o_test VALUES(1, 'test');")
self.assertTblCount(1)
con1.rollback()
self.assertTblCount(0)
con1.close()
node.stop(['-m', 'immediate'])
node.start()
self.assertTblCount(0)
node.stop()
def test_o_tables_wal_drop_commit(self):
node = self.node
node.start()
node.safe_psql('postgres',
"CREATE EXTENSION IF NOT EXISTS orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_test(\n"
" id integer NOT NULL,\n"
" val text\n"
") USING orioledb;\n")
self.assertTblCount(1)
node.safe_psql('postgres',
"INSERT INTO o_test VALUES(1, 'test');")
node.safe_psql('postgres',
"DROP TABLE o_test;")
node.stop(['-m', 'immediate'])
node.start()
self.assertTblCount(0)
node.stop()
def test_o_tables_wal_drop_rollback(self):
node = self.node
node.start()
node.safe_psql('postgres',
"CREATE EXTENSION IF NOT EXISTS orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_test(\n"
" id integer NOT NULL,\n"
" val text\n"
") USING orioledb;\n")
self.assertTblCount(1)
node.safe_psql('postgres',
"INSERT INTO o_test VALUES(1, 'test');")
con1 = node.connect()
con1.begin()
con1.execute("DROP TABLE o_test;")
self.assertTblCount(1)
con1.rollback()
self.assertTblCount(1)
con1.close()
node.stop(['-m', 'immediate'])
node.start()
self.assertTblCount(1)
self.assertEqual(node.execute('postgres', 'SELECT count(*) FROM o_test;')[0][0], 1)
node.stop()
def test_o_tables_xip_commit(self):
node = self.node
node.start()
node.safe_psql('postgres', "CREATE EXTENSION IF NOT EXISTS orioledb;\n")
con1 = node.connect()
con1.begin()
con1.execute("CREATE TABLE IF NOT EXISTS o_test(\n"
" id integer NOT NULL\n"
") USING orioledb;\n")
con1.execute("INSERT INTO o_test VALUES(1);")
node.safe_psql("CHECKPOINT;");
con1.commit()
con1.close()
node.stop(['-m', 'immediate'])
node.start()
self.assertTblCount(1)
node.stop()
def test_o_tables_xip_rollback(self):
node = self.node
node.start()
node.safe_psql('postgres', "CREATE EXTENSION IF NOT EXISTS orioledb;\n")
con1 = node.connect()
con1.begin()
con1.execute("CREATE TABLE IF NOT EXISTS o_test(\n"
" id integer NOT NULL\n"
") USING orioledb;\n")
con1.execute("INSERT INTO o_test VALUES(1);")
node.safe_psql("CHECKPOINT;");
con1.rollback()
con1.close()
node.stop(['-m', 'immediate'])
node.start()
self.assertTblCount(0)
node.stop()
def test_o_tables_wal_drop_extension_commit(self):
node = self.node
node.start()
node.safe_psql('postgres',
"CREATE EXTENSION IF NOT EXISTS orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_test(\n"
" id integer NOT NULL,\n"
" val text\n"
") USING orioledb;\n")
self.assertTblCount(1)
node.safe_psql('postgres',
"DROP EXTENSION orioledb CASCADE;")
node.safe_psql('postgres',
"CREATE EXTENSION orioledb;")
self.assertTblCount(0)
node.stop(['-m', 'immediate'])
node.start()
self.assertTblCount(0)
node.stop()
def test_o_tables_wal_drop_extension_rollback(self):
node = self.node
node.start()
node.safe_psql('postgres',
"CREATE EXTENSION IF NOT EXISTS orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_test(\n"
" id integer NOT NULL,\n"
" val text\n"
") USING orioledb;\n")
self.assertTblCount(1)
con1 = node.connect()
con1.begin()
con1.execute("DROP EXTENSION orioledb CASCADE;")
con1.rollback()
con1.close()
self.assertTblCount(1)
node.stop(['-m', 'immediate'])
node.start()
self.assertTblCount(1)
node.stop()
def test_o_tables_mix(self):
node = self.node
node.start()
node.safe_psql('postgres',
"CREATE EXTENSION IF NOT EXISTS orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_test(\n"
" id integer NOT NULL,\n"
" val text\n"
") USING orioledb;\n")
self.assertTblCount(1)
node.safe_psql('postgres',
"DROP TABLE o_test;")
self.assertTblCount(0)
node.stop(['-m', 'immediate'])
node.start()
self.assertTblCount(0)
node.safe_psql('postgres',
"CREATE TABLE IF NOT EXISTS o_test1(\n"
" id integer NOT NULL\n"
") USING orioledb;\n")
node.safe_psql('postgres',
"CREATE TABLE IF NOT EXISTS o_test2(\n"
" id integer NOT NULL\n"
") USING orioledb;\n")
node.safe_psql('postgres',
"CREATE TABLE IF NOT EXISTS o_test3(\n"
" id integer NOT NULL\n"
") USING orioledb;\n")
self.assertTblCount(3)
node.safe_psql('postgres',
"DROP TABLE o_test3;")
self.assertTblCount(2)
node.stop(['-m', 'immediate'])
node.start()
self.assertTblCount(2)
con1 = node.connect()
con1.begin()
con1.execute("CREATE TABLE IF NOT EXISTS o_test3(\n"
" id integer NOT NULL\n"
") USING orioledb;\n")
con1.rollback()
self.assertTblCount(2)
con1.begin()
con1.execute("CREATE TABLE IF NOT EXISTS o_test3(\n"
" id integer NOT NULL\n"
") USING orioledb;\n")
con1.commit()
con1.close()
self.assertTblCount(3)
node.stop(['-m', 'immediate'])
node.start()
self.assertTblCount(3)
node.safe_psql('postgres',
"DROP EXTENSION orioledb CASCADE;\n")
node.stop(['-m', 'immediate'])
node.start()
node.safe_psql('postgres',
"CREATE EXTENSION orioledb;\n")
self.assertTblCount(0)
node.stop()
|
Lib/test/dtracedata/gc.py | shawwn/cpython | 52,316 | 12707982 | import gc
def start():
gc.collect(0)
gc.collect(1)
gc.collect(2)
l = []
l.append(l)
del l
gc.collect(2)
gc.collect()
start()
|
tests/unit/bivariate/test_independence.py | yzR1991/Copulas | 235 | 12708009 | from unittest import TestCase
import numpy as np
from copulas.bivariate.independence import Independence
class TestIndependence(TestCase):
def test___init__(self):
"""Independence copula can be instantiated directly."""
# Setup / Run
instance = Independence()
# Check
assert isinstance(instance, Independence)
assert instance.theta is None
assert instance.tau is None
def test_fit(self):
"""Fit checks that the given values are independent."""
# Setup
instance = Independence()
data = np.array([
[1, 2],
[4, 3]
])
# Run
instance.fit(data)
# Check
        assert instance.tau is None
        assert instance.theta is None
def test_cumulative_distribution(self):
"""cumulative_distribution is the product of both probabilities."""
# Setup
instance = Independence()
data = np.array([
[0.0, 0.0],
[0.1, 0.1],
[0.2, 0.2],
[0.5, 0.5],
[0.9, 0.9],
[1.0, 1.0]
])
expected_result = np.array([
0.00,
0.01,
0.04,
0.25,
0.81,
1.00,
])
# Run
result = instance.cumulative_distribution(data)
# Check
        assert (result == expected_result).all()
|
examples/save_views.py | larsoner/PySurfer | 158 | 12708036 | <filename>examples/save_views.py
"""
===================
Save a set of views
===================
Save some views in png files.
"""
from surfer import Brain
print(__doc__)
sub = 'fsaverage'
hemi = 'lh'
surf = 'inflated'
brain = Brain(sub, hemi, surf)
###############################################################################
# save 1 image
brain.show_view('lat')
brain.save_image("%s_lat.png" % sub)
###############################################################################
# save some images
brain.save_imageset(sub, ['med', 'lat', 'ros', 'caud'], 'jpg')
|
Bot/cogs/welcome.py | Shuri2060/Nurevam | 145 | 12708038 | from .utils import utils
from discord.ext import commands
import traceback
import datetime
import discord
class Welcome(commands.Cog):  # Welcomes new members who join a guild; if enabled, sends them a message.
def __init__(self,bot):
self.bot = bot
self.redis = bot.db.redis
async def error(self,owner,e):
# await owner.send("There is an error with a newcomer, please report this to the creator.\n {}".format(e))
Current_Time = datetime.datetime.utcnow().strftime("%b/%d/%Y %H:%M:%S UTC")
utils.prRed(Current_Time)
utils.prRed("Error!")
utils.prRed(traceback.format_exc())
error = '```py\n{}\n```'.format(traceback.format_exc())
await self.bot.owner.send("```py\n{}```".format(Current_Time + "\n" + "ERROR!") + "\n" + error)
@commands.Cog.listener()
async def on_member_join(self,member):
if await self.redis.hget("{}:Config:Cogs".format(member.guild.id),"welcome") == "on":
config = await self.redis.hgetall("{}:Welcome:Message".format(member.guild.id))
try:
if config.get("enable_message") == "on":
msg = config["message"].format(user=member.name,server=member.guild,user_mention=member.mention)
if config.get("enable_delete") == "on":
time = int(config["delete_msg"])
else:
time = None
if config.get("whisper") == "on":
await member.send(msg,delete_after = time)
else:
await self.bot.get_channel(int(config["channel"])).send(msg,delete_after = time)
#Now assign a roles.
if config.get("role") == "on":
role_list = await self.redis.smembers('{}:Welcome:Assign_Roles'.format(member.guild.id))
role_obj=[]
for x in role_list:
if x == '': #if it return empty string
continue
# role_obj.append(discord.utils.get(member.guild.roles,id=int(x)))
role_obj.append(member.guild.get_role(int(x)))
try:
await member.add_roles(*role_obj,reason = "User has join the server,and an admin request to add role(s) to new person")
except discord.Forbidden:
pass #if unable to add user
except discord.NotFound:
pass #if it cant find that user. Assume it left server.
except Exception as e:
await self.error(member.guild.owner, e)
def setup(bot):
bot.add_cog(Welcome(bot))
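# Hedged illustration (not from the original cog): the welcome template stored in
# Redis is rendered with str.format using the placeholder names seen above, so a
# configured value might look like
#   "Welcome {user_mention} to {server}!"
# The actual template text is whatever an admin configured, not defined here.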
|
tests/unit_tests/prepare_email/test_body_header_extraction.py | Erfard/melusine | 300 | 12708056 | import pandas as pd
import numpy as np
from melusine.prepare_email.body_header_extraction import extract_last_body
from melusine.prepare_email.body_header_extraction import extract_body
from melusine.prepare_email.body_header_extraction import extract_header
structured_body = [
{
"meta": {"date": None, "from": None, "to": None},
"structured_text": {
"header": "demande document",
"text": [
{"part": "Bonjour. ", "tags": "HELLO"},
{"part": "Je vous remercie pour le document", "tags": "BODY"},
{"part": "Cordialement,", "tags": "GREETINGS"},
{"part": "Mr Unknown", "tags": "BODY"},
],
},
},
{
"meta": {
"date": " mar. 22 mai 2018 à 10:20",
"from": " <<EMAIL>> ",
"to": None,
},
"structured_text": {
"header": "demande document",
"text": [
{"part": "Bonjour. ", "tags": "HELLO"},
{
"part": "Merci de bien vouloir prendre connaissance du document ci-joint",
"tags": "BODY",
},
{"part": "Cordialement,", "tags": "GREETINGS"},
{"part": "Votre mutuelle", "tags": "BODY"},
{
"part": "La visualisation des fichiers PDF nécessite Adobe Reader.",
"tags": "FOOTER",
},
],
},
},
]
def test_extract_last_body():
input_df = pd.DataFrame({"structured_body": [structured_body]})
output_df = pd.Series(["Je vous remercie pour le document "])
result = input_df.apply(extract_last_body, axis=1)
pd.testing.assert_series_equal(result, output_df)
message_dict = {
"meta": {
"date": " mar. 22 mai 2018 à 10:20",
"from": " <<EMAIL>> ",
"to": None,
},
"structured_text": {
"header": "demande document",
"text": [
{"part": "Bonjour. ", "tags": "HELLO"},
{
"part": "Merci de bien vouloir prendre connaissance du document ci-joint",
"tags": "BODY",
},
{"part": "Cordialement,", "tags": "GREETINGS"},
{"part": "Votre mutuelle", "tags": "BODY"},
{
"part": "La visualisation des fichiers PDF nécessite Adobe Reader.",
"tags": "FOOTER",
},
],
},
}
def test_extract_body():
input_dict = message_dict
output = "Merci de bien vouloir prendre connaissance du document ci-joint "
result = extract_body(input_dict)
np.testing.assert_string_equal(result, output)
def test_extract_header():
input_dict = message_dict
output = "demande document"
result = extract_header(input_dict)
np.testing.assert_string_equal(result, output)
|
RecoParticleFlow/PFTracking/python/particleFlowDisplacedVertexCandidate_cfi.py | ckamtsikis/cmssw | 852 | 12708067 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
particleFlowDisplacedVertexCandidate = cms.EDProducer("PFDisplacedVertexCandidateProducer",
# The track collection use for the fitting. May be any collection.
# The only condition is that it shall contain the hit pattern information
trackCollection = cms.InputTag("generalTracks"),
# verbosity
verbose = cms.untracked.bool(False),
# Debug flag
debug = cms.untracked.bool(False),
# maximum dca distance for two tracks to be linked
dcaCut = cms.double(0.5),
# minimum distance of secondary vertex with respect to the primary
primaryVertexCut = cms.double(1.8),
# maximum distance between the DCA Point and the inner hit of the track
# not used for the moment
dcaPInnerHitCut = cms.double(1000.0),
# Primary vertex information used for dxy calculation
mainVertexLabel = cms.InputTag("offlinePrimaryVertices", ""),
offlineBeamSpotLabel = cms.InputTag("offlineBeamSpot", ""),
# Tracks preselection to reduce the combinatorics in PFDisplacedVertexCandidates
# this cuts are repeated then in a smarter way in the PFDisplacedVertexFinder
# be sure you are consistent between them.
tracksSelectorParameters = cms.PSet(
# selection parameters for secondary tracks
nChi2_max = cms.double(5.),
pt_min = cms.double(.2),
# if the tracks is not a good candidate to be a secondary (dxy cut) restrict in minimal pt
# this cut reduce drastically the combinatorics. It is very useful to reduce the
# PFDisplacedVertex timing
pt_min_prim = cms.double(.8),
dxy = cms.double(.2),
)
)
|
hpOneView/resources/servers/id_pools.py | doziya/hpeOneView | 107 | 12708178 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from hpOneView.resources.resource import ResourceClient
class IdPools(object):
"""
Class for Id Pools API client.
"""
URI = '/rest/id-pools'
def __init__(self, con):
self._client = ResourceClient(con, self.URI)
def get(self, id_or_uri):
"""
Gets a pool.
Args:
id_or_uri: Can be either the range ID or URI.
Returns:
dict: Pool resource.
"""
return self._client.get(id_or_uri)
def enable(self, information, id_or_uri, timeout=-1):
"""
Enables or disables a pool.
Args:
information (dict): Information to update.
id_or_uri: ID or URI of range.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Updated resource.
"""
uri = self._client.build_uri(id_or_uri)
return self._client.update(information, uri, timeout=timeout)
def validate_id_pool(self, id_or_uri, ids_pools):
"""
Validates an ID pool.
Args:
id_or_uri:
ID or URI of range.
ids_pools (list):
List of Id Pools.
Returns:
dict: A dict containing a list with IDs.
"""
uri = self._client.build_uri(id_or_uri) + "/validate?idList=" + "&idList=".join(ids_pools)
return self._client.get(uri)
def validate(self, information, id_or_uri, timeout=-1):
"""
Validates a set of user specified IDs to reserve in the pool.
This API can be used to check if the specified IDs can be allocated.
Args:
information (dict):
Information to update. Can result in system specified IDs or the system reserving user-specified IDs.
id_or_uri:
ID or URI of vSN range.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: A dict containing a list with IDs.
"""
uri = self._client.build_uri(id_or_uri) + "/validate"
return self._client.update(information, uri, timeout=timeout)
def allocate(self, information, id_or_uri, timeout=-1):
"""
Allocates a set of IDs from range.
The allocator returned contains the list of IDs successfully allocated.
Args:
information (dict):
Information to update. Can result in system specified IDs or the system reserving user-specified IDs.
id_or_uri:
ID or URI of vSN range.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: A dict containing a list with IDs.
"""
uri = self._client.build_uri(id_or_uri) + "/allocator"
return self._client.update(information, uri, timeout=timeout)
def collect(self, information, id_or_uri, timeout=-1):
"""
Collects one or more IDs to be returned to a pool.
Args:
information (dict):
The list of IDs to be collected
id_or_uri:
ID or URI of range
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Collector containing list of collected IDs successfully collected.
"""
uri = self._client.build_uri(id_or_uri) + "/collector"
return self._client.update(information, uri, timeout=timeout)
def get_check_range_availability(self, id_or_uri, ids_pools):
"""
Checks the range availability in the ID pool.
Args:
id_or_uri:
ID or URI of range.
ids_pools (list):
List of Id Pools.
Returns:
dict: A dict containing a list with IDs.
"""
uri = self._client.build_uri(id_or_uri) + "/checkrangeavailability?idList=" + "&idList=".join(ids_pools)
return self._client.get(uri)
def generate(self, id_or_uri):
"""
Generates and returns a random range.
Args:
id_or_uri:
ID or URI of range.
Returns:
dict: A dict containing a list with IDs.
"""
uri = self._client.build_uri(id_or_uri) + "/generate"
return self._client.get(uri)
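# Hedged usage sketch (not part of the original module). `connection` stands in for
# an already-authenticated OneView connection object and '<id_pool_uri>' is a
# placeholder, not a documented endpoint:
#
#   id_pools = IdPools(connection)
#   pool = id_pools.get('<id_pool_uri>')
#   validation = id_pools.validate_id_pool('<id_pool_uri>', ['id1', 'id2'])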
|
tests/test_cardxml.py | kvazario/python-hearthstone | 233 | 12708181 | from hearthstone import cardxml
def test_cardxml_load():
cardid_db, _ = cardxml.load()
dbf_db, _ = cardxml.load_dbf()
assert cardid_db
assert dbf_db
for card_id, card in cardid_db.items():
assert dbf_db[card.dbf_id].id == card_id
for dbf_id, card in dbf_db.items():
assert cardid_db[card.id].dbf_id == dbf_id
assert cardid_db["EX1_001"].quest_reward == ""
assert cardid_db["UNG_940"].quest_reward == "UNG_940t8"
|
moto/kinesisvideoarchivedmedia/__init__.py | gtourkas/moto | 5,460 | 12708199 | <gh_stars>1000+
from .models import kinesisvideoarchivedmedia_backends
from ..core.models import base_decorator
kinesisvideoarchivedmedia_backend = kinesisvideoarchivedmedia_backends["us-east-1"]
mock_kinesisvideoarchivedmedia = base_decorator(kinesisvideoarchivedmedia_backends)
|
samples/vmc/networks_nsxt/hello_world.py | VictorGardi/vsphere-automation-sdk-python | 589 | 12708208 | #!/usr/bin/env python
"""
* *******************************************************
* Copyright (c) VMware, Inc. 2019. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
import pprint
from samples.vmc.helpers.sample_cli import parser
from com.vmware.nsx_policy_client_for_vmc import (
create_nsx_policy_client_for_vmc)
class AuthExample(object):
"""
Demonstrates how to authenticate to VMC using the NSX-T SDK
and perform a simple read operation.
Sample Prerequisites:
- An organization associated with the calling user.
- A SDDC in the organization
"""
def __init__(self):
args = parser.parse_args()
self.org_id = args.org_id
self.sddc_id = args.sddc_id
self.vmc_client = create_nsx_policy_client_for_vmc(
args.refresh_token, args.org_id, args.sddc_id)
def get_domains(self):
print('\n# Get Domains: List network domains:')
domains = self.vmc_client.infra.Domains.list()
pprint.pprint(domains)
def main():
auth_example = AuthExample()
auth_example.get_domains()
if __name__ == '__main__':
main()
|
benchmarks/exps/old/card275.py | SymbioticLab/Salus | 104 | 12708214 | # -*- coding: future_fstrings -*-
#
# Copyright 2019 <NAME> <<EMAIL>>
#
# This file is part of Salus
# (see https://github.com/SymbioticLab/Salus).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Get mem allocation for one iteration, to plot CDF. See card#275
LaneMgr: enabled
InLane Scheduler: pack
Collected data: allocation
"""
from __future__ import absolute_import, print_function, division, unicode_literals
import inspect
from absl import flags
from benchmarks.driver.runner import Executor
from benchmarks.driver.server.config import presets
from benchmarks.driver.workload import WTL
from benchmarks.exps import run_seq, maybe_forced_preset, run_tfdist, case_switch_main
FLAGS = flags.FLAGS
def case1(argv):
model, bs, bn = 'inception3', 50, 10
name = inspect.currentframe().f_code.co_name
scfg = maybe_forced_preset(presets.AllocProf)
scfg.scheduler = 'pack'
wl = WTL.create(model, bs, bn)
run_seq(scfg.copy(output_dir=FLAGS.save_dir/name), wl)
def case2(argv):
model, bs, bn = 'inception3', 50, 10
name = inspect.currentframe().f_code.co_name
scfg = maybe_forced_preset(presets.OpTracing)
scfg.logconf = 'memop'
scfg.scheduler = 'pack'
wl = WTL.create(model, bs, bn)
run_seq(scfg.copy(output_dir=FLAGS.save_dir/name), wl)
@case_switch_main
def main():
return case1, case2
|
b2fuse/cached_bucket.py | sondree/b2_fuse | 253 | 12708271 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#The MIT License (MIT)
#Copyright (c) 2015 <NAME>
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from time import time
from b2.bucket import Bucket
#General cache used for B2Bucket
class Cache(object):
def __init__(self, cache_timeout):
self.data = {}
self.cache_timeout = cache_timeout
def update(self, result, params=""):
self.data[params] = (time(), result)
def get(self, params=""):
if self.data.get(params) is not None:
entry_time, result = self.data.get(params)
if time() - entry_time < self.cache_timeout:
return result
else:
del self.data[params]
return
class CacheNotFound(BaseException):
pass
class CachedBucket(Bucket):
def __init__(self, api, bucket_id):
super(CachedBucket, self).__init__(api, bucket_id)
self._cache = {}
self._cache_timeout = 120
def _reset_cache(self):
self._cache = {}
def _update_cache(self, cache_name, result, params=""):
self._cache[cache_name].update(result, params)
return result
def _get_cache(self, cache_name, params="", cache_type=Cache):
if self._cache.get(cache_name) is None:
self._cache[cache_name] = cache_type(self._cache_timeout)
if self._cache[cache_name].get(params) is not None:
return self._cache[cache_name].get(params)
raise CacheNotFound()
def ls(self):
func_name = "ls"
try:
return self._get_cache(func_name)
except CacheNotFound:
result = list(super(CachedBucket, self).ls())
return self._update_cache(func_name, result)
def delete_file_version(self, *args, **kwargs):
self._reset_cache()
return super(CachedBucket, self).delete_file_version(*args, **kwargs)
def upload_bytes(self, *args, **kwargs):
self._reset_cache()
return super(CachedBucket, self).upload_bytes(*args, **kwargs)
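# Hedged sketch of the standalone Cache helper above; CachedBucket itself needs a
# real b2 API object and bucket id, so it is not exercised here.
if __name__ == "__main__":
    cache = Cache(cache_timeout=120)
    cache.update(["file_a", "file_b"], params="ls")
    print(cache.get(params="ls"))  # -> ['file_a', 'file_b'] until the entry expires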
|
mppsolar/protocols/jk232.py | kchiem/mpp-solar | 132 | 12708279 | <filename>mppsolar/protocols/jk232.py
import logging
from .abstractprotocol import AbstractProtocol
from .protocol_helpers import crcJK232 as crc
log = logging.getLogger("jk232")
# Read basic information and status
# DD A5 03 00 FF FD 77
# start bit 0xDD
# status 0xA5 means read, status 0x5A means write.
# command code 0x03
# Data length: 1 byte, indicating the effective length of the data carried in the frame.
# Data content: N bytes, the content carried by the frame data, when the data length is 0, there is no such part.
# Verification: 2 bytes,
# the verification field is "command code + length byte + data segment content",
# and the verification method is the sum of those fields, inverted plus 1 (two's complement); the high byte comes first and the low byte last.
# Stop bit: 1 byte, indicating the end of a frame of data, fixed as 0x77;
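# Worked example of the checksum arithmetic above (a sanity check, assuming 16-bit
# two's complement): for the read-basic-info frame DD A5 03 00 FF FD 77, the checked
# fields are command code 0x03 and data length 0x00, so
#   sum = 0x03 + 0x00 = 0x0003
#   checksum = (0x10000 - 0x0003) & 0xFFFF = 0xFFFD -> high byte FF, low byte FD
# which matches the FF FD bytes in the example frame.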
COMMANDS = {
"getBalancerData": {
"name": "getBalancerData",
"command_code": "03",
"description": "Get Balancer Data",
"help": " -- Get Balancer Data",
"type": "QUERY",
"checksum_required": "True",
"response_type": "POSITIONAL",
"response": [
["Hex2Str", 1, "Start Byte", ""],
["Hex2Str", 1, "Command Code", ""],
["Hex2Str", 1, "Status", ""],
["Hex2Int", 1, "Data Length", ""],
["BigHex2Short:r/100", 2, "Total Battery Voltage", "V"],
["BigHex2Short:r/100", 2, "Total Current", "A"],
["BigHex2Short:r/100", 2, "Remaining Capacity", "Ah"],
["BigHex2Short:r/100", 2, "Nominal Capacity", "Ah"],
["BigHex2Short", 2, "Cycles", "cycles"],
["Hex2Str", 2, "Production Date", ""],
["Hex2Str", 2, "Equilibrium State (TODO)", ""],
["Hex2Str", 2, "Equilibrium State 2 (TODO)", ""],
["Hex2Str", 2, "Protection State (TODO)", ""],
["Hex2Str", 1, "Keep", ""],
["Hex2Int", 1, "Remaining Battery", "%"],
["Hex2Str", 1, "FET Control Status", ""],
["Hex2Int", 1, "Number of Battery Strings", ""],
["Hex2Int", 1, "Number of NTC", ""],
["BigHex2Short:(r-2731)/10", 2, "NTC 1", "°C"],
["BigHex2Short:(r-2731)/10", 2, "NTC 2", "°C"],
["Hex2Str", 2, "Checksum", ""],
["Hex2Str", 1, "End Byte", ""],
],
"test_responses": [
bytes.fromhex(
"DD 03 00 1B 17 00 00 00 02 D0 03 E8 00 00 20 78 00 00 00 00 00 00 10 48 03 0F 02 0B 76 0B 82 FB FF 77"
),
],
},
}
class jk232(AbstractProtocol):
def __init__(self, *args, **kwargs) -> None:
super().__init__()
self._protocol_id = b"JK232"
self.COMMANDS = COMMANDS
self.STATUS_COMMANDS = [
"getBalancerData",
]
self.SETTINGS_COMMANDS = [
"",
]
self.DEFAULT_COMMAND = "getBalancerData"
def get_full_command(self, command) -> bytes:
"""
Override the default get_full_command as its different
"""
log.info(f"Using protocol {self._protocol_id} with {len(self.COMMANDS)} commands")
# These need to be set to allow other functions to work`
self._command = command
self._command_defn = self.get_command_defn(command)
# End of required variables setting
if self._command_defn is None:
# Maybe return a default here?
return None
if "command_code" in self._command_defn:
# Read basic information and status
# DD A5 03 00 FF FD 77
# full command is 7 bytes long
cmd = bytearray(7)
# start bit 0xDD
cmd[0] = 0xDD
log.debug(f"cmd with start bit: {cmd}")
# status 0xA5 means read, status 0x5A means write.
if self._command_defn["type"] == "SETTER":
cmd[1] = 0x5A
else:
cmd[1] = 0xA5
# command code 0x03
command_code = int(self._command_defn["command_code"], 16)
# Data length: 1 byte, indicating the effective length of the data carried in the frame.
# Data content: N bytes, the content carried by the frame data, when the data length is 0, there is no such part.
data = ""
# TODO: data stuff here
data_len = len(data)
if data_len == 0:
crc_high, crc_low = crc([command_code, data_len])
cmd[2] = command_code
cmd[3] = data_len
cmd[4] = crc_high
cmd[5] = crc_low
cmd[6] = 0x77
log.debug(f"cmd with crc: {cmd}")
return cmd
def get_responses(self, response):
"""
Override the default get_responses as its different
"""
responses = []
# remove \n
# response = response.replace(b"\n", b"")
if self._command_defn is not None and self._command_defn["response_type"] == "POSITIONAL":
# Have a POSITIONAL type response, so need to break it up...
# example defn :
# "response": [
# ["discard", 1, "start flag", ""],
# ["discard", 1, "module address", ""],
# ["discard", 1, "command id", ""],
# ["discard", 1, "data length", ""],
# ]
# example response data b"\xa5\x01\x90\x08\x02\x10\x00\x00uo\x03\xbc\xf3",
for defn in self._command_defn["response"]:
size = defn[1]
item = response[:size]
responses.append(item)
response = response[size:]
if response:
responses.append(response)
log.debug(f"get_responses: responses {responses}")
return responses
else:
return bytearray(response)
|
scripts/update_vcpkg_baseline.py | manjunathnilugal/PyBaMM | 330 | 12708286 | """
Automatically update the baseline of vcpkg-configuration.json
"""
import json
import os
import pybamm
def update_baseline():
"""
Opens vcpkg-configuration.json and updates the baseline with the latest commit id
"""
# Get latest commit id from pybamm-team/sundials-vcpkg-registry
cmd = "git ls-remote https://github.com/pybamm-team/sundials-vcpkg-registry | grep refs/heads/main | cut -f 1 | tr -d '\n'" # noqa: E501
commit_id = os.popen(cmd).read()
# Open file and write it
with open(
os.path.join(pybamm.root_dir(), "vcpkg-configuration.json"), "r+"
) as file:
output = file.read()
json_commit_id = json.loads(output)["registries"][0]["baseline"]
output = output.replace(json_commit_id, commit_id)
file.truncate(0)
file.seek(0)
file.write(output)
if __name__ == "__main__":
update_baseline()
|
DQM/SiStripMonitorClient/scripts/DeadROCCounter_Phase1.py | Purva-Chaudhari/cmssw | 852 | 12708321 | #!/usr/bin/env python3
from __future__ import print_function
from ROOT import TFile, gStyle,gPad ,TObject, TCanvas, TH1, TH1F, TH2F, TLegend, TPaletteAxis, TList, TLine, TAttLine, TF1,TAxis
import re
import sys, string
def getRunNumber(filename):
global runNumber
pos=filename.find("__")
runNumber=int(filename[pos-6:pos])
#print runNumber
###########################################barrel########################################################
def countBadROCBarrel(fin, layerNo, os):
global bpix_tot_deadROC
global bpix_tot_ineffROC
global bpix_tot_Nrocspopulated
global bpix_tot_totalentries
barrelPath = commonPath + "PXBarrel/";
histoname = ["digi_occupancy_per_SignedModuleCoord_per_SignedLadderCoord_PXLayer_1", "digi_occupancy_per_SignedModuleCoord_per_SignedLadderCoord_PXLayer_2",
"digi_occupancy_per_SignedModuleCoord_per_SignedLadderCoord_PXLayer_3", "digi_occupancy_per_SignedModuleCoord_per_SignedLadderCoord_PXLayer_4"]
digi2D = fin.Get(barrelPath + histoname[layerNo-1])
#return status flag is histogram is empty!
if digi2D.GetEntries() == 0 :
return 1;
Nrocspopulated = 0
totalEntries = 0
NexpectedROC = [1536, 3584, 5632, 8192]
nLadders_bpx = [6, 14, 22, 32]
nx = digi2D.GetNbinsX()
ny = digi2D.GetNbinsY()
for xbin in range(1,nx+1):
if xbin >= 33 and xbin <= 40: continue;#region of cross on x-axis
for ybin in range(1,ny+1):
if (ybin == 2*nLadders_bpx[layerNo-1] + 1) or (ybin == 2*nLadders_bpx[layerNo-1] + 2): continue;#region of cross on y-axis
bentries = digi2D.GetBinContent(xbin,ybin)
if(bentries > 0):
Nrocspopulated+=1
totalEntries += bentries
meanEntries = float(totalEntries)/Nrocspopulated
NineffROC = 0
#Loop to chek inefficient ROC per layer
for xbin in range(1,nx+1):
if xbin >= 33 and xbin <= 40:
continue;#region of cross on x-axis
for ybin in range(1,ny+1):
if (ybin == 2*nLadders_bpx[layerNo-1] + 1) or (ybin == 2*nLadders_bpx[layerNo-1] + 2): continue;#region of cross on y-axis
bentries = digi2D.GetBinContent(xbin,ybin);
if(bentries > 0 and bentries < meanEntries/4. ):#Assume < 25% of MEAN = inefficient
NineffROC+=1;
##Printing Layer no., #dead ROC, #inefficienct ROC, #mean occupancy of Non-zer roc
tmpstr = "BPix L" + str(layerNo)
print(tmpstr, '{0:4d} {1:4d} {2:4.1f}'.format(NexpectedROC[layerNo-1] - Nrocspopulated, NineffROC, round(meanEntries,1)), file=os)
bpix_tot_deadROC += NexpectedROC[layerNo-1] - Nrocspopulated
bpix_tot_ineffROC += NineffROC
bpix_tot_Nrocspopulated += Nrocspopulated
bpix_tot_totalentries += float(totalEntries)
return 0;
#############################################endcap#########################################
def countBadROCForward(fin, ringNo, os):
global fpix_tot_deadROC
global fpix_tot_ineffROC
global fpix_tot_Nrocspopulated
global fpix_tot_totalentries
forwardPath = commonPath + "PXForward/";
histoname = ["digi_occupancy_per_SignedDiskCoord_per_SignedBladePanelCoord_PXRing_1",
"digi_occupancy_per_SignedDiskCoord_per_SignedBladePanelCoord_PXRing_2"]
digi2D = fin.Get(forwardPath + histoname[ringNo-1])
#return status flag is histogram is empty!
if digi2D.GetEntries() == 0 :
return 1;
nblades_perRing_fpx = [22, 34]
NexpectedROC_perRing = [704, 1088]
Nrocspopulated = [0] * 6
totalEntries = [0] * 6
dcounter = 0
nx = digi2D.GetNbinsX()
ny = digi2D.GetNbinsY()
for xbin in range(1,nx+1):
if xbin >= 25 and xbin <= 32: continue;#region of cross on x-axis
if xbin > 1 and (xbin-1)%8 == 0: dcounter += 1;
for ybin in range(1,ny+1):
if (ybin >= 2*nblades_perRing_fpx[ringNo-1] + 1) and (ybin <= 2*nblades_perRing_fpx[ringNo-1] + 4):
continue;#region of cross on y-axis
bentries = digi2D.GetBinContent(xbin,ybin)
if(bentries > 0):
Nrocspopulated[dcounter] += 1
totalEntries[dcounter] += bentries
#Loop to find inefficient modules
    meanEntries = [0] * 6
for d in range(0,6):
meanEntries[d] = float(totalEntries[d])/Nrocspopulated[d]
    NineffROC = [0] * 6
#set disc counter to 0 since it is now 5
dcounter = 0;
for xbin in range(1,nx+1):
if xbin >= 25 and xbin <= 32: continue;#region of cross on x-axis
if xbin > 1 and (xbin-1)%8 == 0: dcounter += 1
for ybin in range(1,ny+1):
if (ybin >= 2*nblades_perRing_fpx[ringNo-1] + 1) and (ybin <= 2*nblades_perRing_fpx[ringNo-1] + 4):
continue;#region of cross on y-axis
bentries = digi2D.GetBinContent(xbin,ybin)
if(bentries > 0):#//Assume < 25% of MEAN = inefficient
if bentries > 0 and bentries < meanEntries[dcounter]/4.:
NineffROC[dcounter] += 1
print("#Summary for FPix Ring", ringNo, file=os)
for d in range(0,6):
disc = 0
if d < 3: disc = "M" + str(3 - d)
else: disc = "P" + str(d - 2)
##Printing Disc no., #dead ROC, #inefficienct ROC, #mean occupancy of Non-zer roc
tmpstr = "FPix R" + str(ringNo) + "D" + str(disc)
print('{0:10s} {1:4d} {2:4d} {3:4.1f}'.format(tmpstr, NexpectedROC_perRing[ringNo-1] - Nrocspopulated[d], NineffROC[d], round(meanEntries[d],1)), file=os)
fpix_tot_deadROC += NexpectedROC_perRing[ringNo-1] - Nrocspopulated[d]
fpix_tot_ineffROC += NineffROC[d]
fpix_tot_Nrocspopulated += Nrocspopulated[d]
fpix_tot_totalentries += float(totalEntries[d])
return 0;
################################################main#######################################
fname=sys.argv[1]
getRunNumber(fname)
fin= TFile(fname)
outname="PixZeroOccROCs_run" + str(runNumber) + ".txt"
bpix_tot_deadROC = 0
bpix_tot_ineffROC = 0
bpix_tot_Nrocspopulated = 0
bpix_tot_totalentries = 0
fpix_tot_deadROC = 0
fpix_tot_ineffROC = 0
fpix_tot_Nrocspopulated = 0
fpix_tot_totalentries = 0
global commonPath
commonPath = "DQMData/Run " + str(runNumber) + "/PixelPhase1/Run summary/Phase1_MechanicalView/"
#histogram of no. of pixel clusters
hnpixclus_bpix = fin.Get(commonPath + "charge_PXBarrel")
hnpixclus_fpix = fin.Get(commonPath + "charge_PXForward")
out_file = open(outname, "w")
print("#Layer/Disc KEY NDeadROC NineffROC MeanOccupacy", file=out_file)
print("#Pixel Barrel Summary", file=out_file)
for l in range(1,5):
if countBadROCBarrel(fin, l, out_file) == 1:
print("DQM histogram for Layer", str(l), " is empty!", file=out_file)
print("BPix tot", '{0:4d} {1:4d} {2:4.1f}'.format(bpix_tot_deadROC, bpix_tot_ineffROC, round(float(bpix_tot_totalentries)/bpix_tot_Nrocspopulated,1)), file=out_file)
print("#Pixel Forward Summary", file=out_file)
for ring in range(1,3):
if countBadROCForward(fin, ring, out_file) == 1:
print("DQM histogram for Ring", str(ring), " is empty!", file=out_file)
print("FPix tot", '{0:4d} {1:4d} {2:4.1f}'.format(fpix_tot_deadROC, fpix_tot_ineffROC, round(float(fpix_tot_totalentries)/fpix_tot_Nrocspopulated,1)), file=out_file)
print("Number of clusters=", int(hnpixclus_bpix.GetEntries() + hnpixclus_fpix.GetEntries()), file=out_file)
out_file.close()
|
design/satellite_is_sunlit.py | facorazza/python-skyfield | 765 | 12708354 | <gh_stars>100-1000
"""Verdict: light-time delay from the Sun can be neglected in is_sunlit()!
For the Sun's position from Earth, does calling observe() really make
enough difference to justify the expense? Here we take two approaches
to answering the question: we compare the difference every day over 40
years, and then we do a back-of-the-envelope estimate of how big we
might have expected the effect to be. The two approaches agree! The
maximum difference is around 10 mas.
What difference does that make for a satellite? Let's take the ISS.
With its orbital period of 92 minutes, it sees the Earth swing in a full
circle around the sky in that amount of time. That's 360/90 = 4 degrees
per minute (!) = 240 arcseconds per second. At that speed, a difference
of 10 mas in the Sun's position would at most hasten or delay the moment
of sunrise for the ISS by 40 microseconds, which is far below the
accuracy of TLE position predictions and can thus be safely incurred.
"""
from skyfield import api
from skyfield.api import load
ts = load.timescale()
eph = load('de421.bsp')
sun = eph['sun']
earth = eph['earth']
t = ts.utc(2000, 1, range(40 * 365))
s1 = earth.at(t).observe(sun)
s2 = (sun - earth).at(t)
print('Milliarcseconds (mas) difference:', s1.separation_from(s2).mas().max())
print()
print('Does that make physical sense?')
# The Sun orbits around the Solar System barycenter which is usually
# inside the Sun's radius but occasionally a bit outside of it. So we
# can very roughly imagine the Sun's orbit as its own circumference,
# give or take.
solar_radius_km = 696340
distance_sun_travels_in_one_orbit = solar_radius_km * api.tau
# It takes the Sun more than a decade to travel that path, as its orbit
# is roughly the opposite of Jupiter's (which takes 12 years to circle
# the Sun). So it turns out that it travels a bit slowly.
sun_km_per_s = distance_sun_travels_in_one_orbit / 10 / 365.25 / 24 / 60 / 60
print('Sun km/s:', sun_km_per_s)
light_delay_seconds = s2[0].position.length().light_seconds()
print('Sample light delay from Sun to Earth (seconds):', light_delay_seconds)
print('How far does the Sun move in that time?')
travel_km = light_delay_seconds * sun_km_per_s
print('Sun km moved during that light travel time:', travel_km)
print('What angle does that many kilometers subtend from Earth?')
earth_sun_distance_km = 150e6
travel_angle = api.Angle(radians = travel_km / earth_sun_distance_km)
print('Angle traveled by sun in arcseconds:', travel_angle.arcseconds())
print('Angle traveled by sun in mas:', travel_angle.mas())
print()
print(__doc__.rstrip())
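# Back-of-the-envelope check of the 40 microsecond figure quoted above (a rough
# sketch, not part of the original script): 10 mas of solar position error divided
# by the ISS's ~240 arcseconds per second of apparent motion.
print('Approximate sunrise timing error (seconds):', 10e-3 / 240)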
|
esphome/components/display/__init__.py | OttoWinter/esphomeyaml | 249 | 12708356 | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import core, automation
from esphome.automation import maybe_simple_id
from esphome.const import (
CONF_AUTO_CLEAR_ENABLED,
CONF_ID,
CONF_LAMBDA,
CONF_PAGES,
CONF_PAGE_ID,
CONF_ROTATION,
CONF_FROM,
CONF_TO,
CONF_TRIGGER_ID,
)
from esphome.core import coroutine_with_priority
IS_PLATFORM_COMPONENT = True
display_ns = cg.esphome_ns.namespace("display")
DisplayBuffer = display_ns.class_("DisplayBuffer")
DisplayPage = display_ns.class_("DisplayPage")
DisplayPagePtr = DisplayPage.operator("ptr")
DisplayBufferRef = DisplayBuffer.operator("ref")
DisplayPageShowAction = display_ns.class_("DisplayPageShowAction", automation.Action)
DisplayPageShowNextAction = display_ns.class_(
"DisplayPageShowNextAction", automation.Action
)
DisplayPageShowPrevAction = display_ns.class_(
"DisplayPageShowPrevAction", automation.Action
)
DisplayIsDisplayingPageCondition = display_ns.class_(
"DisplayIsDisplayingPageCondition", automation.Condition
)
DisplayOnPageChangeTrigger = display_ns.class_(
"DisplayOnPageChangeTrigger", automation.Trigger
)
CONF_ON_PAGE_CHANGE = "on_page_change"
DISPLAY_ROTATIONS = {
0: display_ns.DISPLAY_ROTATION_0_DEGREES,
90: display_ns.DISPLAY_ROTATION_90_DEGREES,
180: display_ns.DISPLAY_ROTATION_180_DEGREES,
270: display_ns.DISPLAY_ROTATION_270_DEGREES,
}
def validate_rotation(value):
value = cv.string(value)
if value.endswith("°"):
value = value[:-1]
return cv.enum(DISPLAY_ROTATIONS, int=True)(value)
BASIC_DISPLAY_SCHEMA = cv.Schema(
{
cv.Optional(CONF_LAMBDA): cv.lambda_,
}
)
FULL_DISPLAY_SCHEMA = BASIC_DISPLAY_SCHEMA.extend(
{
cv.Optional(CONF_ROTATION): validate_rotation,
cv.Optional(CONF_PAGES): cv.All(
cv.ensure_list(
{
cv.GenerateID(): cv.declare_id(DisplayPage),
cv.Required(CONF_LAMBDA): cv.lambda_,
}
),
cv.Length(min=1),
),
cv.Optional(CONF_ON_PAGE_CHANGE): automation.validate_automation(
{
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(
DisplayOnPageChangeTrigger
),
cv.Optional(CONF_FROM): cv.use_id(DisplayPage),
cv.Optional(CONF_TO): cv.use_id(DisplayPage),
}
),
cv.Optional(CONF_AUTO_CLEAR_ENABLED, default=True): cv.boolean,
}
)
async def setup_display_core_(var, config):
if CONF_ROTATION in config:
cg.add(var.set_rotation(DISPLAY_ROTATIONS[config[CONF_ROTATION]]))
if CONF_AUTO_CLEAR_ENABLED in config:
cg.add(var.set_auto_clear(config[CONF_AUTO_CLEAR_ENABLED]))
if CONF_PAGES in config:
pages = []
for conf in config[CONF_PAGES]:
lambda_ = await cg.process_lambda(
conf[CONF_LAMBDA], [(DisplayBufferRef, "it")], return_type=cg.void
)
page = cg.new_Pvariable(conf[CONF_ID], lambda_)
pages.append(page)
cg.add(var.set_pages(pages))
for conf in config.get(CONF_ON_PAGE_CHANGE, []):
trigger = cg.new_Pvariable(conf[CONF_TRIGGER_ID], var)
if CONF_FROM in conf:
page = await cg.get_variable(conf[CONF_FROM])
cg.add(trigger.set_from(page))
if CONF_TO in conf:
page = await cg.get_variable(conf[CONF_TO])
cg.add(trigger.set_to(page))
await automation.build_automation(
trigger, [(DisplayPagePtr, "from"), (DisplayPagePtr, "to")], conf
)
async def register_display(var, config):
await setup_display_core_(var, config)
@automation.register_action(
"display.page.show",
DisplayPageShowAction,
maybe_simple_id(
{
cv.Required(CONF_ID): cv.templatable(cv.use_id(DisplayPage)),
}
),
)
async def display_page_show_to_code(config, action_id, template_arg, args):
var = cg.new_Pvariable(action_id, template_arg)
if isinstance(config[CONF_ID], core.Lambda):
template_ = await cg.templatable(config[CONF_ID], args, DisplayPagePtr)
cg.add(var.set_page(template_))
else:
paren = await cg.get_variable(config[CONF_ID])
cg.add(var.set_page(paren))
return var
@automation.register_action(
"display.page.show_next",
DisplayPageShowNextAction,
maybe_simple_id(
{
cv.Required(CONF_ID): cv.templatable(cv.use_id(DisplayBuffer)),
}
),
)
async def display_page_show_next_to_code(config, action_id, template_arg, args):
paren = await cg.get_variable(config[CONF_ID])
return cg.new_Pvariable(action_id, template_arg, paren)
@automation.register_action(
"display.page.show_previous",
DisplayPageShowPrevAction,
maybe_simple_id(
{
cv.Required(CONF_ID): cv.templatable(cv.use_id(DisplayBuffer)),
}
),
)
async def display_page_show_previous_to_code(config, action_id, template_arg, args):
paren = await cg.get_variable(config[CONF_ID])
return cg.new_Pvariable(action_id, template_arg, paren)
@automation.register_condition(
"display.is_displaying_page",
DisplayIsDisplayingPageCondition,
cv.maybe_simple_value(
{
cv.GenerateID(CONF_ID): cv.use_id(DisplayBuffer),
cv.Required(CONF_PAGE_ID): cv.use_id(DisplayPage),
},
key=CONF_PAGE_ID,
),
)
async def display_is_displaying_page_to_code(config, condition_id, template_arg, args):
paren = await cg.get_variable(config[CONF_ID])
page = await cg.get_variable(config[CONF_PAGE_ID])
var = cg.new_Pvariable(condition_id, template_arg, paren)
cg.add(var.set_page(page))
return var
@coroutine_with_priority(100.0)
async def to_code(config):
cg.add_global(display_ns.using)
|
desktop/core/ext-py/xlwt-1.3.0/tests/utils.py | zhoudahong/hue | 5,079 | 12708402 | import os
from os.path import join, dirname, abspath, isdir
def in_tst_dir(filename):
return join(dirname(abspath(__file__)), filename)
def in_tst_output_dir(filename):
output_dir = join(dirname(abspath(__file__)), 'output')
if not isdir(output_dir):
os.mkdir(output_dir, 0o755)
return join(output_dir, filename) |
dml/NN/neuralNetwork.py | justdark/dml | 708 | 12708470 | '''
The whole structure of dml.NN closely follows
DeepLearnToolbox (https://github.com/rasmusbergpalm/DeepLearnToolbox).
I think its architecture is clear and easy to understand,
so I ported it to Python.
I also recommend UFLDL (http://ufldl.stanford.edu/wiki/index.php/UFLDL_Tutorial)
for learning about Neural Networks.
TODO: SAE, DAE and so on
'''
from __future__ import division
import numpy as np
import scipy as sp
import pylab as py
#from scipy.optimize import minimize
import datetime
from ..tool import sigmoid
from ..tool import normalize,normalize_by_extant
class NNC:
def __init__(self, architecture,option={}):
'''
        architecture is a list of numbers specifying the NN shape;
        for example, [784, 200, 10] describes a NN for MNIST data with
                    an input layer of 784,
                    one hidden layer of 200,
                    and an output layer of 10
        ps: the bias element is not counted in these numbers
'''
self.testing=False
self.architecture=np.array(architecture)
self.learningRate=2
self.momentum= 0.5
self.output= 'sigm'
self.activation ='sigm'
self.weightPenaltyL2 = 0
self.nonSparsityPenalty =0
self.sparsityTarget=0.05
self.inputZeroMaskedFraction=0
self.dropoutFraction=0
self.n=self.architecture.size
self.W={}
self.vW={}
self.dW={}
self.p={}
self.a={}
self.d={}
self.dropOutMask={}
for i in range(self.n):
self.a.setdefault(i)
self.d.setdefault(i)
self.p.setdefault(i)
for i in range(self.n-1):
self.W.setdefault(i)
self.vW.setdefault(i)
self.p[i+1] = np.zeros((1,architecture[i+1]))
self.W[i]=((np.random.rand(architecture[i]+1,architecture[i+1])-0.5)*2*4*np.sqrt(6/(architecture[i+1]+architecture[i])));
#print architecture[i+1],architecture[i]
self.vW[i] = np.zeros(self.W[i].shape)
#self.W.append((np.random.rand(architecture[i+1],architecture[i]+1)-0.5)*8/np.sqrt(6/(architecture[i+1]+architecture[i])))
def test(self):
self.nncost(self.W)
pass
def handle_y_4classify(self,hy):
groundTruth=np.zeros((self.architecture[self.n-1],hy.size))
q=np.arange(0,hy.size)
groundTruth[hy.transpose(),q]=1
return groundTruth
def nnbp(self):
n = self.n
sparsityError = 0
if (self.output=='softmax' or self.output=='linear'):
self.d[n-1] = -self.e
elif self.output=='sigm':
self.d[n-1] =-self.e*self.a[n-1]*(1-self.a[n-1])
for i in range(n-2,0,-1):
if (self.activation =='sigm'):
d_act = self.a[i] * (1 - self.a[i])
elif (self.activation =='tanh'):
d_act = 1 - self.a[i]**2
if (self.nonSparsityPenalty > 0):
pi = np.tile(self.p[i], (self.a[i].shape[1], 1))
#print pi,'============'
#print np.zeros((self.a[i].shape[1],1)).shape,pi.shape
sparsityError = np.concatenate((np.zeros((self.a[i].shape[1],1)),self.nonSparsityPenalty * (-self.sparsityTarget / pi + (1 - self.sparsityTarget) / (1 - pi))),axis=1).transpose()
#print self.W[i].shape
#print self.d[i + 1].shape,'sssss'
if i+1==n-1:
self.d[i] = (np.dot(self.W[i],self.d[i + 1] ) + sparsityError) * d_act;
else:
self.d[i] = (np.dot(self.W[i],self.d[i + 1][1:,:]) + sparsityError) * d_act;
if(self.dropoutFraction>0):
#print np.ones((self.d[i].shape[1],1)).shape
#print self.dropOutMask[i].shape
self.d[i] = self.d[i] * np.concatenate((np.ones((self.d[i].shape[1],1)).transpose() , self.dropOutMask[i]),axis=0)
for i in range(n-1):
#print self.a[i].shape
if (i+1==n-1):
self.dW[i] = np.dot(self.a[i],self.d[i + 1].transpose()) / self.d[i + 1].shape[1]
else:
self.dW[i] = np.dot(self.a[i],self.d[i + 1][1:,:].transpose()) /self.d[i + 1].shape[1]
#print self.dW[i].shape,'ssssssssssssssssssss'
def nnff(self,X,y=None):
'''
        X is a matrix with shape N*M, where M is the number of training cases and N is the input size (e.g. 784)
        y is the labels
        W is a dictionary of weight matrices
'''
n = self.n;
M = X.shape[1];
X = np.concatenate((np.ones((1,M)),X),axis=0)
self.a[0]=X;
for i in range(n-1):
if i==0:
continue
#print self.a[i-1].shape
#print np.dot(self.W[i-1].transpose(),self.a[i-1])
if (self.activation =='sigm'):
self.a[i]=sigmoid(np.dot(self.W[i-1].transpose(),self.a[i-1]))
elif (self.activation=='tanh'):
self.a[i]=np.tanh(np.dot(self.W[i-1].transpose(),self.a[i-1]))
if (self.dropoutFraction>0):
if (self.testing):
self.a[i]=self.a[i]*(1-self.dropoutFraction)
else:
self.dropOutMask.setdefault(i)
self.dropOutMask[i]=(np.random.rand(self.a[i].shape[0],self.a[i].shape[1])>self.dropoutFraction)
self.a[i] = self.a[i]*self.dropOutMask[i];
if (self.nonSparsityPenalty>0):
self.p[i] = 0.8 * self.p[i] + 0.2*np.mean(self.a[i],axis=1);
#self.p[i] =np.mean(self.a[i],axis=1)
self.a[i]=np.concatenate((np.ones((1,M)),self.a[i]),axis=0)
#print self.a[n-1]
#set the output
#settle softmax
#print self.W[n - 2].transpose()
if (self.output=='softmax'):
self.a[n-1] = np.dot(self.W[n - 2].transpose(),self.a[n - 2])
self.a[n-1]=np.exp(self.a[n-1]-self.a[n-1].max())
self.a[n-1]=np.true_divide(self.a[n-1],np.sum(self.a[n-1],0))
elif (self.output=='linear'):
self.a[n-1] = np.dot(self.W[n - 2].transpose(),self.a[n - 2])
elif (self.output=='sigm'):
self.a[n-1] = sigmoid(np.dot(self.W[n - 2].transpose(),self.a[n - 2]))
        if y is not None:
            self.e = y - self.a[n-1]
            if (self.output=='sigm' or self.output=='linear'):
                # 0.5 (a float) avoids Python 2 integer division truncating the loss to zero
                self.L = 0.5*(self.e**2).sum() / M
            elif (self.output=='softmax'):
                self.L = (-y*np.log(self.a[n-1])).sum() / M
#print self.L
def train(self,train_X,train_y,opts):
'''
train_X is a matrix with shape N*M ,M is the number of train_case,N is the input size,eg: 784 in MNIST data
'''
m = train_X.shape[1]
batchsize = opts['batchsize']
numepochs = opts['numepochs']
numbatches = int(m / batchsize)
kk=np.random.permutation(m)
for i in range(numepochs):
starttime = datetime.datetime.now()
print 'the ',i,' th epochs is running'
for j in range(numbatches):
batch_x=train_X[:,kk[(j)*batchsize:(j+1)*batchsize]].copy();
if(self.inputZeroMaskedFraction != 0):
batch_x = batch_x*(np.random.rand(batch_x.shape[0],batch_x.shape[1])>self.inputZeroMaskedFraction)
batch_y=train_y[:,kk[(j)*batchsize:(j+1)*batchsize]].copy()
self.nnff(batch_x, batch_y);
self.nnbp();
self.nnapplygrads();
endtime = datetime.datetime.now()
print 'cost ',(endtime - starttime).seconds,'seconds\n'
def nnapplygrads(self):
for i in range(self.n-1):
if(self.weightPenaltyL2>0):
dW = self.dW[i]+self.weightPenaltyL2*self.W[i]
#dW = self.dW[i] + self.weightPenaltyL2 *np.concatenate((np.zeros((self.W[i].shape[1],1)).transpose(),self.W[i][1:,:]),axis=0)
else:
dW = self.dW[i];
dW = self.learningRate * dW;
if(self.momentum>0):
self.vW[i] = self.momentum*self.vW[i] + dW;
dW = self.vW[i];
self.W[i] = self.W[i] - dW;
pass
def nnpred(self,test_X):
self.testing=True;
nn = self.nnff(test_X, np.zeros((test_X.shape[1], self.architecture[self.n-1])).transpose());
self.testing=False;
print self.a[self.n-1].shape
return self.a[self.n-1].argmax(axis=0)
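# A minimal sketch of the data layout the methods above expect: inputs are
# stored column-wise with shape (n_features, n_cases) and labels are one-hot
# with one column per case (see handle_y_4classify).  Pure numpy, relying on
# this module's existing `np` import; the class itself is not instantiated here.
def _example_data_layout(n_features=784, n_classes=10, n_cases=100):
    X = np.random.rand(n_features, n_cases)             # one column per training case
    labels = np.random.randint(0, n_classes, n_cases)   # integer class ids
    y = np.zeros((n_classes, n_cases))
    y[labels, np.arange(n_cases)] = 1                    # one-hot columns
    return X, y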
|
components/iscesys/Parsers/rdf/language/grammar/syntax.py | vincentschut/isce2 | 1,133 | 12708507 | <reponame>vincentschut/isce2
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2014 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""syntax handles syntax via tha Grammar class. It handles syntax
but farms out some work to cooperative classes"""
## \namespace rdf.language.grammar.syntax Syntax glues it all together
from __future__ import absolute_import
import itertools
import sys
from .. import errors
from . import punctuation, morpheme
from iscesys.Parsers.rdf.reserved import glyphs, words
from iscesys.Parsers.rdf.language.lexis import semantics, pragmatics
## Metaclass for Grammar: defines the pragmatics and semantics at
## load-time. pragmatics.Verb instances are assigned according to
## rdf.reserved.words.KEYWORDS, and the semantics.Noun instances are created--
## these are needed by Grammar.process
class metagrammar(type):
"""metagrammar meta class deal with the keywords defined in
verbs.
"""
## Create class and add pragmatics and semantics
def __new__(mcs, *args, **kwargs):
cls = type.__new__(mcs, *args, **kwargs)
_prags = []
        ## Instantiate Verbs for the Grammar's command interpretation
for p_cls, w_const in zip(pragmatics.VERBS, words.KEYWORDS):
_prags.append(p_cls(w_const))
setattr(cls, w_const, _prags[-1])
        # note: metaclasses can access protected members of their instances...
cls._VERBS = tuple(_prags)
        ## Set up Noun instances by instantiating NOUNS's classes
cls._NOUNS = ()
for x in semantics.NOUNS:
cls._NOUNS += (x(),)
# cls._NOUNS = tuple(map(apply, semantics.NOUNS))
return cls
## Grammar is the state of the grammar -it is simply the most important \n
## class there is-- though it does cooperate and leave details to its \n
## clients.
class Grammar(object, metaclass=metagrammar):
"""Grammar() is the state of the grammar. See __init__ for why
it supports only nullary instantiation.
    ALL_CAP class attributes are Pragmatic (i.e. meta) words.
_lower_case private instance attributes are punctuation Glyphs
lower_case mutator methods ensure glyphs setting is kosher.
    Capitalized class attributes are default values for the lower_case version.
Overloads:
---------
Function Emulation
line --> __call__(line) ---> RDFRecord #That is, grammar is a (semipure)
function that makes lines into
RDFRecords.
    (meta)line-> __call__(line)---> None # Pragmatic (KeyWord) lines
return None (they aren't rdf
records) but they change the
internal state of 'self'. Hence
grammar is an impure function.
other --> __call__(line)---> None # Comments do nothing, Errors are
identified, reported to stderr
and forgotten.
Integer:
int(grammar) returns the depth-- which is a non-negative integer telling
how deep the processor is in the include file tree
    (IFT); it should not exceed sys.getrecursionlimit().
    grammar += 1   These are called when the depth processor goes up or
    grammar -= 1   down the IFT. They change int(grammar) and manage the
                   affixes.
    (A short usage sketch appears at the end of this module.)
"""
## wrap tell read how to unwrap lines-- it's just a str
wrap = glyphs.WRAP
## sep is not used -yet, it would appear in RDF._eval at some point.
sep = glyphs.SEPARATOR
    ## The operator symbol (default) -capitalized to avoid clash with property
Operator = glyphs.OPERATOR
    ## The comment symbol (default) -capitalized to avoid clash with property
Comment = glyphs.COMMENT
## Static default prefix
Prefix = [""]
## Static default suffix
Suffix = [""]
## VERY IMPORTANT: Grammar() creates the DEFAULT RDF grammar \n
## Y'all can't change it, only RDF inputs can...
def __init__(self):
"""Nullary instaniation: you cannot inject dependcies (DI)
in the constructor. You allways start with the default grammar-
which is defined in static class attributes.
Only rdf Pragamatics (i.e commands or key words) can change the
grammar -- infact, the attributes enscapulated in mutators.
"""
## The recursion depth from which the rdf lines are coming.
self.depth = 0
## The dynamic self-aware operator punctuation.Glyph \n
self.operator = self.__class__.Operator
## The dynamic self-aware comment punctuation.Glyph
self.comment = self.__class__.Comment
## Dynamic prefix is a copy of a list -and depends on depth
self.prefix = self.__class__.Prefix[:]
        ## Dynamic suffix is a copy of a list -and depends on depth
self.suffix = self.__class__.Suffix[:]
## Getter
@property
def operator(self):
return self._operator
## operator has mutators to ensure it is an
## rdf.language.punctuation.Glyph object
@operator.setter
def operator(self, value):
if not value: raise errors.NullCommandError
# symbol is converted to a glyph.
self._operator = punctuation.Glyph(value)
## Getter
@property
def comment(self):
return self._comment
## comment has mutators to ensure it is a
## rdf.language.punctuation.Glyph object
@comment.setter
def comment(self, value):
if not value: raise errors.NullCommandError
self._comment = punctuation.Glyph(value)
## Getter
@property
def prefix(self):
return self._prefix
## Ensure Grammar._prefix is an rdf.language.morpheme.Prefix
@prefix.setter
def prefix(self, value):
self._prefix = morpheme.Prefix(value)
## Getter
@property
def suffix(self):
return self._suffix
## Ensure Grammar._suffix is an rdf.language.morpheme.Suffix
@suffix.setter
def suffix(self, value):
self._suffix = morpheme.Suffix(value)
    ## str reflects the current grammar state
def __str__(self):
return ( str(self.depth) + " " +
self.operator + " " +
self.comment + " " + str(self.prefix) + str(self.suffix) )
## int() --> depth
def __int__(self):
return self.depth
## += --> change depth and append affixes w/ morpheme.Affix.descend \n
## (which knows how to do it)
## \param n +1 or ValueError
## \par Side Effects:
    ##     Affix.descend()
## \retval self self, changed
def __iadd__(self, n):
if n != 1: raise ValueError("Can only add +1")
self.depth += int(n)
self.prefix.descend()
self.suffix.descend()
return self
    ## -= --> change depth and truncate affixes w/ morpheme.Affix.ascend
## (b/c grammar just implements it)
    ## \param n +1 or ValueError
## \par Side Effects:
## Affix.ascend()
## \retval self self, changed
def __isub__(self, n):
if n != 1: raise ValueError("Can only subtract +1")
self.depth -= int(n)
self.prefix.ascend()
self.suffix.ascend()
return self
## Grammar(line) --> rdf.data.entries.RDFRecord \n
## It's the money method-- not it's not a pure function- it can
## change the state of grammar.
def __call__(self, line):
"""grammar(line) --> grammar.process(line) (with error catching)"""
if isinstance(line, str): # Guard (why?)
try:
result = self.process(line)
except errors.RDFWarning as err:
                # Python 3 print function (this module already requires Python 3
                # for its `metaclass=` class syntax)
                print(repr(err) + "::" + line, file=sys.stderr)
result = []
else:
result = result
else:
raise TypeError("Grammar processes strings, not %s" %
line.__class__.__name__)
return result
## Process the line a Verb or a Line
## \param line rdf sentence
## \par Side Effects:
## word might change self
## \retval word(line,self) full rdf processed line
def process(self, line):
"""process checks lines agains _PRAGAMTICS and _NOUNS
in a short-circuit for loop. The 1st hit leads to
processing.
"""
        # check for verbs, and then nouns -- and then ask them to do their thing;
        # order matters here, a lot.
for word in itertools.chain(self._VERBS, self._NOUNS):
if word.line_is(line, self):
return word(line, self)
## Get value of a line (any line really)
def get_value(self, line):
"""get value of a Pragamtic"""
return self.operator.right(self.comment.left(line))
## Add affixes--note: Grammar just adds, the overloaded __add__ and
## __radd__ invoke the affix protocol.
def affix(self, key):
return self.prefix + key + self.suffix
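# A minimal usage sketch of the call protocol described in the Grammar
# docstring.  The sample line assumes the default operator/comment glyphs are
# '=' and '!' (see rdf.reserved.glyphs); adjust the line if the defaults differ.
if __name__ == "__main__":
    grammar = Grammar()                   # default grammar from class attributes
    record = grammar("DEMO_KEY = 42 ! a throw-away comment")
    print(record)                         # RDFRecord for data lines, [] on warnings
    grammar += 1                          # entering an include file: depth -> 1
    print(int(grammar), str(grammar))
    grammar -= 1                          # leaving it again: depth -> 0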
|
vislab/datasets/behance.py | hertzmann/vislab | 113 | 12708544 | <gh_stars>100-1000
"""
Code to call the Behance API to construct a dataset.
"""
#import os
import sys
import requests
import bs4
import pandas as pd
#import numpy as np
import random
import vislab
tags = ['photo','blue','car','chinese ink','colored pencil','colors','comic',
'fashion illustration','graphics','infographic','landscape','vector',
'watercolor']
testURL = 'http://www.behance.net//gallery/Icons/1140561'
projectNum = 1140561
def get_image_url_for_photo_id(id):
df = get_photo_df()
return df.ix[id]['imageURL']
def get_image_url_for_illustration_id(id):
df = get_illustration_df()
return df.ix[id]['image_url']
def get_photo_df():
df = pd.read_csv(
vislab.config['behance_style_repo'] + '/data/behanceImages.csv')
df = df[df.label == 'photo']
df = df[df['imageURL'] != 'http://a2.behance.net/img/site/grey.png']
df.index = ['behance_photo_{}'.format(x) for x in df.index]
return df
def get_illustration_df():
"""
    This DataFrame was assembled in the notebooks load_data and processing
    in the Adobe-private behance_style repo.
"""
df = pd.read_csv(
vislab.config['behance_style_repo'] + '/data/10k_illustrations_20_tags_3_images.csv',
index_col=0)
return df
def get_basic_dataset(force=False):
"""
Return DataFrame of image_id -> page_url, artist_slug, artwork_slug.
"""
filename = vislab.config['paths']['shared_data'] + \
'/wikipaintings_basic_info.h5'
df = vislab.util.load_or_generate_df(filename, fetch_basic_dataset, force)
return df
def _getSmallest(imageModule):
    if 'sizes' not in imageModule:
return imageModule['src']
sizeList = imageModule['sizes']
knownSizes = ['max_1240','max_1920','original']
for s in knownSizes:
        if s in sizeList:
return sizeList[s]
    raise Exception('no known image size found in %s' % sizeList)
def fetch_single_project_image_URLs_via_API(projectNum):
query = 'http://www.behance.net/v2/projects/'+str(projectNum)+'?api_key='+vislab.config['behanceAPIkey']
# print('fetching project %d, query: %s'%(projectNum,query))
r = requests.get(query)
projectInfo = r.json()['project']
imageData = filter(lambda x:x['type'] == 'image', projectInfo['modules'])
return map(lambda x:_getSmallest(x), imageData)
def fetch_single_project_image_URLs_via_scraping(page_url):
r = requests.get(page_url)
soup = bs4.BeautifulSoup(r.text)
all_imgs = []
for li in soup.select('li.module.image'):
all_imgs += [img.attrs['src'] for img in li.find_all('img')]
return all_imgs
# set maximums to -1 in order to not have a maximum
def fetch_basic_dataset(maxRequests = 10, maxImagesPerProject=2, useAPI=True):
"""
Fetch basic info and page urls from a collection of projects.
Results are returned as a DataFrame.
"""
print("Fetching Behance dataset.")
projectList = pd.DataFrame.from_csv('behanceProjects.csv',header=-1)
APIkey = vislab.config['behanceAPIkey']
numRequests = 0
random.seed(0) # fix the seed so we get the same results each time
imageData = []
for index,row in projectList.iterrows():
if numRequests % 10 == 0:
sys.stdout.write('Fetching project %d / %d \r'%(numRequests,len(projectList.index)))
sys.stdout.flush()
projectNum = row.name
URL = row[1]
label = row[2]
if useAPI:
imageURLs = fetch_single_project_image_URLs_via_API(projectNum)
else:
imageURLs = fetch_single_project_image_URLs_via_scraping(URL)
if len(imageURLs) <= maxImagesPerProject or maxImagesPerProject <= 0:
pickedImageURLs = imageURLs
else:
pickedImageURLs = random.sample(imageURLs,maxImagesPerProject)
for u in pickedImageURLs:
imageData.append({'projectNum':projectNum,'projectURL':URL,'label':label,'imageURL':u})
numRequests = numRequests + 1
if maxRequests > 0 and numRequests>=maxRequests:
break
df = pd.DataFrame(imageData)
return df
if __name__ == '__main__':
"""
Run the scraping with a number of workers taking jobs from a queue.
"""
df = fetch_basic_dataset(maxRequests = -1, maxImagesPerProject=-1, useAPI=False)
df.to_csv('behanceImages.csv')
print(df)
|
susi_linux/ui/animators.py | swipswaps/susi_linux | 2,191 | 12708628 | import math
import cairo
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GLib # nopep8
class Animator(Gtk.DrawingArea):
def __init__(self, **properties):
super().__init__(**properties)
self.set_size_request(200, 80)
self.connect("draw", self.do_drawing)
GLib.timeout_add(50, self.tick)
def tick(self):
self.queue_draw()
return True
def do_drawing(self, widget, cr):
self.draw(cr, self.get_allocated_width(), self.get_allocated_height())
def draw(self, ctx, width, height):
pass
class ListeningAnimator(Animator):
def __init__(self, window, **properties):
super().__init__(**properties)
self.window = window
self.tc = 0
def draw(self, ctx, width, height):
self.tc += 0.2
self.tc %= 2 * math.pi
for i in range(-4, 5):
ctx.set_source_rgb(0.2, 0.5, 1)
ctx.set_line_width(6)
ctx.set_line_cap(cairo.LINE_CAP_ROUND)
if i % 2 == 0:
ctx.move_to(width / 2 + i * 10, height / 2 + 3 - 8 * math.sin(self.tc + i))
ctx.line_to(width / 2 + i * 10, height / 2 - 3 + 8 * math.sin(self.tc + i))
else:
ctx.set_source_rgb(0.2, 0.7, 1)
ctx.move_to(width / 2 + i * 10, height / 2 + 3 - 8 * math.cos(self.tc - i))
ctx.line_to(width / 2 + i * 10, height / 2 - 3 + 8 * math.cos(self.tc - i))
ctx.stroke()
class ThinkingAnimator(Animator):
def __init__(self, window, **properties):
super().__init__(**properties)
self.window = window
self.rot = 0
self.x, self.y = 0, 0
self.rad = 20
def draw(self, ctx, width, height):
self.x, self.y = width / 2, height / 2
self.rot += 0.2
self.rot %= 2 * math.pi
for i in range(-2, 2):
ctx.set_source_rgb(0.2, 0.7, 1)
ctx.arc(
self.x + i * 20,
self.y,
8 * math.cos(self.rot - i / 2),
0,
2 * math.pi)
ctx.fill()
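# A minimal sketch of hosting one of the widgets above: Animator schedules its
# own redraw every 50 ms, so all that is needed is a running GTK main loop.
# The window title and choice of widget here are illustrative.
if __name__ == "__main__":
    win = Gtk.Window(title="animator demo")
    win.add(ListeningAnimator(win))
    win.connect("destroy", Gtk.main_quit)
    win.show_all()
    Gtk.main()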
|
neural_compressor/experimental/nas/nas.py | intel/neural-compressor | 172 | 12708680 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import shutil
from collections.abc import Iterable
from .nas_utils import find_pareto_front, NASMethods
from .search_algorithms import BayesianOptimizationSearcher, GridSearcher, RandomSearcher
from neural_compressor.conf.config import Conf
from neural_compressor.utils.utility import logger, LazyImport
torch = LazyImport('torch')
class NAS(object):
def __new__(self, conf_fname, *args, **kwargs):
if isinstance(conf_fname, str):
if os.path.isfile(conf_fname):
self.conf = Conf(conf_fname).usr_cfg
else: # pragma: no cover
raise NotImplementedError(
"Please provide a str path to the config file."
)
assert self.conf.nas is not None, "nas section must be set"
if isinstance(self.conf.nas.approach, str) and \
self.conf.nas.approach.lower() in NASMethods:
method = self.conf.nas.approach.lower()
else:
logger.warning(
"NAS approach not set in config, use default NAS approach, i.e. Basic."
)
method = 'basic'
return NASMethods[method](conf_fname, *args, **kwargs)
class NASBase(object):
"""
Args:
search_space (dict): A dictionary for defining the search space.
model_builder (function obj): A function to build model instance with the specified
model architecture parameters.
"""
def __init__(self, search_space=None, model_builder=None):
super().__init__()
self._search_space = search_space
self._model_builder = model_builder
self._search_algorithm = None
self.search_results = {}
self.best_model_archs = None
self.seed = None
def select_model_arch(self):
"""Propose architecture of the model based on search algorithm for next search iteration.
Returns:
Model architecture description.
"""
model_arch_paras = self._search_algorithm.suggest()
assert self.search_space_keys and isinstance(model_arch_paras, dict) and \
self.search_space_keys == list(model_arch_paras.keys()), \
"Keys of model_arch_paras should be the same with search_space_keys."
return model_arch_paras
def search(self, res_save_path=None):
"""NAS search process.
Returns:
Best model architecture found in search process.
"""
assert self.model_builder is not None, \
"Must specify model_builder for generating model instance by model architecture."
if res_save_path is None or not os.path.isdir(res_save_path):
res_save_path = os.getcwd()
save_path = os.path.join(res_save_path, 'NASResults')
self.model_paras_num = {}
self.load_search_results(save_path)
os.makedirs(save_path, exist_ok=True)
for i in range(self.max_trials):
logger.info(
"{fix} Trial {n} starts, {r} trials to go {fix}".format(
n=i+1, r=self.max_trials-i-1, fix="="*30
)
)
model_arch_paras = self.select_model_arch()
logger.info("Model architecture {} proposed.".format(model_arch_paras))
model = self._model_builder(model_arch_paras)
model_paras = self.count_model_parameters(model)
logger.info(
"***** Number of model parameters: {:.2f}M *****".format(model_paras / 10**6)
)
self.model_paras_num[tuple(model_arch_paras.values())] = model_paras
if tuple(model_arch_paras.values()) in self.search_results:
logger.info("Skip evaluated model architecture {}.".format(model_arch_paras))
continue
if tuple(model_arch_paras.values()) in self.resumed_search_results:
logger.info(
"Find previous results of model architecture: {}.".format(model_arch_paras)
)
metrics = self.resumed_search_results[tuple(model_arch_paras.values())]
else:
logger.info("Assessing model architecture: {}.".format(model_arch_paras))
metrics = self.estimate(model)
logger.info(
"Metrics of model architecture {} is {}.".format(model_arch_paras, metrics)
)
self.search_results[tuple(model_arch_paras.values())] = metrics
self._search_algorithm.get_feedback(sum(self.metrics_conversion(metrics)))
self.dump_search_results(
os.path.join(save_path, 'Trial_{}_results.txt'.format(i+1))
)
for model_arch_vec in self.resumed_search_results:
if model_arch_vec not in self.search_results:
self.search_results[model_arch_vec] = \
self.resumed_search_results[model_arch_vec]
model = self._model_builder(self.params_vec2params_dict(model_arch_vec))
self.model_paras_num[model_arch_vec] = self.count_model_parameters(model)
        self.dump_search_results(os.path.join(save_path, 'Final_results.txt'))
self.find_best_model_archs()
logger.info(
"{fix} Found {n} best model architectures {fix}".format(
n=len(self.best_model_archs), fix="="*30
)
)
for i, model_arch in enumerate(self.best_model_archs):
logger.info("Best model architecture {}: {}".format(i+1, model_arch))
return self.best_model_archs
def estimate(self, model): # pragma: no cover
"""Estimate performance of the model. Depends on specific NAS algorithm.
Returns:
Evaluated metrics of the model.
"""
raise NotImplementedError("Depends on specific NAS algorithm.")
def count_model_parameters(self, model):
if isinstance(model, torch.nn.Module):
return sum(p.numel() for p in model.parameters())
else:
raise NotImplementedError("Only support torch model now.") # pragma: no cover
def load_search_results(self, path):
self.resumed_search_results = {}
lastest_results_record = os.path.join(path, 'lastest_results.npy')
if not os.path.exists(path) or not os.path.exists(lastest_results_record):
return
self.resumed_search_results = np.load(lastest_results_record, allow_pickle=True).item()
os.makedirs(os.path.join(path, 'previous_results'), exist_ok=True)
for f in os.listdir(path):
if os.path.isfile(os.path.join(path, f)):
shutil.move(os.path.join(path, f), os.path.join(path, 'previous_results', f))
logger.info("Loaded previous results.")
def dump_search_results(self, path):
lastest_results_record = os.path.join(os.path.dirname(path), 'lastest_results.npy')
np.save(lastest_results_record, self.search_results, allow_pickle=True)
write_contents = '=' * 30 + ' All Search Results ' + '=' * 30 + '\n\n'
for model_arch_vec in self.search_results:
tmp = ','.join(['{}_{}'.format(k, v) \
for k, v in zip(self.search_space_keys, model_arch_vec)])
write_contents += '{}: {} Paras: {}M\n'.format(
tmp, self.search_results[model_arch_vec],
self.model_paras_num[model_arch_vec] / 10**6
)
write_contents += '\n\n\n' + '=' * 30 + ' Best Search Results ' + '=' * 30 + '\n\n'
self.find_best_model_archs()
for i, model_arch in enumerate(self.best_model_archs):
model_arch_vec = tuple(model_arch.values())
tmp = ','.join(['{}_{}'.format(k, v) \
for k, v in zip(self.search_space_keys, model_arch_vec)])
write_contents += \
'{}. {}: {} Paras: {}M\n'.format(
i+1, tmp, self.search_results[model_arch_vec],
self.model_paras_num[model_arch_vec] / 10**6
)
with open(path, mode='w') as f:
f.write(write_contents)
def params_vec2params_dict(self, paras_vec):
assert len(paras_vec) == len(self.search_space_keys), \
"Length of paras_vec and search_space_keys should be the same."
return {k:v for k, v in zip(self.search_space_keys, paras_vec)}
def find_best_model_archs(self):
assert len(self.search_results) > 0, "Zero result in search_results."
model_arches = list(self.search_results.keys())
metrics = [self.metrics_conversion(self.search_results[ma]) for ma in model_arches]
pareto_front_indices = find_pareto_front(metrics)
self.best_model_archs = [self.params_vec2params_dict(model_arches[i]) \
for i in pareto_front_indices]
def metrics_conversion(self, metrics):
if not isinstance(metrics, Iterable):
metrics = [metrics]
if isinstance(metrics, dict):
if self.metrics is None:
self.metrics = list(metrics.keys())
assert list(metrics.keys()) == list(self.metrics), \
"Keys of metrics not match with metrics in the configuration."
metrics = list(metrics.values())
if self.higher_is_better is None:
self.higher_is_better = [True,] * len(metrics)
logger.warning("higher_is_better not set in the configuration, " + \
"set it to all True for every metric entry by default.")
converted_metrics = [metric if higher_is_better else -metric \
for metric, higher_is_better in zip(metrics, self.higher_is_better)]
return converted_metrics
def init_search_cfg(self, config):
self.search_cfg = config.search
if not self._search_space:
self._search_space = self.search_cfg.search_space
else:
logger.warning(
"Use user provided search space {}, instead of search space "
"defined in the config, i.e. {}.".format(
self._search_space, self.search_cfg.search_space
)
)
assert isinstance(self._search_space, dict) and len(self._search_space) > 0, \
"Must provide a dict as search_space for NAS."
self.search_space_keys = sorted(self.search_space.keys())
for k in self.search_space_keys:
assert isinstance(self.search_space[k], (list, tuple)), \
"Value of key \'{}\' must be a list or tuple".format(k)
self.metrics = self.search_cfg.metrics \
if self.search_cfg.metrics else None
self.higher_is_better = self.search_cfg.higher_is_better \
if self.search_cfg.higher_is_better else None
self.seed = self.search_cfg.seed
self.max_trials = self.search_cfg.max_trials \
if self.search_cfg.max_trials is not None else 3 # set default 3 for max_trials
self.search_algorithm_type = self.search_cfg.search_algorithm \
if self.search_cfg.search_algorithm else None
if not self.search_algorithm_type:
self._search_algorithm = BayesianOptimizationSearcher(self.search_space, self.seed)
elif self.search_algorithm_type.lower() == 'grid':
self._search_algorithm = GridSearcher(self.search_space)
elif self.search_algorithm_type.lower() == 'random':
self._search_algorithm = RandomSearcher(self.search_space, self.seed)
elif self.search_algorithm_type.lower() == 'bo':
self._search_algorithm = BayesianOptimizationSearcher(self.search_space, self.seed)
else: # pragma: no cover
raise NotImplementedError(
'Unsupported \'{}\' search algorithm'.format(self.search_algorithm_type)
)
@property
def search_space(self):
return self._search_space
@search_space.setter
def search_space(self, search_space):
self._search_space = search_space
@property
def search_algorithm(self):
return self._search_algorithm
@search_algorithm.setter
def search_algorithm(self, search_algorithm):
self._search_algorithm = search_algorithm
@property
def model_builder(self):
return self._model_builder
@model_builder.setter
def model_builder(self, model_builder):
self._model_builder = model_builder
def __repr__(self):
return 'Base Class of NAS' # pragma: no cover |
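# A minimal sketch of the contract NASBase expects from a subclass, following
# the docstrings above: a search space mapping each architecture knob to a list
# of choices, a model_builder that turns one sampled point into a model, and an
# estimate() override returning the metrics to optimize.  The layer sizes,
# metric name and helper names below are illustrative, not part of this package.
def _toy_model_builder(model_arch_paras):
    return torch.nn.Sequential(
        torch.nn.Linear(28 * 28, model_arch_paras['hidden_size']),
        torch.nn.ReLU(),
        torch.nn.Linear(model_arch_paras['hidden_size'], 10),
    )

class _ToyNAS(NASBase):
    def estimate(self, model):
        # A real subclass would train/evaluate `model`; parameter count serves
        # here only as a stand-in metric.
        return {'params': self.count_model_parameters(model)}

# _toy = _ToyNAS(search_space={'hidden_size': [64, 128, 256]},
#                model_builder=_toy_model_builder)
# Running _toy.search() would additionally require init_search_cfg() with a
# config that defines max_trials, metrics and the search algorithm.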
scripts/build_single_hop_qa_data.py | luxuantao/golden-retriever | 195 | 12708681 | <filename>scripts/build_single_hop_qa_data.py
from collections import Counter
from copy import copy
import json
from tqdm import tqdm
from search.search import bulk_text_query
from utils.general import chunks
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('split', choices=['train', 'dev'])
args = parser.parse_args()
if args.split == 'train':
filename = 'data/hotpotqa/hotpot_train_v1.1.json'
outputname = 'data/hotpotqa/hotpot_train_single_hop.json'
else:
filename = 'data/hotpotqa/hotpot_dev_fullwiki_v1.json'
outputname = 'data/hotpotqa/hotpot_dev_single_hop.json'
batch_size = 64
with open(filename) as f:
data = json.load(f)
outputdata = []
processed = 0
for batch in tqdm(chunks(data, batch_size), total=(len(data) + batch_size - 1) // batch_size):
queries = [x['question'] for x in batch]
res = bulk_text_query(queries, topn=10, lazy=False)
for r, d in zip(res, batch):
d1 = copy(d)
context = [item['data_object'] for item in r]
context = [(x['title'], x['text']) for x in context]
d1['context'] = context
outputdata.append(d1)
processed += len(batch)
with open(outputname, 'w') as f:
json.dump(outputdata, f)
if __name__ == "__main__":
main()
|
challenges/Single-Sign-On/poller/for-release/machine.py | pingjuiliao/cb-multios | 473 | 12708723 | <filename>challenges/Single-Sign-On/poller/for-release/machine.py
#!/usr/bin/env python
#
# Copyright (C) 2014 Narf Industries <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from generator.actions import Actions, Variable
import string
import random
import itertools
import math
from struct import *
from collections import Counter
class TemplateGenerator(Actions):
def start(self):
self.token = pack('l', 4)
self.resource = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(5))
self.users = []
self.commands = ['REQR', 'LOUT', 'REVR', 'REVU']
def command_no_user_failure(self):
command = random.choice(self.commands)
user_id = random.randint(1,100000)
while Counter(elem[0][0] for elem in self.users)[user_id] > 0:
user_id = random.randint(1,100000)
user_id_str = pack('l', user_id)
self.write(command + user_id_str + self.token + self.resource + '\x07')
self.read(delim="\n", expect = "Command failed.\n")
def request_no_resource_failure(self):
user = random.choice(self.users)
user_id_str = pack('l', user[0][0])
resource = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(5))
while Counter(elem[0] for elem in user[1])[resource] > 0:
resource = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(5))
self.write("REQR" + user_id_str + self.token + resource + '\x07')
self.read(delim="\n", expect = "Please re-authenticate.\n")
def request_resource_wrong_token_failure(self):
auth_val = Variable('authval')
if len(filter(lambda x: x[1], self.users)) == 0:
self.auth_success_with_auth_val()
user = random.choice(filter(lambda x: x[1], self.users))
user_id_str = pack('l', user[0][0])
resource = random.choice(user[1])
self.write("REQR" + user_id_str + self.token + resource[0] + '\x07')
regex_str = 'Authentication failed for resource %s(.*)\x0a' % resource[0]
auth_val.set_re(regex_str, group=1)
self.read(delim="\n", assign=auth_val)
self.read(delim="\n", expect = "Please re-authenticate.\n")
def request_success(self):
my_token = Variable('token')
if len(filter(lambda x: x[1], self.users)) == 0:
self.auth_success_with_auth_val()
user = random.choice(filter(lambda x: x[1], self.users))
user_id_str = pack('l', user[0][0])
resource = random.choice(user[1])
self.write("REQR" + user_id_str)
self.write(resource[1])
self.write(resource[0] + '\x07')
regex_str = 'Access to %s is granted!(.*)\x0a' % resource[0]
my_token.set_re(regex_str, group=1)
self.read(delim="\n", assign=my_token)
def auth_failure_new_user(self):
user_id = random.randint(2,100000)
while Counter(elem[0][0] for elem in self.users)[user_id] > 0:
user_id = random.randint(2,100000)
user_id_str = pack('l', user_id)
varname = "user"+str(user_id)
variable = Variable(varname)
user_item = [user_id, variable]
self.users.append([user_item, []])
resource = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10))
self.write("AUTH" + user_id_str + self.token + resource + '\x07')
regex_str = 'Authentication failed for resource %s(.*)\x0a' % resource
variable.set_re(regex_str, group=1)
self.read(delim="\n", assign=variable)
def auth_failure_current_user(self):
user = random.choice(self.users)
user_id_str = pack('l', user[0][0])
resource = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10))
variable = user[0][1]
self.write("AUTH" + user_id_str + self.token + resource + '\x07')
regex_str = 'Authentication failed for resource %s(.*)\x0a' % resource
variable.set_re(regex_str, group=1)
self.read(delim="\n", assign=variable)
def auth_success_with_auth_val(self):
user = random.choice(self.users)
user_id_str = pack('l', user[0][0])
resource = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(9))
varname = resource+str(user[0][0])
variable = Variable(varname)
resource_item = [resource, variable]
user[1].append(resource_item)
self.write("AUTH" + user_id_str)
self.write(user[0][1])
self.write(resource + '\x07')
regex_str = 'Access to %s is granted!(.*)\x0a' % resource
variable.set_re(regex_str, group=1)
self.read(delim="\n", assign=variable)
def revoke_resource_success(self):
if len(filter(lambda x: x[1], self.users)) == 0:
self.auth_success_with_auth_val()
user = random.choice(filter(lambda x: x[1], self.users))
user_id_str = pack('l', user[0][0])
resource = random.choice(user[1])
user[1].remove(resource)
self.write("REVR" + user_id_str)
self.write(resource[1])
self.write(resource[0] + '\x07')
self.read(delim="\n", expect="Resource revoked.\n")
def revoke_resource_failure(self):
if len(filter(lambda x: x[1], self.users)) == 0:
self.auth_success_with_auth_val()
user = random.choice(filter(lambda x: x[1], self.users))
user_id_str = pack('l', user[0][0])
resource = random.choice(user[1])
token = self.token
self.write("REVR" + user_id_str)
self.write(token)
self.write(resource[0] + '\x07')
self.read(delim="\n", expect="Revocation denied.\n")
def logout(self):
if not self.users:
return self.quit()
user = random.choice(self.users)
user_id_str = pack('l', user[0][0])
self.users.remove(user)
self.write("LOUT" + user_id_str + self.token + self.resource + '\x07')
self.read(delim="\n", expect="Logged out successfully.\n")
def quit(self):
return -1
|
Python/example.py | tsadakane/TIGRE | 326 | 12708773 | <gh_stars>100-1000
from __future__ import division
from __future__ import print_function
import numpy as np
import tigre
import tigre.algorithms as algs
from tigre.utilities import sample_loader
from tigre.utilities.Measure_Quality import Measure_Quality
import tigre.utilities.gpu as gpu
import matplotlib.pyplot as plt
### This is just a basic example of very few TIGRE functionallity.
# We hihgly recomend checking the Demos folder, where most if not all features of tigre are demoed.
listGpuNames = gpu.getGpuNames()
if len(listGpuNames) == 0:
print("Error: No gpu found")
else:
for id in range(len(listGpuNames)):
print("{}: {}".format(id, listGpuNames[id]))
gpuids = gpu.getGpuIds(listGpuNames[0])
print(gpuids)
# Geometry
# geo1 = tigre.geometry(mode='cone', high_resolution=False, default=True)
geo = tigre.geometry(mode="cone", nVoxel=np.array([256, 256, 256]), default=True)
geo.dDetector = np.array([0.8, 0.8]) * 2 # size of each pixel (mm)
geo.sDetector = geo.dDetector * geo.nDetector
# print(geo)
nangles = 100
angles = np.linspace(0, 2 * np.pi, nangles, endpoint=False, dtype=np.float32)
# Prepare projection data
head = sample_loader.load_head_phantom(geo.nVoxel)
proj = tigre.Ax(head, geo, angles, gpuids=gpuids)
# Reconstruct
niter = 20
fdkout = algs.fdk(proj, geo, angles, gpuids=gpuids)
ossart = algs.ossart(proj, geo, angles, niter, blocksize=20, gpuids=gpuids)
# Measure Quality
# 'RMSE', 'MSSIM', 'SSD', 'UQI'
print("RMSE fdk:")
print(Measure_Quality(fdkout, head, ["nRMSE"]))
print("RMSE ossart")
print(Measure_Quality(ossart, head, ["nRMSE"]))
# Plot
fig, axes = plt.subplots(3, 2)
axes[0, 0].set_title("FDK")
axes[0, 0].imshow(fdkout[geo.nVoxel[0] // 2])
axes[1, 0].imshow(fdkout[:, geo.nVoxel[1] // 2, :])
axes[2, 0].imshow(fdkout[:, :, geo.nVoxel[2] // 2])
axes[0, 1].set_title("OS-SART")
axes[0, 1].imshow(ossart[geo.nVoxel[0] // 2])
axes[1, 1].imshow(ossart[:, geo.nVoxel[1] // 2, :])
axes[2, 1].imshow(ossart[:, :, geo.nVoxel[2] // 2])
plt.show()
# tigre.plotProj(proj)
# tigre.plotImg(fdkout)
|
test/common.py | pencc/bfs | 3,106 | 12708785 | <reponame>pencc/bfs<gh_stars>1000+
"""
Copyright (c) 2016, Baidu.com, Inc. All Rights Reserved
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
"""
import subprocess
import filecmp
import os
import time
import nose.tools
import json
import commands
import string
from conf import const
def check_core():
ret = 0
(ret1, out1, err1) = runcmd("find %s -name \"core.*\" | grep \"core.*\"" % (const.bfs_nameserver_dir))
if(ret1 == 0):
runcmd("mkdir -p ./coresave/nameserver && find %s -name \"core.*\" | xargs -i mv {} ./coresave/nameserver" % (const.bfs_nameserver_dir))
ret = 1
(ret2, out2, err2) = runcmd("find %s -name \"core.*\" | grep \"core.*\"" % (const.bfs_chunkserver_dir))
if(ret2 == 0):
runcmd("mkdir -p ./coresave/chunkserver && find %s -name \"core.*\" | xargs -i mv {} ./coresave/chunkserver" % (const.bfs_chunkserver_dir))
ret = 1
(ret3, out3, err3) = runcmd("find %s -name \"core.*\" -maxdepth 1 | grep \"core.*\"" % (const.bfs_client_dir))
if(ret3 == 0):
runcmd("mkdir -p ./coresave/client && find %s -name \"core.*\" -maxdepth 1 | xargs -i mv {} ./coresave/client" % (const.bfs_client_dir))
ret = 1
return ret
def runcmd(cmd):
"""
run cmd and return ret,out,err
"""
print time.strftime("%Y%m%d-%H%M%S") + " command: "+cmd
p = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)
(out, err)=p.communicate()
print "stdout: ", out
print "stderr: ", err
print "returncode: %d" % p.returncode
ret = p.returncode
return ret, out, err
def bfs_deploy():
print "deploy bfs"
ret = subprocess.Popen(const.deploy_script, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
print ''.join(ret.stdout.readlines())
print ''.join(ret.stderr.readlines())
def bfs_start():
print "start bfs"
ret = subprocess.Popen(const.start_script, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
print ''.join(ret.stdout.readlines())
print ''.join(ret.stderr.readlines())
def bfs_clear():
ret = subprocess.Popen(const.clear_script, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
print ''.join(ret.stdout.readlines())
print ''.join(ret.stderr.readlines())
def bfs_stop_all():
print "stop bfs"
(ret, out, err) = runcmd('killall -9 nameserver')
(ret, out, err) = runcmd('killall -9 chunkserver')
(ret, out, err) = runcmd('killall -9 bfs_client')
def check_process(type="all"):
ret = 0
if type == "all" or type == "nameserver" :
output = commands.getoutput("find %s -name pid | xargs -i cat {}" % const.bfs_nameserver_dir)
pid_array = output.split('\n')
for i in range(len(pid_array)):
pid = pid_array[i]
ret1 = os.system('ps x | grep %s | grep -v "grep"' % pid)
if ret1 != 0:
print "nameserver process %s not exist " % pid
ret = 1
if type == "all" or type == "chunkserver" :
output = commands.getoutput("find %s -name pid | xargs -i cat {}" % const.bfs_chunkserver_dir)
pid_array = output.split('\n')
for i in range(len(pid_array)):
pid = pid_array[i]
ret1 = os.system('ps x | grep %s | grep -v "grep"' % pid)
if ret1 != 0:
print "chunkserver process %s not exist " % pid
ret = 1
return ret
def modify_conf(file, key, value):
(ret, out, err) = runcmd("sed -i 's/%s.*$/%s=%s/g' %s" % (key, key, value, file))
assert(ret == 0)
def check_log(file, str):
(ret, out, err) = runcmd("grep %s %s" % (str, file))
return ret, out, err
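# A minimal sketch of how the helpers above compose into a test flow.  The
# bfs_client command string is illustrative; substitute a real client command
# from the test suite.
def example_restart_cycle():
    bfs_stop_all()
    bfs_clear()
    bfs_start()
    assert check_process("all") == 0
    (ret, out, err) = runcmd("cd %s && ./bfs_client touchz /example_file" % const.bfs_client_dir)
    assert ret == 0
    assert check_core() == 0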
|
src/clusterfuzz/_internal/protos/untrusted_runner_pb2_grpc.py | mspectorgoogle/clusterfuzz | 5,023 | 12708808 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from clusterfuzz._internal.protos import untrusted_runner_pb2 as clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2
class UntrustedRunnerStub(object):
"""UntrusterRunner service.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetStatus = channel.unary_unary(
'/UntrustedRunner/GetStatus',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.GetStatusRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.GetStatusResponse.FromString,
)
self.SetupRegularBuild = channel.unary_unary(
'/UntrustedRunner/SetupRegularBuild',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.SetupRegularBuildRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.SetupBuildResponse.FromString,
)
self.RunProcess = channel.unary_unary(
'/UntrustedRunner/RunProcess',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RunProcessRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RunProcessResponse.FromString,
)
self.RunAndWait = channel.unary_unary(
'/UntrustedRunner/RunAndWait',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RunAndWaitRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RunAndWaitResponse.FromString,
)
self.CreateDirectory = channel.unary_unary(
'/UntrustedRunner/CreateDirectory',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.CreateDirectoryRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.CreateDirectoryResponse.FromString,
)
self.RemoveDirectory = channel.unary_unary(
'/UntrustedRunner/RemoveDirectory',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RemoveDirectoryRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RemoveDirectoryResponse.FromString,
)
self.ListFiles = channel.unary_unary(
'/UntrustedRunner/ListFiles',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.ListFilesRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.ListFilesResponse.FromString,
)
self.CopyFileTo = channel.stream_unary(
'/UntrustedRunner/CopyFileTo',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.FileChunk.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.CopyFileToResponse.FromString,
)
self.CopyFileFrom = channel.unary_stream(
'/UntrustedRunner/CopyFileFrom',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.CopyFileFromRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.FileChunk.FromString,
)
self.Stat = channel.unary_unary(
'/UntrustedRunner/Stat',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.StatRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.StatResponse.FromString,
)
self.UpdateEnvironment = channel.unary_unary(
'/UntrustedRunner/UpdateEnvironment',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.UpdateEnvironmentRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.UpdateEnvironmentResponse.FromString,
)
self.ResetEnvironment = channel.unary_unary(
'/UntrustedRunner/ResetEnvironment',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.ResetEnvironmentRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.ResetEnvironmentResponse.FromString,
)
self.UpdateSource = channel.unary_unary(
'/UntrustedRunner/UpdateSource',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.UpdateSourceRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.UpdateSourceResponse.FromString,
)
self.SymbolizeStacktrace = channel.unary_unary(
'/UntrustedRunner/SymbolizeStacktrace',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.SymbolizeStacktraceRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.SymbolizeStacktraceResponse.FromString,
)
self.TerminateStaleApplicationInstances = channel.unary_unary(
'/UntrustedRunner/TerminateStaleApplicationInstances',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.TerminateStaleApplicationInstancesRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.TerminateStaleApplicationInstancesResponse.FromString,
)
self.GetFuzzTargets = channel.unary_unary(
'/UntrustedRunner/GetFuzzTargets',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.GetFuzzTargetsRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.GetFuzzTargetsResponse.FromString,
)
self.PruneCorpus = channel.unary_unary(
'/UntrustedRunner/PruneCorpus',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.PruneCorpusRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.PruneCorpusResponse.FromString,
)
self.ProcessTestcase = channel.unary_unary(
'/UntrustedRunner/ProcessTestcase',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.ProcessTestcaseRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.EngineReproduceResult.FromString,
)
self.EngineFuzz = channel.unary_unary(
'/UntrustedRunner/EngineFuzz',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.EngineFuzzRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.EngineFuzzResponse.FromString,
)
self.EngineReproduce = channel.unary_unary(
'/UntrustedRunner/EngineReproduce',
request_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.EngineReproduceRequest.SerializeToString,
response_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.EngineReproduceResult.FromString,
)
class UntrustedRunnerServicer(object):
"""UntrusterRunner service.
"""
def GetStatus(self, request, context):
"""Get information about the worker.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetupRegularBuild(self, request, context):
"""Set up regular build.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RunProcess(self, request, context):
"""Run command using process_handler.runProcess
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RunAndWait(self, request, context):
"""Run command using new_process.ProcessRunner
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateDirectory(self, request, context):
"""Create a directory.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RemoveDirectory(self, request, context):
"""Remove a directory.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListFiles(self, request, context):
"""List files in a directory.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CopyFileTo(self, request_iterator, context):
"""Copy file from host to worker.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CopyFileFrom(self, request, context):
"""Copy file from worker to host.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Stat(self, request, context):
"""Call stat() on a path.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateEnvironment(self, request, context):
"""Environment variable changes.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ResetEnvironment(self, request, context):
"""Reset environment variables.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateSource(self, request, context):
"""Update source.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SymbolizeStacktrace(self, request, context):
"""Symbolize a stacktrace.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TerminateStaleApplicationInstances(self, request, context):
"""Terminate stale application instances.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetFuzzTargets(self, request, context):
"""libFuzzer/AFL specific: get list of fuzz targets.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PruneCorpus(self, request, context):
"""libFuzzer specific: corpus pruning
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ProcessTestcase(self, request, context):
"""Engine specific: Do testcase minimization or cleanse.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def EngineFuzz(self, request, context):
"""Engine specific: Do fuzzing.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def EngineReproduce(self, request, context):
"""Engine specific: Do reproduction.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_UntrustedRunnerServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetStatus': grpc.unary_unary_rpc_method_handler(
servicer.GetStatus,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.GetStatusRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.GetStatusResponse.SerializeToString,
),
'SetupRegularBuild': grpc.unary_unary_rpc_method_handler(
servicer.SetupRegularBuild,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.SetupRegularBuildRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.SetupBuildResponse.SerializeToString,
),
'RunProcess': grpc.unary_unary_rpc_method_handler(
servicer.RunProcess,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RunProcessRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RunProcessResponse.SerializeToString,
),
'RunAndWait': grpc.unary_unary_rpc_method_handler(
servicer.RunAndWait,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RunAndWaitRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RunAndWaitResponse.SerializeToString,
),
'CreateDirectory': grpc.unary_unary_rpc_method_handler(
servicer.CreateDirectory,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.CreateDirectoryRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.CreateDirectoryResponse.SerializeToString,
),
'RemoveDirectory': grpc.unary_unary_rpc_method_handler(
servicer.RemoveDirectory,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RemoveDirectoryRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RemoveDirectoryResponse.SerializeToString,
),
'ListFiles': grpc.unary_unary_rpc_method_handler(
servicer.ListFiles,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.ListFilesRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.ListFilesResponse.SerializeToString,
),
'CopyFileTo': grpc.stream_unary_rpc_method_handler(
servicer.CopyFileTo,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.FileChunk.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.CopyFileToResponse.SerializeToString,
),
'CopyFileFrom': grpc.unary_stream_rpc_method_handler(
servicer.CopyFileFrom,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.CopyFileFromRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.FileChunk.SerializeToString,
),
'Stat': grpc.unary_unary_rpc_method_handler(
servicer.Stat,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.StatRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.StatResponse.SerializeToString,
),
'UpdateEnvironment': grpc.unary_unary_rpc_method_handler(
servicer.UpdateEnvironment,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.UpdateEnvironmentRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.UpdateEnvironmentResponse.SerializeToString,
),
'ResetEnvironment': grpc.unary_unary_rpc_method_handler(
servicer.ResetEnvironment,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.ResetEnvironmentRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.ResetEnvironmentResponse.SerializeToString,
),
'UpdateSource': grpc.unary_unary_rpc_method_handler(
servicer.UpdateSource,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.UpdateSourceRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.UpdateSourceResponse.SerializeToString,
),
'SymbolizeStacktrace': grpc.unary_unary_rpc_method_handler(
servicer.SymbolizeStacktrace,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.SymbolizeStacktraceRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.SymbolizeStacktraceResponse.SerializeToString,
),
'TerminateStaleApplicationInstances': grpc.unary_unary_rpc_method_handler(
servicer.TerminateStaleApplicationInstances,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.TerminateStaleApplicationInstancesRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.TerminateStaleApplicationInstancesResponse.SerializeToString,
),
'GetFuzzTargets': grpc.unary_unary_rpc_method_handler(
servicer.GetFuzzTargets,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.GetFuzzTargetsRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.GetFuzzTargetsResponse.SerializeToString,
),
'PruneCorpus': grpc.unary_unary_rpc_method_handler(
servicer.PruneCorpus,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.PruneCorpusRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.PruneCorpusResponse.SerializeToString,
),
'ProcessTestcase': grpc.unary_unary_rpc_method_handler(
servicer.ProcessTestcase,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.ProcessTestcaseRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.EngineReproduceResult.SerializeToString,
),
'EngineFuzz': grpc.unary_unary_rpc_method_handler(
servicer.EngineFuzz,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.EngineFuzzRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.EngineFuzzResponse.SerializeToString,
),
'EngineReproduce': grpc.unary_unary_rpc_method_handler(
servicer.EngineReproduce,
request_deserializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.EngineReproduceRequest.FromString,
response_serializer=clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.EngineReproduceResult.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'UntrustedRunner', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class UntrustedRunner(object):
"""UntrusterRunner service.
"""
@staticmethod
def GetStatus(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/GetStatus',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.GetStatusRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.GetStatusResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetupRegularBuild(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/SetupRegularBuild',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.SetupRegularBuildRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.SetupBuildResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RunProcess(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/RunProcess',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RunProcessRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RunProcessResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RunAndWait(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/RunAndWait',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RunAndWaitRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RunAndWaitResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateDirectory(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/CreateDirectory',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.CreateDirectoryRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.CreateDirectoryResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RemoveDirectory(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/RemoveDirectory',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RemoveDirectoryRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.RemoveDirectoryResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListFiles(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/ListFiles',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.ListFilesRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.ListFilesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CopyFileTo(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_unary(request_iterator, target, '/UntrustedRunner/CopyFileTo',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.FileChunk.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.CopyFileToResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CopyFileFrom(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/UntrustedRunner/CopyFileFrom',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.CopyFileFromRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.FileChunk.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Stat(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/Stat',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.StatRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.StatResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateEnvironment(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/UpdateEnvironment',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.UpdateEnvironmentRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.UpdateEnvironmentResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ResetEnvironment(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/ResetEnvironment',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.ResetEnvironmentRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.ResetEnvironmentResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateSource(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/UpdateSource',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.UpdateSourceRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.UpdateSourceResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SymbolizeStacktrace(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/SymbolizeStacktrace',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.SymbolizeStacktraceRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.SymbolizeStacktraceResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def TerminateStaleApplicationInstances(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/TerminateStaleApplicationInstances',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.TerminateStaleApplicationInstancesRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.TerminateStaleApplicationInstancesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetFuzzTargets(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/GetFuzzTargets',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.GetFuzzTargetsRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.GetFuzzTargetsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PruneCorpus(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/PruneCorpus',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.PruneCorpusRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.PruneCorpusResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ProcessTestcase(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/ProcessTestcase',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.ProcessTestcaseRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.EngineReproduceResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def EngineFuzz(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/EngineFuzz',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.EngineFuzzRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.EngineFuzzResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def EngineReproduce(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/UntrustedRunner/EngineReproduce',
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.EngineReproduceRequest.SerializeToString,
clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.EngineReproduceResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
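# For illustration only: a minimal sketch of calling one of the experimental
# convenience wrappers above. The endpoint address and the helper name are
# assumptions (they are not part of the generated code); the request type comes
# from the pb2 module imported at the top of this file.
def _example_get_status(address='localhost:50051'):
    request = clusterfuzz_dot___internal_dot_protos_dot_untrusted__runner__pb2.GetStatusRequest()
    return UntrustedRunner.GetStatus(request, address, insecure=True)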
|
recipes/Python/576939_Convert_Dictionary_to_XML/recipe-576939.py | tdiprima/code | 2,023 | 12708818 | def dict2xml(d, xml=''):
    # Recursively serialise a dictionary into a flat XML string: nested dicts
    # become nested elements, every other value is rendered with str().
    for key, value in d.items():
        if isinstance(value, dict):
            content = dict2xml(value)
        else:
            content = value
        xml += '<%s>%s</%s>' % (key, str(content), key)
    return xml
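# Example usage of the helper above (output shown for illustration):
if __name__ == '__main__':
    sample = {'person': {'name': 'Alice', 'city': 'Berlin'}}
    print(dict2xml(sample))  # <person><name>Alice</name><city>Berlin</city></person>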
|
hypertunity/trial.py | LaudateCorpus1/hypertunity | 130 | 12708826 | """A wrapper class for conducting multiple experiments, scheduling jobs and
saving results.
"""
from typing import Callable, Type, Union
from hypertunity import optimisation, reports, utils
from hypertunity.domain import Domain
from hypertunity.optimisation import Optimiser
from hypertunity.reports import Reporter
from hypertunity.scheduling import Job, Scheduler, SlurmJob
__all__ = [
"Trial"
]
OptimiserTypes = Union[str, Type[Optimiser], Optimiser]
ReporterTypes = Union[str, Type[Reporter], Reporter]
class Trial:
"""High-level API class for running hyperparameter optimisation.
This class encapsulates optimiser querying, job building, scheduling and
results collection as well as checkpointing and report generation.
"""
@utils.support_american_spelling
def __init__(self, objective: Union[Callable, str],
domain: Domain,
optimiser: OptimiserTypes = "bo",
reporter: ReporterTypes = "table",
device: str = "local",
**kwargs):
"""Initialise the :class:`Trial` experiment manager.
Args:
objective: :obj:`Callable` or :obj:`str`. The objective function or
script to run.
domain: :class:`Domain`. The optimisation domain of the objective
function.
optimiser: :class:`Optimiser` or :obj:`str`. The optimiser method
for domain sampling.
reporter: :class:`Reporter` or :obj:`str`. The reporting method for
the results.
device: :obj:`str`. The host device running the evaluations. Can be
'local' or 'slurm'.
**kwargs: additional parameters for the optimiser, reporter and
scheduler.
Keyword Args:
timeout: :obj:`float`. The number of seconds to wait for a
:class:`Job` instance to finish. Default is 259200 seconds,
                i.e. 3 days.
"""
self.objective = objective
self.domain = domain
self.optimiser = self._init_optimiser(optimiser, **kwargs)
self.reporter = self._init_reporter(reporter, **kwargs)
self.scheduler = Scheduler
# 259200 is the number of seconds contained in 3 days
self._timeout = kwargs.get("timeout", 259200.0)
self._job = self._init_job(device)
def _init_optimiser(self, optimiser: OptimiserTypes, **kwargs) -> Optimiser:
        if isinstance(optimiser, str):
            optimiser_class = get_optimiser(optimiser)
        elif isinstance(optimiser, Optimiser):
            # Check for an instance first: issubclass() would raise a TypeError
            # if given an Optimiser instance rather than a class.
            return optimiser
        elif isinstance(optimiser, type) and issubclass(optimiser, Optimiser):
            optimiser_class = optimiser
        else:
            raise TypeError(
                "An optimiser must be either a string, "
                "an Optimiser type or an Optimiser instance."
            )
opt_kwargs = {}
if "seed" in kwargs:
opt_kwargs["seed"] = kwargs["seed"]
return optimiser_class(self.domain, **opt_kwargs)
def _init_reporter(self, reporter: ReporterTypes, **kwargs) -> Reporter:
        if isinstance(reporter, str):
            reporter_class = get_reporter(reporter)
        elif isinstance(reporter, Reporter):
            # Check for an instance first: issubclass() would raise a TypeError
            # if given a Reporter instance rather than a class.
            return reporter
        elif isinstance(reporter, type) and issubclass(reporter, Reporter):
            reporter_class = reporter
        else:
            raise TypeError("A reporter must be either a string, "
                            "a Reporter type or a Reporter instance.")
rep_kwargs = {"metrics": kwargs.get("metrics", ["score"]),
"database_path": kwargs.get("database_path", ".")}
if not issubclass(reporter_class, reports.Table):
rep_kwargs["logdir"] = kwargs.get("logdir", "tensorboard/")
return reporter_class(self.domain, **rep_kwargs)
@staticmethod
def _init_job(device: str) -> Type[Job]:
device = device.lower()
if device == "local":
return Job
if device == "slurm":
return SlurmJob
raise ValueError(
f"Unknown device {device}. Select one from {{'local', 'slurm'}}."
)
def run(self, n_steps: int, n_parallel: int = 1, **kwargs):
"""Run the optimisation and objective function evaluation.
Args:
n_steps: :obj:`int`. The total number of optimisation steps.
n_parallel: (optional) :obj:`int`. The number of jobs that can be
scheduled at once.
**kwargs: additional keyword arguments for the optimisation,
supplied to the :py:meth:`run_step` method of the
:class:`Optimiser` instance.
Keyword Args:
batch_size: (optional) :obj:`int`. The number of samples that are
suggested at once. Default is 1.
minimise: (optional) :obj:`bool`. If the optimiser is
:class:`BayesianOptimisation` then this flag tells whether the
objective function is being minimised or maximised. Otherwise
it has no effect. Default is `False`.
"""
batch_size = kwargs.get("batch_size", 1)
n_parallel = min(n_parallel, batch_size)
with self.scheduler(n_parallel=n_parallel) as scheduler:
for i in range(n_steps):
samples = self.optimiser.run_step(
batch_size=batch_size,
minimise=kwargs.get("minimise", False)
)
jobs = [
self._job(task=self.objective, args=s.as_dict())
for s in samples
]
scheduler.dispatch(jobs)
evaluations = [
r.data for r in scheduler.collect(
n_results=batch_size, timeout=self._timeout
)
]
self.optimiser.update(samples, evaluations)
for s, e, j in zip(samples, evaluations, jobs):
self.reporter.log((s, e), meta={"job_id": j.id})
def get_optimiser(name: str) -> Type[Optimiser]:
name = name.lower()
if name.startswith(("bayes", "bo")):
return optimisation.BayesianOptimisation
if name.startswith("random"):
return optimisation.RandomSearch
if name.startswith(("grid", "exhaustive")):
return optimisation.GridSearch
raise ValueError(
f"Unknown optimiser {name}. Select one from "
f"{{'bayesian_optimisation', 'random_search', 'grid_search'}}."
)
def get_reporter(name: str) -> Type[Reporter]:
name = name.lower()
if name.startswith("table"):
return reports.Table
if name.startswith(("tensor", "tb")):
        from hypertunity.reports import tensorboard as tb
return tb.Tensorboard
raise ValueError(
f"Unknown reporter {name}. Select one from {{'table', 'tensorboard'}}."
)
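# Minimal usage sketch of the Trial API above, for illustration only. The
# objective signature, the domain bounds and the optimiser/reporter choices
# below are assumptions, not definitions from this module:
#
#     from hypertunity.domain import Domain
#
#     def objective(x):
#         return -(x - 2.0) ** 2
#
#     domain = Domain({"x": [-5.0, 5.0]})
#     trial = Trial(objective=objective, domain=domain,
#                   optimiser="bo", reporter="table", device="local")
#     trial.run(n_steps=10, batch_size=2, n_parallel=2)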
|
convlab2/task/multiwoz/generate_goals.py | ljw23/ConvLab-2 | 339 | 12708868 | """
generate user goal for collecting new multiwoz data
"""
from convlab2.task.multiwoz.goal_generator import GoalGenerator
from convlab2.util.file_util import read_zipped_json
import random
import numpy as np
import json
import datetime
from pprint import pprint
def extract_slot_combination_from_goal(goal):
domains = ['attraction', 'hotel', 'restaurant', 'police', 'hospital', 'taxi', 'train']
serialized_goal = []
for domain in goal:
if domain in domains:
for scope, content in goal[domain].items():
if content:
# if isinstance(content, dict):
# for slot, value in content.items():
# serialized_goal.append("{}-{}-{}-{}".format(domain, scope, slot, value))
# else:
# for slot in content:
# serialized_goal.append("{}-{}-{}".format(domain, scope, slot))
for slot in content:
serialized_goal.append("{}-{}-{}".format(domain, scope, slot))
return sorted(serialized_goal)
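# For illustration: a goal such as
#   {'hotel': {'info': {'area': 'north'}, 'reqt': ['phone']}}
# is serialized by extract_slot_combination_from_goal to
#   ['hotel-info-area', 'hotel-reqt-phone']
# (slot names only; values are dropped and the result is sorted).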
def test_generate_overlap(total_num=1000, seed=42, output_file='goal.json'):
train_data = read_zipped_json('../../../data/multiwoz/train.json.zip', 'train.json')
train_serialized_goals = []
for d in train_data:
train_serialized_goals.append(extract_slot_combination_from_goal(train_data[d]['goal']))
test_data = read_zipped_json('../../../data/multiwoz/test.json.zip', 'test.json')
test_serialized_goals = []
for d in test_data:
test_serialized_goals.append(extract_slot_combination_from_goal(test_data[d]['goal']))
overlap = 0
for serialized_goal in test_serialized_goals:
if serialized_goal in train_serialized_goals:
overlap += 1
print(len(train_serialized_goals), len(test_serialized_goals), overlap) # 8434 1000 430
random.seed(seed)
np.random.seed(seed)
goal_generator = GoalGenerator()
goals = []
avg_domains = []
serialized_goals = []
while len(goals) < total_num:
goal = goal_generator.get_user_goal()
# pprint(goal)
if 'police' in goal['domain_ordering']:
no_police = list(goal['domain_ordering'])
no_police.remove('police')
goal['domain_ordering'] = tuple(no_police)
del goal['police']
try:
message = goal_generator.build_message(goal)[1]
except:
continue
# print(message)
avg_domains.append(len(goal['domain_ordering']))
goals.append({
"goals": [],
"ori_goals": goal,
"description": message,
"timestamp": str(datetime.datetime.now()),
"ID": len(goals)
})
serialized_goals.append(extract_slot_combination_from_goal(goal))
if len(serialized_goals) == 1:
print(serialized_goals)
overlap = 0
for serialized_goal in serialized_goals:
if serialized_goal in train_serialized_goals:
overlap += 1
print(len(train_serialized_goals), len(serialized_goals), overlap) # 8434 1000 199
def generate(total_num=1000, seed=42, output_file='goal.json'):
random.seed(seed)
np.random.seed(seed)
goal_generator = GoalGenerator()
goals = []
avg_domains = []
while len(goals) < total_num:
goal = goal_generator.get_user_goal()
# pprint(goal)
if 'police' in goal['domain_ordering']:
no_police = list(goal['domain_ordering'])
no_police.remove('police')
goal['domain_ordering'] = tuple(no_police)
del goal['police']
try:
message = goal_generator.build_message(goal)[1]
except:
continue
# print(message)
avg_domains.append(len(goal['domain_ordering']))
goals.append({
"goals": [],
"ori_goals": goal,
"description": message,
"timestamp": str(datetime.datetime.now()),
"ID": len(goals)
})
print('avg domains:', np.mean(avg_domains)) # avg domains: 1.846
json.dump(goals, open(output_file, 'w'), indent=4)
if __name__ == '__main__':
generate(output_file='goal20200629.json')
|
lib/python/batch_sim/scoring_functions/buoy.py | leozz37/makani | 1,178 | 12708873 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scoring functions relating to the buoy."""
from makani.lib.python.batch_sim import scoring_functions
from makani.lib.python.h5_utils import numpy_utils
import numpy as np
class BuoyWaterLineScoringFunction(
scoring_functions.SingleSidedLimitScoringFunction):
"""Score to evaluate the highest point that the water line reaches."""
def __init__(self, good_limit, bad_limit, severity):
super(BuoyWaterLineScoringFunction, self).__init__(
'Buoy Min. Water Line Distance to Threshold', 'm', good_limit,
bad_limit, severity)
assert good_limit > bad_limit
def GetSystemLabels(self):
return ['offshore', 'buoy']
def GetValue(self, output):
return output['water_line_min']
def GetOutput(self, timeseries):
return {'water_line_min': np.min(timeseries['water_line'])}
def GetTimeSeries(self, params, sim, control):
water_line = self._SelectTelemetry(sim, control, 'water_line')
return {'water_line': water_line}
class BuoyYawAngleScoringFunction(
scoring_functions.SingleSidedLimitScoringFunction):
"""Score to evaluate the maximum/minimum yaw angle."""
def __init__(self, good_limit, bad_limit, severity):
super(BuoyYawAngleScoringFunction, self).__init__(
'Buoy Peak Yaw Angle From Equilibrium', 'deg', good_limit,
bad_limit, severity)
assert good_limit < bad_limit
def GetSystemLabels(self):
return ['offshore', 'buoy']
def GetValue(self, output):
return output['peak_buoy_yaw_angle_deg']
def GetOutput(self, timeseries):
buoy_yaw_angle_from_eq_deg = timeseries['buoy_yaw_angle_from_eq_deg']
return {'peak_buoy_yaw_angle_deg': np.max(
np.fabs(buoy_yaw_angle_from_eq_deg))}
def GetTimeSeries(self, params, sim, control):
buoy_yaw_angle_from_eq = self._SelectTelemetry(sim, control,
'buoy_yaw_angle_from_eq')
return {'buoy_yaw_angle_from_eq_deg': np.degrees(buoy_yaw_angle_from_eq)}
class BuoyVesselOriginAccelScoringFunction(
scoring_functions.SingleSidedLimitScoringFunction):
"""Score to evaluate the maximum acceleration of the vessel frame origin."""
def __init__(self, good_limit, bad_limit, severity):
super(BuoyVesselOriginAccelScoringFunction, self).__init__(
'Buoy Vessel Origin Acceleration', 'g', good_limit,
bad_limit, severity)
assert good_limit < bad_limit
def GetSystemLabels(self):
return ['offshore', 'buoy']
def GetValue(self, output):
return output['peak_buoy_accel_norm_gs']
def GetOutput(self, timeseries):
buoy_accel_norm_gs = timeseries['buoy_accel_norm_gs']
return {'peak_buoy_accel_norm_gs': np.max(buoy_accel_norm_gs)}
def GetTimeSeries(self, params, sim, control):
buoy_accel_g = self._SelectTelemetry(sim, control, 'buoy_accel_g')
try:
buoy_accel_g_norm = np.sum(
np.abs(numpy_utils.Vec3ToArray(buoy_accel_g))**2, axis=-1)**(1./2)
except (TypeError, ValueError):
buoy_accel_g_norm = np.array([float('nan')])
return {'buoy_accel_norm_gs': buoy_accel_g_norm / 9.81}
|
Lib/test/_typed_dict_helper.py | oleksandr-pavlyk/cpython | 52,316 | 12708877 | """Used to test `get_type_hints()` on a cross-module inherited `TypedDict` class
This script uses future annotations to postpone a type that won't be available
on the module inheriting from to `Foo`. The subclass in the other module should
look something like this:
class Bar(_typed_dict_helper.Foo, total=False):
b: int
"""
from __future__ import annotations
from typing import Optional, TypedDict
OptionalIntType = Optional[int]
class Foo(TypedDict):
a: OptionalIntType
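# For illustration (run from a module that does not use postponed annotations):
#
#     class Bar(_typed_dict_helper.Foo, total=False):
#         b: int
#
#     get_type_hints(Bar)  # expected: {'a': Optional[int], 'b': int}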
|
hoomd/metal/pair.py | XT-Lee/hoomd-blue | 204 | 12708915 | # Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
R""" Metal pair potentials.
"""
from hoomd.md import force
from hoomd.md import nlist as nl # to avoid naming conflicts
import hoomd
from hoomd import _hoomd
from hoomd.md import _md
from hoomd.metal import _metal
import math
import sys
class eam(force._force):
R""" EAM pair potential.
Args:
file (str): File name with potential tables in Alloy or FS format
type (str): Type of file potential ('Alloy', 'FS')
nlist (:py:mod:`hoomd.md.nlist`): Neighbor list (default of None automatically creates a global cell-list based neighbor list)
:py:class:`eam` specifies that a EAM (embedded atom method) pair potential should be applied between every
non-excluded particle pair in the simulation.
No coefficients need to be set for :py:class:`eam`. All specifications, including the cutoff radius, form of the
potential, etc. are read in from the specified file.
Particle type names must match those referenced in the EAM potential file.
    Particle mass (in atomic mass) **must** be set in the input script; users may set mass values
    different from those in the potential file.
Two file formats are supported: *Alloy* and *FS*. They are described in LAMMPS documentation
(commands eam/alloy and eam/fs) here: http://lammps.sandia.gov/doc/pair_eam.html
and are also described here: http://enpub.fulton.asu.edu/cms/potentials/submain/format.htm
.. attention::
EAM is **NOT** supported in MPI parallel simulations.
Example::
nl = nlist.cell()
eam = pair.eam(file='name.eam.fs', type='FS', nlist=nl)
eam = pair.eam(file='name.eam.alloy', type='Alloy', nlist=nl)
"""
def __init__(self, file, type, nlist):
# Error out in MPI simulations
if (hoomd.version.mpi_enabled):
if hoomd.context.current.system_definition.getParticleData(
).getDomainDecomposition():
hoomd.context.current.device.cpp_msg.error(
"pair.eam is not supported in multi-processor simulations.\n\n"
)
raise RuntimeError("Error setting up pair potential.")
# initialize the base class
force._force.__init__(self)
# Translate type
if (type == 'Alloy'):
type_of_file = 0
elif (type == 'FS'):
type_of_file = 1
else:
raise RuntimeError('Unknown EAM input file type')
# create the c++ mirror class
if not hoomd.context.current.device.cpp_exec_conf.isCUDAEnabled():
self.cpp_force = _metal.EAMForceCompute(
hoomd.context.current.system_definition, file, type_of_file)
else:
self.cpp_force = _metal.EAMForceComputeGPU(
hoomd.context.current.system_definition, file, type_of_file)
        # After loading EAMForceCompute we know r_cut from the EAM potential's file, so we need to update the neighbor list.
self.r_cut_new = self.cpp_force.get_r_cut()
self.nlist = nlist
self.nlist.subscribe(lambda: self.get_rcut())
self.nlist.update_rcut()
        # Load the neighbor list into the compute.
self.cpp_force.set_neighbor_list(self.nlist.cpp_nlist)
if hoomd.context.current.device.cpp_exec_conf.isCUDAEnabled():
self.nlist.cpp_nlist.setStorageMode(
_md.NeighborList.storageMode.full)
hoomd.context.current.device.cpp_msg.notice(
2, "Set r_cut = " + str(self.r_cut_new) + " from potential`s file '"
+ str(file) + "'.\n")
hoomd.context.current.system.addCompute(self.cpp_force, self.force_name)
self.pair_coeff = hoomd.md.pair.coeff()
def get_rcut(self):
# go through the list of only the active particle types in the simulation
ntypes = hoomd.context.current.system_definition.getParticleData(
).getNTypes()
type_list = []
for i in range(0, ntypes):
type_list.append(hoomd.context.current.system_definition
.getParticleData().getNameByType(i))
# update the rcut by pair type
r_cut_dict = nl.rcut()
for i in range(0, ntypes):
for j in range(i, ntypes):
# get the r_cut value
r_cut_dict.set_pair(type_list[i], type_list[j], self.r_cut_new)
return r_cut_dict
def update_coeffs(self):
# check that the pair coefficients are valid
pass
|
env/Lib/site-packages/OpenGL/raw/GLES2/OES/geometry_shader.py | 5gconnectedbike/Navio2 | 210 | 12708922 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_OES_geometry_shader'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_OES_geometry_shader',error_checker=_errors._error_checker)
GL_FIRST_VERTEX_CONVENTION_OES=_C('GL_FIRST_VERTEX_CONVENTION_OES',0x8E4D)
GL_FRAMEBUFFER_ATTACHMENT_LAYERED_OES=_C('GL_FRAMEBUFFER_ATTACHMENT_LAYERED_OES',0x8DA7)
GL_FRAMEBUFFER_DEFAULT_LAYERS_OES=_C('GL_FRAMEBUFFER_DEFAULT_LAYERS_OES',0x9312)
GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_OES=_C('GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_OES',0x8DA8)
GL_GEOMETRY_LINKED_INPUT_TYPE_OES=_C('GL_GEOMETRY_LINKED_INPUT_TYPE_OES',0x8917)
GL_GEOMETRY_LINKED_OUTPUT_TYPE_OES=_C('GL_GEOMETRY_LINKED_OUTPUT_TYPE_OES',0x8918)
GL_GEOMETRY_LINKED_VERTICES_OUT_OES=_C('GL_GEOMETRY_LINKED_VERTICES_OUT_OES',0x8916)
GL_GEOMETRY_SHADER_BIT_OES=_C('GL_GEOMETRY_SHADER_BIT_OES',0x00000004)
GL_GEOMETRY_SHADER_INVOCATIONS_OES=_C('GL_GEOMETRY_SHADER_INVOCATIONS_OES',0x887F)
GL_GEOMETRY_SHADER_OES=_C('GL_GEOMETRY_SHADER_OES',0x8DD9)
GL_LAST_VERTEX_CONVENTION_OES=_C('GL_LAST_VERTEX_CONVENTION_OES',0x8E4E)
GL_LAYER_PROVOKING_VERTEX_OES=_C('GL_LAYER_PROVOKING_VERTEX_OES',0x825E)
GL_LINES_ADJACENCY_OES=_C('GL_LINES_ADJACENCY_OES',0x000A)
GL_LINE_STRIP_ADJACENCY_OES=_C('GL_LINE_STRIP_ADJACENCY_OES',0x000B)
GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS_OES=_C('GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS_OES',0x8A32)
GL_MAX_FRAMEBUFFER_LAYERS_OES=_C('GL_MAX_FRAMEBUFFER_LAYERS_OES',0x9317)
GL_MAX_GEOMETRY_ATOMIC_COUNTERS_OES=_C('GL_MAX_GEOMETRY_ATOMIC_COUNTERS_OES',0x92D5)
GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS_OES=_C('GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS_OES',0x92CF)
GL_MAX_GEOMETRY_IMAGE_UNIFORMS_OES=_C('GL_MAX_GEOMETRY_IMAGE_UNIFORMS_OES',0x90CD)
GL_MAX_GEOMETRY_INPUT_COMPONENTS_OES=_C('GL_MAX_GEOMETRY_INPUT_COMPONENTS_OES',0x9123)
GL_MAX_GEOMETRY_OUTPUT_COMPONENTS_OES=_C('GL_MAX_GEOMETRY_OUTPUT_COMPONENTS_OES',0x9124)
GL_MAX_GEOMETRY_OUTPUT_VERTICES_OES=_C('GL_MAX_GEOMETRY_OUTPUT_VERTICES_OES',0x8DE0)
GL_MAX_GEOMETRY_SHADER_INVOCATIONS_OES=_C('GL_MAX_GEOMETRY_SHADER_INVOCATIONS_OES',0x8E5A)
GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS_OES=_C('GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS_OES',0x90D7)
GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_OES=_C('GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_OES',0x8C29)
GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_OES=_C('GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_OES',0x8DE1)
GL_MAX_GEOMETRY_UNIFORM_BLOCKS_OES=_C('GL_MAX_GEOMETRY_UNIFORM_BLOCKS_OES',0x8A2C)
GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_OES=_C('GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_OES',0x8DDF)
GL_PRIMITIVES_GENERATED_OES=_C('GL_PRIMITIVES_GENERATED_OES',0x8C87)
GL_REFERENCED_BY_GEOMETRY_SHADER_OES=_C('GL_REFERENCED_BY_GEOMETRY_SHADER_OES',0x9309)
GL_TRIANGLES_ADJACENCY_OES=_C('GL_TRIANGLES_ADJACENCY_OES',0x000C)
GL_TRIANGLE_STRIP_ADJACENCY_OES=_C('GL_TRIANGLE_STRIP_ADJACENCY_OES',0x000D)
GL_UNDEFINED_VERTEX_OES=_C('GL_UNDEFINED_VERTEX_OES',0x8260)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLuint,_cs.GLint)
def glFramebufferTextureOES(target,attachment,texture,level):pass
|
test/units/test_oci_autonomous_data_warehouse.py | slmjy/oci-ansible-modules | 106 | 12708927 | # Copyright (c) 2018, 2019, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
import pytest
from nose.plugins.skip import SkipTest
import logging
from ansible.modules.cloud.oracle import oci_autonomous_data_warehouse
from ansible.module_utils.oracle import oci_utils, oci_db_utils
import tempfile
import os
try:
import oci
from oci.util import to_dict
from oci.database.models import AutonomousDataWarehouse
from oci.exceptions import ServiceError, ClientError
except ImportError:
raise SkipTest("test_oci_autonomous_data_warehouse.py requires `oci` module")
class FakeModule(object):
def __init__(self, **kwargs):
self.params = kwargs
def fail_json(self, *args, **kwargs):
self.exit_args = args
self.exit_kwargs = kwargs
raise Exception(kwargs["msg"])
def exit_json(self, *args, **kwargs):
self.exit_args = args
self.exit_kwargs = kwargs
@pytest.fixture()
def db_client(mocker):
mock_db_client = mocker.patch("oci.database.database_client.DatabaseClient")
return mock_db_client.return_value
@pytest.fixture()
def check_and_create_resource_patch(mocker):
return mocker.patch.object(oci_utils, "check_and_create_resource")
@pytest.fixture()
def update_autonomous_data_warehouse_patch(mocker):
return mocker.patch.object(
oci_autonomous_data_warehouse, "update_autonomous_data_warehouse"
)
@pytest.fixture()
def check_and_update_resource_patch(mocker):
return mocker.patch.object(oci_utils, "check_and_update_resource")
@pytest.fixture()
def create_and_wait_patch(mocker):
return mocker.patch.object(oci_utils, "create_and_wait")
@pytest.fixture()
def get_existing_resource_patch(mocker):
return mocker.patch.object(oci_utils, "get_existing_resource")
@pytest.fixture()
def delete_and_wait_patch(mocker):
return mocker.patch.object(oci_utils, "delete_and_wait")
@pytest.fixture()
def execute_function_and_wait_patch(mocker):
return mocker.patch.object(oci_db_utils, "execute_function_and_wait")
@pytest.fixture()
def call_with_backoff_patch(mocker):
return mocker.patch.object(oci_utils, "call_with_backoff")
@pytest.fixture()
def write_stream_to_file_patch(mocker):
return mocker.patch.object(oci_db_utils, "write_stream_to_file")
def setUpModule():
logging.basicConfig(
filename="/tmp/oci_ansible_module.log", filemode="a", level=logging.INFO
)
oci_autonomous_data_warehouse.set_logger(logging)
def test_create_or_update_autonomous_data_warehouse_create(
db_client, check_and_create_resource_patch
):
module = get_module()
autonomous_data_warehouse = get_autonomous_data_warehouse()
check_and_create_resource_patch.return_value = {
"autonomous_data_warehouse": to_dict(autonomous_data_warehouse),
"changed": True,
}
result = oci_autonomous_data_warehouse.create_or_update_autonomous_data_warehouse(
db_client, module
)
assert (
result["autonomous_data_warehouse"]["display_name"]
is autonomous_data_warehouse.display_name
)
def test_create_or_update_autonomous_data_warehouse_update(
db_client, update_autonomous_data_warehouse_patch
):
module = get_module(
dict({"autonomous_data_warehouse_id": "ocid1.autonomous_data_warehouse.aaa"})
)
autonomous_data_warehouse = get_autonomous_data_warehouse()
update_autonomous_data_warehouse_patch.return_value = {
"autonomous_data_warehouse": to_dict(autonomous_data_warehouse),
"changed": True,
}
result = oci_autonomous_data_warehouse.create_or_update_autonomous_data_warehouse(
db_client, module
)
assert (
result["autonomous_data_warehouse"]["display_name"]
is autonomous_data_warehouse.display_name
)
def test_create_or_update_autonomous_data_warehouse_client_error(
db_client, check_and_create_resource_patch
):
error_message = "databse attribute has no value"
module = get_module()
check_and_create_resource_patch.side_effect = ClientError(Exception(error_message))
try:
oci_autonomous_data_warehouse.create_or_update_autonomous_data_warehouse(
db_client, module
)
except Exception as ex:
assert error_message in ex.args[0]
def test_create_or_update_autonomous_data_warehouse_service_error(
db_client, check_and_create_resource_patch
):
error_message = "Internal Server Error"
module = get_module()
check_and_create_resource_patch.side_effect = ServiceError(
499, "InternalServerError", dict(), error_message
)
try:
oci_autonomous_data_warehouse.create_or_update_autonomous_data_warehouse(
db_client, module
)
except Exception as ex:
assert error_message in ex.args[0]
def test_create_autonomous_data_warehouse(db_client, create_and_wait_patch):
module = get_module()
autonomous_data_warehouse = get_autonomous_data_warehouse()
create_and_wait_patch.return_value = {
"autonomous_data_warehouse": to_dict(autonomous_data_warehouse),
"changed": True,
}
result = oci_autonomous_data_warehouse.create_autonomous_data_warehouse(
db_client, module
)
assert (
result["autonomous_data_warehouse"]["display_name"]
is autonomous_data_warehouse.display_name
)
def test_update_autonomous_data_warehouse_cpu_core_count(
    db_client, check_and_update_resource_patch, get_existing_resource_patch
):
autonomous_data_warehouse = get_autonomous_data_warehouse()
autonomous_data_warehouse.cpu_core_count = 4
get_existing_resource_patch.return_value = autonomous_data_warehouse
module = get_module(
dict({"autonomous_data_warehouse_id": "ocid1.autonomousdbwarehouse.aaa"})
)
check_and_update_resource_patch.return_value = {
"autonomous_data_warehouse": to_dict(autonomous_data_warehouse),
"changed": True,
}
result = oci_autonomous_data_warehouse.update_autonomous_data_warehouse(
db_client, module
)
assert result["changed"] is True
def test_update_autonomous_data_warehouse_freeform_tags(
    db_client, check_and_update_resource_patch, get_existing_resource_patch
):
autonomous_data_warehouse = get_autonomous_data_warehouse()
get_existing_resource_patch.return_value = autonomous_data_warehouse
module = get_module(
dict(
freeform_tags=dict(system_type="oracledb"),
autonomous_data_warehouse_id="ocid1.autonomousdbwarehouse.aaa",
)
)
check_and_update_resource_patch.return_value = {
"autonomous_data_warehouse": to_dict(autonomous_data_warehouse),
"changed": True,
}
result = oci_autonomous_data_warehouse.update_autonomous_data_warehouse(
db_client, module
)
assert result["changed"] is True
def test_update_autonomous_data_warehouse_defined_tags(
db_client, check_and_update_resource_patch
):
autonomous_data_warehouse = get_autonomous_data_warehouse()
get_existing_resource_patch.return_value = autonomous_data_warehouse
module = get_module(
dict(
defined_tags=dict(system_strength=dict(shape="medium")),
autonomous_data_warehouse_id="ocid1.autonomousdbwarehouse.aaa",
)
)
check_and_update_resource_patch.return_value = {
"autonomous_data_warehouse": to_dict(autonomous_data_warehouse),
"changed": True,
}
result = oci_autonomous_data_warehouse.update_autonomous_data_warehouse(
db_client, module
)
assert result["changed"] is True
def test_delete_db_system(db_client, delete_and_wait_patch):
module = get_module(
dict(autonomous_data_warehouse_id="ocid1.autonomousdatabase.aaa")
)
autonomous_data_warehouse = get_autonomous_data_warehouse()
delete_and_wait_patch.return_value = {
"autonomous_data_warehouse": to_dict(autonomous_data_warehouse),
"changed": True,
}
result = oci_autonomous_data_warehouse.delete_autonomous_data_warehouse(
db_client, module
)
assert (
result["autonomous_data_warehouse"]["display_name"]
is autonomous_data_warehouse.display_name
)
def test_restore_autonomous_data_warehouse(db_client, execute_function_and_wait_patch):
autonomous_data_warehouse = get_autonomous_data_warehouse()
module = get_module()
execute_function_and_wait_patch.return_value = {
"autonomous_data_warehouse": to_dict(autonomous_data_warehouse),
"changed": True,
}
result = oci_autonomous_data_warehouse.restore_autonomous_data_warehouse(
db_client, module
)
assert result["changed"] is True
def test_start_or_stop_autonomous_data_warehouse_start(
db_client, get_existing_resource_patch, execute_function_and_wait_patch
):
autonomous_data_warehouse = get_autonomous_data_warehouse()
autonomous_data_warehouse.lifecycle_state = "STOPPED"
get_existing_resource_patch.return_value = autonomous_data_warehouse
module = get_module(dict(state="start"))
execute_function_and_wait_patch.return_value = {
"autonomous_data_warehouse": to_dict(autonomous_data_warehouse),
"changed": True,
}
result = oci_autonomous_data_warehouse.start_or_stop_autonomous_data_warehouse(
db_client, module
)
assert result["changed"] is True
def test_start_or_stop_autonomous_data_warehouse_start_idempotent(
db_client, get_existing_resource_patch
):
autonomous_data_warehouse = get_autonomous_data_warehouse()
autonomous_data_warehouse.lifecycle_state = "AVAILABLE"
get_existing_resource_patch.return_value = autonomous_data_warehouse
module = get_module(dict(state="start"))
result = oci_autonomous_data_warehouse.start_or_stop_autonomous_data_warehouse(
db_client, module
)
assert result["changed"] is False
def test_start_or_stop_autonomous_data_warehouse_stop(
db_client, get_existing_resource_patch, execute_function_and_wait_patch
):
autonomous_data_warehouse = get_autonomous_data_warehouse()
autonomous_data_warehouse.lifecycle_state = "AVAILABLE"
get_existing_resource_patch.return_value = autonomous_data_warehouse
module = get_module(dict(state="stop"))
execute_function_and_wait_patch.return_value = {
"autonomous_data_warehouse": to_dict(autonomous_data_warehouse),
"changed": True,
}
result = oci_autonomous_data_warehouse.start_or_stop_autonomous_data_warehouse(
db_client, module
)
assert result["changed"] is True
def test_start_or_stop_autonomous_data_warehouse_stop_idempotent(
db_client, get_existing_resource_patch
):
autonomous_data_warehouse = get_autonomous_data_warehouse()
autonomous_data_warehouse.lifecycle_state = "STOPPED"
get_existing_resource_patch.return_value = autonomous_data_warehouse
module = get_module(dict(state="stop"))
result = oci_autonomous_data_warehouse.start_or_stop_autonomous_data_warehouse(
db_client, module
)
assert result["changed"] is False
def test_generate_wallet(
db_client, call_with_backoff_patch, write_stream_to_file_patch
):
call_with_backoff_patch.return_value = get_response(200, None, "test", None)
write_stream_to_file_patch.return_value = True
module = get_module(dict(password="<PASSWORD>", wallet_file="test_wallet_file"))
result = oci_autonomous_data_warehouse.generate_wallet(db_client, module)
assert result["changed"] is True
def test_generate_wallet_no_wallet_file_defined(db_client):
error_message = "Wallet file must be declared"
module = get_module(dict(password="<PASSWORD>"))
try:
oci_autonomous_data_warehouse.generate_wallet(db_client, module)
except Exception as ex:
assert error_message in ex.args[0]
def test_generate_wallet_empty_wallet_file_defined(db_client):
error_message = "Wallet file must be declared"
module = get_module(dict(password="<PASSWORD>", wallet_file=" "))
try:
oci_autonomous_data_warehouse.generate_wallet(db_client, module)
except Exception as ex:
assert error_message in ex.args[0]
def test_generate_wallet_service_error(db_client, call_with_backoff_patch):
error_message = "Internal Server Error"
module = get_module(dict(password="<PASSWORD>", wallet_file="test_wallet_file"))
call_with_backoff_patch.side_effect = ServiceError(
499, "InternalServerError", dict(), error_message
)
try:
oci_autonomous_data_warehouse.generate_wallet(db_client, module)
except Exception as ex:
assert error_message in ex.args[0]
def test_generate_wallet_client_error(db_client, call_with_backoff_patch):
error_message = "Wallet file not valid"
module = get_module(dict(password="<PASSWORD>", wallet_file="test_wallet_file"))
call_with_backoff_patch.side_effect = ClientError(Exception(error_message))
try:
oci_autonomous_data_warehouse.generate_wallet(db_client, module)
except Exception as ex:
assert error_message in ex.args[0]
def get_autonomous_data_warehouse():
autonomous_data_warehouse = AutonomousDataWarehouse()
    autonomous_data_warehouse.display_name = "ansible_autonomous_data_warehouse"
autonomous_data_warehouse.freeform_tags = {"system_type": "exadata"}
autonomous_data_warehouse.defined_tags = {"system_strength": {"shape": "small"}}
return autonomous_data_warehouse
def get_response(status, header, data, request):
return oci.Response(status, header, data, request)
def get_module(additional_properties=None):
params = {
"compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx..qndq",
"admin_password": "<PASSWORD>",
"data_storage_size_in_tbs": 1,
"cpu_core_count": 1,
"db_name": "ansibledbwarehouse",
"display_name": "ansibleautodbwarehouse",
"license_model": "LICENSE_INCLUDED",
"wait": False,
"freeform_tags": {"db_type": "test"},
}
if additional_properties:
params.update(additional_properties)
module = FakeModule(**params)
return module
|
acoular/version.py | ishine/acoular | 294 | 12708944 | # coding=UTF-8
#------------------------------------------------------------------------------
# Copyright (c) 2007-2021, Acoular Development Team.
#------------------------------------------------------------------------------
# separate file to find out about version without importing the acoular lib
__author__ = "Acoular Development Team"
__date__ = "5 May 2021"
__version__ = "21.05"
|
shadowproxy/proxies/socks/server.py | xiaoshihu/shadowproxy | 180 | 12708976 | from ...protocols import socks4, socks5
from ...utils import run_parser_curio
from ..base.server import ProxyBase
class SocksProxy(ProxyBase):
proto = "SOCKS"
def __init__(self, bind_addr, auth=None, via=None, plugin=None, **kwargs):
self.bind_addr = bind_addr
self.auth = auth
self.via = via
self.plugin = plugin
self.kwargs = kwargs
async def _run(self):
socks5_parser = socks5.server.parser(self.auth)
request = await run_parser_curio(socks5_parser, self.client)
self.target_addr = (request.addr.host, request.addr.port)
via_client = await self.connect_server(self.target_addr)
# await self.client.sendall(socks5.resp())
socks5_parser.send_event(0)
await run_parser_curio(socks5_parser, self.client)
async with via_client:
redundant = socks5_parser.readall()
if redundant:
await via_client.sendall(redundant)
await self.relay(via_client)
class Socks4Proxy(ProxyBase):
proto = "SOCKS4"
def __init__(self, bind_addr, auth=None, via=None, plugin=None, **kwargs):
self.bind_addr = bind_addr
self.auth = auth
self.via = via
self.plugin = plugin
self.kwargs = kwargs
async def _run(self):
socks4_parser = socks4.server.parser()
self.target_addr = await run_parser_curio(socks4_parser, self.client)
via_client = await self.connect_server(self.target_addr)
socks4_parser.send_event(0x5A)
await run_parser_curio(socks4_parser, self.client)
async with via_client:
redundant = socks4_parser.readall()
if redundant:
await via_client.sendall(redundant)
await self.relay(via_client)
|
delta/data/task/base_speech_task.py | didichuxing/delta | 1,442 | 12708984 | # Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
''' Base Speech Task'''
from delta import utils
from delta.data import utils as data_utils
from delta.data.task.base_task import WavSpeechTask
#pylint: disable=abstract-method
class SpeechTask(WavSpeechTask):
''' base class for speech task'''
def __init__(self, config, mode):
super().__init__(config)
assert mode in (utils.TRAIN, utils.EVAL, utils.INFER)
self._mode = mode
@property
def mode(self):
return self._mode
#pylint: disable=arguments-differ
def input_fn(self, mode, batch_size, num_epoch=None):
''' estimator input_fn'''
return data_utils.input_fn(self.dataset, mode, batch_size, num_epoch)
|
s3prl/pretrain/distiller/dataset.py | Hem7513/s3prl | 856 | 12709005 | """
Dataset for distiller
Author: <NAME> (https://github.com/vectominist)
"""
import os
import random
import numpy as np
import pandas as pd
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data.dataset import Dataset
import torchaudio
HALF_BATCHSIZE_TIME = 99999
class WaveDataset(Dataset):
"""Waveform dataset for Disiller"""
def __init__(
self,
task_config,
bucket_size,
file_path,
sets,
max_timestep=0,
libri_root=None,
**kwargs
):
super().__init__()
self.task_config = task_config
self.libri_root = libri_root
self.sample_length = task_config["sequence_length"]
if self.sample_length > 0:
print(
"[Dataset] - Sampling random segments for training, sample length:",
self.sample_length,
)
# Read file
self.root = file_path
tables = [pd.read_csv(os.path.join(file_path, s + ".csv")) for s in sets]
self.table = pd.concat(tables, ignore_index=True).sort_values(
by=["length"], ascending=False
)
print("[Dataset] - Training data from these sets:", str(sets))
# Drop seqs that are too long
if max_timestep > 0:
self.table = self.table[self.table.length < max_timestep]
# Drop seqs that are too short
if max_timestep < 0:
self.table = self.table[self.table.length > (-1 * max_timestep)]
X = self.table["file_path"].tolist()
X_lens = self.table["length"].tolist()
self.num_samples = len(X)
print("[Dataset] - Number of individual training instances:", self.num_samples)
# Use bucketing to allow different batch size at run time
self.X = []
batch_x, batch_len = [], []
for x, x_len in zip(X, X_lens):
batch_x.append(x)
batch_len.append(x_len)
# Fill in batch_x until batch is full
if len(batch_x) == bucket_size:
# Half the batch size if seq too long
if (
(bucket_size >= 2)
and (max(batch_len) > HALF_BATCHSIZE_TIME)
and self.sample_length == 0
):
self.X.append(batch_x[: bucket_size // 2])
self.X.append(batch_x[bucket_size // 2 :])
else:
self.X.append(batch_x)
batch_x, batch_len = [], []
# Gather the last batch
if len(batch_x) > 1:
self.X.append(batch_x)
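        # For illustration (assuming sample_length == 0): with bucket_size=4 and
        # utterance lengths [200000, 150000, 90000, 80000, 60000], the first four
        # exceed HALF_BATCHSIZE_TIME on their longest member, so that bucket is
        # split into two half-size batches; the single leftover utterance is
        # dropped, since a trailing bucket needs at least two items.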
def _sample(self, x):
if self.sample_length <= 0:
return x
if len(x) < self.sample_length:
return x
idx = random.randint(0, len(x) - self.sample_length)
return x[idx : idx + self.sample_length]
def __len__(self):
return len(self.X)
def collate_fn(self, items):
items = items[0] # hack bucketing
assert (
len(items) == 4
), "__getitem__ should return (wave_input, wave_orig, wave_len, pad_mask)"
return items
class OnlineWaveDataset(WaveDataset):
"""Online waveform dataset"""
def __init__(
self,
task_config,
bucket_size,
file_path,
sets,
max_timestep=0,
libri_root=None,
target_level=-25,
**kwargs
):
super().__init__(
task_config,
bucket_size,
file_path,
sets,
max_timestep,
libri_root,
**kwargs
)
self.target_level = target_level
def _load_feat(self, feat_path):
if self.libri_root is None:
return torch.FloatTensor(np.load(os.path.join(self.root, feat_path)))
wav, _ = torchaudio.load(os.path.join(self.libri_root, feat_path))
return wav.squeeze() # (seq_len)
def __getitem__(self, index):
# Load acoustic feature and pad
x_batch = [self._sample(self._load_feat(x_file)) for x_file in self.X[index]]
x_lens = [len(x) for x in x_batch]
x_lens = torch.LongTensor(x_lens)
x_pad_batch = pad_sequence(x_batch, batch_first=True)
pad_mask = torch.ones(x_pad_batch.shape) # (batch_size, seq_len)
# zero vectors for padding dimension
for idx in range(x_pad_batch.shape[0]):
pad_mask[idx, x_lens[idx] :] = 0
return [x_pad_batch, x_batch, x_lens, pad_mask]
|
dataloader/dataset/SKU110K-R/rotate_augment.py | PauliKarl/RotationDetection | 850 | 12709026 | # -*- coding:utf-8 -*-
import os
import sys
from multiprocessing import Pool
import cv2
import imgaug as ia
from imgaug import augmenters as iaa
import numpy as np
import PIL
from PIL import Image
PIL.Image.MAX_IMAGE_PIXELS = 200000000
process_num = 64
ia.seed(1)
def preprocess_handler(img_name, img_dir, rot_list, out_img_dir='/data/dataset/SKU110K/SKU110K-R/images'):
img_path = os.path.join(img_dir, img_name)
try:
img = Image.open(img_path).convert('RGB')
img = np.array(img)
except:
try:
img = cv2.imread(img_path)
except:
print(img_path)
for ang in rot_list:
seq = iaa.Sequential([
iaa.Affine(
rotate=ang,
fit_output=True
)
])
seq_det = seq.to_deterministic()
image_aug = seq_det.augment_images([img])[0]
out_img_name = 'rotate_aug_{}_'.format(str(ang))
out_img_name = out_img_name + img_name
if out_img_dir is None:
out_dir = os.path.join(img_dir, out_img_name)
else:
out_dir = os.path.join(out_img_dir, out_img_name)
cv2.imwrite(out_dir, image_aug, [int(cv2.IMWRITE_JPEG_QUALITY), 81])
def main(img_dir):
rotate_angle_list = [-45, -30, -15, 15, 30, 45]
p = Pool(process_num)
for img_name in os.listdir(img_dir):
p.apply_async(preprocess_handler, args=(img_name, img_dir, rotate_angle_list))
p.close()
p.join()
if __name__ == '__main__':
root_img_dir = sys.argv[1]
main(root_img_dir) |
tests/test_galton_dataset.py | mtreviso/lxmls-toolkit | 183 | 12709034 | import sys
import os
import pytest
from numpy import array, array_equal, allclose
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from lxmls.readers import galton
tolerance = 1e-5
@pytest.fixture(scope='module')
def galton_data():
return galton.load()
def test_galton_data(galton_data):
mean = galton_data.mean(0)
expected_mean = array([68.30818966, 68.08846983])
assert allclose(mean, expected_mean, tolerance)
std = galton_data.std(0)
expected_std = array([1.78637014, 2.51658435])
assert allclose(std, expected_std, tolerance)
n, bins, _ = plt.hist(galton_data)
expected_n = [array([ 0., 14., 23., 66., 289., 219., 183., 68., 43., 23.]), array([ 12., 32., 107., 117., 138., 120., 167., 163., 41., 31.])]
expected_bins = array([61.7, 62.9, 64.1, 65.3, 66.5, 67.7, 68.9, 70.1, 71.3, 72.5, 73.7])
assert allclose(n, expected_n, tolerance)
assert allclose(bins, expected_bins, tolerance)
if __name__ == '__main__':
pytest.main([__file__])
|
src/aprl/visualize/make_videos.py | fkamrani/adversarial-policies | 211 | 12709047 | <reponame>fkamrani/adversarial-policies<filename>src/aprl/visualize/make_videos.py<gh_stars>100-1000
"""Generate videos for adversaries and standard baselines."""
import logging
import os
import os.path as osp
from sacred import Experiment
from sacred.observers import FileStorageObserver
from aprl.common.utils import make_timestamp
from aprl.configs import DATA_LOCATION
from aprl.multi.score import extract_data, run_external
from aprl.visualize import util
make_videos_ex = Experiment("make_videos")
make_videos_logger = logging.getLogger("make_videos")
@make_videos_ex.config
def default_config():
adversary_path = osp.join(
DATA_LOCATION, "multi_train", "paper", "highest_win_policies_and_rates.json"
)
ray_upload_dir = "data" # where Ray will upload multi.score outputs. 'data' works on baremetal
score_configs = [("normal",), ("normal", "mask_observations_of_victim")]
multi_score = {}
root_dir = "data/videos"
exp_name = "default"
_ = locals() # quieten flake8 unused variable warning
del _
@make_videos_ex.named_config
def defense_config():
score_configs = [("defenses",), ("defenses", "mask_observations_of_victim")]
exp_name = "defense"
_ = locals() # quieten flake8 unused variable warning
del _
@make_videos_ex.named_config
def slides_config():
"""Generate a subset of videos, with tighter-cropped camera.
Intended for slideshows/demos."""
score_configs = [("summary",), ("summary", "mask_observations_of_victim")]
multi_score = {
"score": {
"video_params": {"annotation_params": {"camera_config": "close", "short_labels": True}}
}
}
exp_name = "slides"
_ = locals() # quieten flake8 unused variable warning
del _
LOW_RES = {
"score": {"video_params": {"annotation_params": {"resolution": (640, 480), "font_size": 24}}}
}
@make_videos_ex.named_config
def low_res():
multi_score = LOW_RES # noqa: F841
@make_videos_ex.named_config
def debug_config():
score_configs = [
("debug_one_each_type",),
("debug_one_each_type", "mask_observations_of_victim"),
]
multi_score = dict(LOW_RES)
multi_score["score"]["episodes"] = 2
exp_name = "debug"
_ = locals() # quieten flake8 unused variable warning
del _
@make_videos_ex.capture
def generate_videos(score_configs, multi_score, adversary_path):
"""Uses multi.score to generate videos."""
return run_external(
score_configs,
post_named_configs=["video"],
config_updates=multi_score,
adversary_path=adversary_path,
)
@make_videos_ex.capture
def extract_videos(out_dir, video_dirs, ray_upload_dir):
def path_generator(
trial_root,
env_sanitized,
victim_index,
victim_type,
victim_path,
opponent_type,
opponent_path,
cfg,
):
src_path = osp.join(
trial_root, "data", "sacred", "score", "1", "videos", "env_0_episode_0_recording.mp4"
)
victim_suffix = ""
opponent_suffix = ""
mask_index = cfg["mask_agent_index"]
if mask_index is not None:
if mask_index == victim_index:
victim_suffix = "M"
else:
                opponent_suffix = "M"
victim = util.abbreviate_agent_config(
cfg["env_name"], victim_type, victim_path, victim_suffix, victim=True
)
opponent = util.abbreviate_agent_config(
cfg["env_name"], opponent_type, opponent_path, opponent_suffix, victim=False
)
new_name = f"{env_sanitized}_victim_{victim}_opponent_{opponent}"
return src_path, new_name, "mp4"
return extract_data(path_generator, out_dir, video_dirs, ray_upload_dir)
@make_videos_ex.main
def make_videos(root_dir, exp_name):
out_dir = osp.join(root_dir, exp_name, make_timestamp())
os.makedirs(out_dir)
video_dirs = generate_videos()
extract_videos(out_dir=out_dir, video_dirs=video_dirs)
def main():
observer = FileStorageObserver(osp.join("data", "sacred", "make_videos"))
make_videos_ex.observers.append(observer)
make_videos_ex.run_commandline()
make_videos_logger.info("Sacred run completed, files stored at {}".format(observer.dir))
if __name__ == "__main__":
main()
|
angr/engines/pcode/arch/ArchPcode_SuperH_BE_32_SH_1.py | matthewpruett/angr | 6,132 | 12709052 | <reponame>matthewpruett/angr<gh_stars>1000+
###
### This file was automatically generated
###
from archinfo.arch import register_arch, Endness, Register
from .common import ArchPcode
class ArchPcode_SuperH_BE_32_SH_1(ArchPcode):
name = 'SuperH:BE:32:SH-1'
pcode_arch = 'SuperH:BE:32:SH-1'
description = 'SuperH SH-1 processor 32-bit big-endian'
bits = 32
ip_offset = 0x118
sp_offset = 0x3c
bp_offset = sp_offset
instruction_endness = Endness.BE
register_list = [
Register('r0', 4, 0x0),
Register('r1', 4, 0x4),
Register('r2', 4, 0x8),
Register('r3', 4, 0xc),
Register('r4', 4, 0x10),
Register('r5', 4, 0x14),
Register('r6', 4, 0x18),
Register('r7', 4, 0x1c),
Register('r8', 4, 0x20),
Register('r9', 4, 0x24),
Register('r10', 4, 0x28),
Register('r11', 4, 0x2c),
Register('r12', 4, 0x30),
Register('r13', 4, 0x34),
Register('r14', 4, 0x38),
Register('r15', 4, 0x3c),
Register('sr', 4, 0x100),
Register('gbr', 4, 0x104),
Register('vbr', 4, 0x108),
Register('mach', 4, 0x10c),
Register('macl', 4, 0x110),
Register('pr', 4, 0x114),
Register('pc', 4, 0x118, alias_names=('ip',))
]
register_arch(['superh:be:32:sh-1'], 32, Endness.BE, ArchPcode_SuperH_BE_32_SH_1)
|
ops.py | carrenD/cross-modality-UDA | 211 | 12709063 | import tensorflow as tf
import pdb
def _phase_shift(I, r, batch_size = 10):
# Helper function with main phase shift operation
# pdb.set_trace()
_, a, b, c = I.get_shape().as_list()
X = tf.reshape(I, (batch_size, a, b, r, r))
    X = tf.transpose(X, (0, 1, 2, 4, 3)) # bsize, a, b, r, r
X = tf.split(X, a, 1) # a, [bsize, b, r, r]
X = tf.concat([tf.squeeze(x) for x in X], 2) # bsize, b, a*r, r
if batch_size == 1:
X = tf.expand_dims( X, 0 )
X = tf.split(X, b, 1) # b, [bsize, a*r, r]
if batch_size == 1:
X = tf.concat([x for x in X], 2 )
else:
X = tf.concat([tf.squeeze(x) for x in X], 2) #
out = tf.reshape(X, (batch_size, a*r, b*r, 1))
if batch_size == 1:
out = tf.transpose( out, (0,2,1,3) )
return out
def PS(X, r, n_channel = 8, batch_size = 10):
    # Main OP that you can arbitrarily use in your tensorflow code
Xc = tf.split(X, n_channel, -1 )
X = tf.concat([_phase_shift(x, r, batch_size) for x in Xc], 3)
return X
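# Usage sketch (shapes are assumptions based on the ops above): for x of shape
# (batch_size, h, w, n_channel * r * r), PS(x, r, n_channel, batch_size) returns
# the sub-pixel upscaled tensor of shape (batch_size, h * r, w * r, n_channel).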
|
Python3/468.py | rakhi2001/ecom7 | 854 | 12709080 | <reponame>rakhi2001/ecom7
__________________________________________________________________________________________________
sample 24 ms submission
class Solution:
def validIPAddress(self, IP: str) -> str:
def checkIPv4(IP):
IPList = IP.split('.')
if len(IPList) != 4:
return "Neither"
for part in IPList:
if not part:
return "Neither"
if part[0] == '0':
if len(part) > 1:
return "Neither"
elif not part.isdigit() or int(part) > 255:
return "Neither"
return "IPv4"
def checkIPv6(IP):
IPList = IP.split(":")
if len(IPList) != 8:
return "Neither"
for part in IPList:
if not part or len(part) > 4:
return "Neither"
for c in part:
if not c.isdigit() and (c not in "abcdefABCDEF"):
return "Neither"
return "IPv6"
if "." in IP:
return checkIPv4(IP)
elif ":" in IP:
return checkIPv6(IP)
else:
return "Neither"
__________________________________________________________________________________________________
sample 13024 kb submission
class Solution:
def __init__(self):
self.characters = {'a', 'b', 'c', 'd', 'e', 'f',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
def _ipv4(self, ip):
ip = ip.split('.')
for x in ip:
if x.startswith('0') and len(x) > 1:
return "Neither"
try:
x_int = int(x)
except:
return "Neither"
if not (x_int >= 0 and x_int <= 255):
return "Neither"
return "IPv4"
def _ipv6(self, ip):
ip = ip.split(':')
for x in ip:
if len(x) > 4 or len(x) == 0:
return "Neither"
s = {y for y in x if y.lower() not in self.characters}
if s:
return "Neither"
return 'IPv6'
def validIPAddress(self, IP: str) -> str:
if len(IP) > 39:
return "Neither"
c = collections.Counter(IP)
if '-' in c:
return "Neither"
if '.' in c and c['.'] == 3:
return self._ipv4(IP)
elif ':' in c and c[':'] == 7:
return self._ipv6(IP)
return "Neither"
__________________________________________________________________________________________________
|
envi/tests/msp430/isetc.py | rnui2k/vivisect | 716 | 12709085 | from envi.archs.msp430.regs import *
checks = [
# SETC
(
'SETC',
{ 'regs': [], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "12d3", 'data': "" },
{ 'regs': [], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 1), (SR_V, 0)], 'code': "12d3", 'data': "" }
),
]
|
release/stubs.min/Tekla/Structures/ModelInternal_parts/dotRebarEndDetailStrip_t.py | htlcnn/ironpython-stubs | 182 | 12709108 | <reponame>htlcnn/ironpython-stubs
class dotRebarEndDetailStrip_t(object):
# no doc
RebarHookData=None
RebarStrip=None
RebarThreading=None
|
algorithms/happy-ladybugs.py | gajubadge11/HackerRank-1 | 340 | 12709129 | <gh_stars>100-1000
#!/bin/python3
import os
import sys
from collections import Counter
#
# Complete the happyLadybugs function below.
#
def is_happy(b):
if b[0] != b[1] or b[-1] != b[-2]:
return False
for ind in range(1, len(b)-1):
if b[ind] != b[ind-1] and b[ind] != b[ind+1]:
return False
return True
def happyLadybugs(b):
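    # A board can be made happy iff it has at least one empty cell ('_') and no
    # color appears exactly once; without an empty cell it must already be happy.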
cnt = Counter(b)
print("cnt = {}".format(cnt))
singles = list(filter(lambda x: x[0] != '_' and x[1] == 1, cnt.items()))
empty = b.count('_')
print("singles = {}".format(singles))
print("empty = {}".format(empty))
if len(singles) == 0 and empty > 0:
return 'YES'
    elif len(b) > 1 and is_happy(b):
return 'YES'
else:
return 'NO'
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
g = int(input())
for g_itr in range(g):
n = int(input())
b = input()
result = happyLadybugs(b)
fptr.write(result + '\n')
fptr.close()
|
metrics/webqsp/evaluator.py | HKUNLP/UnifiedSKG | 191 | 12709182 | <reponame>HKUNLP/UnifiedSKG
# encoding=utf8
from collections import defaultdict
from metrics.webqsp.utils import *
from rdflib.graph import Graph
kg_files = [f"third_party/webqsp/freebase150k_part{i}.txt" for i in range(3)]
kg_str = "".join([open(f).read() for f in kg_files])
g = Graph()
g.parse(data=kg_str, format="nt")
def execute_sparql(sparql):
try:
qres = g.query(sparql)
answers = [str(a[0]) for a in qres]
return answers
except:
return []
def compute_answers_F1(pred, gold):
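    # Translate the predicted s-expression to SPARQL, run it against the local
    # Freebase subset loaded above, and score the returned entities against the
    # gold answer set with F1.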
ans_ents = [e[0] for e in gold['answers']]
try:
sparql = lisp_to_sparql(pred)
pred_ents = execute_sparql(sparql)
except:
pred_ents = []
tp = len([p for p in pred_ents if p in ans_ents])
P = tp / len(pred_ents) if len(pred_ents) else 0
R = tp / len(ans_ents) if len(ans_ents) else 0
F1 = 2 * (P * R) / (P + R) if (P + R) else 0
return F1
class EvaluateTool(object):
def __init__(self, args):
self.args = args
def evaluate(self, preds, golds, section):
eval_dict = defaultdict(float)
for pred, gold in zip(preds, golds):
eval_dict["F1"] += compute_answers_F1(pred, gold)
for key in eval_dict:
# print (key, eval_dict[key], '/', len(golds))
eval_dict[key] = eval_dict[key] / len(golds) if len(golds) else 0
return eval_dict
|
tests/test_dummy.py | gabrielDonnantuoni/drf-nested-routers | 1,254 | 12709185 | from django.test import TestCase
class TestDummy(TestCase):
def test_one_plus_one(self):
assert 1 + 1 == 2
|
library/oci_volume_group_facts.py | slmjy/oci-ansible-modules | 106 | 12709190 | <reponame>slmjy/oci-ansible-modules
#!/usr/bin/python
# Copyright (c) 2018, 2019, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_volume_group_facts
short_description: Retrieve facts of volume groups in OCI Block Volume service
description:
- This module retrieves information of a specified volume group or all the volume groups in a specified compartment.
version_added: "2.5"
options:
availability_domain:
description: The name of the Availability Domain.
required: false
compartment_id:
description: The OCID of the compartment. Required to get information of all the volume groups in a specified
compartment.
required: false
volume_group_id:
description: The OCID of the volume group. Required to get information of the specified volume group.
required: false
aliases: [ 'id' ]
lifecycle_state:
description: A filter to only return resources that match the given lifecycle state. The state value is
case-insensitive. Allowed values are "PROVISIONING", "RESTORING", "AVAILABLE", "TERMINATING",
"TERMINATED", "FAULTY".
required: false
choices: ["PROVISIONING", "RESTORING", "AVAILABLE", "TERMINATING", "TERMINATED", "FAULTY"]
author: "<NAME> (@rohitChaware)"
extends_documentation_fragment: [ oracle, oracle_display_name_option ]
"""
EXAMPLES = """
- name: Get information of all the volume groups for a specific availability domain & compartment_id
oci_volume_group_facts:
availability_domain: BnQb:PHX-AD-1
compartment_id: ocid1.compartment.oc1..xxxxxEXAMPLExxxxx
- name: Get information of a volume group
oci_volume_group_facts:
volume_group_id: ocid1.volumegroup.oc1.phx.xxxxxEXAMPLExxxxx
"""
RETURN = """
volume_groups:
description: List of volume group information
returned: On success
type: complex
contains:
availability_domain:
description: The Availability Domain of the volume group.
returned: always
type: string
sample: IwGV:US-ASHBURN-AD-2
compartment_id:
description: The OCID of the compartment that contains the volume group.
returned: always
type: string
sample: ocid1.compartment.oc1..xxxxxEXAMPLExxxxx
defined_tags:
description: Defined tags for this resource. Each key is predefined and scoped to a namespace.
returned: always
type: string
sample: {"Operations": {"CostCenter": "42"}}
display_name:
description: Name of the volume group.
returned: always
type: string
sample: ansible_test_volume
freeform_tags:
description: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name,
type, or namespace.
returned: always
type: string
sample: {"Department": "Finance"}
id:
description: The OCID of the volume group.
returned: always
type: string
sample: ocid1.volumegroup.oc1.iad.xxxxxEXAMPLExxxxx
is_hydrated:
description: Specifies whether the newly created cloned volume group's data has finished copying from the
source volume group or backup.
returned: always
type: bool
sample: False
lifecycle_state:
description: The current state of a volume group.
returned: always
type: string
sample: PROVISIONING
size_in_gbs:
description: The aggregate size of the volume group in GBs.
returned: always
type: int
sample: 50
size_in_mbs:
description: The aggregate size of the volume group in MBs.
returned: always
type: int
sample: 51200
source_details:
description: The volume group source. The source is either another list of volume IDs in the same
availability domain, another volume group, or a volume group backup.
returned: always
type: dict
contains:
id:
description: The OCID of the volume group to clone from or OCIDs for the volumes in this volume
group or OCID of the volume group backup to restore from.
returned: always
type: string
sample: ocid1.volumegroupbackup.oc1.iad.xxxxxEXAMPLExxxxx
type:
description: Type of volume group source either 'volumeGroupBackupId' or 'volumeGroupId' or
'volumeIds'.
returned: always
type: string
sample: volumeGroupBackupId
sample: {
"id": "ocid1.volumegroupbackup.oc1.iad.xxxxxEXAMPLExxxxx",
"type": "volumeGroupBackupId"
}
time_created:
description: The date and time the volume group was created. Format defined by RFC3339.
returned: always
type: datetime
sample: 2017-11-22T19:40:08.871000+00:00
volume_ids:
description: OCIDs for the volumes in this volume group.
returned: always
type: datetime
sample: ['ocid1.volume.oc1.iad.xxxxxEXAMPLExxxxx']
sample: [{
"availability_domain": "IwGV:US-ASHBURN-AD-2",
"compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
"display_name": "ansible_test_volume_group",
"id": "ocid1.volumegroup.oc1.iad.xxxxxEXAMPLExxxxx",
"is_hydrated": true,
"lifecycle_state": "AVAILABLE",
"size_in_gbs": 50,
"size_in_mbs": 51200,
"source_details": {
"id": "ocid1.volumegroupbackup.oc1.iad.xxxxxEXAMPLExxxxx",
"type": "volumeGroupBackupId"
},
"time_created": "2017-12-05T15:35:28.747000+00:00",
"volume_ids": ['ocid1.volume.oc1.iad.xxxxxEXAMPLExxxxx']
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils
try:
from oci.core.blockstorage_client import BlockstorageClient
from oci.util import to_dict
from oci.exceptions import ServiceError
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
def main():
module_args = oci_utils.get_facts_module_arg_spec()
module_args.update(
dict(
availability_domain=dict(type="str", required=False),
compartment_id=dict(type="str", required=False),
lifecycle_state=dict(
type="str",
required=False,
choices=[
"PROVISIONING",
"RESTORING",
"AVAILABLE",
"TERMINATING",
"TERMINATED",
"FAULTY",
],
),
volume_group_id=dict(type="str", required=False, aliases=["id"]),
)
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=False,
required_one_of=[["compartment_id", "volume_group_id"]],
)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
block_storage_client = oci_utils.create_service_client(module, BlockstorageClient)
volume_group_id = module.params["volume_group_id"]
try:
if volume_group_id is not None:
result = [
to_dict(
oci_utils.call_with_backoff(
block_storage_client.get_volume_group,
volume_group_id=volume_group_id,
).data
)
]
else:
compartment_id = module.params["compartment_id"]
optional_list_method_params = [
"display_name",
"lifecycle_state",
"availability_domain",
]
optional_kwargs = dict(
(param, module.params[param])
for param in optional_list_method_params
if module.params.get(param) is not None
)
result = to_dict(
oci_utils.list_all_resources(
block_storage_client.list_volume_groups,
compartment_id=compartment_id,
**optional_kwargs
)
)
except ServiceError as ex:
module.fail_json(msg=ex.message)
module.exit_json(volume_groups=result)
if __name__ == "__main__":
main()
|
lona/__init__.py | fscherf/lona | 155 | 12709195 | <filename>lona/__init__.py
from .exceptions import * # NOQA: F403
from .routing import MATCH_ALL, Route
from .errors import * # NOQA: F403
from .view import LonaView
from .app import LonaApp
VERSION = (1, 8, 5)
VERSION_STRING = '.'.join(str(i) for i in VERSION)
|
torchbenchmark/models/tacotron2/utils.py | Chillee/benchmark | 384 | 12709223 | <filename>torchbenchmark/models/tacotron2/utils.py
import numpy as np
from scipy.io.wavfile import read
import torch
from pathlib import Path
def get_mask_from_lengths(lengths):
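    # Boolean mask of shape (batch, max_len), e.g. lengths=[3, 1] gives
    # [[True, True, True], [True, False, False]].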
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len, device=lengths.device)
mask = (ids < lengths.unsqueeze(1)).bool()
return mask
def load_wav_to_torch(full_path):
sampling_rate, data = read(full_path)
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
def load_filepaths_and_text(filename, split="|"):
root = str(Path(__file__).parent)
with open(filename, encoding='utf-8') as f:
filepaths_and_text = []
for line in f:
filename, *text = line.strip().split(split)
filename = f'{root}/{filename}'
filepaths_and_text.append((filename, *text))
return filepaths_and_text
def to_gpu(x):
x = x.contiguous()
if torch.cuda.is_available():
x = x.cuda(non_blocking=True)
return torch.autograd.Variable(x)
|
samples/client/petstore/python_disallowAdditionalPropertiesIfNotPresent/test/test_dog.py | JigarJoshi/openapi-generator | 11,868 | 12709236 | <filename>samples/client/petstore/python_disallowAdditionalPropertiesIfNotPresent/test/test_dog.py
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import sys
import unittest
import petstore_api
try:
from petstore_api.model import animal
except ImportError:
animal = sys.modules[
'petstore_api.model.animal']
try:
from petstore_api.model import dog_all_of
except ImportError:
dog_all_of = sys.modules[
'petstore_api.model.dog_all_of']
from petstore_api.model.dog import Dog
class TestDog(unittest.TestCase):
"""Dog unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDog(self):
"""Test Dog
        This will fail because additional_properties_type is None in Animal and it must be defined as any type
        to allow the property breed, which is not defined in Animal but is defined in Dog
"""
# make an instance of dog, a composed schema model
class_name = 'Dog'
color = 'white'
breed = '<NAME>'
with self.assertRaises(petstore_api.exceptions.ApiValueError):
dog = Dog(
class_name=class_name,
color=color,
breed=breed
)
if __name__ == '__main__':
unittest.main()
|
cup/log.py | liu0208xuan/CUP | 900 | 12709255 | <reponame>liu0208xuan/CUP<filename>cup/log.py
#!/usr/bin/python
# -*- coding: utf-8 -*
# Copyright: [CUP] - See LICENSE for details.
# Authors: <NAME> (@mythmgn),
"""
:description:
common log related module
"""
from __future__ import print_function
__all__ = [
'debug', 'info', 'warn', 'critical',
'init_comlog', 'setloglevel', 'ROTATION', 'INFINITE',
'reinit_comlog', 'parse',
'backtrace_info', 'backtrace_debug', 'backtrace_error',
'backtrace_critical',
'info_if', 'debug_if', 'warn_if', 'error_if', 'critical_if',
# x* functions are for loggers other than logging.root (the global logger)
'xinit_comlog', 'xdebug', 'xinfo', 'xwarn', 'xerror', 'xcritical'
]
import os
import re
import sys
import time
import uuid
import logging
from logging import handlers
import threading
import collections
import cup
from cup import err
from cup import platforms
ROTATION = 0
INFINITE = 1
ROTATION_COUNTS = 30
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
ERROR = logging.ERROR
CRITICAL = logging.CRITICAL
G_INITED_LOGGER = []
# pylint:disable=C0103
info = logging.info
warn = logging.warn
try:
warn = logging.warning
except Exception:
pass
error = logging.error
debug = logging.debug
critical = logging.critical
LoggerParams = collections.namedtuple('LoggerParams', [
'loglevel', # one of logging.INFO logging.DEBUG logging.xxx levels
'logfile', # valid logfile position, e.g. /home/test/test.log
'logtype', # log.ROTATION log.INFINITE
'maxlogsize', # logsize for one log, in bytes
'bprint_console', # True, logger will printn to stdout, False, otherwise
'gen_wf' # True/False, generate log lines with level >= WARNNING
])
class _Singleton(object): # pylint: disable=R0903
"""
    internal use for logging. Plz use @Singleton in cup.decorators
"""
_LOCK = threading.Lock()
def __init__(self, cls):
self.__instance = None
self.__cls = cls
def __call__(self, *args, **kwargs):
self._LOCK.acquire()
if self.__instance is None:
self.__instance = self.__cls(*args, **kwargs)
self._LOCK.release()
return self.__instance
# pylint: disable=R0903
class _MsgFilter(logging.Filter):
"""
Msg filters by log levels
"""
def __init__(self, msg_level=logging.WARNING):
self.msg_level = msg_level
def filter(self, record):
if record.levelno >= self.msg_level:
return False
else:
return True
class LogInitializer(object):
"""
    default log initializer
"""
# def config_filelogger(self,
# logger, loglevel, strlogfile, logtype,
# maxsize, bprint_console, gen_wf=False
# ): # too many arg pylint: disable=R0913
@classmethod
def setup_filelogger(cls, logger, logparams):
"""
config logger
"""
loglevel = logparams.loglevel
strlogfile = logparams.logfile
logtype = logparams.logtype
maxsize = logparams.maxlogsize
bprint_console = logparams.bprint_console
gen_wf = logparams.gen_wf
if not os.path.exists(strlogfile):
try:
if platforms.is_linux():
os.mknod(strlogfile)
else:
with open(strlogfile, 'w+') as fhandle:
fhandle.write('\n')
except IOError:
raise err.LoggerException(
'logfile does not exist. '
'try to create it. but file creation failed'
)
logger.setLevel(loglevel)
# '%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s'
tznum = time.strftime('%z')
tzkey = time.strftime('%Z')
formatter = logging.Formatter(
fmt='%(levelname)s:\t %(asctime)s {0}({1}) * '
'[%(process)d:%(thread)x] [%(filename)s:%(lineno)s] %(message)s'
.format(tznum, tzkey)
)
if bprint_console:
info('bprint_console enabled, will print to stdout')
streamhandler = logging.StreamHandler()
streamhandler.setLevel(loglevel)
streamhandler.setFormatter(formatter)
logger.addHandler(streamhandler)
fdhandler = None
if logtype == ROTATION:
fdhandler = handlers.RotatingFileHandler(
strlogfile, 'a', maxsize, ROTATION_COUNTS, encoding='utf-8'
)
else:
fdhandler = logging.FileHandler(
strlogfile, 'a', encoding='utf-8'
)
fdhandler.setFormatter(formatter)
fdhandler.setLevel(loglevel)
if gen_wf:
# add .wf handler
file_wf = str(strlogfile) + '.wf'
warn_handler = logging.FileHandler(file_wf, 'a', encoding='utf-8')
warn_handler.setLevel(logging.WARNING)
warn_handler.setFormatter(formatter)
logger.addHandler(warn_handler)
fdhandler.addFilter(_MsgFilter(logging.WARNING))
logger.addHandler(fdhandler)
@classmethod
def proc_thd_id(cls):
"""return proc thread id"""
return '{0}:{1}'.format(
os.getpid(), threading.current_thread().ident
)
@classmethod
def get_codeline(cls, back=0):
"""get code line"""
return sys._getframe(back + 1).f_lineno # traceback pylint:disable=W0212
@classmethod
def get_codefile(cls, back=0):
"""
get code file
"""
# pylint: disable=W0212
# to get code filename
return os.path.basename(sys._getframe(back + 1).f_code.co_filename)
@classmethod
def log_file_func_info(cls, msg, back_trace_len=0):
"""return log traceback info"""
tempmsg = ' * [%s] [%s:%s] ' % (
cls.proc_thd_id(), cls.get_codefile(2 + back_trace_len),
cls.get_codeline(2 + back_trace_len)
)
msg = '{0}{1}'.format(tempmsg, msg)
if platforms.is_py2():
if isinstance(msg, unicode):
return msg
return msg.decode('utf8')
return msg
# pylint: disable=R0903
@_Singleton
class _RootLogerMan(object):
_instance = None
_rootlogger = None
_b_rotation = False
_logfile = ''
_logtype = ROTATION
_loggername = None
def __init__(self):
pass
def get_rootlogger(self):
"""
get default(root) logger
"""
if self._rootlogger is None:
raise err.LoggerException(
                'The Cup logger has not been initialized yet. '
'Call init_comlog first'
)
return self._rootlogger
def set_rootlogger(self, loggername, logger):
"""
set default(root) logger with a new loggername
"""
if self._rootlogger is not None:
raise err.LoggerException(
"""WARNING!!! The cup logger has been initalized already\
.Plz do NOT init_comlog twice""")
self._rootlogger = logger
self._loggername = loggername
def reset_rootlogger(self, logger):
"""reset root logger"""
global G_INITED_LOGGER
tmplogger = self._rootlogger
while len(tmplogger.handlers) > 0:
tmplogger.removeHandler(tmplogger.handlers[0])
del tmplogger
self._rootlogger = logger
logging.root = logger
def is_initalized(self):
"""
Initialized or not
"""
if self._rootlogger is None:
return False
return True
# too many arg pylint: disable=R0913
def init_comlog(loggername, loglevel=logging.INFO, logfile='cup.log',
logtype=ROTATION, maxlogsize=1073741824, bprint_console=False,
gen_wf=False):
"""
Initialize your default logger
:param loggername:
Unique logger name for default logging.
:param loglevel:
4 default levels: log.DEBUG log.INFO log.ERROR log.CRITICAL
:param logfile:
log file. Will try to create it if no existence
:param logtype:
        Two type candidates: log.ROTATION and log.INFINITE
        log.ROTATION will let the logfile switch to a new one (30 files at most).
        When the logger reaches the 30th logfile, it will overwrite from the
        oldest to the most recent.
        log.INFINITE will write on the logfile infinitely
    :param maxlogsize:
        maximum log size in bytes
    :param bprint_console:
        print to stdout or not?
    :param gen_wf:
        print log messages with level >= WARNING to file (${logfile}.wf)
*E.g.*
::
import logging
from cup import log
log.init_comlog(
'test',
log.DEBUG,
'/home/work/test/test.log',
log.ROTATION,
1024,
False
)
log.info('test xxx')
log.critical('test critical')
"""
loggerman = _RootLogerMan()
rootloger = logging.getLogger()
if not loggerman.is_initalized():
loggerman.set_rootlogger(loggername, rootloger)
if os.path.exists(logfile) is False:
if platforms.is_linux():
os.mknod(logfile)
else:
with open(logfile, 'w+') as fhandle:
fhandle.write('\n')
elif os.path.isfile(logfile) is False:
raise err.LoggerException(
'The log file exists. But it\'s not regular file'
)
loggerparams = LoggerParams(
loglevel, logfile, logtype, maxlogsize, bprint_console, gen_wf
)
LogInitializer.setup_filelogger(rootloger, loggerparams)
info('-' * 20 + 'Log Initialized Successfully' + '-' * 20)
global G_INITED_LOGGER
G_INITED_LOGGER.append(loggername)
else:
        print('[{0}:{1}] init_comlog has already been initialized'.format(
LogInitializer.get_codefile(1), LogInitializer.get_codeline(1)
))
return
# too many arg pylint: disable=R0913
def reinit_comlog(loggername, loglevel=logging.INFO, logfile='cup.log',
logtype=ROTATION, maxlogsize=1073741824,
bprint_console=False, gen_wf=False):
"""
reinitialize default root logger for cup logging
:param loggername:
logger name, should be different from the original one
"""
global G_INITED_LOGGER
if loggername in G_INITED_LOGGER:
        msg = ('loggername:{0} has already been used!!! Change a name'.format(
loggername))
raise ValueError(msg)
G_INITED_LOGGER.append(loggername)
tmplogger = logging.getLogger(loggername)
if os.path.exists(logfile) is False:
if platforms.is_linux():
os.mknod(logfile)
else:
with open(logfile, 'w+') as fhandle:
fhandle.write('\n')
elif os.path.isfile(logfile) is False:
raise err.LoggerException(
'The log file exists. But it\'s not regular file'
)
loggerman = _RootLogerMan()
loggerparms = LoggerParams(
loglevel, logfile, logtype, maxlogsize, bprint_console, gen_wf
)
LogInitializer.setup_filelogger(tmplogger, loggerparms)
loggerman.reset_rootlogger(tmplogger)
info('-' * 20 + 'Log Reinitialized Successfully' + '-' * 20)
return
def _fail_handle(msg, e):
if platforms.is_py2():
if not isinstance(msg, unicode):
msg = msg.decode('utf8')
print('{0}\nerror:{1}'.format(msg, e))
elif platforms.is_py3():
print('{0}\nerror:{1}'.format(msg, e))
def backtrace_info(msg, back_trace_len=0):
"""
info with backtrace support
"""
try:
msg = LogInitializer.log_file_func_info(msg, back_trace_len)
loggerman = _RootLogerMan()
loggerman.get_rootlogger().info(msg)
except err.LoggerException:
return
except Exception as e:
_fail_handle(msg, e)
def backtrace_debug(msg, back_trace_len=0):
"""
debug with backtrace support
"""
try:
msg = LogInitializer.log_file_func_info(msg, back_trace_len)
loggerman = _RootLogerMan()
loggerman.get_rootlogger().debug(msg)
except err.LoggerException:
return
except Exception as e:
_fail_handle(msg, e)
def backtrace_warn(msg, back_trace_len=0):
"""
warning msg with backtrace support
"""
try:
msg = LogInitializer.log_file_func_info(msg, back_trace_len)
loggerman = _RootLogerMan()
loggerman.get_rootlogger().warn(msg)
except err.LoggerException:
return
except Exception as e:
_fail_handle(msg, e)
def backtrace_error(msg, back_trace_len=0):
"""
error msg with backtarce support
"""
try:
msg = LogInitializer.log_file_func_info(msg, back_trace_len)
loggerman = _RootLogerMan()
loggerman.get_rootlogger().error(msg)
except err.LoggerException:
return
except Exception as error:
_fail_handle(msg, error)
def backtrace_critical(msg, back_trace_len=0):
"""
logging.CRITICAL with backtrace support
"""
try:
msg = LogInitializer.log_file_func_info(msg, back_trace_len)
loggerman = _RootLogerMan()
loggerman.get_rootlogger().critical(msg)
except err.LoggerException:
return
except Exception as error:
_fail_handle(msg, error)
def setloglevel(logginglevel):
"""
change log level during runtime
::
from cup import log
log.setloglevel(log.DEBUG)
"""
loggerman = _RootLogerMan()
loggerman.get_rootlogger().setLevel(logginglevel)
def parse(logline):
"""
return a dict if the line is valid.
Otherwise, return None
::
dict_info:= {
'loglevel': 'DEBUG',
'date': '2015-10-14',
'time': '16:12:22,924',
'pid': 8808,
'tid': 1111111,
'srcline': 'util.py:33',
'msg': 'this is the log content',
'tznum': 8,
'tzstr': 'CST'
}
"""
try:
content = logline[logline.find(']'):]
content = content[(content.find(']') + 1):]
content = content[(content.find(']') + 1):].strip()
regex = re.compile('[ \t]+')
items = regex.split(logline)
loglevel, date, time_, timezone, _, pid_tid, src = items[0:6]
pid, tid = pid_tid.strip('[]').split(':')
tznum, tzkey = timezone.strip('+)').split('(')
return {
'loglevel': loglevel.strip(':'),
'date': date,
'time': time_,
'pid': pid,
'tid': tid,
'srcline': src.strip('[]'),
'msg': content,
'tznum': int(tznum),
'tzkey': tzkey
}
# pylint: disable = W0703
except Exception:
return None
def info_if(bol, msg, back_trace_len=1):
"""log msg with info loglevel if bol is true"""
if bol:
info(msg, back_trace_len)
def error_if(bol, msg, back_trace_len=1):
"""log msg with error loglevel if bol is true"""
if bol:
error(msg, back_trace_len)
def warn_if(bol, msg, back_trace_len=1):
"""log msg with error loglevel if bol is true"""
if bol:
warn(msg, back_trace_len)
def critical_if(bol, msg, back_trace_len=1):
"""log msg with critical loglevel if bol is true"""
if bol:
critical(msg, back_trace_len)
def debug_if(bol, msg, back_trace_len=1):
"""log msg with critical loglevel if bol is true"""
if bol:
debug(msg, back_trace_len)
def xinit_comlog(loggername, logger_params):
"""
xinit_comlog along with xdebug xinfo xwarn xerror are functions for
different loggers other than logging.root (the global logger).
:param loggername:
loggername, example: http.access,
:param logger_params:
object of LoggerParams
Code example:
::
logparams = log.LoggerParams(
log.DEBUG, 'cup.x.log', log.ROTATION, 100 * 1024 * 1024,
True, True
)
log.xinit_comlog('log.x', logparams)
log.xdebug('log.x', 'xdebug')
log.xinfo('log.x', 'xinfo')
log.xerror('log.x', 'xerror')
logparams = log.LoggerParams(
log.DEBUG, 'cup.y.log', log.ROTATION, 100 * 1024 * 1024,
True, True
)
log.xinit_comlog('log.y', logparams)
log.xdebug('log.y', 'ydebug')
log.xinfo('log.y', 'yinfo')
log.xerror('log.y', 'yerror')
"""
if not isinstance(logger_params, LoggerParams):
raise TypeError('logger_params should be a object of log.LoggerParams')
logger = logging.getLogger(loggername)
LogInitializer.setup_filelogger(logger, logger_params)
def xdebug(loggername, msg, back_trace_len=1):
"""
:param loggername:
        should be initialized with xinit_comlog before calling xdebug/xinfo/xerror/xcritical
:param msg:
log msg
:back_trace_len:
1 by default, ignore this if you don't need this
"""
logger = logging.getLogger(loggername)
logger.debug(LogInitializer.log_file_func_info(msg, back_trace_len))
def xinfo(loggername, msg, back_trace_len=1):
"""
:param loggername:
        should be initialized with xinit_comlog before calling xdebug/xinfo/xerror/xcritical
:param msg:
log msg
:back_trace_len:
default 1, just ignore this param if you don't know what it is.
This param will trace back 1 layer and get the
[code_filename:code_lines]
"""
logger = logging.getLogger(loggername)
logger.info(LogInitializer.log_file_func_info(msg, back_trace_len))
def xwarn(loggername, msg, back_trace_len=1):
"""
:param loggername:
        should be initialized with xinit_comlog before calling xdebug/xinfo/xerror/xcritical
:param msg:
log msg
:back_trace_len:
default 1, just ignore this param if you don't know what it is.
This param will trace back 1 layer and get the
[code_filename:code_lines]
"""
logger = logging.getLogger(loggername)
logger.warn(LogInitializer.log_file_func_info(msg, back_trace_len))
def xerror(loggername, msg, back_trace_len=1):
"""
:param loggername:
        should be initialized with xinit_comlog before calling xdebug/xinfo/xerror/xcritical
:param msg:
log msg
:back_trace_len:
default 1, just ignore this param if you don't know what it is.
This param will trace back 1 layer and get the
[code_filename:code_lines]
"""
logger = logging.getLogger(loggername)
logger.error(LogInitializer.log_file_func_info(msg, back_trace_len))
def xcritical(loggername, msg, back_trace_len=1):
"""
:param loggername:
        should be initialized with xinit_comlog before calling xdebug/xinfo/xerror/xcritical
:param msg:
log msg
:back_trace_len:
default 1, just ignore this param if you don't know what it is.
This param will trace back 1 layer and get the
[code_filename:code_lines]
"""
logger = logging.getLogger(loggername)
logger.critical(LogInitializer.log_file_func_info(msg, back_trace_len))
if __name__ == '__main__':
cup.log.debug('中文')
cup.log.init_comlog(
'test', cup.log.DEBUG, './test.log',
cup.log.ROTATION, 102400000, False
)
cup.log.init_comlog(
'test', cup.log.DEBUG, './test.log',
cup.log.ROTATION, 102400000, False
)
cup.log.info('test info')
cup.log.debug('test debug')
cup.log.info('中文'.decode('utf8'))
cup.log.reinit_comlog(
're-test', cup.log.DEBUG, './re.test.log',
cup.log.ROTATION, 102400000, False
)
cup.log.reinit_comlog(
're-test', cup.log.DEBUG, './re.test.log',
cup.log.ROTATION, 102400000, False
)
cup.log.info('re:test info')
cup.log.debug('re:test debug')
cup.log.debug('re:中文')
# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
|
demo/bs4_quotes.py | ssatyam4753/sukhoi | 1,010 | 12709280 | <gh_stars>1000+
"""
This example extract just the quotes, you end up with a structure like:
[quote0, quote1, ...]
Note: It uses beautifulsoup4 :)
"""
from sukhoi import MinerBS4, core
class QuoteMiner(MinerBS4):
def run(self, dom):
elems = dom.find_all('div', {'class':'quote'})
self.extend(list(map(self.extract_quote, elems)))
elem = dom.find('li', {'class', 'next'})
if elem: self.next(elem.find('a').get('href'))
def extract_quote(self, elem):
quote = elem.find('span', {'class': 'text'})
return quote.text
if __name__ == '__main__':
URL = 'http://quotes.toscrape.com/'
quotes = QuoteMiner(URL)
core.gear.mainloop()
print(quotes)
|
fedml_api/standalone/hierarchical_fl/client.py | xuwanwei/FedML | 1,120 | 12709294 | import copy
import torch
from fedml_api.standalone.fedavg.client import Client
class Client(Client):
def train(self, global_round_idx, group_round_idx, w):
self.model.load_state_dict(w)
self.model.to(self.device)
if self.args.client_optimizer == "sgd":
optimizer = torch.optim.SGD(self.model.parameters(), lr=self.args.lr)
else:
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()), lr=self.args.lr,
weight_decay=self.args.wd, amsgrad=True)
w_list = []
for epoch in range(self.args.epochs):
for x, labels in self.local_training_data:
x, labels = x.to(self.device), labels.to(self.device)
self.model.zero_grad()
log_probs = self.model(x)
loss = self.criterion(log_probs, labels)
loss.backward()
optimizer.step()
global_epoch = global_round_idx*self.args.group_comm_round*self.args.epochs + \
group_round_idx*self.args.epochs + epoch
if global_epoch % self.args.frequency_of_the_test == 0 or epoch == self.args.epochs-1:
w_list.append((global_epoch, copy.deepcopy(self.model.state_dict())))
return w_list
|
platform/mcu/haas1000/prebuild/data/python-apps/driver/spi/main.py | NEXTLEO/AliOS-Things | 4,538 | 12709301 | <reponame>NEXTLEO/AliOS-Things<filename>platform/mcu/haas1000/prebuild/data/python-apps/driver/spi/main.py
from driver import SPI
print("-------------------spi test--------------------")
spi = SPI()
spi.open("SPI0")
readBuf = bytearray(3)
writeBuf = bytearray([0x9f])
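# 0x9f is typically the JEDEC READ ID command for SPI NOR flash parts (an
# assumption about the attached device); readBuf receives the 3-byte ID.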
print(writeBuf)
print(readBuf)
value = spi.sendRecv(writeBuf, readBuf)
print(value)
print(writeBuf)
print(readBuf)
spi.close()
print("-------------------spi test--------------------")
|
qt__pyqt__pyside__pyqode/pyqt5__QWebEnginePage__Client_sync.py | DazEB2/SimplePyScripts | 117 | 12709319 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import QUrl
from PyQt5.QtWebEngineWidgets import QWebEnginePage
class Client(QWebEnginePage):
def __init__(self, urls):
self.app = QApplication([])
super().__init__()
self.response_list = []
self.loadFinished.connect(self._on_load_finished)
for url in urls:
self.load(QUrl(url))
self.app.exec_()
def _on_load_finished(self):
self.toHtml(self.callable)
def callable(self, html_str):
self.response_list.append(html_str)
self.app.quit()
if __name__ == '__main__':
urls = [
'http://doc.qt.io/Qt-5/qwebenginepage.html',
'https://www.google.ru/',
'https://yandex.ru/',
]
client = Client(urls)
print(len(client.response_list), client.response_list) # 3 ['<!--?xml version="1.0" ...
|
Python/benchmarking/image_capture.py | felixbinder/tdw | 307 | 12709341 | from argparse import ArgumentParser
from benchmark_utils import PATH
from benchmarker import Benchmarker
from platform import system
"""
Image capture benchmarks
"""
def granular():
output = "| Test | FPS |\n| --- | --- |\n"
rows = []
b.start()
rows.append(b.run(boxes=True, images=True, passes="_img",
row="--images --passes _img"))
b.start()
rows.append(b.run(boxes=True, images=True, passes="_id",
row="--boxes --images --passes _id"))
b.start()
rows.append(b.run(boxes=True, images=True, passes="_img", hi_res=True,
row="--images --passes _img --hi_res"))
b.start()
rows.append(b.run(boxes=True, images=True, passes="_img", png=True,
row="--images --passes _img --png"))
b.start()
    rows.append(b.run(boxes=True, images=True, passes="_img_id",
                row="--boxes --images --passes _img_id"))
b.start()
rows.append(b.run(boxes=True, images=True, passes="_img", hi_res=True, size=1024,
row="--images --passes _img --hi_res --size 1024"))
b.start()
rows.append(b.run(boxes=True, images=True, passes="_id", hi_res=True, size=1024,
row="--images --passes _id --hi_res --size 1024"))
b.start()
rows.append(b.run(boxes=True, images=True, passes="_img_id", hi_res=True, size=1024,
row="--images --passes _img_id --hi_res --size 1024"))
b.start()
rows.append(b.run(boxes=True, images=True, passes="_img_id", hi_res=True, size=1024, png=True,
row="--images --passes _img_id --hi_res --size 1024 --png"))
for row in rows:
output += row + "\n"
print(output)
def write_to_main():
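    # Measure the three headline benchmarks and substitute them into the
    # $..._<MACHINE> placeholders of the benchmark document at PATH.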
b.start()
tr = b.run(boxes=True, transforms=True, return_row=False)
b.start()
lo = b.run(images=True, passes="_img", return_row=False)
b.start()
hi = b.run(images=True, passes="_img", return_row=False, hi_res=True, size=1024)
txt = PATH.read_text()
txt = txt.replace("$TRANSFORMS_" + machine_key, str(tr))
txt = txt.replace("$IMG_LOW_" + machine_key, str(lo))
txt = txt.replace("$IMG_HIGH_" + machine_key, str(hi))
PATH.write_text(txt)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('--main', action='store_true')
parser.add_argument('--machine', type=str, default='legion_lenovo', choices=['legion_lenovo', 'braintree', 'node11'])
args = parser.parse_args()
machine_key = args.machine.upper()
if machine_key == "LEGION_LENOVO":
if system() == "Windows":
machine_key += "_WINDOWS"
else:
machine_key += "_UBUNTU"
b = Benchmarker()
if args.main:
write_to_main()
else:
granular()
b.communicate({"$type": "terminate"})
|