max_stars_repo_path (string, length 4-245) | max_stars_repo_name (string, length 7-115) | max_stars_count (int64, 101-368k) | id (string, length 2-8) | content (string, length 6-1.03M) |
---|---|---|---|---|
ex09_sed/testfiles/parseargs.py | techieguy007/learn-more-python-the-hard-way-solutions | 466 | 12741588 | <reponame>techieguy007/learn-more-python-the-hard-way-solutions
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('integers', metavar='N', type=int, nargs='+')
parser.add_argument('-f', '--foo', help='foo help')
parser.add_argument('-b', '--bar', help='bar help')
parser.add_argument('-z', '--baz', help='baz help')
parser.add_argument('-t', '--turn-on', action='store_true')
parser.add_argument('-x', '--exclude', action='store_false')
parser.add_argument('-s', '--start', action='store_true')
args = parser.parse_args()
print(args)
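# Illustrative run (hypothetical shell session, not part of the original exercise; the
# Namespace attribute ordering may differ slightly across Python versions):
#   $ python parseargs.py 1 2 3 --foo hello --turn-on
#   Namespace(bar=None, baz=None, exclude=True, foo='hello', integers=[1, 2, 3], start=False, turn_on=True)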
|
terrascript/data/rancher/rancher2.py | mjuenema/python-terrascript | 507 | 12741608 | <filename>terrascript/data/rancher/rancher2.py
# terrascript/data/rancher/rancher2.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:25:37 UTC)
import terrascript
class rancher2_app(terrascript.Data):
pass
class rancher2_catalog(terrascript.Data):
pass
class rancher2_catalog_v2(terrascript.Data):
pass
class rancher2_certificate(terrascript.Data):
pass
class rancher2_cloud_credential(terrascript.Data):
pass
class rancher2_cluster(terrascript.Data):
pass
class rancher2_cluster_alert_group(terrascript.Data):
pass
class rancher2_cluster_alert_rule(terrascript.Data):
pass
class rancher2_cluster_driver(terrascript.Data):
pass
class rancher2_cluster_logging(terrascript.Data):
pass
class rancher2_cluster_role_template_binding(terrascript.Data):
pass
class rancher2_cluster_scan(terrascript.Data):
pass
class rancher2_cluster_template(terrascript.Data):
pass
class rancher2_cluster_v2(terrascript.Data):
pass
class rancher2_etcd_backup(terrascript.Data):
pass
class rancher2_global_dns_provider(terrascript.Data):
pass
class rancher2_global_role(terrascript.Data):
pass
class rancher2_global_role_binding(terrascript.Data):
pass
class rancher2_multi_cluster_app(terrascript.Data):
pass
class rancher2_namespace(terrascript.Data):
pass
class rancher2_node_driver(terrascript.Data):
pass
class rancher2_node_pool(terrascript.Data):
pass
class rancher2_node_template(terrascript.Data):
pass
class rancher2_notifier(terrascript.Data):
pass
class rancher2_pod_security_policy_template(terrascript.Data):
pass
class rancher2_project(terrascript.Data):
pass
class rancher2_project_alert_group(terrascript.Data):
pass
class rancher2_project_alert_rule(terrascript.Data):
pass
class rancher2_project_logging(terrascript.Data):
pass
class rancher2_project_role_template_binding(terrascript.Data):
pass
class rancher2_registry(terrascript.Data):
pass
class rancher2_role_template(terrascript.Data):
pass
class rancher2_secret(terrascript.Data):
pass
class rancher2_secret_v2(terrascript.Data):
pass
class rancher2_setting(terrascript.Data):
pass
class rancher2_storage_class_v2(terrascript.Data):
pass
class rancher2_user(terrascript.Data):
pass
__all__ = [
"rancher2_app",
"rancher2_catalog",
"rancher2_catalog_v2",
"rancher2_certificate",
"rancher2_cloud_credential",
"rancher2_cluster",
"rancher2_cluster_alert_group",
"rancher2_cluster_alert_rule",
"rancher2_cluster_driver",
"rancher2_cluster_logging",
"rancher2_cluster_role_template_binding",
"rancher2_cluster_scan",
"rancher2_cluster_template",
"rancher2_cluster_v2",
"rancher2_etcd_backup",
"rancher2_global_dns_provider",
"rancher2_global_role",
"rancher2_global_role_binding",
"rancher2_multi_cluster_app",
"rancher2_namespace",
"rancher2_node_driver",
"rancher2_node_pool",
"rancher2_node_template",
"rancher2_notifier",
"rancher2_pod_security_policy_template",
"rancher2_project",
"rancher2_project_alert_group",
"rancher2_project_alert_rule",
"rancher2_project_logging",
"rancher2_project_role_template_binding",
"rancher2_registry",
"rancher2_role_template",
"rancher2_secret",
"rancher2_secret_v2",
"rancher2_setting",
"rancher2_storage_class_v2",
"rancher2_user",
]
|
plugins/modules/nsxt_policy_ip_pool.py | madhukark/ansible-for-nsxt | 127 | 12741622 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nsxt_policy_ip_pool
short_description: Create or Delete a Policy IP Pool
description:
Creates or deletes a Policy IP Pool.
Required attributes include id and display_name.
version_added: "2.8"
author: <NAME>
extends_documentation_fragment:
- vmware.ansible_for_nsxt.vmware_nsxt
options:
id:
description: The id of the Policy IP Pool.
required: false
type: str
description:
description: Resource description.
type: str
pool_block_subnets:
type: list
elements: dict
description: Specify the IP Pool Block Subnets that need to be created,
updated, or deleted as a list of dict in this section
suboptions:
auto_assign_gateway:
description:
- Indicate whether default gateway is to be reserved from
the range
- If this property is set to true, the first IP in the
range will be reserved for gateway.
type: bool
default: true
description:
description: Resource description.
type: str
display_name:
description:
- Display name.
- If resource ID is not specified, display_name will be
used as ID.
required: false
type: str
do_wait_till_create:
type: bool
default: false
description: Can be used to wait for the realization of
subresource before the request to create the next
resource is sent to the Manager
id:
description: The id of the Policy IP Pool Block Subnet.
required: false
type: str
ip_block_display_name:
description: Same as ip_block_id. Either one must be specified.
If both are specified, ip_block_id takes
precedence.
required: false
type: str
ip_block_id:
description: The ID of the IpAddressBlock from which the subnet
is to be created
type: str
size:
description:
- Represents the size or number of IP addresses in the
subnet
- The size parameter is required for subnet creation. It
must be specified during creation but cannot be changed
later.
type: int
state:
choices:
- present
- absent
description: "State can be either 'present' or 'absent'.
'present' is used to create or update resource.
'absent' is used to delete resource."
required: true
tags:
description: Opaque identifiers meaningful to the API user.
type: dict
suboptions:
scope:
description: Tag scope.
required: true
type: str
tag:
description: Tag value.
required: true
type: str
pool_static_subnets:
type: list
elements: dict
description: Specify the IP Pool Static Subnets that need to be
created, updated, or deleted as a list of dict in
this section
suboptions:
allocation_ranges:
description: A collection of IPv4 or IPv6 IP Pool Ranges.
type: list
elements: dict
suboptions:
start:
description: The start IP Address of the IP Range.
type: str
required: true
end:
description: The end IP Address of the IP Range.
type: str
required: true
cidr:
description: Subnet representation is a network address
and prefix length
type: str
required: true
description:
description: Resource description.
type: str
display_name:
description:
- Display name.
- If resource ID is not specified, display_name will be
used as ID.
required: false
type: str
dns_nameservers:
description: The collection of up to 3 DNS servers
for the subnet.
type: list
elements: str
dns_suffix:
description: The DNS suffix for the DNS server.
type: str
do_wait_till_create:
type: bool
default: false
description: Can be used to wait for the realization of
subresource before the request to create the next
resource is sent to the Manager
gateway_ip:
description: The default gateway address on a
layer-3 router.
type: str
id:
description: The id of the Policy IP Pool Block Subnet.
required: false
type: str
state:
choices:
- present
- absent
description: "State can be either 'present' or 'absent'.
'present' is used to create or update resource.
'absent' is used to delete resource."
tags:
description: Opaque identifiers meaningful to the API user.
type: dict
suboptions:
scope:
description: Tag scope.
required: true
type: str
tag:
description: Tag value.
required: true
type: str
'''
EXAMPLES = '''
- name: create IP Pool
nsxt_policy_ip_pool:
hostname: "10.10.10.10"
nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt
nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key
validate_certs: False
id: test-ip-pool
display_name: test-ip-pool
state: "absent"
tags:
- tag: "a"
scope: "b"
pool_block_subnets:
- id: test-ip-subnet-1
state: present
ip_block_id: "test-ip-blk-1"
size: 16
- display_name: test-ip-subnet-2
state: present
ip_block_id: "test-ip-blk-1"
size: 16
- display_name: test-ip-subnet-3
state: present
ip_block_id: "test-ip-blk-1"
size: 8
pool_static_subnets:
- id: test-ip-static-subnet-1
state: present
allocation_ranges:
- start: '172.16.31.10'
end: '172.16.31.10'
- start: '172.16.31.10'
end: '172.16.58.3'
cidr: '172.16.17.32/26'
- display_name: test-ip-static-subnet-2
state: present
allocation_ranges:
- start: '172.16.17.32'
end: '172.16.58.3'
- start: '172.16.58.3'
end: '172.16.31.10'
cidr: '192.168.3.11/26'
'''
RETURN = '''# '''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_base_resource import NSXTBaseRealizableResource
from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_resource_urls import (
IP_ADDRESS_POOL_SUBNET_URL, IP_BLOCK_URL, IP_POOL_URL)
from ansible.module_utils._text import to_native
class NSXTIpPool(NSXTBaseRealizableResource):
@staticmethod
def get_resource_spec():
ip_pool_arg_spec = {}
return ip_pool_arg_spec
@staticmethod
def get_resource_base_url(baseline_args=None):
return IP_POOL_URL
def update_parent_info(self, parent_info):
parent_info["ip_pool_id"] = self.id
class NSXTIpAddressPoolBlockSubnet(NSXTBaseRealizableResource):
def get_spec_identifier(self):
return (NSXTIpPool.NSXTIpAddressPoolBlockSubnet.
get_spec_identifier())
@classmethod
def get_spec_identifier(cls):
return "pool_block_subnets"
@staticmethod
def get_resource_spec():
ip_addr_pool_blk_subnet_arg_spec = {}
ip_addr_pool_blk_subnet_arg_spec.update(
ip_block_id=dict(
required=False,
type='str'
),
ip_block_display_name=dict(
required=False,
type='str'
),
auto_assign_gateway=dict(
required=False,
type='bool'
),
size=dict(
required=True,
type='int'
),
start_ip=dict(
required=False,
type='str'
),
)
return ip_addr_pool_blk_subnet_arg_spec
@staticmethod
def get_resource_base_url(parent_info):
return IP_ADDRESS_POOL_SUBNET_URL.format(
parent_info["ip_pool_id"]
)
def update_resource_params(self, nsx_resource_params):
# ip_block is a required attr
ip_block_id = self.get_id_using_attr_name_else_fail(
"ip_block", nsx_resource_params,
IP_BLOCK_URL, "IP Block")
nsx_resource_params["ip_block_path"] = (
IP_BLOCK_URL + "/" + ip_block_id)
nsx_resource_params["resource_type"] = "IpAddressPoolBlockSubnet"
class NSXTIpAddressPoolStaticSubnet(NSXTBaseRealizableResource):
def get_spec_identifier(self):
return (NSXTIpPool.NSXTIpAddressPoolStaticSubnet.
get_spec_identifier())
@classmethod
def get_spec_identifier(cls):
return "pool_static_subnets"
@staticmethod
def get_resource_spec():
ip_addr_pool_static_subnet_arg_spec = {}
ip_addr_pool_static_subnet_arg_spec.update(
auto_assign_gateway=dict(
required=False,
type='bool'
),
allocation_ranges=dict(
required=True,
elements='dict',
type='list',
options=dict(
start=dict(
required=True,
type='str'
),
end=dict(
required=True,
type='str'
),
)
),
cidr=dict(
required=True,
type='str'
),
dns_nameservers=dict(
required=False,
elements='str',
type='list'
),
dns_suffix=dict(
required=False,
type='str'
),
gateway_ip=dict(
required=False,
type='str'
),
)
return ip_addr_pool_static_subnet_arg_spec
@staticmethod
def get_resource_base_url(parent_info):
return IP_ADDRESS_POOL_SUBNET_URL.format(
parent_info["ip_pool_id"]
)
def update_resource_params(self, nsx_resource_params):
nsx_resource_params["resource_type"] = "IpAddressPoolStaticSubnet"
if __name__ == '__main__':
ip_pool = NSXTIpPool()
ip_pool.realize()
|
recipes/Python/111971_Format_version_numbers/recipe-111971.py | tdiprima/code | 2,023 | 12741626 | def StringVersion( seq ):
return '.'.join( ['%s'] * len( seq )) % tuple( seq )
def TupleVersion( str ):
return map( int, str.split( '.' ))
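# Hedged usage sketch (not part of the original recipe):
#   StringVersion((1, 2, 3)) -> '1.2.3'
#   TupleVersion('1.2.3') -> [1, 2, 3] on Python 2; on Python 3, map() returns an
#   iterator, so wrap the call in list() to materialize the tuple-style version.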
|
components/aws/sagemaker/tests/unit_tests/tests/deploy/test_deploy_component.py | Strasser-Pablo/pipelines | 2,860 | 12741651 | from common.sagemaker_component import SageMakerComponent, SageMakerJobStatus
from deploy.src.sagemaker_deploy_spec import SageMakerDeploySpec
from deploy.src.sagemaker_deploy_component import (
EndpointRequests,
SageMakerDeployComponent,
)
from tests.unit_tests.tests.deploy.test_deploy_spec import DeploySpecTestCase
import unittest
from unittest.mock import patch, MagicMock, ANY
class DeployComponentTestCase(unittest.TestCase):
REQUIRED_ARGS = DeploySpecTestCase.REQUIRED_ARGS
@classmethod
def setUp(cls):
cls.component = SageMakerDeployComponent()
# Instantiate without calling Do()
cls.component._endpoint_config_name = "endpoint-config"
cls.component._endpoint_name = "endpoint"
cls.component._should_update_existing = False
@patch("deploy.src.sagemaker_deploy_component.super", MagicMock())
def test_do_sets_name(self):
given_endpoint_name = SageMakerDeploySpec(
self.REQUIRED_ARGS + ["--endpoint_name", "my-endpoint"]
)
given_endpoint_config_name = SageMakerDeploySpec(
self.REQUIRED_ARGS + ["--endpoint_config_name", "my-endpoint-config"]
)
unnamed_spec = SageMakerDeploySpec(self.REQUIRED_ARGS)
with patch(
"deploy.src.sagemaker_deploy_component.SageMakerComponent._generate_unique_timestamped_id",
MagicMock(return_value="-generated"),
):
self.component.Do(given_endpoint_name)
self.assertEqual(
"EndpointConfig-generated", self.component._endpoint_config_name
)
self.assertEqual("my-endpoint", self.component._endpoint_name)
self.component.Do(given_endpoint_config_name)
self.assertEqual("my-endpoint-config", self.component._endpoint_config_name)
self.assertEqual("Endpoint-generated", self.component._endpoint_name)
self.component.Do(unnamed_spec)
self.assertEqual(
"EndpointConfig-generated", self.component._endpoint_config_name
)
self.assertEqual("Endpoint-generated", self.component._endpoint_name)
@patch("deploy.src.sagemaker_deploy_component.super", MagicMock())
def test_update_endpoint_do_sets_name(self):
given_endpoint_name = SageMakerDeploySpec(
self.REQUIRED_ARGS
+ ["--endpoint_name", "my-endpoint", "--update_endpoint", "True"]
)
given_endpoint_config_name = SageMakerDeploySpec(
self.REQUIRED_ARGS
+ [
"--endpoint_config_name",
"my-endpoint-config",
"--update_endpoint",
"True",
]
)
unnamed_spec = SageMakerDeploySpec(self.REQUIRED_ARGS)
SageMakerDeployComponent._generate_unique_timestamped_id = MagicMock(
return_value="-generated-update"
)
self.component._endpoint_name_exists = MagicMock(return_value=True)
self.component._get_endpoint_config = MagicMock(return_value="existing-config")
with patch(
"deploy.src.sagemaker_deploy_component.SageMakerComponent._generate_unique_timestamped_id",
MagicMock(return_value="-generated-update"),
):
self.component.Do(given_endpoint_name)
self.assertEqual(
"EndpointConfig-generated-update", self.component._endpoint_config_name
)
self.assertEqual("my-endpoint", self.component._endpoint_name)
self.assertTrue(self.component._should_update_existing)
# Ignore given endpoint config name for update
self.component.Do(given_endpoint_config_name)
self.assertEqual(
"EndpointConfig-generated-update", self.component._endpoint_config_name
)
self.assertEqual("Endpoint-generated-update", self.component._endpoint_name)
self.assertTrue(self.component._should_update_existing)
self.component.Do(unnamed_spec)
self.assertEqual(
"EndpointConfig-generated-update", self.component._endpoint_config_name
)
self.assertEqual("Endpoint-generated-update", self.component._endpoint_name)
self.assertFalse(self.component._should_update_existing)
def test_create_deploy_job_requests(self):
spec = SageMakerDeploySpec(self.REQUIRED_ARGS)
request = self.component._create_job_request(spec.inputs, spec.outputs)
self.assertEqual(
request,
EndpointRequests(
config_request={
"EndpointConfigName": "endpoint-config",
"ProductionVariants": [
{
"VariantName": "variant-name-1",
"ModelName": "model-test",
"InitialInstanceCount": 1,
"InstanceType": "ml.m4.xlarge",
"InitialVariantWeight": 1.0,
}
],
"Tags": [],
},
endpoint_request={
"EndpointName": "endpoint",
"EndpointConfigName": "endpoint-config",
},
),
)
def test_create_update_deploy_job_requests(self):
spec = SageMakerDeploySpec(self.REQUIRED_ARGS)
self.component._should_update_existing = True
request = self.component._create_job_request(spec.inputs, spec.outputs)
self.assertEqual(
request,
EndpointRequests(
config_request={
"EndpointConfigName": "endpoint-config",
"ProductionVariants": [
{
"VariantName": "variant-name-1",
"ModelName": "model-test",
"InitialInstanceCount": 1,
"InstanceType": "ml.m4.xlarge",
"InitialVariantWeight": 1.0,
}
],
"Tags": [],
},
endpoint_request={
"EndpointName": "endpoint",
"EndpointConfigName": "endpoint-config",
},
),
)
def test_create_deploy_job_multiple_variants(self):
spec = SageMakerDeploySpec(
self.REQUIRED_ARGS
+ [
"--variant_name_1",
"variant-test-1",
"--initial_instance_count_1",
"1",
"--instance_type_1",
"t1",
"--initial_variant_weight_1",
"0.1",
"--accelerator_type_1",
"ml.eia1.medium",
"--model_name_2",
"model-test-2",
"--variant_name_2",
"variant-test-2",
"--initial_instance_count_2",
"2",
"--instance_type_2",
"t2",
"--initial_variant_weight_2",
"0.2",
"--accelerator_type_2",
"ml.eia1.large",
]
)
request = self.component._create_job_request(spec.inputs, spec.outputs)
self.assertEqual(
request,
EndpointRequests(
config_request={
"EndpointConfigName": "endpoint-config",
"ProductionVariants": [
{
"VariantName": "variant-test-1",
"ModelName": "model-test",
"InitialInstanceCount": 1,
"InstanceType": "t1",
"InitialVariantWeight": 0.1,
"AcceleratorType": "ml.eia1.medium",
},
{
"VariantName": "variant-test-2",
"ModelName": "model-test-2",
"InitialInstanceCount": 2,
"InstanceType": "t2",
"InitialVariantWeight": 0.2,
"AcceleratorType": "ml.eia1.large",
},
],
"Tags": [],
},
endpoint_request={
"EndpointName": "endpoint",
"EndpointConfigName": "endpoint-config",
},
),
)
def test_get_job_status(self):
self.component._sm_client = MagicMock()
self.component._sm_client.describe_endpoint.return_value = {
"EndpointStatus": "Creating"
}
self.assertEqual(
self.component._get_job_status(),
SageMakerJobStatus(is_completed=False, raw_status="Creating"),
)
self.component._sm_client.describe_endpoint.return_value = {
"EndpointStatus": "Updating"
}
self.assertEqual(
self.component._get_job_status(),
SageMakerJobStatus(is_completed=False, raw_status="Updating"),
)
self.component._sm_client.describe_endpoint.return_value = {
"EndpointStatus": "InService"
}
self.assertEqual(
self.component._get_job_status(),
SageMakerJobStatus(is_completed=True, raw_status="InService"),
)
self.component._sm_client.describe_endpoint.return_value = {
"EndpointStatus": "Failed",
"FailureReason": "lolidk",
}
self.assertEqual(
self.component._get_job_status(),
SageMakerJobStatus(
is_completed=True,
raw_status="Failed",
has_error=True,
error_message="lolidk",
),
)
def test_after_job_completed(self):
spec = SageMakerDeploySpec(self.REQUIRED_ARGS)
self.component._after_job_complete({}, {}, spec.inputs, spec.outputs)
self.assertEqual(spec.outputs.endpoint_name, "endpoint")
def test_submit_update_job_request(self):
self.component._should_update_existing = True
self.component._existing_endpoint_config_name = "old-config"
self.component._delete_endpoint_config = MagicMock(return_value=True)
self.component._sm_client = MagicMock()
requests = EndpointRequests(
config_request={
"EndpointConfigName": "endpoint-config",
"ProductionVariants": [
{
"VariantName": "variant-test-1",
"ModelName": "model-test",
"InitialInstanceCount": 1,
"InstanceType": "t1",
"InitialVariantWeight": 0.1,
"AcceleratorType": "ml.eia1.medium",
},
{
"VariantName": "variant-test-2",
"ModelName": "model-test-2",
"InitialInstanceCount": 2,
"InstanceType": "t2",
"InitialVariantWeight": 0.2,
"AcceleratorType": "ml.eia1.large",
},
],
"Tags": [],
},
endpoint_request={
"EndpointName": "endpoint",
"EndpointConfigName": "endpoint-config",
},
)
self.component._submit_job_request(requests)
self.component._sm_client.update_endpoint.assert_called_once_with(
**{"EndpointName": "endpoint", "EndpointConfigName": "endpoint-config",}
)
self.component._delete_endpoint_config.assert_called_once_with("old-config")
|
tests/test_admin.py | adamlamers/pypicloud | 336 | 12741656 | """ Tests for admin endpoints """
from mock import MagicMock
from pyramid.httpexceptions import HTTPBadRequest
from pypicloud.views.admin import AdminEndpoints
from . import MockServerTest
class TestAdmin(MockServerTest):
"""Tests for admin endpoints"""
def setUp(self):
super(TestAdmin, self).setUp()
self.access = self.request.access = MagicMock()
def test_rebuild(self):
"""Rebuild endpoint refreshes db cache"""
self.request.db = MagicMock()
AdminEndpoints(self.request).rebuild_package_list()
self.assertTrue(self.request.db.reload_from_storage.called)
def test_get_pending_users(self):
"""Retrieve pending users from access backend"""
ret = AdminEndpoints(self.request).get_pending_users()
self.assertEqual(ret, self.access.pending_users())
def test_get_users(self):
"""Retrieve all users from access backend"""
ret = AdminEndpoints(self.request).get_users()
self.assertEqual(ret, self.access.user_data())
def test_get_user(self):
"""Retrieve data for a single user"""
self.request.named_subpaths = {"username": "a"}
ret = AdminEndpoints(self.request).get_user()
self.access.user_data.assert_called_with("a")
self.assertEqual(ret, self.access.user_data())
def test_delete_user(self):
"""Delete user from access backend"""
self.request.named_subpaths = {"username": "a"}
AdminEndpoints(self.request).delete_user()
self.access.delete_user.assert_called_with("a")
def test_approve_user(self):
"""Approve a pending user"""
self.request.named_subpaths = {"username": "a"}
AdminEndpoints(self.request).approve_user()
self.access.approve_user.assert_called_with("a")
def test_set_admin_status(self):
"""Set the admin flag for a user"""
self.request.named_subpaths = {"username": "a"}
AdminEndpoints(self.request).set_admin_status(True)
self.access.set_user_admin.assert_called_with("a", True)
def test_add_group_member(self):
"""Add a user to a group"""
self.request.named_subpaths = {"username": "a", "group": "b"}
self.request.method = "PUT"
AdminEndpoints(self.request).mutate_group_member()
self.access.edit_user_group.assert_called_with("a", "b", True)
def test_remove_group_member(self):
"""Remove a user from a group"""
self.request.named_subpaths = {"username": "a", "group": "b"}
self.request.method = "DELETE"
AdminEndpoints(self.request).mutate_group_member()
self.access.edit_user_group.assert_called_with("a", "b", False)
def test_get_groups(self):
"""Retrieve list of all groups"""
ret = AdminEndpoints(self.request).get_groups()
self.assertEqual(ret, self.access.groups())
def test_delete_group(self):
"""Delete a group"""
self.request.named_subpaths = {"group": "a"}
AdminEndpoints(self.request).delete_group()
self.access.delete_group.assert_called_with("a")
def test_get_user_permissions(self):
"""Get a user's permissions from the access backend"""
self.request.named_subpaths = {"username": "a"}
ret = AdminEndpoints(self.request).get_user_permissions()
self.access.user_package_permissions.assert_called_with("a")
self.assertEqual(ret, self.access.user_package_permissions())
def test_get_group(self):
"""Get a group's members and permissions"""
self.request.named_subpaths = {"group": "a"}
ret = AdminEndpoints(self.request).get_group()
self.access.group_members.assert_called_with("a")
self.access.group_package_permissions.assert_called_with("a")
self.assertEqual(
ret,
{
"members": self.access.group_members(),
"packages": self.access.group_package_permissions(),
},
)
def test_get_package_permissions(self):
"""Get user and group permissions for a package"""
self.request.named_subpaths = {"package": "a"}
self.access.user_permissions.return_value = {"u1": ["read"]}
self.access.group_permissions.return_value = {"g1": ["read", "write"]}
ret = AdminEndpoints(self.request).get_package_permissions()
self.assertEqual(
ret,
{
"user": [{"username": "u1", "permissions": ["read"]}],
"group": [{"group": "g1", "permissions": ["read", "write"]}],
},
)
def test_create_group(self):
"""Create a group"""
self.request.named_subpaths = {"group": "a"}
AdminEndpoints(self.request).create_group()
self.access.create_group.assert_called_with("a")
def test_no_create_everyone_group(self):
"""Cannot create the 'everyone' group"""
self.request.named_subpaths = {"group": "everyone"}
ret = AdminEndpoints(self.request).create_group()
self.assertTrue(isinstance(ret, HTTPBadRequest))
def test_no_create_authenticated_group(self):
"""Cannot create the 'authenticated' group"""
self.request.named_subpaths = {"group": "authenticated"}
ret = AdminEndpoints(self.request).create_group()
self.assertTrue(isinstance(ret, HTTPBadRequest))
def test_add_user_permission(self):
"""Add a user permission to a package"""
self.request.named_subpaths = {
"type": "user",
"package": "p",
"name": "u",
"permission": "read",
}
self.request.method = "PUT"
AdminEndpoints(self.request).edit_permission()
self.access.edit_user_permission.assert_called_with("p", "u", "read", True)
def test_remove_user_permission(self):
"""Remove a user permission from a package"""
self.request.named_subpaths = {
"type": "user",
"package": "p",
"name": "u",
"permission": "read",
}
self.request.method = "DELETE"
AdminEndpoints(self.request).edit_permission()
self.access.edit_user_permission.assert_called_with("p", "u", "read", False)
def test_add_group_permission(self):
"""Add a group permission to a package"""
self.request.named_subpaths = {
"type": "group",
"package": "p",
"name": "g",
"permission": "read",
}
self.request.method = "PUT"
AdminEndpoints(self.request).edit_permission()
self.access.edit_group_permission.assert_called_with("p", "g", "read", True)
def test_remove_group_permission(self):
"""Remove a group permission from a package"""
self.request.named_subpaths = {
"type": "group",
"package": "p",
"name": "g",
"permission": "read",
}
self.request.method = "DELETE"
AdminEndpoints(self.request).edit_permission()
self.access.edit_group_permission.assert_called_with("p", "g", "read", False)
def test_toggle_allow_register(self):
"""Toggle registration enabled"""
AdminEndpoints(self.request).toggle_allow_register(True)
self.access.set_allow_register.assert_called_with(True)
|
third_party/gsutil/third_party/apitools/apitools/gen/extended_descriptor.py | tingshao/catapult | 2,151 | 12741707 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extended protorpc descriptors.
This takes existing protorpc Descriptor classes and adds extra
properties not directly supported in proto itself, notably field and
message descriptions. We need this in order to generate protorpc
message files with comments.
Note that for most of these classes, we can't simply wrap the existing
message, since we need to change the type of the subfields. We could
have a "plain" descriptor attached, but that seems like unnecessary
bookkeeping. Where possible, we purposely reuse existing tag numbers;
for new fields, we start numbering at 100.
"""
import abc
import operator
import textwrap
import six
from apitools.base.protorpclite import descriptor as protorpc_descriptor
from apitools.base.protorpclite import message_types
from apitools.base.protorpclite import messages
import apitools.base.py as apitools_base
class ExtendedEnumValueDescriptor(messages.Message):
"""Enum value descriptor with additional fields.
Fields:
name: Name of enumeration value.
number: Number of enumeration value.
description: Description of this enum value.
"""
name = messages.StringField(1)
number = messages.IntegerField(2, variant=messages.Variant.INT32)
description = messages.StringField(100)
class ExtendedEnumDescriptor(messages.Message):
"""Enum class descriptor with additional fields.
Fields:
name: Name of Enum without any qualification.
values: Values defined by Enum class.
description: Description of this enum class.
full_name: Fully qualified name of this enum class.
enum_mappings: Mappings from python to JSON names for enum values.
"""
class JsonEnumMapping(messages.Message):
"""Mapping from a python name to the wire name for an enum."""
python_name = messages.StringField(1)
json_name = messages.StringField(2)
name = messages.StringField(1)
values = messages.MessageField(
ExtendedEnumValueDescriptor, 2, repeated=True)
description = messages.StringField(100)
full_name = messages.StringField(101)
enum_mappings = messages.MessageField(
'JsonEnumMapping', 102, repeated=True)
class ExtendedFieldDescriptor(messages.Message):
"""Field descriptor with additional fields.
Fields:
field_descriptor: The underlying field descriptor.
name: The name of this field.
description: Description of this field.
"""
field_descriptor = messages.MessageField(
protorpc_descriptor.FieldDescriptor, 100)
# We duplicate the names for easier bookkeeping.
name = messages.StringField(101)
description = messages.StringField(102)
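# Illustrative construction (an assumption, not taken from the original source): these
# descriptors are ordinary protorpc messages, so they can be built with keyword arguments, e.g.
#   ExtendedFieldDescriptor(name='count', description='Number of items returned.')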
class ExtendedMessageDescriptor(messages.Message):
"""Message descriptor with additional fields.
Fields:
name: Name of Message without any qualification.
fields: Fields defined for message.
message_types: Nested Message classes defined on message.
enum_types: Nested Enum classes defined on message.
description: Description of this message.
full_name: Full qualified name of this message.
decorators: Decorators to include in the definition when printing.
Printed in the given order from top to bottom (so the last entry
is the innermost decorator).
alias_for: This type is just an alias for the named type.
field_mappings: Mappings from python to json field names.
"""
class JsonFieldMapping(messages.Message):
"""Mapping from a python name to the wire name for a field."""
python_name = messages.StringField(1)
json_name = messages.StringField(2)
name = messages.StringField(1)
fields = messages.MessageField(ExtendedFieldDescriptor, 2, repeated=True)
message_types = messages.MessageField(
'extended_descriptor.ExtendedMessageDescriptor', 3, repeated=True)
enum_types = messages.MessageField(
ExtendedEnumDescriptor, 4, repeated=True)
description = messages.StringField(100)
full_name = messages.StringField(101)
decorators = messages.StringField(102, repeated=True)
alias_for = messages.StringField(103)
field_mappings = messages.MessageField(
'JsonFieldMapping', 104, repeated=True)
class ExtendedFileDescriptor(messages.Message):
"""File descriptor with additional fields.
Fields:
package: Fully qualified name of package that definitions belong to.
message_types: Message definitions contained in file.
enum_types: Enum definitions contained in file.
description: Description of this file.
additional_imports: Extra imports used in this package.
"""
package = messages.StringField(2)
message_types = messages.MessageField(
ExtendedMessageDescriptor, 4, repeated=True)
enum_types = messages.MessageField(
ExtendedEnumDescriptor, 5, repeated=True)
description = messages.StringField(100)
additional_imports = messages.StringField(101, repeated=True)
def _WriteFile(file_descriptor, package, version, proto_printer):
"""Write the given extended file descriptor to the printer."""
proto_printer.PrintPreamble(package, version, file_descriptor)
_PrintEnums(proto_printer, file_descriptor.enum_types)
_PrintMessages(proto_printer, file_descriptor.message_types)
custom_json_mappings = _FetchCustomMappings(file_descriptor.enum_types)
custom_json_mappings.extend(
_FetchCustomMappings(file_descriptor.message_types))
for mapping in custom_json_mappings:
proto_printer.PrintCustomJsonMapping(mapping)
def WriteMessagesFile(file_descriptor, package, version, printer):
"""Write the given extended file descriptor to out as a message file."""
_WriteFile(file_descriptor, package, version,
_Proto2Printer(printer))
def WritePythonFile(file_descriptor, package, version, printer):
"""Write the given extended file descriptor to out."""
_WriteFile(file_descriptor, package, version,
_ProtoRpcPrinter(printer))
def PrintIndentedDescriptions(printer, ls, name, prefix=''):
if ls:
with printer.Indent(indent=prefix):
with printer.CommentContext():
width = printer.CalculateWidth() - len(prefix)
printer()
printer(name + ':')
for x in ls:
description = '%s: %s' % (x.name, x.description)
for line in textwrap.wrap(description, width,
initial_indent=' ',
subsequent_indent=' '):
printer(line)
def _FetchCustomMappings(descriptor_ls):
"""Find and return all custom mappings for descriptors in descriptor_ls."""
custom_mappings = []
for descriptor in descriptor_ls:
if isinstance(descriptor, ExtendedEnumDescriptor):
custom_mappings.extend(
_FormatCustomJsonMapping('Enum', m, descriptor)
for m in descriptor.enum_mappings)
elif isinstance(descriptor, ExtendedMessageDescriptor):
custom_mappings.extend(
_FormatCustomJsonMapping('Field', m, descriptor)
for m in descriptor.field_mappings)
custom_mappings.extend(
_FetchCustomMappings(descriptor.enum_types))
custom_mappings.extend(
_FetchCustomMappings(descriptor.message_types))
return custom_mappings
def _FormatCustomJsonMapping(mapping_type, mapping, descriptor):
return '\n'.join((
'encoding.AddCustomJson%sMapping(' % mapping_type,
" %s, '%s', '%s')" % (descriptor.full_name, mapping.python_name,
mapping.json_name),
))
def _EmptyMessage(message_type):
return not any((message_type.enum_types,
message_type.message_types,
message_type.fields))
class ProtoPrinter(six.with_metaclass(abc.ABCMeta, object)):
"""Interface for proto printers."""
@abc.abstractmethod
def PrintPreamble(self, package, version, file_descriptor):
"""Print the file docstring and import lines."""
@abc.abstractmethod
def PrintEnum(self, enum_type):
"""Print the given enum declaration."""
@abc.abstractmethod
def PrintMessage(self, message_type):
"""Print the given message declaration."""
class _Proto2Printer(ProtoPrinter):
"""Printer for proto2 definitions."""
def __init__(self, printer):
self.__printer = printer
def __PrintEnumCommentLines(self, enum_type):
description = enum_type.description or '%s enum type.' % enum_type.name
for line in textwrap.wrap(description,
self.__printer.CalculateWidth() - 3):
self.__printer('// %s', line)
PrintIndentedDescriptions(self.__printer, enum_type.values, 'Values',
prefix='// ')
def __PrintEnumValueCommentLines(self, enum_value):
if enum_value.description:
width = self.__printer.CalculateWidth() - 3
for line in textwrap.wrap(enum_value.description, width):
self.__printer('// %s', line)
def PrintEnum(self, enum_type):
self.__PrintEnumCommentLines(enum_type)
self.__printer('enum %s {', enum_type.name)
with self.__printer.Indent():
enum_values = sorted(
enum_type.values, key=operator.attrgetter('number'))
for enum_value in enum_values:
self.__printer()
self.__PrintEnumValueCommentLines(enum_value)
self.__printer('%s = %s;', enum_value.name, enum_value.number)
self.__printer('}')
self.__printer()
def PrintPreamble(self, package, version, file_descriptor):
self.__printer('// Generated message classes for %s version %s.',
package, version)
self.__printer('// NOTE: This file is autogenerated and should not be '
'edited by hand.')
description_lines = textwrap.wrap(file_descriptor.description, 75)
if description_lines:
self.__printer('//')
for line in description_lines:
self.__printer('// %s', line)
self.__printer()
self.__printer('syntax = "proto2";')
self.__printer('package %s;', file_descriptor.package)
def __PrintMessageCommentLines(self, message_type):
"""Print the description of this message."""
description = message_type.description or '%s message type.' % (
message_type.name)
width = self.__printer.CalculateWidth() - 3
for line in textwrap.wrap(description, width):
self.__printer('// %s', line)
PrintIndentedDescriptions(self.__printer, message_type.enum_types,
'Enums', prefix='// ')
PrintIndentedDescriptions(self.__printer, message_type.message_types,
'Messages', prefix='// ')
PrintIndentedDescriptions(self.__printer, message_type.fields,
'Fields', prefix='// ')
def __PrintFieldDescription(self, description):
for line in textwrap.wrap(description,
self.__printer.CalculateWidth() - 3):
self.__printer('// %s', line)
def __PrintFields(self, fields):
for extended_field in fields:
field = extended_field.field_descriptor
field_type = messages.Field.lookup_field_type_by_variant(
field.variant)
self.__printer()
self.__PrintFieldDescription(extended_field.description)
label = str(field.label).lower()
if field_type in (messages.EnumField, messages.MessageField):
proto_type = field.type_name
else:
proto_type = str(field.variant).lower()
default_statement = ''
if field.default_value:
if field_type in [messages.BytesField, messages.StringField]:
default_value = '"%s"' % field.default_value
elif field_type is messages.BooleanField:
default_value = str(field.default_value).lower()
else:
default_value = str(field.default_value)
default_statement = ' [default = %s]' % default_value
self.__printer(
'%s %s %s = %d%s;',
label, proto_type, field.name, field.number, default_statement)
def PrintMessage(self, message_type):
self.__printer()
self.__PrintMessageCommentLines(message_type)
if _EmptyMessage(message_type):
self.__printer('message %s {}', message_type.name)
return
self.__printer('message %s {', message_type.name)
with self.__printer.Indent():
_PrintEnums(self, message_type.enum_types)
_PrintMessages(self, message_type.message_types)
self.__PrintFields(message_type.fields)
self.__printer('}')
def PrintCustomJsonMapping(self, mapping_lines):
raise NotImplementedError(
'Custom JSON encoding not supported for proto2')
class _ProtoRpcPrinter(ProtoPrinter):
"""Printer for ProtoRPC definitions."""
def __init__(self, printer):
self.__printer = printer
def __PrintClassSeparator(self):
self.__printer()
if not self.__printer.indent:
self.__printer()
def __PrintEnumDocstringLines(self, enum_type):
description = enum_type.description or '%s enum type.' % enum_type.name
for line in textwrap.wrap('"""%s' % description,
self.__printer.CalculateWidth()):
self.__printer(line)
PrintIndentedDescriptions(self.__printer, enum_type.values, 'Values')
self.__printer('"""')
def PrintEnum(self, enum_type):
self.__printer('class %s(_messages.Enum):', enum_type.name)
with self.__printer.Indent():
self.__PrintEnumDocstringLines(enum_type)
enum_values = sorted(
enum_type.values, key=operator.attrgetter('number'))
for enum_value in enum_values:
self.__printer('%s = %s', enum_value.name, enum_value.number)
if not enum_type.values:
self.__printer('pass')
self.__PrintClassSeparator()
def __PrintAdditionalImports(self, imports):
"""Print additional imports needed for protorpc."""
google_imports = [x for x in imports if 'google' in x]
other_imports = [x for x in imports if 'google' not in x]
if other_imports:
for import_ in sorted(other_imports):
self.__printer(import_)
self.__printer()
# Note: If we ever were going to add imports from this package, we'd
# need to sort those out and put them at the end.
if google_imports:
for import_ in sorted(google_imports):
self.__printer(import_)
self.__printer()
def PrintPreamble(self, package, version, file_descriptor):
self.__printer('"""Generated message classes for %s version %s.',
package, version)
self.__printer()
for line in textwrap.wrap(file_descriptor.description, 78):
self.__printer(line)
self.__printer('"""')
self.__printer('# NOTE: This file is autogenerated and should not be '
'edited by hand.')
self.__printer()
self.__PrintAdditionalImports(file_descriptor.additional_imports)
self.__printer()
self.__printer("package = '%s'", file_descriptor.package)
self.__printer()
self.__printer()
def __PrintMessageDocstringLines(self, message_type):
"""Print the docstring for this message."""
description = message_type.description or '%s message type.' % (
message_type.name)
short_description = (
_EmptyMessage(message_type) and
len(description) < (self.__printer.CalculateWidth() - 6))
with self.__printer.CommentContext():
if short_description:
# Note that we use explicit string interpolation here since
# we're in comment context.
self.__printer('"""%s"""' % description)
return
for line in textwrap.wrap('"""%s' % description,
self.__printer.CalculateWidth()):
self.__printer(line)
PrintIndentedDescriptions(self.__printer, message_type.enum_types,
'Enums')
PrintIndentedDescriptions(
self.__printer, message_type.message_types, 'Messages')
PrintIndentedDescriptions(
self.__printer, message_type.fields, 'Fields')
self.__printer('"""')
self.__printer()
def PrintMessage(self, message_type):
if message_type.alias_for:
self.__printer(
'%s = %s', message_type.name, message_type.alias_for)
self.__PrintClassSeparator()
return
for decorator in message_type.decorators:
self.__printer('@%s', decorator)
self.__printer('class %s(_messages.Message):', message_type.name)
with self.__printer.Indent():
self.__PrintMessageDocstringLines(message_type)
_PrintEnums(self, message_type.enum_types)
_PrintMessages(self, message_type.message_types)
_PrintFields(message_type.fields, self.__printer)
self.__PrintClassSeparator()
def PrintCustomJsonMapping(self, mapping):
self.__printer(mapping)
def _PrintEnums(proto_printer, enum_types):
"""Print all enums to the given proto_printer."""
enum_types = sorted(enum_types, key=operator.attrgetter('name'))
for enum_type in enum_types:
proto_printer.PrintEnum(enum_type)
def _PrintMessages(proto_printer, message_list):
message_list = sorted(message_list, key=operator.attrgetter('name'))
for message_type in message_list:
proto_printer.PrintMessage(message_type)
_MESSAGE_FIELD_MAP = {
message_types.DateTimeMessage.definition_name(): (
message_types.DateTimeField),
}
def _PrintFields(fields, printer):
for extended_field in fields:
field = extended_field.field_descriptor
printed_field_info = {
'name': field.name,
'module': '_messages',
'type_name': '',
'type_format': '',
'number': field.number,
'label_format': '',
'variant_format': '',
'default_format': '',
}
message_field = _MESSAGE_FIELD_MAP.get(field.type_name)
if message_field:
printed_field_info['module'] = '_message_types'
field_type = message_field
elif field.type_name == 'extra_types.DateField':
printed_field_info['module'] = 'extra_types'
field_type = apitools_base.DateField
else:
field_type = messages.Field.lookup_field_type_by_variant(
field.variant)
if field_type in (messages.EnumField, messages.MessageField):
printed_field_info['type_format'] = "'%s', " % field.type_name
if field.label == protorpc_descriptor.FieldDescriptor.Label.REQUIRED:
printed_field_info['label_format'] = ', required=True'
elif field.label == protorpc_descriptor.FieldDescriptor.Label.REPEATED:
printed_field_info['label_format'] = ', repeated=True'
if field_type.DEFAULT_VARIANT != field.variant:
printed_field_info['variant_format'] = (
', variant=_messages.Variant.%s' % field.variant)
if field.default_value:
if field_type in [messages.BytesField, messages.StringField]:
default_value = repr(field.default_value)
elif field_type is messages.EnumField:
try:
default_value = str(int(field.default_value))
except ValueError:
default_value = repr(field.default_value)
else:
default_value = field.default_value
printed_field_info[
'default_format'] = ', default=%s' % (default_value,)
printed_field_info['type_name'] = field_type.__name__
args = ''.join('%%(%s)s' % field for field in (
'type_format',
'number',
'label_format',
'variant_format',
'default_format'))
format_str = '%%(name)s = %%(module)s.%%(type_name)s(%s)' % args
printer(format_str % printed_field_info)
|
src/test/make_tests_poly1305_reduce.py | ghassanmas/pycryptodome | 2,063 | 12741722 | <gh_stars>1000+
"""Make unit test for poly1305_reduce()"""
from common import counter, make_main, split32
def make_test(value):
result = value % (2**130 - 5)
h_in = split32(value, 5)
h_out = split32(result, 5)
print("")
print("void test_%d() {" % next(counter))
print(" uint32_t h[5] = {" + ", ".join(h_in) + "};")
print(" const uint32_t expected_h[5] = {" + ", ".join(h_out) + "};")
print("")
print(" poly1305_reduce(h);")
print(" assert(memcmp(h, expected_h, sizeof(h)) == 0);")
print("}")
print("")
print("#ifdef NDEBUG")
print("#undef NDEBUG")
print("#endif")
print("#include <assert.h>")
print("#include <string.h>")
print("#include <stdint.h>")
print("#include <stdio.h>")
print()
print("void poly1305_reduce(uint32_t h[5]);")
make_test(0)
make_test(2**130-5-1)
make_test(2**130-5)
make_test(2**130-5+1)
make_test(2*(2**130-5))
make_test(2*(2**130-5)+9)
# make_test(2*(2**130-5)+10) - Fails, since h[5] takes more than 3 bits
make_main()
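# What this script emits (sketch): the preamble printed above (includes plus the
# poly1305_reduce() prototype), one C function per make_test() call of the form
#   void test_N() { uint32_t h[5] = {...}; ... poly1305_reduce(h); assert(...); }
# and finally whatever make_main() generates; make_main() is assumed here to emit a main()
# that calls each test in turn, and both it and split32() live in the shared 'common' helper.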
|
tests/strided_range_test.py | jnice-81/dace | 227 | 12741762 | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
from dace.memlet import Memlet
import numpy as np
sr = dace.SDFG('strided_range_test')
s0 = sr.add_state('s0')
A = s0.add_array('A', [2, 16, 4], dace.float32)
B = s0.add_array('B', [16], dace.float32)
tasklet = s0.add_tasklet(
'srtest', {'a'}, {'b'}, """
b[0] = a[0,0] * 2
b[1] = a[0,1] * 2
b[2] = a[1,0] * 2
b[3] = a[1,1] * 2
""")
me, mx = s0.add_map('srmap', dict(i='0:4'))
# Reading A at [1, 2*i:2*i+10:8:2, 3]: two tiles of 2 elements, 8 apart
# (i.e. A[1, 2i:2i+2, 3] and A[1, 2i+8:2i+10, 3])
s0.add_memlet_path(A,
me,
tasklet,
dst_conn='a',
memlet=Memlet.simple(A, '1, 2*i:2*i+10:8:2, 3'))
# Writing B at [4*i:4*i+4]
s0.add_memlet_path(tasklet,
mx,
B,
src_conn='b',
memlet=Memlet.simple(B, '4*i:4*i+4'))
def test():
print('Strided range tasklet test')
A = np.random.rand(2, 16, 4).astype(np.float32)
B = np.random.rand(16).astype(np.float32)
sr(A=A, B=B)
diffs = [
B[0:2] - 2 * A[1, 0:2, 3], B[2:4] - 2 * A[1, 8:10, 3],
B[4:6] - 2 * A[1, 2:4, 3], B[6:8] - 2 * A[1, 10:12, 3],
B[8:10] - 2 * A[1, 4:6, 3], B[10:12] - 2 * A[1, 12:14, 3],
B[12:14] - 2 * A[1, 6:8, 3], B[14:16] - 2 * A[1, 14:16, 3]
]
diff = np.linalg.norm(np.array(diffs))
print('Differences:', [np.linalg.norm(d) for d in diffs])
assert diff <= 1e-5
if __name__ == "__main__":
test()
|
pyGPs/Demo/generate_data_for_Rasmussen_examples.py | Corentin-LF/pyGPs | 196 | 12741771 | from builtins import zip
from builtins import range
import numpy as np
def save_data_regresssion():
# n = 20 # number of labeled/training data
# D = 1 # dimension of input data
x = np.array([[2.083970427750732, -0.821018066101379, -0.617870699182597, -1.183822608860694,\
0.274087442277144, 0.599441729295593, 1.768897919204435, -0.465645549031928,\
0.588852784375935, -0.832982214438054, -0.512106527960363, 0.277883144210116,\
-0.065870426922211, -0.821412363806325, 0.185399443778088, -0.858296174995998,\
0.370786630037059, -1.409869162416639,-0.144668412325022,-0.553299615220374]]).T
y = np.array([[4.549203746331698, 0.371985574437271, 0.711307965514790, -0.013212893618430, 2.255473255338191,\
1.009915749295733, 3.744675937965029, 0.424592771793202, 1.322833652295811, 0.278298293510020,\
0.267229130945574, 2.200112286723833, 1.200609983308969, 0.439971697236094, 2.628580433511255,\
0.503774817336353, 1.942525313820564, 0.579133950013327, 0.670874423968554, 0.377353755100965]]).T
# TEST points
# test points evenly distributed in the interval [-2, 2.5]
xstar = np.array(list(range(-200,250,4)), dtype=np.float64, ndmin=2).T
xstar /= 100
np.savez('Regression/regression_data', x=x, y=y, xstar=xstar)
def save_data_classification():
# Synthetic data for binary classification: two partially overlapping
# Gaussians in two dimensions. 120 data points are generated from two
# Gaussians with different means and covariances. One Gaussian is
# isotropic and contains 2/3 of the data (blue), the other is highly
# correlated and contains 1/3 of the points (red). Note, that the
# labels for the targets are -1/+1 (and not 0/1).
n1 = 80; n2 = 40
x1 = np.array([[0.089450165731417, -0.000700765006939],\
[ 1.171605560541542, 1.177765337635947],\
[ 1.404722675089394, -0.017417915887421],\
[ 0.556096196907929, -1.489370243839215],\
[ 1.213163445267992, 0.044545401368647],\
[ 0.173404742510759, -0.675668036759603],\
[ 2.225008556585363, 0.469803193769368],\
[ 1.470329290331445, 0.887642323697526],\
[ 2.715199208821485, 0.621044646503113],\
[ 0.173640760494328, -0.936054178730056],\
[ 2.038152815025167, 0.262587298316711],\
[ 1.670218375320427, -2.633186886994263],\
[ 0.270098501389591, -0.948779657473203],\
[ 1.396339236138275, -1.114992287201776],\
[-1.482070589718501, -0.654590652482805],\
[-1.493788226272929, 0.382017940248275],\
[ 1.025083846875763, -0.860344923788873],\
[ 0.750316336734172, -0.101864205602753],\
[ 0.184311310148912, -0.258523866245887],\
[ 0.221868667121623, -1.393954437105630],\
[ 2.258881477897777, -0.786806071526136],\
[ 1.211362530151533, -0.423431246029886],\
[ 1.525307406741207, -0.097975367602030],\
[ 0.978930232706465, 0.476154349549524],\
[ 1.347884229346280, -0.248408186838667],\
[ 1.205779546204216, -0.090878327349907],\
[ 0.124388644862000, 0.599612645000285],\
[ 0.784044356662233, 0.356596736271853],\
[ 1.060216683845210, -0.318474838087900],\
[ 1.678114484474938, 0.678735373910422],\
[ 0.973851135005570, 0.024880700382574],\
[ 0.016237746864886, -0.480899874254564],\
[ 0.979406721923196, 0.697708815321128],\
[ 2.217307638531248, -0.956931847027775],\
[ 2.150475558834153, 1.059031573329512],\
[ 1.050502393215048, 0.532141747419667],\
[ 1.210593098269218, -0.318123542280113],\
[ 0.426309208807901, -0.571727978045793],\
[ 0.742552105732714, -0.122112766396886],\
[ 0.757210723588679, 0.862002000781123],\
[-0.431639130160791, -0.763118261936640],\
[-0.748398486307095, -0.603667649379360],\
[ 0.975086541108249, -1.525297946453790],\
[ 0.074503762788667, -0.092155036190678],\
[-0.668889572018935, 1.305400680048752],\
[ 0.725632503186580, 0.096286255882168],\
[-1.042270707136463, 1.297009698531055],\
[ 1.943144890398260, -1.051176922438962],\
[ 1.191448645802597, 0.261349747400059],\
[ 0.778004017505022, -1.046301123377022],\
[ 0.628873970760607, 1.103926629619643],\
[ 1.295113890591403, -0.479519217798997],\
[ 1.522065175744686, 0.993476032742058],\
[ 1.100255776045601, 0.961069161713818],\
[-0.593243832838153, -0.479418953496258],\
[ 2.023196521366462, -0.275055494808503],\
[-0.788103134597041, -1.090707985778480],\
[-0.085168420896236, 1.226858390046108],\
[ 1.691706923196703, -1.153144804780540],\
[ 1.989279380395157, 1.974704317386435],\
[ 0.398799861652602, 3.051291814188982],\
[-0.707217210772927, 0.185505264874794],\
[ 0.697550136765320, 0.222287208720035],\
[ 2.186126058382323, -0.327829143438683],\
[ 1.368068331060010, 1.708138258453435],\
[ 0.883049126818189, -1.334269372314072],\
[ 1.737643116893527, 0.618452933813739],\
[ 2.002228743955222, 0.103381966018445],\
[-0.202638622737115, 0.495024938090909],\
[ 0.543309203560769, -0.802120609128192],\
[-1.796161599703804, -0.054795478648902],\
[ 1.460693782000059, 0.750052171180825],\
[ 0.133277872804608, -1.154891068006907],\
[ 0.203670382700157, -0.480336687666025],\
[-0.278985011909341, 0.030578590108392],\
[ 2.070490237052893, 2.420782751903098],\
[ 0.599023881366768, -1.673208560658818],\
[ 0.140506592147238, 0.804938444757444],\
[-0.980799204108985, -1.847987723222053],\
[-0.102350006007740, -0.822093851434857]])
x2 = np.array([[1.160257057434194, 1.544111720606185],\
[-0.458434595629321, 0.205667827100987],\
[-1.053562345687376, -0.614938261650010],\
[-1.687901005751336, -0.780028275457715],\
[-0.467035854712698, 0.561692074343868],\
[-0.703391186121452, 0.281301267639200],\
[-1.568557779993616, -0.629129013661319],\
[-2.176478596101226, -1.176211396013793],\
[ 0.768109265900499, 1.376893437232103],\
[-0.514772970064353, 0.474264363701950],\
[-1.301924381487904, -0.525179228127957],\
[-1.312024947004566, -0.049469442305628],\
[-0.623417800418214, 0.226456899059445],\
[ 0.020290591370131, 0.374055846421580],\
[-1.002901826023476, 0.076597486786743],\
[-2.553713136283273, -1.731788289864902],\
[-1.788156378743716, -0.742460481943494],\
[-1.119582270077321, -0.256154464598782],\
[-0.423084091988017, 0.395108309297119],\
[-1.645945345460644, -1.216319293733455],\
[ 0.227805611684674, 0.925948003854262],\
[-1.298719171366801, -0.965511301629466],\
[-0.618292817021891, 0.140045887498202],\
[ 0.794935039731655, 1.917830760420081],\
[-0.213709179946402, 0.617751634356751],\
[-0.474251035850546, -0.054854432018974],\
[ 0.056077816960464, 1.046282980014428],\
[ 0.887136693467512, 1.536490289895764],\
[ 1.377161915854166, 1.764872700787871],\
[-0.901195709427863, -0.340855547886558],\
[-0.783104424735034, -0.330927422324566],\
[-1.507139570543989, 0.137504213149820],\
[-0.348999111724700, 0.235931187612453],\
[-0.367309385513174, 0.655996377722041],\
[-0.050622309620072, 0.410969334468070],\
[ 1.734919039047271, 2.611080177877894],\
[-0.567413078682755, -0.458249564234885],\
[-0.622230797920433, 0.258401595566888],\
[-1.642146761593230, -1.138579130251617],\
[-0.285298076847255, 0.085451489400687]])
x = np.concatenate((x1,x2),axis=0)
y = np.concatenate((-np.ones((1,n1)),np.ones((1,n2))),axis=1).T
# For plotting, we superimpose the data points with the posterior equi-probability contour
# lines for the probability of class two given complete information about the generating mechanism.
t1,t2 = np.meshgrid(np.arange(-4,4.1,0.1),np.arange(-4,4.1,0.1))
t = np.array(list(zip(np.reshape(t1,(np.prod(t1.shape),)),np.reshape(t2,(np.prod(t2.shape),))))) # these are the test inputs
n = t.shape[0]
tmm = np.zeros_like(t)
S1 = np.eye(2); S2 = np.array([[1, 0.95], [0.95, 1]])
m1 = np.array([0.75, 0]); m2 = np.array([-0.75, 0])
tmm[:,0] = t[:,0] - m1[0]; tmm[:,1] = t[:,1] - m1[1]
p1 = n1*np.exp( (-np.dot(tmm,np.linalg.inv(S1))*tmm/2).sum(axis=1) )
tmm[:,0] = t[:,0] - m2[0]; tmm[:,1] = t[:,1] - m2[1]
S2i = np.linalg.inv(S2)
p2 = n2*np.exp( (-np.dot(tmm,S2i)*tmm/2).sum(axis=1) ) / np.sqrt(0.0975)
np.savez('Classification/classification_data', x=x, y=y, xstar=t, x1=x1,x2=x2,t1=t1,t2=t2,p1=p1,p2=p2)
if __name__=='__main__':
save_data_regresssion()
#save_data_classification()
|
pano_opt_gen.py | kopetri/LayoutNetv2 | 166 | 12741774 | import torch
import torch.optim as optim
import numpy as np
from PIL import Image
#import pano
import pano_gen as pano
import time
def vecang(vec1, vec2):
vec1 = vec1 / np.sqrt((vec1 ** 2).sum())
vec2 = vec2 / np.sqrt((vec2 ** 2).sum())
return np.arccos(np.dot(vec1, vec2))
def rotatevec(vec, theta):
x = vec[0] * torch.cos(theta) - vec[1] * torch.sin(theta)
y = vec[0] * torch.sin(theta) + vec[1] * torch.cos(theta)
return torch.cat([x, y])
def pts_linspace(pa, pb, pts=300):
pa = pa.view(1, 2)
pb = pb.view(1, 2)
w = torch.arange(0, pts + 1, dtype=pa.dtype).view(-1, 1)
return (pa * (pts - w) + pb * w) / pts
def xyz2uv(xy, z=-1):
c = torch.sqrt((xy ** 2).sum(1))
u = torch.atan2(xy[:, 1], xy[:, 0]).view(-1, 1)
v = torch.atan2(torch.zeros_like(c) + z, c).view(-1, 1)
return torch.cat([u, v], dim=1)
def uv2idx(uv, w, h):
col = (uv[:, 0] / (2 * np.pi) + 0.5) * w - 0.5
row = (uv[:, 1] / np.pi + 0.5) * h - 0.5
return torch.cat([col.view(-1, 1), row.view(-1, 1)], dim=1)
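# Note on uv2idx (added comment): u is the longitude in [-pi, pi] and v the latitude in
# [-pi/2, pi/2]; they are mapped onto a w x h equirectangular image. For instance, with
# w=1024, h=512 and u = v = 0: col = (0/(2*pi) + 0.5)*1024 - 0.5 = 511.5 and
# row = (0/pi + 0.5)*512 - 0.5 = 255.5, i.e. the image centre.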
def wallidx(xy, w, h, z1, z2):
col = (torch.atan2(xy[1], xy[0]) / (2 * np.pi) + 0.5) * w - 0.5
c = torch.sqrt((xy ** 2).sum())
row_s = (torch.atan2(torch.zeros_like(c) + z1, c) / np.pi + 0.5) * h - 0.5
row_t = (torch.atan2(torch.zeros_like(c) + z2, c) / np.pi + 0.5) * h - 0.5
pa = torch.cat([col.view(1), row_s.view(1)])
pb = torch.cat([col.view(1), row_t.view(1)])
return pts_linspace(pa, pb)
def map_coordinates(input, coordinates):
''' PyTorch version of scipy.ndimage.interpolation.map_coordinates
input: (H, W)
coordinates: (2, ...)
'''
h = input.shape[0]
w = input.shape[1]
def _coordinates_pad_wrap(h, w, coordinates):
coordinates[0] = coordinates[0] % h
coordinates[1] = coordinates[1] % w
return coordinates
co_floor = torch.floor(coordinates).long()
co_ceil = torch.ceil(coordinates).long()
d1 = (coordinates[1] - co_floor[1].float())
d2 = (coordinates[0] - co_floor[0].float())
co_floor = _coordinates_pad_wrap(h, w, co_floor)
co_ceil = _coordinates_pad_wrap(h, w, co_ceil)
f00 = input[co_floor[0], co_floor[1]]
f10 = input[co_floor[0], co_ceil[1]]
f01 = input[co_ceil[0], co_floor[1]]
f11 = input[co_ceil[0], co_ceil[1]]
fx1 = f00 + d1 * (f10 - f00)
fx2 = f01 + d1 * (f11 - f01)
return fx1 + d2 * (fx2 - fx1)
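# Hedged usage sketch (added for illustration, not part of the original file): bilinear
# sampling with the helper above; shapes follow its docstring ((H, W) input, (2, N) coords).
def _demo_map_coordinates():
    img = torch.arange(16, dtype=torch.float32).view(4, 4)  # (H, W) grid of values 0..15
    coords = torch.tensor([[1.5, 0.5], [2.0, 3.0]])         # row 0 = row coords, row 1 = col coords
    return map_coordinates(img, coords)                     # interpolated values, shape (2,)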
def pc2cor_id(pc, pc_vec, pc_theta, pc_height):
if pc_theta.numel()==1:
ps = torch.stack([
(pc + pc_vec),
(pc + rotatevec(pc_vec, pc_theta)),
(pc - pc_vec),
(pc + rotatevec(pc_vec, pc_theta - np.pi))
])
else:
ps = pc + pc_vec
ps = ps.view(-1,2)
for c_num in range(pc_theta.shape[1]):
ps = torch.cat((ps, ps[c_num:,:]),0)
if (c_num % 2) == 0:
ps[-1,1] = pc_theta[0,c_num]
else:
ps[-1,0] = pc_theta[0,c_num]
ps = torch.cat((ps, ps[-1:,:]),0)
ps[-1,1] = ps[0,1]
return torch.cat([
uv2idx(xyz2uv(ps, z=-1), 1024, 512),
uv2idx(xyz2uv(ps, z=pc_height), 1024, 512),
], dim=0)
def project2sphere_score(pc, pc_vec, pc_theta, pc_height, scoreedg, scorecor, i_step=None):
# Sample corner loss
corid = pc2cor_id(pc, pc_vec, pc_theta, pc_height)
corid_coordinates = torch.stack([corid[:, 1], corid[:, 0]])
loss_cor = -map_coordinates(scorecor, corid_coordinates).mean()
# Sample boundary loss
if pc_theta.numel()==1:
p1 = pc + pc_vec
p2 = pc + rotatevec(pc_vec, pc_theta)
p3 = pc - pc_vec
p4 = pc + rotatevec(pc_vec, pc_theta - np.pi)
segs = [
pts_linspace(p1, p2),
pts_linspace(p2, p3),
pts_linspace(p3, p4),
pts_linspace(p4, p1),
]
else:
ps = pc + pc_vec
ps = ps.view(-1,2)
for c_num in range(pc_theta.shape[1]):
ps = torch.cat((ps, ps[c_num:,:]),0)
if (c_num % 2) == 0:
ps[-1,1] = pc_theta[0,c_num]
else:
ps[-1,0] = pc_theta[0,c_num]
ps = torch.cat((ps, ps[-1:,:]),0)
ps[-1,1] = ps[0,1]
segs = []
for c_num in range(ps.shape[0]-1):
segs.append(pts_linspace(ps[c_num,:], ps[c_num+1,:]))
segs.append(pts_linspace(ps[-1,:], ps[0,:]))
# ceil-wall
loss_ceilwall = 0
for seg in segs:
ceil_uv = xyz2uv(seg, z=-1)
ceil_idx = uv2idx(ceil_uv, 1024, 512)
ceil_coordinates = torch.stack([ceil_idx[:, 1], ceil_idx[:, 0]])
loss_ceilwall -= map_coordinates(scoreedg[..., 1], ceil_coordinates).mean() / len(segs)
# floor-wall
loss_floorwall = 0
for seg in segs:
floor_uv = xyz2uv(seg, z=pc_height)
floor_idx = uv2idx(floor_uv, 1024, 512)
floor_coordinates = torch.stack([floor_idx[:, 1], floor_idx[:, 0]])
loss_floorwall -= map_coordinates(scoreedg[..., 2], floor_coordinates).mean() / len(segs)
#losses = 1.0 * loss_cor + 0.1 * loss_wallwall + 0.5 * loss_ceilwall + 1.0 * loss_floorwall
losses = 1.0 * loss_cor + 1.0 * loss_ceilwall + 1.0 * loss_floorwall
if i_step is not None:
with torch.no_grad():
            print('step %d: %.3f (cor %.3f, ceil %.3f, floor %.3f)' % (
                i_step, losses,
                loss_cor, loss_ceilwall, loss_floorwall))
return losses
def optimize_cor_id(cor_id, scoreedg, scorecor, num_iters=100, verbose=False):
assert scoreedg.shape == (512, 1024, 3)
assert scorecor.shape == (512, 1024)
Z = -1
ceil_cor_id = cor_id[0::2]
floor_cor_id = cor_id[1::2]
ceil_cor_id, ceil_cor_id_xy = pano.constraint_cor_id_same_z(ceil_cor_id, scorecor, Z)
#ceil_cor_id_xyz = np.hstack([ceil_cor_id_xy, np.zeros(4).reshape(-1, 1) + Z])
ceil_cor_id_xyz = np.hstack([ceil_cor_id_xy, np.zeros(ceil_cor_id.shape[0]).reshape(-1, 1) + Z])
# TODO: revise here to general layout
#pc = (ceil_cor_id_xy[0] + ceil_cor_id_xy[2]) / 2
#print(ceil_cor_id_xy)
if abs(ceil_cor_id_xy[0,0]-ceil_cor_id_xy[1,0])>abs(ceil_cor_id_xy[0,1]-ceil_cor_id_xy[1,1]):
ceil_cor_id_xy = np.concatenate((ceil_cor_id_xy[1:,:],ceil_cor_id_xy[:1,:]), axis=0)
#print(cor_id)
#print(ceil_cor_id_xy)
pc = np.mean(ceil_cor_id_xy, axis=0)
pc_vec = ceil_cor_id_xy[0] - pc
pc_theta = vecang(pc_vec, ceil_cor_id_xy[1] - pc)
pc_height = pano.fit_avg_z(floor_cor_id, ceil_cor_id_xy, scorecor)
if ceil_cor_id_xy.shape[0] > 4:
pc_theta = np.array([ceil_cor_id_xy[1,1]])
for c_num in range(2, ceil_cor_id_xy.shape[0]-1):
if (c_num % 2) == 0:
pc_theta = np.append(pc_theta, ceil_cor_id_xy[c_num,0])
else:
pc_theta = np.append(pc_theta, ceil_cor_id_xy[c_num,1])
scoreedg = torch.FloatTensor(scoreedg)
scorecor = torch.FloatTensor(scorecor)
pc = torch.FloatTensor(pc)
pc_vec = torch.FloatTensor(pc_vec)
pc_theta = torch.FloatTensor([pc_theta])
pc_height = torch.FloatTensor([pc_height])
pc.requires_grad = True
pc_vec.requires_grad = True
pc_theta.requires_grad = True
pc_height.requires_grad = True
#print(pc_theta)
#time.sleep(2)
#return cor_id
optimizer = optim.SGD([
pc, pc_vec, pc_theta, pc_height
], lr=1e-3, momentum=0.9)
best = {'score': 1e9}
for i_step in range(num_iters):
i = i_step if verbose else None
optimizer.zero_grad()
score = project2sphere_score(pc, pc_vec, pc_theta, pc_height, scoreedg, scorecor, i)
if score.item() < best['score']:
best['score'] = score.item()
best['pc'] = pc.clone()
best['pc_vec'] = pc_vec.clone()
best['pc_theta'] = pc_theta.clone()
best['pc_height'] = pc_height.clone()
score.backward()
optimizer.step()
pc = best['pc']
pc_vec = best['pc_vec']
pc_theta = best['pc_theta']
pc_height = best['pc_height']
opt_cor_id = pc2cor_id(pc, pc_vec, pc_theta, pc_height).detach().numpy()
split_num = int(opt_cor_id.shape[0]//2)
opt_cor_id = np.stack([opt_cor_id[:split_num], opt_cor_id[split_num:]], axis=1).reshape(split_num*2, 2)
#print(opt_cor_id)
#print(cor_id)
#time.sleep(500)
return opt_cor_id
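# Hedged usage sketch (added for illustration, not part of the original file): input shapes
# follow the asserts in optimize_cor_id above; cor_id alternates ceiling/floor corners as
# (N, 2) image coordinates on a 1024x512 panorama.
def _demo_optimize(cor_id, scoreedg, scorecor):
    assert scoreedg.shape == (512, 1024, 3) and scorecor.shape == (512, 1024)
    return optimize_cor_id(cor_id, scoreedg, scorecor, num_iters=50, verbose=False)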
|
app/auth/views.py | taogeT/livetv_server | 283 | 12741800 | # -*- coding: UTF-8 -*-
from flask import url_for, g, redirect
from flask_login import logout_user, current_user
from datetime import datetime
from importlib import import_module
from .. import db, login_manager
from ..models import User
from . import auth
@auth.route('/login/<string:authtype>')
def login_authorize(authtype):
oauth = getattr(import_module('.'+authtype, __package__), authtype)
return oauth.authorize(callback=url_for('auth.{}_authorized'.format(authtype), _external=True))
@auth.route('/logout')
def logout():
logout_user()
return redirect('/')
@auth.before_app_request
def before_request():
g.user = current_user
if g.user.is_authenticated:
g.user.last_seen = datetime.utcnow()
db.session.add(g.user)
db.session.commit()
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
|
tests/unit/cloudsearchdomain/test_cloudsearchdomain.py | Yurzs/boto | 5,079 | 12741803 | #!/usr/bin/env python
import json
import mock
from tests.unit import AWSMockServiceTestCase
from boto.cloudsearch2.domain import Domain
from boto.cloudsearch2.layer1 import CloudSearchConnection
from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection
class CloudSearchDomainConnectionTest(AWSMockServiceTestCase):
connection_class = CloudSearchDomainConnection
domain_status = """{
"SearchInstanceType": null,
"DomainId": "1234567890/demo",
"DomainName": "demo",
"Deleted": false,
"SearchInstanceCount": 0,
"Created": true,
"SearchService": {
"Endpoint": "search-demo.us-east-1.cloudsearch.amazonaws.com"
},
"RequiresIndexDocuments": false,
"Processing": false,
"DocService": {
"Endpoint": "doc-demo.us-east-1.cloudsearch.amazonaws.com"
},
"ARN": "arn:aws:cs:us-east-1:1234567890:domain/demo",
"SearchPartitionCount": 0
}"""
def create_service_connection(self, **kwargs):
if kwargs.get('host', None) is None:
kwargs['host'] = 'search-demo.us-east-1.cloudsearch.amazonaws.com'
return super(CloudSearchDomainConnectionTest, self).\
create_service_connection(**kwargs)
def test_get_search_service(self):
layer1 = CloudSearchConnection(aws_access_key_id='aws_access_key_id',
aws_secret_access_key='aws_secret_access_key',
sign_request=True)
domain = Domain(layer1=layer1, data=json.loads(self.domain_status))
search_service = domain.get_search_service()
self.assertEqual(search_service.sign_request, True)
def test_get_document_service(self):
layer1 = CloudSearchConnection(aws_access_key_id='aws_access_key_id',
aws_secret_access_key='aws_secret_access_key',
sign_request=True)
domain = Domain(layer1=layer1, data=json.loads(self.domain_status))
document_service = domain.get_document_service()
self.assertEqual(document_service.sign_request, True)
def test_search_with_auth(self):
layer1 = CloudSearchConnection(aws_access_key_id='aws_access_key_id',
aws_secret_access_key='aws_secret_access_key',
sign_request=True)
domain = Domain(layer1=layer1, data=json.loads(self.domain_status))
search_service = domain.get_search_service()
response = {
'rank': '-text_relevance',
'match-expr': "Test",
'hits': {
'found': 30,
'start': 0,
'hit': {
'id': '12341',
'fields': {
'title': 'Document 1',
'rank': 1
}
}
},
'status': {
'rid': 'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08',
'time-ms': 2,
'cpu-time-ms': 0
}
}
self.set_http_response(status_code=200,
body=json.dumps(response).encode('utf-8'))
search_service.domain_connection = self.service_connection
resp = search_service.search()
headers = self.actual_request.headers
self.assertIsNotNone(headers.get('Authorization'))
def test_upload_documents_with_auth(self):
layer1 = CloudSearchConnection(aws_access_key_id='aws_access_key_id',
aws_secret_access_key='aws_secret_access_key',
sign_request=True)
domain = Domain(layer1=layer1, data=json.loads(self.domain_status))
document_service = domain.get_document_service()
response = {
'status': 'success',
'adds': 1,
'deletes': 0,
}
document = {
"id": "1234",
"title": "Title 1",
"category": ["cat_a", "cat_b", "cat_c"]
}
self.set_http_response(status_code=200,
body=json.dumps(response).encode('utf-8'))
document_service.domain_connection = self.service_connection
document_service.add("1234", document)
resp = document_service.commit()
headers = self.actual_request.headers
self.assertIsNotNone(headers.get('Authorization'))
def test_no_host_provided(self):
# A host must be provided or a error is thrown.
with self.assertRaises(ValueError):
CloudSearchDomainConnection(
aws_access_key_id='aws_access_key_id',
aws_secret_access_key='aws_secret_access_key'
)
|
evcouplings/complex/protocol.py | mrunalimanj/EVcouplings | 117 | 12741836 | """
Protocols for matching putatively interacting sequences
in protein complexes to create a concatenated sequence
alignment
Authors:
<NAME>
<NAME>
"""
from collections import Counter
import numpy as np
import pandas as pd
from evcouplings.couplings.mapping import Segment
from evcouplings.utils.config import (
check_required, InvalidParameterError
)
from evcouplings.utils.system import (
create_prefix_folders, verify_resources
)
from evcouplings.align.protocol import modify_alignment
from evcouplings.complex.alignment import (
write_concatenated_alignment
)
from evcouplings.complex.distance import (
find_possible_partners, best_reciprocal_matching,
plot_distance_distribution
)
from evcouplings.complex.similarity import (
read_species_annotation_table,
most_similar_by_organism,
filter_best_reciprocal,
find_paralogs
)
def modify_complex_segments(outcfg, **kwargs):
"""
Modifies the output configuration so
that the segments are correct for a
concatenated alignment
Parameters
----------
outcfg : dict
The output configuration
Returns
-------
outcfg: dict
The output configuration, with
a new field called "segments"
"""
def _modify_segments(seg_list, seg_prefix):
# extract segments from list representation into objects
segs = [
Segment.from_list(s) for s in seg_list
]
# update segment IDs
for i, s in enumerate(segs, start=1):
s.segment_id = "{}_{}".format(seg_prefix, i)
return segs
# merge segments - this allows to have more than one segment per
# "monomer" alignment
segments_1 = _modify_segments(kwargs["first_segments"], "A")
segments_2 = _modify_segments(kwargs["second_segments"], "B")
segments_complex = segments_1 + segments_2
outcfg["segments"] = [s.to_list() for s in segments_complex]
return outcfg
def _run_describe_concatenation(outcfg, **kwargs):
"""
calculate some basic statistics on the concatenated alignment
"""
prefix = kwargs["prefix"]
outcfg["concatentation_statistics_file"] = prefix + "_concatenation_statistics.csv"
describe_concatenation(
kwargs["first_annotation_file"],
kwargs["second_annotation_file"],
kwargs["first_genome_location_file"],
kwargs["second_genome_location_file"],
outcfg["concatentation_statistics_file"]
)
return outcfg
def describe_concatenation(annotation_file_1, annotation_file_2,
genome_location_filename_1, genome_location_filename_2,
outfile):
"""
Describes properties of concatenated alignment.
Writes a csv with the following columns
num_seqs_1 : number of sequences in the first monomer alignment
num_seqs_2 : number of sequences in the second monomer alignment
num_nonred_species_1 : number of unique species annotations in the
first monomer alignment
num_nonred_species_2 : number of unique species annotations in the
second monomer alignment
num_species_overlap: number of unique species found in both alignments
median_num_per_species_1 : median number of paralogs per species in the
        first monomer alignment
median_num_per_species_2 : median number of paralogs per species in
the second monomer alignment
num_with_embl_cds_1 : number of IDs for which we found an EMBL CDS in the
        first monomer alignment (relevant to distance concatenation only)
    num_with_embl_cds_2 : number of IDs for which we found an EMBL CDS in the
        second monomer alignment (relevant to distance concatenation only)
Parameters
----------
annotation_file_1 : str
Path to annotation.csv file for first monomer alignment
annotation_file_2 : str
Path to annotation.csv file for second monomer alignment
genome_location_filename_1 : str
Path to genome location mapping file for first alignment
genome_location_filename_2 : str
Path to genome location mapping file for second alignment
outfile: str
Path to output file
"""
# load the annotations for each alignment
# as a pd.DataFrame
annotations_1 = read_species_annotation_table(
annotation_file_1
)
species_1 = annotations_1.species.values
annotations_2 = read_species_annotation_table(
annotation_file_2
)
species_2 = annotations_2.species.values
# calculate the number of sequences found in each alignment
num_seqs_1 = len(annotations_1)
num_seqs_2 = len(annotations_2)
# calculate the number of species found in each alignment
# where a species is defined as a unique OS or Tax annotation field
nonredundant_annotations_1 = len(set(species_1))
nonredundant_annotations_2 = len(set(species_2))
# calculate the number of overlapping species
species_overlap = list(
set(species_1).intersection(set(species_2))
)
n_species_overlap = len(species_overlap)
# calculate the median number of paralogs per species
n_paralogs_1 = float(
# counts the number of times each species occurs in the list
# then takes the median
np.median(list(Counter(species_1).values()))
)
n_paralogs_2 = float(
np.median(list(Counter(species_2).values()))
)
# If the user provided genome location files, calculate the number
# of ids for which we found an embl CDS. Default value is np.nan
embl_cds1 = np.nan
embl_cds2 = np.nan
if (genome_location_filename_1 is not None and
genome_location_filename_2 is not None):
genome_location_table_1 = pd.read_csv(genome_location_filename_1)
genome_location_table_2 = pd.read_csv(genome_location_filename_2)
        # Number of uniprot IDs with an EMBL CDS that is not NA
if "uniprot_ac" in genome_location_table_1.columns:
embl_cds1 = len(list(set(genome_location_table_1.uniprot_ac)))
if "uniprot_ac" in genome_location_table_2.columns:
embl_cds2 = len(list(set(genome_location_table_2.uniprot_ac)))
concatenation_data = [
num_seqs_1,
num_seqs_2,
nonredundant_annotations_1,
nonredundant_annotations_2,
n_species_overlap,
n_paralogs_1,
n_paralogs_2,
embl_cds1,
embl_cds2,
]
cols = [
"num_seqs_1",
"num_seqs_2",
"num_nonred_species_1",
"num_nonred_species_2",
"num_species_overlap",
"median_num_per_species_1",
"median_num_per_species_2",
"num_with_embl_cds_1",
"num_with_embl_cds_2",
]
# create dataframe and store
data_df = pd.DataFrame(
[concatenation_data], columns=cols
)
data_df.to_csv(outfile)
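# Hedged sketch (added for illustration, not part of the original module): a minimal call to
# describe_concatenation with hypothetical file names; passing None for both genome location
# tables leaves the EMBL CDS counts as NaN, as handled above.
def _example_describe_concatenation():
    describe_concatenation(
        "first_annotation.csv", "second_annotation.csv",  # per-monomer annotation tables
        None, None,                                       # no genome location mapping files
        "concatenation_statistics.csv"                    # output CSV described in the docstring
    )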
def genome_distance(**kwargs):
"""
Protocol:
Concatenate alignments based on genomic distance
Parameters
----------
Mandatory kwargs arguments:
See list below in code where calling check_required
Returns
-------
outcfg : dict
Output configuration of the pipeline, including
the following fields:
* alignment_file
* raw_alignment_file
* focus_mode
* focus_sequence
* segments
* frequencies_file
* identities_file
* num_sequences
* num_sites
* raw_focus_alignment_file
* statistics_file
"""
check_required(
kwargs,
[
"prefix",
"first_alignment_file", "second_alignment_file",
"first_focus_sequence", "second_focus_sequence",
"first_focus_mode", "second_focus_mode",
"first_region_start", "second_region_start",
"first_segments", "second_segments",
"genome_distance_threshold",
"first_genome_location_file",
"second_genome_location_file",
"first_annotation_file",
"second_annotation_file"
]
)
prefix = kwargs["prefix"]
# make sure input alignments exist
verify_resources(
"Input alignment does not exist",
kwargs["first_alignment_file"], kwargs["second_alignment_file"]
)
verify_resources(
"Genome location file does not exist",
kwargs["first_genome_location_file"],
kwargs["second_genome_location_file"]
)
# make sure output directory exists
create_prefix_folders(prefix)
# load the information for each monomer alignment
alignment_1 = kwargs["first_alignment_file"]
alignment_2 = kwargs["second_alignment_file"]
genome_location_filename_1 = kwargs["first_genome_location_file"]
genome_location_filename_2 = kwargs["second_genome_location_file"]
gene_location_table_1 = pd.read_csv(genome_location_filename_1, header=0)
gene_location_table_2 = pd.read_csv(genome_location_filename_2, header=0)
# find all possible matches
possible_partners = find_possible_partners(
gene_location_table_1, gene_location_table_2
)
# find the best reciprocal matches
id_pairing_unfiltered = best_reciprocal_matching(possible_partners)
# filter best reciprocal matches by genome distance threshold
if kwargs["genome_distance_threshold"]:
distance_threshold = kwargs["genome_distance_threshold"]
id_pairing = id_pairing_unfiltered.query("distance < @distance_threshold")
else:
id_pairing = id_pairing_unfiltered
id_pairing.loc[:, "id_1"] = id_pairing.loc[:, "uniprot_id_1"]
id_pairing.loc[:, "id_2"] = id_pairing.loc[:, "uniprot_id_2"]
# write concatenated alignment with distance filtering
# TODO: save monomer alignments?
target_seq_id, target_seq_index, raw_ali, mon_ali_1, mon_ali_2 = \
write_concatenated_alignment(
id_pairing,
alignment_1,
alignment_2,
kwargs["first_focus_sequence"],
kwargs["second_focus_sequence"]
)
# save the alignment files
raw_alignment_file = prefix + "_raw.fasta"
with open(raw_alignment_file, "w") as of:
raw_ali.write(of)
mon_alignment_file_1 = prefix + "_monomer_1.fasta"
with open(mon_alignment_file_1, "w") as of:
mon_ali_1.write(of)
mon_alignment_file_2 = prefix + "_monomer_2.fasta"
with open(mon_alignment_file_2, "w") as of:
mon_ali_2.write(of)
# filter the alignment
aln_outcfg, _ = modify_alignment(
raw_ali,
target_seq_index,
target_seq_id,
kwargs["first_region_start"],
**kwargs
)
# make sure we return all the necessary information:
# * alignment_file: final concatenated alignment that will go into plmc
# * focus_sequence: this is the identifier of the concatenated target
# sequence which will be passed into plmc with -f
outcfg = aln_outcfg
outcfg["raw_alignment_file"] = raw_alignment_file
outcfg["first_concatenated_monomer_alignment_file"] = mon_alignment_file_1
outcfg["second_concatenated_monomer_alignment_file"] = mon_alignment_file_2
outcfg["focus_sequence"] = target_seq_id
# Update the segments
outcfg = modify_complex_segments(outcfg, **kwargs)
# Describe the statistics of the concatenation
outcfg = _run_describe_concatenation(outcfg, **kwargs)
# plot the genome distance distribution
outcfg["distance_plot_file"] = prefix + "_distplot.pdf"
plot_distance_distribution(id_pairing_unfiltered, outcfg["distance_plot_file"])
return outcfg
def best_hit(**kwargs):
"""
Protocol:
Concatenate alignments based on the best hit
to the focus sequence in each species
Parameters
----------
Mandatory kwargs arguments:
See list below in code where calling check_required
Returns
-------
outcfg : dict
Output configuration of the pipeline, including
the following fields:
alignment_file
raw_alignment_file
focus_mode
focus_sequence
segments
frequencies_file
identities_file
num_sequences
num_sites
raw_focus_alignment_file
statistics_file
"""
check_required(
kwargs,
[
"prefix",
"first_alignment_file", "second_alignment_file",
"first_focus_sequence", "second_focus_sequence",
"first_focus_mode", "second_focus_mode",
"first_segments", "second_segments",
"first_identities_file", "second_identities_file",
"first_annotation_file", "second_annotation_file",
"use_best_reciprocal", "paralog_identity_threshold"
]
)
prefix = kwargs["prefix"]
    # make sure input alignments exist
verify_resources(
"Input alignment does not exist",
kwargs["first_alignment_file"], kwargs["second_alignment_file"]
)
# make sure output directory exists
create_prefix_folders(prefix)
def _load_monomer_info(annotations_file, identities_file,
target_sequence, alignment_file,
use_best_reciprocal, identity_threshold):
        # read in the annotation file and rename the appropriate column
annotation_table = read_species_annotation_table(annotations_file)
# read identity file
similarities = pd.read_csv(identities_file)
# create a pd.DataFrame containing the best hit in each organism
most_similar_in_species = most_similar_by_organism(similarities, annotation_table)
if use_best_reciprocal:
paralogs = find_paralogs(
target_sequence, annotation_table, similarities,
identity_threshold
)
most_similar_in_species = filter_best_reciprocal(
alignment_file, paralogs, most_similar_in_species
)
return most_similar_in_species
# load the information about each monomer alignment
most_similar_in_species_1 = _load_monomer_info(
kwargs["first_annotation_file"],
kwargs["first_identities_file"],
kwargs["first_focus_sequence"],
kwargs["first_alignment_file"],
kwargs["use_best_reciprocal"],
kwargs["paralog_identity_threshold"]
)
most_similar_in_species_2 = _load_monomer_info(
kwargs["second_annotation_file"],
kwargs["second_identities_file"],
kwargs["second_focus_sequence"],
kwargs["second_alignment_file"],
kwargs["use_best_reciprocal"],
kwargs["paralog_identity_threshold"]
)
# merge the two dataframes to get all species found in
# both alignments
species_intersection = most_similar_in_species_1.merge(
most_similar_in_species_2,
how="inner", # takes the intersection
on="species", # merges on species identifiers
suffixes=("_1", "_2")
)
# write concatenated alignment with distance filtering
# TODO: save monomer alignments?
target_seq_id, target_seq_index, raw_ali, mon_ali_1, mon_ali_2 = \
write_concatenated_alignment(
species_intersection,
kwargs["first_alignment_file"],
kwargs["second_alignment_file"],
kwargs["first_focus_sequence"],
kwargs["second_focus_sequence"]
)
# save the alignment files
raw_alignment_file = prefix + "_raw.fasta"
with open(raw_alignment_file, "w") as of:
raw_ali.write(of)
mon_alignment_file_1 = prefix + "_monomer_1.fasta"
with open(mon_alignment_file_1, "w") as of:
mon_ali_1.write(of)
mon_alignment_file_2 = prefix + "_monomer_2.fasta"
with open(mon_alignment_file_2, "w") as of:
mon_ali_2.write(of)
aln_outcfg, _ = modify_alignment(
raw_ali,
target_seq_index,
target_seq_id,
kwargs["first_region_start"],
**kwargs
)
# make sure we return all the necessary information:
# * alignment_file: final concatenated alignment that will go into plmc
# * focus_sequence: this is the identifier of the concatenated target
# sequence which will be passed into plmc with -f
outcfg = aln_outcfg
outcfg["raw_alignment_file"] = raw_alignment_file
outcfg["first_concatenated_monomer_alignment_file"] = mon_alignment_file_1
outcfg["second_concatenated_monomer_alignment_file"] = mon_alignment_file_2
outcfg["focus_sequence"] = target_seq_id
# Update the segments
outcfg = modify_complex_segments(outcfg, **kwargs)
# Describe the statistics of the concatenation
outcfg = _run_describe_concatenation(outcfg, **kwargs)
return outcfg
# list of available EC inference protocols
PROTOCOLS = {
# concatenate based on genomic distance ("operon-based")
"genome_distance": genome_distance,
# concatenate based on best hit per genome ("species")
"best_hit": best_hit
}
def run(**kwargs):
"""
Run alignment concatenation protocol
Parameters
----------
Mandatory kwargs arguments:
protocol: concatenation protocol to run
prefix: Output prefix for all generated files
Returns
-------
outcfg : dict
Output configuration of concatenation stage
Dictionary with results in following fields:
(in brackets: not mandatory)
alignment_file
raw_alignment_file
focus_mode
focus_sequence
segments
frequencies_file
identities_file
num_sequences
num_sites
raw_focus_alignment_file
statistics_file
"""
check_required(kwargs, ["protocol"])
if kwargs["protocol"] not in PROTOCOLS:
raise InvalidParameterError(
"Invalid protocol selection: " +
"{}. Valid protocols are: {}".format(
kwargs["protocol"], ", ".join(PROTOCOLS.keys())
)
)
return PROTOCOLS[kwargs["protocol"]](**kwargs)
|
floweaver/layered_graph.py | guest-cc/floweaver | 342 | 12741848 | import networkx as nx
from .sankey_definition import Ordering
class LayeredMixin(object):
def __init__(self):
super().__init__()
self.ordering = Ordering([])
def copy(self):
new = super().copy()
new.ordering = self.ordering
return new
def remove_node(self, u):
super().remove_node(u)
self.ordering = self.ordering.remove(u)
def get_node(self, u):
"""Get the ProcessGroup or Waypoint associated with `u`"""
return self.nodes[u]['node']
class LayeredGraph(LayeredMixin, nx.DiGraph):
pass
class MultiLayeredGraph(LayeredMixin, nx.MultiDiGraph):
pass
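# Hedged usage sketch (added for illustration, not part of the original file): storing and
# retrieving a node payload through the 'node' attribute used by get_node; the payload here
# is a hypothetical placeholder rather than a real ProcessGroup or Waypoint.
def _demo_layered_graph():
    g = LayeredGraph()
    g.add_node("a", node="process-group-placeholder")
    return g.get_node("a")  # -> "process-group-placeholder"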
|
jobs/jobs/spiders/zhihu.py | xfsd521/crawler | 141 | 12741881 | # -*- coding: utf-8 -*-
import scrapy
import json
class ZhihuSpider(scrapy.Spider):
name = 'zhihu'
allowed_domains = ['www.zhihu.com']
start_urls = ['https://www.zhihu.com/']
loginUrl = 'https://www.zhihu.com/#signin'
siginUrl = 'https://www.zhihu.com/login/email'
feedUrl = 'https://www.zhihu.com/api/v3/feed/topstory'
nextFeedUrl = ''
curFeedId = 0
custom_settings = {
"COOKIES_ENABLED": True,
}
headers = {
'Host':
'www.zhihu.com',
'Connection':
'keep-alive',
'Origin':
'https://www.zhihu.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36',
'Content-Type':
'application/x-www-form-urlencoded; charset=UTF-8',
'Accept':
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'X-Requested-With':
'XMLHttpRequest',
'DNT':
1,
'Referer':
'https://www.zhihu.com/',
'Accept-Encoding':
'gzip, deflate, br',
'Accept-Language':
'zh-CN,zh;q=0.8,en;q=0.6',
        'Upgrade-Insecure-Requests':
1,
}
cookies = {
'd_c0':
'"AHCAtu1iqAmPTped76X1ZdN0X_qAwhjdLUU=|1458699045"',
'__utma':
'51854390.1407411155.1458699046.1458699046.1458699046.1',
'__utmv':
'51854390.000--|3=entry_date=20160322=1',
'_zap':
'850897bb-cba4-4d0b-8653-fd65e7578ac2',
'q_c1':
'b7918ff9a5514d2981c30050c8c732e1|1502937247000|1491446589000',
'aliyungf_tc':
'AQAAACtKLW+lywEAOhSntJwFFTilwpwt',
'_xsrf':
'f3ab08fc68489f44ae77236555367c70',
'r_cap_id':
'"M2NjNDAwNTZmY2ExNDA3NzgzNjZkZDA1ODNjZWJkNjI=|1503458111|36984ab33f21997b742d97ace2e02043cbb0a76e"',
'cap_id':
'"ZTIxMmM5Yzg1MGJkNDcxNjgxYzZjMjNlYTg3OGE0Yzk=|1503457914|8dce8550bca28e427771a0e7e1fe1bafb6e170f6"',
}
def start_requests(self):
return [
scrapy.http.FormRequest(
self.loginUrl,
headers=self.headers,
cookies=self.cookies,
meta={'cookiejar': 1},
callback=self.post_login)
]
def post_login(self, response):
xsrf = response.css(
'div.view-signin > form > input[name=_xsrf]::attr(value)'
).extract_first()
self.headers['X-Xsrftoken'] = xsrf
return [
scrapy.http.FormRequest(
self.siginUrl,
method='POST',
headers=self.headers,
meta={'cookiejar': response.meta['cookiejar']},
formdata={
'_xsrf': xsrf,
'captcha_type': 'cn',
'email': '<EMAIL>',
'password': '<PASSWORD>',
},
callback=self.after_login)
]
def after_login(self, response):
jdict = json.loads(response.body)
print('after_login', jdict)
if jdict['r'] == 0:
z_c0 = response.headers.getlist('Set-Cookie')[2].split(';')[
0].split('=')[1]
self.headers['authorization'] = 'Bearer ' + z_c0
return scrapy.http.FormRequest(
url=self.feedUrl,
method='GET',
meta={'cookiejar': response.meta['cookiejar']},
headers=self.headers,
formdata={
'action_feed': 'True',
'limit': '10',
'action': 'down',
'after_id': str(self.curFeedId),
'desktop': 'true'
},
callback=self.parse)
else:
print(jdict['error'])
def parse(self, response):
with open('zhihu.json', 'a') as fd:
fd.write(response.body)
jdict = json.loads(response.body)
jdatas = jdict['data']
for entry in jdatas:
entry['pid'] = entry['id']
yield entry
jpaging = jdict['paging']
self.curFeedId += len(jdatas)
if jpaging['is_end'] == False and self.curFeedId < 50:
self.nextFeedUrl = jpaging['next']
yield self.next_request(response)
def next_request(self, response):
return scrapy.http.FormRequest(
url=self.nextFeedUrl,
method='GET',
meta={'cookiejar': response.meta['cookiejar']},
headers=self.headers,
callback=self.parse)
|
software/glasgow/applet/memory/floppy/mfm.py | electroniceel/Glasgow | 1,014 | 12741887 | import logging
__all__ = ["SoftwareMFMDecoder"]
class SoftwareMFMDecoder:
def __init__(self, logger):
self._logger = logger
self._lock_time = 0
self._bit_time = 0
def _log(self, message, *args):
self._logger.log(logging.DEBUG, "soft-MFM: " + message, *args)
def edges(self, bytestream):
edge_len = 0
for byte in bytestream:
edge_len += 1 + byte
if byte == 0xfd:
continue
yield edge_len
edge_len = 0
def bits(self, bytestream):
prev_byte = 0
for curr_byte in bytestream:
if prev_byte != 0xfd:
yield 1
for _ in range(curr_byte):
yield 0
prev_byte = curr_byte
def domains(self, bitstream):
polarity = 1
for has_edge in bitstream:
if has_edge:
polarity *= -1
yield polarity
def lock(self, bitstream, *, debug=False,
nco_init_period=0, nco_min_period=16, nco_max_period=256,
nco_frac_bits=8, pll_kp_exp=2, pll_gph_exp=1):
nco_period = nco_init_period << nco_frac_bits
nco_phase = 0
nco_step = 1 << nco_frac_bits
nco_clock = 0
pll_error = 0
pll_feedbk = 0
bit_curr = 0
for has_edge in bitstream:
if nco_period < nco_min_period << nco_frac_bits:
nco_period = nco_min_period << nco_frac_bits
if nco_period >= nco_max_period << nco_frac_bits:
nco_period = nco_max_period << nco_frac_bits
if has_edge:
bit_curr = 1
pll_error = nco_phase - (nco_period >> 1)
pll_p_term = abs(pll_error) >> pll_kp_exp
pll_gain = max(1 << pll_gph_exp, pll_p_term)
if pll_error < 0:
pll_feedbk = +1 * pll_gain
else:
pll_feedbk = -1 * pll_gain
if nco_phase >= nco_period:
nco_phase = 0
if not debug:
yield bit_curr
bit_curr = 0
else:
nco_phase += nco_step + pll_feedbk
nco_period -= pll_feedbk >> pll_gph_exp
pll_feedbk = 0
if debug:
yield (nco_phase / nco_step,
nco_period / nco_step,
pll_error / nco_step)
def demodulate(self, chipstream):
shreg = []
offset = 0
synced = False
prev = 0
bits = []
while True:
while len(shreg) < 64:
try:
shreg.append(next(chipstream))
except StopIteration:
return
synced_now = False
for sync_offset in (0, 1):
if shreg[sync_offset:sync_offset + 16] == [0,1,0,0,0,1,0,0,1,0,0,0,1,0,0,1]:
if not synced or sync_offset != 0:
self._log("sync=K.A1 chip-off=%d", offset + sync_offset)
offset += sync_offset + 16
shreg = shreg[sync_offset + 16:]
synced = True
prev = 1
bits = []
yield (1, 0xA1)
synced_now = True
if synced_now: break
if synced_now:
continue
elif not synced and len(shreg) >= 1:
offset += 1
shreg = shreg[1:]
if synced and len(shreg) >= 2:
if shreg[0:2] == [0,1]:
curr = 1
elif prev == 1 and shreg[0:2] == [0,0]:
curr = 0
elif prev == 0 and shreg[0:2] == [1,0]:
curr = 0
else:
synced = False
self._log("desync chip-off=%d bitno=%d prev=%d cell=%d%d",
offset, len(bits), prev, *shreg[0:2])
if synced:
offset += 2
shreg = shreg[2:]
prev = curr
bits.append(curr)
if len(bits) == 8:
yield (0, sum(bit << (7 - n) for n, bit in enumerate(bits)))
bits = []
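# Hedged usage sketch (added for illustration, not part of the original file): chaining the
# decoder stages above from raw interval bytes to demodulated (sync, byte) pairs.
def _demo_decode(bytestream):
    dec = SoftwareMFMDecoder(logging.getLogger(__name__))
    chips = dec.lock(dec.bits(bytestream))  # PLL-recovered chip stream
    return list(dec.demodulate(chips))      # [(is_sync, byte), ...]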
|
src/examples/voice/assistant_library_demo.py | SanchitMisal/aiyprojects-raspbian | 1,610 | 12741892 | #!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Activates the Google Assistant with hotword detection, using the Google Assistant Library.
The Google Assistant Library has direct access to the audio API, so this Python
code doesn't need to record audio.
.. note:
This example depends on hotword detection (such as "Okay Google") to activate the Google
Assistant, which is supported only with Raspberry Pi 2/3. If you're using a Pi Zero, this
code won't work. Instead, you must use the button or another type of trigger, as shown
in assistant_library_with_button_demo.py.
"""
import logging
import platform
import sys
from google.assistant.library.event import EventType
from aiy.assistant import auth_helpers
from aiy.assistant.library import Assistant
from aiy.board import Board, Led
def process_event(led, event):
logging.info(event)
if event.type == EventType.ON_START_FINISHED:
led.state = Led.BEACON_DARK # Ready.
logging.info('Say "OK, Google" then speak, or press Ctrl+C to quit...')
elif event.type == EventType.ON_CONVERSATION_TURN_STARTED:
led.state = Led.ON # Listening.
elif event.type == EventType.ON_END_OF_UTTERANCE:
led.state = Led.PULSE_QUICK # Thinking.
elif (event.type == EventType.ON_CONVERSATION_TURN_FINISHED
or event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT
or event.type == EventType.ON_NO_RESPONSE):
led.state = Led.BEACON_DARK
elif event.type == EventType.ON_ASSISTANT_ERROR and event.args and event.args['is_fatal']:
sys.exit(1)
def main():
logging.basicConfig(level=logging.INFO)
credentials = auth_helpers.get_assistant_credentials()
with Board() as board, Assistant(credentials) as assistant:
for event in assistant.start():
process_event(board.led, event)
if __name__ == '__main__':
main()
|
env/lib/python3.8/site-packages/plotly/validators/layout/template/data/_scattermapbox.py | acrucetta/Chicago_COVI_WebApp | 11,750 | 12741917 | import _plotly_utils.basevalidators
class ScattermapboxValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(
self, plotly_name="scattermapbox", parent_name="layout.template.data", **kwargs
):
super(ScattermapboxValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Scattermapbox"),
data_docs=kwargs.pop(
"data_docs",
"""
""",
),
**kwargs
)
|
webapp/tests/test_dashboard.py | romanek-adam/graphite-web | 4,281 | 12741925 | <filename>webapp/tests/test_dashboard.py
import copy
import errno
import mock
import os
from . import TEST_CONF_DIR
from django.conf import settings
try:
from django.urls import reverse
except ImportError: # Django < 1.10
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from .base import TestCase
from django.test.utils import override_settings
from graphite.util import json
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError:
from django.contrib.auth.models import User
class DashboardTest(TestCase):
# Set config to the test config file
settings.DASHBOARD_CONF = os.path.join(TEST_CONF_DIR, 'dashboard.conf')
# Define a testtemplate
testtemplate = {"state": '{"graphs": [[ "target=a.b.c.*.__VALUE__.d", { "from":"-2days", "target":[ "a.b.c.*.__VALUE__.d" ], "until":"now" }, "/render?width=400&from=-2days&until=now&height=250&target=a.b.c.*.__VALUE__.d&_uniq=0.6526056618895382&title=a.b.c.*.__VALUE__.d" ]]}'}
@override_settings(DASHBOARD_CONF=os.path.join(TEST_CONF_DIR, 'dashboard.conf.missing'))
def test_dashboard_missing_conf(self):
url = reverse('dashboard')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
@override_settings(DASHBOARD_CONF=os.path.join(TEST_CONF_DIR, 'dashboard.conf.missing'))
def test_dashboard_template_missing_template(self):
url = reverse('dashboard_template', args=['bogustemplate', 'testkey'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
@mock.patch('graphite.dashboard.views.DashboardConfig.check')
def test_dashboard_conf_read_failure(self, check):
check.side_effect = OSError(errno.EPERM, 'Operation not permitted')
url = reverse('dashboard')
with self.assertRaises(Exception):
_ = self.client.get(url)
@mock.patch('graphite.dashboard.views.DashboardConfig.check')
def test_dashboard_template_conf_read_failure(self, check):
check.side_effect = OSError(errno.EPERM, 'Operation not permitted')
url = reverse('dashboard_template', args=['bogustemplate', 'testkey'])
with self.assertRaises(Exception):
_ = self.client.get(url)
@override_settings(DASHBOARD_CONF=os.path.join(TEST_CONF_DIR, 'dashboard.conf.missing_ui'))
def test_dashboard_conf_missing_ui(self):
url = reverse('dashboard')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
@override_settings(DASHBOARD_CONF=os.path.join(TEST_CONF_DIR, 'dashboard.conf.missing_ui'))
def test_dashboard_template_missing_ui(self):
url = reverse('dashboard_template', args=['bogustemplate', 'testkey'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
@override_settings(DASHBOARD_CONF=os.path.join(TEST_CONF_DIR, 'dashboard.conf.missing_keyboard-shortcuts'))
def test_dashboard_conf_missing_keyboard_shortcuts(self):
url = reverse('dashboard')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
@override_settings(DASHBOARD_CONF=os.path.join(TEST_CONF_DIR, 'dashboard.conf.missing_keyboard-shortcuts'))
def test_dashboard_template_missing_keyboard_shortcuts(self):
url = reverse('dashboard_template', args=['bogustemplate', 'testkey'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
@override_settings(DASHBOARD_CONF=os.path.join(TEST_CONF_DIR, 'dashboard.conf.invalid_theme'))
def test_dashboard_conf_invalid_theme(self):
url = reverse('dashboard')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
@override_settings(DASHBOARD_CONF=os.path.join(TEST_CONF_DIR, 'dashboard.conf.invalid_theme'))
def test_dashboard_template_invalid_theme(self):
url = reverse('dashboard_template', args=['bogustemplate', 'testkey'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_dashboard(self):
url = reverse('dashboard')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_dashboard_no_user(self):
url = reverse('dashboard')
request = {"user": '', "state": '{}'}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
def test_dashboard_pass_valid(self):
url = reverse('dashboard_save', args=['testdashboard'])
request = {"state": '{}'}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
url = reverse('dashboard', args=['testdashboard'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_dashboard_pass_invalid_name(self):
url = reverse('dashboard', args=['bogusdashboard'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_dashboard_find_empty(self):
url = reverse('dashboard_find')
request = {"query": ""}
response = self.client.get(url, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"dashboards": []}')
def test_dashboard_save_empty(self):
url = reverse('dashboard_save', args=['testdashboard'])
request = {"state": '{}'}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
def test_dashboard_save_overwrite(self):
url = reverse('dashboard_save', args=['testdashboard'])
request = {"state": '{}'}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
def test_dashboard_find_existing(self):
url = reverse('dashboard_save', args=['testdashboard'])
request = {"state": '{}'}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
url = reverse('dashboard_find')
request = {"query": "test"}
response = self.client.get(url, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"dashboards": [{"name": "testdashboard"}]}')
def test_dashboard_find_not_existing(self):
url = reverse('dashboard_save', args=['testdashboard'])
request = {"state": '{}'}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
url = reverse('dashboard_find')
request = {"query": "not here"}
response = self.client.get(url, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"dashboards": []}')
def test_dashboard_load_not_existing(self):
url = reverse('dashboard_load', args=['bogusdashboard'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"error": "Dashboard \'bogusdashboard\' does not exist. "}')
def test_dashboard_load_existing(self):
url = reverse('dashboard_save', args=['testdashboard'])
request = {"state": '{}'}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
url = reverse('dashboard_load', args=['testdashboard'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"state": {}}')
def test_dashboard_delete_nonexisting(self):
url = reverse('dashboard_delete', args=['bogusdashboard'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"error": "Dashboard \'bogusdashboard\' does not exist. "}')
def test_dashboard_delete_existing(self):
# Create a dashboard entry
url = reverse('dashboard_save', args=['testdashboard'])
request = {"state": '{}'}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
# Delete it
url = reverse('dashboard_delete', args=['testdashboard'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"success": true}')
# Confirm it was deleted
url = reverse('dashboard_find')
request = {"query": ""}
response = self.client.get(url, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"dashboards": []}')
def test_dashboard_create_temporary(self):
url = reverse('dashboard_create_temporary')
request = {"state": '{}'}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"name": "temporary-0"}')
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"name": "temporary-1"}')
url = reverse('dashboard_find')
request = {"query": ""}
response = self.client.get(url, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"dashboards": []}')
def test_dashboard_template_pass_invalid(self):
url = reverse('dashboard_template', args=['bogustemplate', 'testkey'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_dashboard_template_pass_valid(self):
url = reverse('dashboard_save_template', args=['testtemplate', 'testkey'])
request = copy.deepcopy(self.testtemplate)
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
url = reverse('dashboard_template', args=['testtemplate', 'testkey'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_dashboard_find_template_empty(self):
url = reverse('dashboard_find_template')
request = {"query": ""}
response = self.client.get(url, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"templates": []}')
def test_dashboard_save_template(self):
url = reverse('dashboard_save_template', args=['testtemplate', 'testkey'])
request = copy.deepcopy(self.testtemplate)
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
# Save again after it now exists
def test_dashboard_save_template_overwrite(self):
url = reverse('dashboard_save_template', args=['testtemplate', 'testkey'])
request = copy.deepcopy(self.testtemplate)
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
url = reverse('dashboard_save_template', args=['testtemplate', 'testkey'])
request = copy.deepcopy(self.testtemplate)
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
def test_dashboard_find_template(self):
url = reverse('dashboard_save_template', args=['testtemplate', 'testkey'])
request = copy.deepcopy(self.testtemplate)
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
url = reverse('dashboard_find_template')
request = {"query": "test"}
response = self.client.get(url, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"templates": [{"name": "testtemplate"}]}')
def test_dashboard_find_template_nonexistent(self):
url = reverse('dashboard_save_template', args=['testtemplate', 'testkey'])
request = copy.deepcopy(self.testtemplate)
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
url = reverse('dashboard_find_template')
request = {"query": "not here"}
response = self.client.get(url, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"templates": []}')
def test_dashboard_load_template_nonexistent(self):
url = reverse('dashboard_save_template', args=['testtemplate', 'testkey'])
request = copy.deepcopy(self.testtemplate)
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
url = reverse('dashboard_load_template', args=['bogustemplate', 'testkey'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"error": "Template \'bogustemplate\' does not exist. "}')
def test_dashboard_load_template_existing(self):
url = reverse('dashboard_save_template', args=['testtemplate', 'testkey'])
request = copy.deepcopy(self.testtemplate)
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
url = reverse('dashboard_load_template', args=['testtemplate', 'testkey'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
graph_data = json.loads(self.testtemplate["state"].replace('__VALUE__', 'testkey'))
self.assertEqual(data, json.loads('{"state": {"name": "testtemplate/testkey", "graphs": ' + json.dumps(graph_data['graphs']) + '}}'))
def test_dashboard_delete_template_nonexisting(self):
# Delete nonexistent template
url = reverse('dashboard_delete_template', args=['bogustemplate'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"error": "Template \'bogustemplate\' does not exist. "}')
def test_dashboard_delete_template_existing(self):
url = reverse('dashboard_save_template', args=['testtemplate', 'testkey'])
request = copy.deepcopy(self.testtemplate)
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
url = reverse('dashboard_delete_template', args=['testtemplate'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"success": true}')
url = reverse('dashboard_find_template')
request = {"query": ""}
response = self.client.get(url, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"templates": []}')
def test_dashboard_help(self):
url = reverse('dashboard_help')
request = {}
response = self.client.get(url, request)
self.assertEqual(response.status_code, 200)
def test_dashboard_email(self):
url = reverse('dashboard_email')
request = {"sender": "<EMAIL>",
"recipients": "noreply@localhost",
"subject": "Test email",
"message": "Here is the test graph",
"graph_params": '{"target":["sumSeries(a.b.c.d)"],"title":"Test","width":"500","from":"-55minutes","until":"now","height":"400"}'}
response = self.client.post(url, request)
self.assertEqual(response.content, b'{"success": true}')
@mock.patch('graphite.dashboard.views.renderView')
def test_dashboard_email_mock_renderView(self, rv):
url = reverse('dashboard_email')
request = {"sender": "nore<EMAIL>",
"recipients": "nore<EMAIL>",
"subject": "Test email",
"message": "Here is the test graph",
"graph_params": '{"target":["sumSeries(a.b.c.d)"],"title":"Test","width":"500","from":"-55minutes","until":"now","height":"400"}'}
responseObject = HttpResponse()
responseObject.content = ''
rv.return_value = responseObject
response = self.client.post(url, request)
self.assertEqual(response.content, b'{"success": true}')
def test_dashboard_login_invalid_authenticate(self):
url = reverse('dashboard_login')
request = {"username": "testuser",
"password": "<PASSWORD>"}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), json.loads('{"errors": {"reason": "Username and/or password invalid."}, "success": false, "text": {}, "permissions": []}'))
@mock.patch('graphite.dashboard.views.authenticate')
def test_dashboard_login_valid_authenticate(self, authenticate):
url = reverse('dashboard_login')
request = {"username": "testuser",
"password": "<PASSWORD>"}
user = User.objects.create(email='<EMAIL>')
user.backend = ''
authenticate.return_value = user
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), json.loads('{"permissions": ["change", "delete"], "success": true, "text": {}, "errors": {}}'))
@mock.patch('graphite.dashboard.views.authenticate')
def test_dashboard_login_valid_authenticate_not_active(self, authenticate):
url = reverse('dashboard_login')
request = {"username": "testuser",
"password": "<PASSWORD>"}
user = User.objects.create(email='<EMAIL>')
user.backend = ''
user.is_active = False
authenticate.return_value = user
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), json.loads('{"permissions": [], "success": false, "errors": {"reason": "Account disabled."}, "text": {}}'))
def test_dashboard_logout(self):
url = reverse('dashboard_logout')
request = {"username": "testuser"}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), json.loads('{"errors": {}, "success": true, "text": {}}'))
@mock.patch('graphite.dashboard.views.getPermissions')
def test_dashboard_save_no_permissions(self, gp):
gp.return_value = [None]
url = reverse('dashboard_save', args=['testdashboard'])
request = {"state": '{}'}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"error": "Must be logged in with appropriate permissions to save"}')
@mock.patch('graphite.dashboard.views.getPermissions')
def test_dashboard_delete_no_permissions(self, gp):
gp.return_value = [None]
url = reverse('dashboard_delete', args=['testdashboard'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"error": "Must be logged in with appropriate permissions to delete"}')
@mock.patch('graphite.dashboard.views.getPermissions')
def test_dashboard_save_template_no_permissions(self, gp):
gp.return_value = [None]
url = reverse('dashboard_save_template', args=['testtemplate', 'testkey'])
request = copy.deepcopy(self.testtemplate)
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"error": "Must be logged in with appropriate permissions to save the template"}')
@mock.patch('graphite.dashboard.views.getPermissions')
def test_dashboard_delete_template_no_permissions(self, gp):
gp.return_value = [None]
url = reverse('dashboard_delete_template', args=['testtemplate'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"error": "Must be logged in with appropriate permissions to delete the template"}')
def test_getPermissions_no_user(self):
settings.DASHBOARD_REQUIRE_AUTHENTICATION=False
settings.DASHBOARD_REQUIRE_PERMISSIONS=False
settings.DASHBOARD_REQUIRE_EDIT_GROUP=False
from graphite.dashboard.views import getPermissions
self.assertEqual(getPermissions(False), ['change', 'delete'])
def test_getPermissions_no_user_require_auth(self):
settings.DASHBOARD_REQUIRE_AUTHENTICATION=True
settings.DASHBOARD_REQUIRE_PERMISSIONS=False
settings.DASHBOARD_REQUIRE_EDIT_GROUP=False
from graphite.dashboard.views import getPermissions
self.assertEqual(getPermissions(False), [])
def test_getPermissions_valid_user(self):
settings.DASHBOARD_REQUIRE_AUTHENTICATION=True
settings.DASHBOARD_REQUIRE_PERMISSIONS=False
settings.DASHBOARD_REQUIRE_EDIT_GROUP=False
from graphite.dashboard.views import getPermissions
user = User.objects.create(email='<EMAIL>')
user.backend = ''
self.assertEqual(getPermissions(user), ['change', 'delete'])
def test_getPermissions_valid_user_require_perm(self):
settings.DASHBOARD_REQUIRE_AUTHENTICATION=True
settings.DASHBOARD_REQUIRE_PERMISSIONS=True
settings.DASHBOARD_REQUIRE_EDIT_GROUP=False
from graphite.dashboard.views import getPermissions
user = User.objects.create(email='<EMAIL>')
user.backend = ''
self.assertEqual(getPermissions(user), [])
def test_getPermissions_valid_user_edit_group(self):
settings.DASHBOARD_REQUIRE_AUTHENTICATION=True
settings.DASHBOARD_REQUIRE_PERMISSIONS=False
settings.DASHBOARD_REQUIRE_EDIT_GROUP=True
from graphite.dashboard.views import getPermissions
user = User.objects.create(email='<EMAIL>')
user.backend = ''
self.assertEqual(getPermissions(user), [])
def test_getPermissions_valid_user_require_perms_edit_group(self):
settings.DASHBOARD_REQUIRE_AUTHENTICATION=True
settings.DASHBOARD_REQUIRE_PERMISSIONS=True
settings.DASHBOARD_REQUIRE_EDIT_GROUP=True
from graphite.dashboard.views import getPermissions
user = User.objects.create(email='<EMAIL>')
user.backend = ''
self.assertEqual(getPermissions(user), [])
|
aws_ir/libs/connection.py | jeffb4/aws_ir | 308 | 12741934 | import boto3
import logging
logger = logging.getLogger(__name__)
class Connection(object):
def __init__(self, type, service=None, region='us-west-2', profile='default'):
self.region = region
self.connection_type = type
self.service = service
self.client = None
self.resource = None
self.profile = profile
try:
boto3.setup_default_session(profile_name=self.profile)
except Exception as e:
logger.info("Problem setting default boto3 session: {}".format(e))
def connect(self):
if self.connection_type is None:
raise AttributeError(
"Could not determine connect type. Set client or resource."
)
elif self.connection_type == "client":
client = boto3.client(
self.service,
region_name=self.region
)
self.client = client
return self.client
elif self.connection_type == "resource":
resource = boto3.resource(
self.service,
region_name=self.region
)
self.resource = resource
return self.resource
elif self.connection_type == "session":
try:
session = boto3.Session(
region_name=self.region,
profile_name=self.profile
)
logger.info(
"Returning session for default profile."
)
except Exception as e:
logger.info(
"We are likely running on AWS instance.: {}".format(e)
)
session = boto3.Session(
region_name=self.region
)
return session
else:
raise AttributeError(
"Connection type is not supported."
)
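# Hedged usage sketch (added for illustration, not part of the original module): building a
# boto3 EC2 client through the Connection wrapper above; service and region are arbitrary.
def _demo_ec2_client():
    conn = Connection(type="client", service="ec2", region="us-west-2")
    return conn.connect()  # boto3 EC2 client for the selected region/profile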
|
Ryven/packages/auto_generated/asynchat/nodes.py | tfroehlich82/Ryven | 2,872 | 12741938 | from NENV import *
import asynchat
class NodeBase(Node):
pass
class Find_Prefix_At_End_Node(NodeBase):
"""
"""
title = 'find_prefix_at_end'
type_ = 'asynchat'
init_inputs = [
NodeInputBP(label='haystack'),
NodeInputBP(label='needle'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, asynchat.find_prefix_at_end(self.input(0), self.input(1)))
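# Behavior note added for clarity: asynchat.find_prefix_at_end(haystack, needle)
# returns how many leading characters of `needle` appear at the very end of
# `haystack`, e.g. asynchat.find_prefix_at_end("hello\r", "\r\n") == 1 and
# asynchat.find_prefix_at_end("hello", "\r\n") == 0.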
export_nodes(
Find_Prefix_At_End_Node,
)
|
generator.py | Nyrize/LipGAN | 435 | 12741957 | <gh_stars>100-1000
from keras.models import load_model
import numpy as np
from keras.optimizers import Adam
from keras.models import Model
from keras.layers import Dense, Conv2DTranspose, Conv2D, BatchNormalization, \
Activation, Concatenate, Input, MaxPool2D,\
UpSampling2D, ZeroPadding2D, Lambda, Add
from keras.callbacks import ModelCheckpoint
from keras import backend as K
import keras
import cv2
import os
import librosa
import scipy
from keras.utils import plot_model
import tensorflow as tf
from keras.utils import multi_gpu_model
from discriminator import contrastive_loss
class ModelMGPU(Model):
def __init__(self, ser_model, gpus):
pmodel = multi_gpu_model(ser_model, gpus)
self.__dict__.update(pmodel.__dict__)
self._smodel = ser_model
def __getattribute__(self, attrname):
'''Override load and save methods to be used from the serial-model. The
serial-model holds references to the weights in the multi-gpu model.
'''
# return Model.__getattribute__(self, attrname)
if 'load' in attrname or 'save' in attrname:
return getattr(self._smodel, attrname)
return super(ModelMGPU, self).__getattribute__(attrname)
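# Usage sketch (added; not from the original LipGAN source). Because of the
# __getattribute__ override above, training code can hold the multi-GPU wrapper
# and still serialize the underlying serial model's weights; the names below
# are hypothetical:
#   parallel = ModelMGPU(serial_model, gpus=2)
#   parallel.save_weights('checkpoint.h5')  # delegates to serial_model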
def conv_block(x, num_filters, kernel_size=3, strides=1, padding='same', act=True):
x = Conv2D(filters=num_filters, kernel_size= kernel_size,
strides=strides, padding=padding)(x)
x = BatchNormalization(momentum=.8)(x)
if act:
x = Activation('relu')(x)
return x
def conv_t_block(x, num_filters, kernel_size=3, strides=2, padding='same'):
x = Conv2DTranspose(filters=num_filters, kernel_size= kernel_size,
strides=strides, padding=padding)(x)
x = BatchNormalization(momentum=.8)(x)
x = Activation('relu')(x)
return x
def create_model(args):
############# encoder for face/identity
input_face = Input(shape=(args.img_size, args.img_size, 6), name="input_face")
identity_mapping = conv_block(input_face, 32, kernel_size=11) # 96x96
x1_face = conv_block(identity_mapping, 64, kernel_size=7, strides=2) # 48x48
x2_face = conv_block(x1_face, 128, 5, 2) # 24x24
x3_face = conv_block(x2_face, 256, 3, 2) #12x12
x4_face = conv_block(x3_face, 512, 3, 2) #6x6
x5_face = conv_block(x4_face, 512, 3, 2) #3x3
x6_face = conv_block(x5_face, 512, 3, 1, padding='valid')
x7_face = conv_block(x6_face, 256, 1, 1)
############# encoder for audio
input_audio = Input(shape=(12,35,1), name="input_audio")
x = conv_block(input_audio, 64)
x = conv_block(input_audio, 128)
x = ZeroPadding2D(((1,0),(0,0)))(x)
x = conv_block(x, 256, strides=(1, 2))
x = conv_block(x, 256)
x = conv_block(x, 256, strides=2)
x = conv_block(x, 512, strides=2)
x = conv_block(x, 512, (4, 5), 1, padding='valid')
x = conv_block(x, 256, 1, 1)
embedding = Concatenate(axis=3)([x7_face, x])
############# decoder
x = conv_block(embedding, 512, 1)
x = conv_t_block(embedding, 512, 3, 3)# 3x3
x = Concatenate(axis=3) ([x5_face, x])
x = conv_t_block(x, 512) #6x6
x = Concatenate(axis=3) ([x4_face, x])
x = conv_t_block(x, 256) #12x12
x = Concatenate(axis=3) ([x3_face, x])
x = conv_t_block(x, 128) #24x24
x = Concatenate(axis=3) ([x2_face, x])
x = conv_t_block(x, 64) #48x48
x = Concatenate(axis=3) ([x1_face, x])
x = conv_t_block(x, 32) #96x96
x = Concatenate(axis=3) ([identity_mapping, x])
x = conv_block(x, 16) #96x96
x = conv_block(x, 16) #96x96
x = Conv2D(filters=3, kernel_size=1, strides=1, padding="same") (x)
prediction = Activation("sigmoid", name="prediction")(x)
model = Model(inputs=[input_face, input_audio], outputs=prediction)
model.summary()
ser_model = model
if args.n_gpu > 1:
parallel_model = ModelMGPU(ser_model , args.n_gpu)
else:
parallel_model = ser_model
parallel_model.compile(loss='mae', optimizer=(Adam(lr=args.lr) if hasattr(args, 'lr') else 'adam'))
return parallel_model, ser_model
def create_model_residual(args):
def residual_block(inp, num_filters):
x = conv_block(inp, num_filters)
x = conv_block(x, num_filters)
x = Add()([x, inp])
x = Activation('relu') (x)
return x
############# encoder for face/identity
input_face = Input(shape=(args.img_size, args.img_size, 6), name="input_face")
identity_mapping = conv_block(input_face, 32, kernel_size=7) # 96x96
x1_face = conv_block(identity_mapping, 64, kernel_size=5, strides=2) # 48x48
x1_face = residual_block(x1_face, 64)
x1_face = residual_block(x1_face, 64)
x2_face = conv_block(x1_face, 128, 3, 2) # 24x24
x2_face = residual_block(x2_face, 128)
x2_face = residual_block(x2_face, 128)
x2_face = residual_block(x2_face, 128)
x3_face = conv_block(x2_face, 256, 3, 2) #12x12
x3_face = residual_block(x3_face, 256)
x3_face = residual_block(x3_face, 256)
x4_face = conv_block(x3_face, 512, 3, 2) #6x6
x4_face = residual_block(x4_face, 512)
x4_face = residual_block(x4_face, 512)
x5_face = conv_block(x4_face, 512, 3, 2) #3x3
x6_face = conv_block(x5_face, 512, 3, 1, padding='valid')
x7_face = conv_block(x6_face, 512, 1, 1)
############# encoder for audio
input_audio = Input(shape=(12,35,1), name="input_audio")
x = conv_block(input_audio, 128)
x = residual_block(x, 128)
x = residual_block(x, 128)
x = residual_block(x, 128)
x = ZeroPadding2D(((1,0),(0,0)))(x)
x = conv_block(x, 256, strides=(1, 2))
x = residual_block(x, 256)
x = residual_block(x, 256)
x = conv_block(x, 512, strides=2)
x = residual_block(x, 512)
x = residual_block(x, 512)
x = conv_block(x, 512, strides=2)
x = residual_block(x, 512)
x = conv_block(x, 512, (4, 5), 1, padding='valid')
x = conv_block(x, 512, 1, 1)
embedding = Concatenate(axis=3)([x7_face, x])
############# decoder
x = conv_t_block(embedding, 512, 3, 3)# 3x3
x = Concatenate(axis=3) ([x5_face, x])
x = conv_t_block(x, 512) #6x6
x = residual_block(x, 512)
x = residual_block(x, 512)
x = Concatenate(axis=3) ([x4_face, x])
x = conv_t_block(x, 256) #12x12
x = residual_block(x, 256)
x = residual_block(x, 256)
x = Concatenate(axis=3) ([x3_face, x])
x = conv_t_block(x, 128) #24x24
x = residual_block(x, 128)
x = residual_block(x, 128)
x = Concatenate(axis=3) ([x2_face, x])
x = conv_t_block(x, 64) #48x48
x = residual_block(x, 64)
x = residual_block(x, 64)
x = Concatenate(axis=3) ([x1_face, x])
x = conv_t_block(x, 32) #96x96
x = Concatenate(axis=3) ([identity_mapping, x])
x = conv_block(x, 16) #96x96
x = conv_block(x, 16) #96x96
x = Conv2D(filters=3, kernel_size=1, strides=1, padding="same") (x)
prediction = Activation("sigmoid", name="prediction")(x)
model = Model(inputs=[input_face, input_audio], outputs=prediction)
model.summary()
if args.n_gpu > 1:
model = ModelMGPU(model , args.n_gpu)
model.compile(loss='mae', optimizer=(Adam(lr=args.lr) if hasattr(args, 'lr') else 'adam'))
return model
def create_combined_model(generator, discriminator, args):
input_face = Input(shape=(args.img_size, args.img_size, 6), name="input_face_comb")
input_audio = Input(shape=(12, 35, 1), name="input_audio_comb")
fake_face = generator([input_face, input_audio])
discriminator.trainable = False
d = discriminator([fake_face, input_audio])
model = Model([input_face, input_audio], [fake_face, d])
if args.n_gpu > 1:
model = ModelMGPU(model , args.n_gpu)
model.compile(loss=['mae', contrastive_loss],
optimizer=(Adam(lr=args.lr) if hasattr(args, 'lr') else 'adam'), loss_weights=[1., .01])
return model
if __name__ == '__main__':
    # Smoke-test call; img_size and n_gpu below are placeholder values, not
    # the original training configuration.
    from argparse import Namespace
    model = create_model_residual(Namespace(img_size=96, n_gpu=1))
#plot_model(model, to_file='model.png', show_shapes=True) |
lite/examples/recommendation/ml/model/recommendation_model_launcher_test.py | duy-maimanh/examples | 6,484 | 12741966 | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for recommendation_model_launcher."""
import os
from absl import flags
import tensorflow as tf
from model import input_pipeline
from model import recommendation_model_launcher as launcher
from google.protobuf import text_format
FLAGS = flags.FLAGS
FAKE_MOVIE_GENRE_VOCAB = [
'UNK',
'Comedy',
'Drama',
'Romance',
'Animation',
'Children'
]
TEST_INPUT_CONFIG = """
activity_feature_groups {
features {
feature_name: "context_movie_id"
feature_type: INT
vocab_size: 3952
embedding_dim: 8
feature_length: 5
}
features {
feature_name: "context_movie_rating"
feature_type: FLOAT
feature_length: 5
}
encoder_type: CNN
}
activity_feature_groups {
features {
feature_name: "context_movie_genre"
feature_type: STRING
vocab_name: "movie_genre_vocab.txt"
vocab_size: 19
embedding_dim: 8
feature_length: 8
}
encoder_type: BOW
}
label_feature {
feature_name: "label_movie_id"
feature_type: INT
vocab_size: 3952
embedding_dim: 8
feature_length: 1
}
"""
EXAMPLE1 = text_format.Parse(
"""
features {
feature {
key: "context_movie_id"
value {
int64_list {
value: [1, 2, 0, 0, 0]
}
}
}
feature {
key: "context_movie_rating"
value {
float_list {
value: [3.5, 4.0, 0.0, 0.0, 0.0]
}
}
}
feature {
key: "context_movie_genre"
value {
bytes_list {
value: [
"Animation", "Children", "Comedy", "Comedy", "Romance", "UNK", "UNK", "UNK"
]
}
}
}
feature {
key: "label_movie_id"
value {
int64_list {
value: [3]
}
}
}
}""", tf.train.Example())
class RecommendationModelLauncherTest(tf.test.TestCase):
def _AssertSparseTensorValueEqual(self, a, b):
self.assertAllEqual(a.indices, b.indices)
self.assertAllEqual(a.values, b.values)
self.assertAllEqual(a.dense_shape, b.dense_shape)
def _assertInputDetail(self, input_details, index, name, shape):
self.assertEqual(name, input_details[index]['name'])
self.assertEqual(shape, input_details[index]['shape'])
def setUp(self):
super().setUp()
self.tmp_dir = self.create_tempdir()
self.test_input_config_file = os.path.join(self.tmp_dir,
'input_config.pbtxt')
self.test_movie_genre_vocab_file = os.path.join(self.tmp_dir,
'movie_genre_vocab.txt')
self.test_input_data_file = os.path.join(self.tmp_dir,
'test_input_data.tfrecord')
with open(self.test_input_config_file, 'w', encoding='utf-8') as f:
f.write(TEST_INPUT_CONFIG)
with open(self.test_movie_genre_vocab_file, 'w', encoding='utf-8') as f:
for item in FAKE_MOVIE_GENRE_VOCAB:
f.write(item + '\n')
with tf.io.TFRecordWriter(self.test_input_data_file) as file_writer:
file_writer.write(EXAMPLE1.SerializeToString())
self.test_model_dir = os.path.join(self.tmp_dir, 'test_model_dir')
FLAGS.training_data_filepattern = self.test_input_data_file
FLAGS.testing_data_filepattern = self.test_input_data_file
FLAGS.input_config_file = self.test_input_config_file
FLAGS.model_dir = self.test_model_dir
FLAGS.hidden_layer_dims = [8, 4]
FLAGS.eval_top_k = [1, 5]
FLAGS.num_predictions = 5
FLAGS.conv_num_filter_ratios = [2, 4]
FLAGS.conv_kernel_size = 4
FLAGS.lstm_num_units = 16
def testModelTrainEvalExport(self):
"""Verifies that model can be trained and evaluated."""
tf.io.gfile.mkdir(FLAGS.model_dir)
input_config = launcher.load_input_config()
model_config = launcher.prepare_model_config()
dataset = input_pipeline.get_input_dataset(
data_filepattern=self.test_input_data_file,
input_config=input_config,
vocab_file_dir=self.tmp_dir,
batch_size=8)
model = launcher.build_keras_model(input_config, model_config)
launcher.train_and_eval(
model=model,
model_dir=FLAGS.model_dir,
train_input_dataset=dataset,
eval_input_dataset=dataset,
steps_per_epoch=2,
epochs=2,
eval_steps=1)
self.assertTrue(os.path.exists(self.test_model_dir))
summaries_dir = os.path.join(self.test_model_dir, 'summaries')
self.assertTrue(os.path.exists(summaries_dir))
export_dir = os.path.join(FLAGS.model_dir, 'export')
latest_checkpoint = tf.train.latest_checkpoint(FLAGS.model_dir)
launcher.save_model(
checkpoint_path=latest_checkpoint,
export_dir=export_dir,
input_config=input_config,
model_config=model_config)
savedmodel_path = os.path.join(export_dir, 'saved_model.pb')
self.assertTrue(os.path.exists(savedmodel_path))
imported = tf.saved_model.load(export_dir, tags=None)
infer = imported.signatures['serving_default']
context_movie_id = tf.range(5, dtype=tf.int32)
context_movie_rating = tf.range(5, dtype=tf.float32)
context_movie_genre = tf.range(8, dtype=tf.int32)
predictions = infer(context_movie_id=context_movie_id,
context_movie_rating=context_movie_rating,
context_movie_genre=context_movie_genre)
self.assertAllEqual([5], predictions['top_prediction_ids'].shape)
self.assertAllEqual([5], predictions['top_prediction_scores'].shape)
launcher.export_tflite(export_dir)
tflite_model_path = os.path.join(export_dir, 'model.tflite')
self.assertTrue(os.path.exists(tflite_model_path))
f = open(tflite_model_path, 'rb')
interpreter = tf.lite.Interpreter(model_content=f.read())
interpreter.allocate_tensors()
inference_signature = interpreter.get_signature_list()['serving_default']
self.assertAllEqual(
['context_movie_genre', 'context_movie_id', 'context_movie_rating'],
inference_signature['inputs'])
self.assertAllEqual(['top_prediction_ids', 'top_prediction_scores'],
inference_signature['outputs'])
serving_name_to_tenors = {
'serving_default_context_movie_id:0': context_movie_id,
'serving_default_context_movie_rating:0': context_movie_rating,
'serving_default_context_movie_genre:0': context_movie_genre
}
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
indice_to_tensors = {}
for input_detail in input_details:
indice_to_tensors[input_detail['index']] = serving_name_to_tenors[
input_detail['name']]
for index, tensor in indice_to_tensors.items():
interpreter.set_tensor(index, tensor)
interpreter.invoke()
tflite_top_predictions_ids = interpreter.get_tensor(
output_details[0]['index'])
tflite_top_prediction_scores = interpreter.get_tensor(
output_details[1]['index'])
self.assertAllEqual([5], tflite_top_predictions_ids.shape)
self.assertAllEqual([5], tflite_top_prediction_scores.shape)
if __name__ == '__main__':
launcher.define_flags()
tf.test.main()
|
tests/solver_tests.py | stantonious/pymaze | 154 | 12741968 | from __future__ import absolute_import
import unittest
from src.solver import Solver
class TestSolver(unittest.TestCase):
def test_ctor(self):
solver = Solver("", "", False)
self.assertEqual(solver.name, "")
self.assertEqual(solver.quiet_mode, False)
if __name__ == "__main__":
unittest.main() |
app/lib/api/records.py | grepleria/SnitchDNS | 152 | 12741991 | <filename>app/lib/api/records.py
from app.lib.base.provider import Provider
from app.lib.api.base import ApiBase
from app.lib.api.definitions.record import Record
class ApiRecords(ApiBase):
def all(self, user_id, zone_id=None, domain=None):
provider = Provider()
zones = provider.dns_zones()
records = provider.dns_records()
zone = zones.get(zone_id, user_id) if zone_id is not None else zones.find(domain, user_id=user_id)
if not zone:
return self.send_not_found_response()
results = records.get_zone_records(zone.id)
data = []
for result in results:
data.append(self.__load_record(result))
return self.send_valid_response(data)
def one(self, user_id, record_id, zone_id=None, domain=None):
provider = Provider()
zones = provider.dns_zones()
records = provider.dns_records()
zone = zones.get(zone_id, user_id) if zone_id is not None else zones.find(domain, user_id=user_id)
if not zone:
return self.send_not_found_response()
record = records.get(record_id, dns_zone_id=zone_id)
if not record:
return self.send_not_found_response()
return self.send_valid_response(self.__load_record(record))
def __load_record(self, item):
record = Record()
record.id = item.id
record.zone_id = item.dns_zone_id
record.active = item.active
record.cls = item.cls
record.type = item.type
record.ttl = int(item.ttl)
record.data = item.data
record.is_conditional = item.has_conditional_responses
record.conditional_count = item.conditional_count
record.conditional_limit = item.conditional_limit
record.confitional_reset = item.conditional_reset
record.conditional_data = item.conditional_data
return record
def classes(self):
records = Provider().dns_records()
return self.send_valid_response(records.get_classes())
def types(self):
records = Provider().dns_records()
return self.send_valid_response(records.get_types())
def delete(self, user_id, record_id, zone_id=None, domain=None):
provider = Provider()
zones = provider.dns_zones()
records = provider.dns_records()
zone = zones.get(zone_id, user_id) if zone_id is not None else zones.find(domain, user_id=user_id)
if not zone:
return self.send_not_found_response()
record = records.get(record_id, dns_zone_id=zone_id)
if not record:
return self.send_not_found_response()
records.delete(record)
return self.send_success_response()
def create(self, user_id, zone_id=None, domain=None):
provider = Provider()
zones = provider.dns_zones()
records = provider.dns_records()
zone = zones.get(zone_id, user_id) if zone_id is not None else zones.find(domain, user_id=user_id)
if not zone:
return self.send_not_found_response()
# First get the mandatory fields for all record types.
required_fields = [
'class',
'type',
'ttl',
'active',
'data',
'is_conditional',
'conditional_count',
'conditional_limit',
'conditional_reset',
'conditional_data'
]
data = self.get_json(required_fields)
if data is False:
return self.send_error_response(
5000,
'Missing fields',
'Required fields are: {0}'.format(', '.join(required_fields))
)
# Validate.
if data['class'] not in records.get_classes():
return self.send_error_response(5005, 'Invalid class', '')
elif data['type'] not in records.get_types():
return self.send_error_response(5005, 'Invalid type', '')
if isinstance(data['ttl'], str) and data['ttl'].isdigit() is False:
return self.send_error_response(5005, 'Invalid TTL', '')
data['ttl'] = int(data['ttl'])
if data['ttl'] < 0:
return self.send_error_response(5005, 'Invalid TTL', '')
elif data['conditional_count'] < 0:
return self.send_error_response(5005, 'Invalid Conditional Count', '')
elif data['conditional_limit'] < 0:
return self.send_error_response(5005, 'Invalid Conditional Limit', '')
# Fix types.
data['active'] = True if data['active'] else False
data['is_conditional'] = True if data['is_conditional'] else False
# Now that we have the type, we can get the type-specific properties.
record_type_properties = records.get_record_type_properties(data['type'], clean=True)
record_type_conditional_properties = records.get_record_type_properties(data['type'], clean=True)
all_errors = []
basic_data, errors = self.__parse_data_properties(data['data'], record_type_properties)
all_errors += errors
if data['is_conditional']:
conditional_data, errors = self.__parse_data_properties(data['conditional_data'], record_type_conditional_properties)
all_errors += errors
else:
conditional_data = {}
        if len(all_errors) > 0:
            return self.send_error_response(
                5005,
                'Invalid type property fields',
                all_errors
            )
# Create the record.
record = records.create()
record = records.save(record, zone.id, data['ttl'], data['class'], data['type'], basic_data, data['active'])
record = records.save_conditions(record, enabled=data['is_conditional'], data=conditional_data,
count=data['conditional_count'], limit=data['conditional_limit'],
reset=data['conditional_reset'])
return self.one(user_id, record.id, zone_id=zone.id)
def __parse_data_properties(self, data, properties):
errors = []
output = {}
for property, type in properties.items():
if property not in data:
errors.append('Missing type property {0}'.format(property))
continue
value = data[property]
if (type == 'int') and (isinstance(value, str)):
if not value.isdigit():
errors.append('Invalid {0} value'.format(property))
continue
value = int(value)
if (type == 'str') and (len(value) == 0):
errors.append('Invalid {0} value'.format(property))
elif (type == 'int') and (value < 0):
errors.append('Invalid {0} value'.format(property))
output[property] = value
return output, errors
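    # Illustrative call added for clarity; the property names here are
    # hypothetical, the real mapping comes from records.get_record_type_properties():
    #   data, errors = self.__parse_data_properties(
    #       {'address': '10.0.0.1', 'port': '53'},
    #       {'address': 'str', 'port': 'int'})
    #   # -> data == {'address': '10.0.0.1', 'port': 53}, errors == []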
def update(self, user_id, record_id, zone_id=None, domain=None):
provider = Provider()
zones = provider.dns_zones()
records = provider.dns_records()
zone = zones.get(zone_id, user_id) if zone_id is not None else zones.find(domain, user_id=user_id)
if not zone:
return self.send_not_found_response()
# Get record.
record = records.get(record_id, dns_zone_id=zone.id)
if not record:
return self.send_not_found_response()
data = self.get_json([])
if 'class' in data:
if data['class'] not in records.get_classes():
return self.send_error_response(5005, 'Invalid class', '')
else:
data['class'] = record.cls
if 'type' in data:
if data['type'] not in records.get_types():
return self.send_error_response(5005, 'Invalid type', '')
else:
data['type'] = record.type
if 'ttl' in data:
if isinstance(data['ttl'], str):
if not data['ttl'].isdigit():
return self.send_error_response(5005, 'Invalid TTL', '')
data['ttl'] = int(data['ttl'])
if data['ttl'] < 0:
return self.send_error_response(5005, 'Invalid TTL', '')
else:
data['ttl'] = record.ttl
if 'active' in data:
data['active'] = True if data['active'] else False
else:
data['active'] = record.active
if 'is_conditional' in data:
data['is_conditional'] = True if data['is_conditional'] else False
else:
data['is_conditional'] = record.has_conditional_responses
data['conditional_limit'] = data['conditional_limit'] if 'conditional_limit' in data else record.conditional_limit
data['conditional_count'] = data['conditional_count'] if 'conditional_count' in data else record.conditional_count
data['conditional_reset'] = data['conditional_reset'] if 'conditional_reset' in data else record.conditional_reset
if 'data' in data:
record_type_properties = records.get_record_type_properties(data['type'], clean=True)
data['data'], errors = self.__parse_data_properties(data['data'], record_type_properties)
if len(errors) > 0:
return self.send_error_response(
5005,
'Invalid type property fields',
errors
)
else:
data['data'] = record.data
if ('conditional_data' in data) and (data['is_conditional'] is True):
record_type_properties = records.get_record_type_properties(data['type'], clean=True)
data['conditional_data'], errors = self.__parse_data_properties(data['conditional_data'], record_type_properties)
if len(errors) > 0:
return self.send_error_response(
5005,
'Invalid type property fields',
errors
)
else:
data['conditional_data'] = record.conditional_data
record = records.save(record, zone.id, data['ttl'], data['class'], data['type'], data['data'], data['active'])
record = records.save_conditions(record, enabled=data['is_conditional'], data=data['conditional_data'],
count=data['conditional_count'], limit=data['conditional_limit'],
reset=data['conditional_reset'])
return self.one(user_id, record.id, zone_id=zone.id)
|
qutip/tests/test_stochastic_me.py | quantshah/qutip | 1,205 | 12742005 | import pytest
import numpy as np
from numpy.testing import assert_, run_module_suite
from qutip import (smesolve, mesolve, photocurrent_mesolve, liouvillian,
QobjEvo, spre, spost, destroy, coherent, parallel_map,
qeye, fock_dm, general_stochastic, ket2dm, num)
def f(t, args):
return args["a"] * t
@pytest.mark.slow
def test_smesolve_homodyne_methods():
"Stochastic: smesolve: homodyne methods with single jump operator"
def arccoth(x):
return 0.5*np.log((1.+x)/(x-1.))
th = 0.1 # Interaction parameter
alpha = np.cos(th)
beta = np.sin(th)
gamma = 1.
N = 30 # number of Fock states
Id = qeye(N)
a = destroy(N)
s = 0.5*((alpha+beta)*a + (alpha-beta)*a.dag())
x = (a + a.dag()) * 2**-0.5
H = Id
c_op = [gamma**0.5 * a]
sc_op = [s]
e_op = [x, x*x]
rho0 = fock_dm(N,0) # initial vacuum state
T = 3. # final time
# number of time steps for which we save the expectation values
N_store = 121
Nsub = 10
tlist = np.linspace(0, T, N_store)
ddt = (tlist[1]-tlist[0])
#### Analytic solution
y0 = 0.5
A = (gamma**2 + alpha**2 * (beta**2 + 4*gamma) - 2*alpha*beta*gamma)**0.5
B = arccoth((-4*alpha**2*y0 + alpha*beta - gamma)/A)
y_an = (alpha*beta - gamma + A / np.tanh(0.5*A*tlist - B))/(4*alpha**2)
list_methods_tol = [['euler-maruyama', 2e-2],
['pc-euler', 2e-3],
['pc-euler-2', 2e-3],
['platen', 1e-3],
['milstein', 1e-3],
['milstein-imp', 1e-3],
['rouchon', 1e-3],
['taylor1.5', 1e-4],
['taylor1.5-imp', 1e-4],
['explicit1.5', 1e-4],
['taylor2.0', 1e-4]]
for n_method in list_methods_tol:
sol = smesolve(H, rho0, tlist, c_op, sc_op, e_op,
nsubsteps=Nsub, method='homodyne', solver = n_method[0])
sol2 = smesolve(H, rho0, tlist, c_op, sc_op, e_op, store_measurement=0,
nsubsteps=Nsub, method='homodyne', solver = n_method[0],
noise = sol.noise)
sol3 = smesolve(H, rho0, tlist, c_op, sc_op, e_op,
nsubsteps=Nsub*5, method='homodyne',
solver = n_method[0], tol=1e-8)
err = 1/T * np.sum(np.abs(y_an - \
(sol.expect[1]-sol.expect[0]*sol.expect[0].conj())))*ddt
err3 = 1/T * np.sum(np.abs(y_an - \
(sol3.expect[1]-sol3.expect[0]*sol3.expect[0].conj())))*ddt
print(n_method[0], ': deviation =', err, ', tol =', n_method[1])
assert_(err < n_method[1])
# 5* more substep should decrease the error
assert_(err3 < err)
# just to check that noise is not affected by smesolve
assert_(np.all(sol.noise == sol2.noise))
assert_(np.all(sol.expect[0] == sol2.expect[0]))
sol = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=10, ntraj=2,
nsubsteps=Nsub, method='homodyne', solver='euler',
store_measurement=1)
sol2 = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=10, ntraj=2,
nsubsteps=Nsub, method='homodyne', solver='euler',
store_measurement=0)
sol3 = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=11, ntraj=2,
nsubsteps=Nsub, method='homodyne', solver='euler')
# sol and sol2 have the same seed, sol3 differ.
assert_(np.all(sol.noise == sol2.noise))
assert_(np.all(sol.noise != sol3.noise))
assert_(not np.all(sol.measurement[0] == 0.+0j))
assert_(np.all(sol2.measurement[0] == 0.+0j))
sol = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=np.array([1,2]),
ntraj=2, nsubsteps=Nsub, method='homodyne', solver='euler')
sol2 = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=np.array([2,1]),
ntraj=2, nsubsteps=Nsub, method='homodyne', solver='euler')
# sol and sol2 have the seed of traj 1 and 2 reversed.
assert_(np.all(sol.noise[0,:,:,:] == sol2.noise[1,:,:,:]))
assert_(np.all(sol.noise[1,:,:,:] == sol2.noise[0,:,:,:]))
def test_smesolve_photocurrent():
"Stochastic: photocurrent_mesolve"
tol = 0.01
N = 4
gamma = 0.25
ntraj = 20
nsubsteps = 100
a = destroy(N)
H = [[a.dag() * a,f]]
psi0 = coherent(N, 0.5)
sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a * 0.5]
e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())]
times = np.linspace(0, 1.0, 21)
res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2})
res = photocurrent_mesolve(H, psi0, times, [], sc_ops, e_ops, args={"a":2},
ntraj=ntraj, nsubsteps=nsubsteps, store_measurement=True,
map_func=parallel_map)
assert_(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol
for idx in range(len(e_ops))]))
assert_(len(res.measurement) == ntraj)
assert_(all([m.shape == (len(times), len(sc_ops))
for m in res.measurement]))
def test_smesolve_homodyne():
"Stochastic: smesolve: homodyne, time-dependent H"
tol = 0.01
N = 4
gamma = 0.25
ntraj = 20
nsubsteps = 100
a = destroy(N)
H = [[a.dag() * a,f]]
psi0 = coherent(N, 0.5)
sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a * 0.5]
e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())]
times = np.linspace(0, 1.0, 21)
res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2})
list_methods_tol = ['euler-maruyama',
'pc-euler',
'pc-euler-2',
'platen',
'milstein',
'milstein-imp',
'rouchon',
'taylor15',
'taylor15-imp',
'explicit15']
for solver in list_methods_tol:
res = smesolve(H, psi0, times, [], sc_ops, e_ops,
ntraj=ntraj, nsubsteps=nsubsteps, args={"a":2},
method='homodyne', store_measurement=True,
solver=solver, map_func=parallel_map)
assert_(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol
for idx in range(len(e_ops))]))
assert_(len(res.measurement) == ntraj)
assert_(all([m.shape == (len(times), len(sc_ops))
for m in res.measurement]))
@pytest.mark.slow
def test_smesolve_heterodyne():
"Stochastic: smesolve: heterodyne, time-dependent H"
tol = 0.01
N = 4
gamma = 0.25
ntraj = 20
nsubsteps = 100
a = destroy(N)
H = [[a.dag() * a, f]]
psi0 = coherent(N, 0.5)
sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a * 0.5]
e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())]
times = np.linspace(0, 1.0, 21)
res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2})
list_methods_tol = ['euler-maruyama',
'pc-euler',
'pc-euler-2',
'platen',
'milstein',
'milstein-imp',
'rouchon',
'taylor15',
'taylor15-imp',
'explicit15']
for solver in list_methods_tol:
res = smesolve(H, psi0, times, [], sc_ops, e_ops,
ntraj=ntraj, nsubsteps=nsubsteps, args={"a":2},
method='heterodyne', store_measurement=True,
solver=solver, map_func=parallel_map)
assert_(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol
for idx in range(len(e_ops))]))
assert_(len(res.measurement) == ntraj)
assert_(all([m.shape == (len(times), len(sc_ops), 2)
for m in res.measurement]))
@pytest.mark.slow
def test_general_stochastic():
"Stochastic: general_stochastic"
"Reproduce smesolve homodyne"
tol = 0.025
N = 4
gamma = 0.25
ntraj = 20
nsubsteps = 50
a = destroy(N)
H = [[a.dag() * a,f]]
psi0 = coherent(N, 0.5)
sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a * 0.5]
e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())]
L = liouvillian(QobjEvo([[a.dag() * a,f]], args={"a":2}), c_ops = sc_ops)
L.compile()
sc_opsM = [QobjEvo(spre(op) + spost(op.dag())) for op in sc_ops]
[op.compile() for op in sc_opsM]
e_opsM = [spre(op) for op in e_ops]
def d1(t, vec):
return L.mul_vec(t,vec)
def d2(t, vec):
out = []
for op in sc_opsM:
out.append(op.mul_vec(t,vec)-op.expect(t,vec)*vec)
return np.stack(out)
times = np.linspace(0, 0.5, 13)
res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2})
list_methods_tol = ['euler-maruyama',
'platen',
'explicit15']
for solver in list_methods_tol:
res = general_stochastic(ket2dm(psi0),times,d1,d2,len_d2=2, e_ops=e_opsM,
normalize=False, ntraj=ntraj, nsubsteps=nsubsteps,
solver=solver)
assert_(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol
for idx in range(len(e_ops))]))
assert_(len(res.measurement) == ntraj)
def f_dargs(a, args):
return args["expect_op_3"] - 1
def test_ssesolve_feedback():
"Stochastic: ssesolve: time-dependent H with feedback"
tol = 0.01
N = 4
ntraj = 10
nsubsteps = 100
a = destroy(N)
H = [num(N)]
psi0 = coherent(N, 2.5)
sc_ops = [[a + a.dag(), f_dargs]]
e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag()), qeye(N)]
times = np.linspace(0, 10, 101)
res_ref = mesolve(H, psi0, times, sc_ops, e_ops,
args={"expect_op_3":qeye(N)})
res = smesolve(H, psi0, times, sc_ops=sc_ops, e_ops=e_ops, noise=1,
ntraj=ntraj, nsubsteps=nsubsteps, method='homodyne',
map_func=parallel_map, args={"expect_op_3":qeye(N)})
print(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol
for idx in range(len(e_ops))]))
if __name__ == "__main__":
run_module_suite()
|
neuralmonkey/runners/xent_runner.py | ufal/neuralmonkey | 446 | 12742010 | from typing import Dict, List, Union
from typeguard import check_argument_types
import tensorflow as tf
import numpy as np
from neuralmonkey.decoders.autoregressive import AutoregressiveDecoder
from neuralmonkey.decoders.sequence_labeler import SequenceLabeler
from neuralmonkey.decorators import tensor
from neuralmonkey.runners.base_runner import BaseRunner
SupportedDecoders = Union[AutoregressiveDecoder, SequenceLabeler]
class XentRunner(BaseRunner[SupportedDecoders]):
# pylint: disable=too-few-public-methods
# Pylint issue here: https://github.com/PyCQA/pylint/issues/2607
class Executable(BaseRunner.Executable["XentRunner"]):
def collect_results(self, results: List[Dict]) -> None:
xents = np.mean([res["xents"] for res in results], axis=0)
self.set_runner_result(outputs=xents.tolist(),
losses=[float(np.mean(xents))])
# pylint: enable=too-few-public-methods
def __init__(self,
output_series: str,
decoder: SupportedDecoders) -> None:
check_argument_types()
super().__init__(output_series, decoder)
@tensor
def fetches(self) -> Dict[str, tf.Tensor]:
return {"xents": self.decoder.train_xents}
@property
def loss_names(self) -> List[str]:
return ["xent"]
|
tests/integration/cattletest/core/test_audit_log.py | lifecontrol/cattle | 482 | 12742014 | <reponame>lifecontrol/cattle
from common_fixtures import * # NOQA
from copy import deepcopy
def made_log(object, admin_user_client, context, accountId=None):
t = object.type
if t == 'stack':
t = 'stack'
logs = admin_user_client.list_audit_log(resourceId=object.id,
resourceType=t)
assert len(logs) == 1
assert logs[0].resourceType == t
if str(logs[0].resourceId) != object.id:
assert str(logs[0].resourceId).replace('1s', '1e') == object.id
else:
assert str(logs[0].resourceId) == object.id
if accountId is None:
assert logs[0].accountId == context.project.id
else:
assert logs[0].accountId == accountId
assert logs[0].authenticatedAsAccountId == context.account.id
def test_audit_entry_created(new_context, admin_user_client):
objects = []
new_headers = deepcopy(new_context.user_client._headers)
new_headers['X-API-Project-Id'] = new_context.project.id
made_log(new_context.user_client.create_project(), admin_user_client,
new_context, accountId=new_context.account.id)
new_context.user_client._headers = new_headers
new_context.user_client.reload_schema()
objects.append(new_context.user_client.create_container(
imageUuid=new_context.image_uuid))
objects.append(new_context.user_client.create_container(
imageUuid=new_context.image_uuid))
objects.append(new_context.user_client.create_api_key())
objects.append(new_context.user_client.create_registry(
serverAddress='test.io', name='test'))
objects.append(new_context.user_client.create_api_key())
objects.append(new_context.user_client.create_stack(
name='env-' + random_str()))
for object in objects:
made_log(object, admin_user_client, new_context)
|
smoke-test/test_e2e.py | ShubhamThakre/datahub | 289 | 12742018 | <reponame>ShubhamThakre/datahub
import time
import urllib
import pytest
import requests
from datahub.cli.docker import check_local_docker_containers
from datahub.ingestion.run.pipeline import Pipeline
from tests.utils import ingest_file_via_rest
GMS_ENDPOINT = "http://localhost:8080"
FRONTEND_ENDPOINT = "http://localhost:9002"
KAFKA_BROKER = "localhost:9092"
bootstrap_sample_data = "../metadata-ingestion/examples/mce_files/bootstrap_mce.json"
usage_sample_data = (
"../metadata-ingestion/tests/integration/bigquery-usage/bigquery_usages_golden.json"
)
bq_sample_data = "./sample_bq_data.json"
restli_default_headers = {
"X-RestLi-Protocol-Version": "2.0.0",
}
kafka_post_ingestion_wait_sec = 60
@pytest.fixture(scope="session")
def wait_for_healthchecks():
# Simply assert that everything is healthy, but don't wait.
assert not check_local_docker_containers()
yield
@pytest.mark.dependency()
def test_healthchecks(wait_for_healthchecks):
# Call to wait_for_healthchecks fixture will do the actual functionality.
pass
@pytest.fixture(scope="session")
def frontend_session(wait_for_healthchecks):
session = requests.Session()
headers = {
"Content-Type": "application/json",
}
data = '{"username":"datahub", "password":"<PASSWORD>"}'
response = session.post(f"{FRONTEND_ENDPOINT}/logIn", headers=headers, data=data)
response.raise_for_status()
yield session
@pytest.mark.dependency(depends=["test_healthchecks"])
def test_ingestion_via_rest(wait_for_healthchecks):
ingest_file_via_rest(bootstrap_sample_data)
@pytest.mark.dependency(depends=["test_healthchecks"])
def test_ingestion_usage_via_rest(wait_for_healthchecks):
ingest_file_via_rest(usage_sample_data)
@pytest.mark.dependency(depends=["test_healthchecks"])
def test_ingestion_via_kafka(wait_for_healthchecks):
pipeline = Pipeline.create(
{
"source": {
"type": "file",
"config": {"filename": bq_sample_data},
},
"sink": {
"type": "datahub-kafka",
"config": {
"connection": {
"bootstrap": KAFKA_BROKER,
}
},
},
}
)
pipeline.run()
pipeline.raise_from_status()
# Since Kafka emission is asynchronous, we must wait a little bit so that
# the changes are actually processed.
time.sleep(kafka_post_ingestion_wait_sec)
@pytest.mark.dependency(
depends=[
"test_ingestion_via_rest",
"test_ingestion_via_kafka",
"test_ingestion_usage_via_rest",
]
)
def test_run_ingestion(wait_for_healthchecks):
# Dummy test so that future ones can just depend on this one.
pass
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_gms_get_user():
username = "jdoe"
urn = f"urn:li:corpuser:{username}"
response = requests.get(
f"{GMS_ENDPOINT}/entities/{urllib.parse.quote(urn)}",
headers={
**restli_default_headers,
},
)
response.raise_for_status()
data = response.json()
assert data["value"]
assert data["value"]["com.linkedin.metadata.snapshot.CorpUserSnapshot"]
assert (
data["value"]["com.linkedin.metadata.snapshot.CorpUserSnapshot"]["urn"] == urn
)
@pytest.mark.parametrize(
"platform,dataset_name,env",
[
(
# This one tests the bootstrap sample data.
"urn:li:dataPlatform:kafka",
"SampleKafkaDataset",
"PROD",
),
(
# This one tests BigQuery ingestion.
"urn:li:dataPlatform:bigquery",
"bigquery-public-data.covid19_geotab_mobility_impact.us_border_wait_times",
"PROD",
),
],
)
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_gms_get_dataset(platform, dataset_name, env):
platform = "urn:li:dataPlatform:bigquery"
dataset_name = (
"bigquery-public-data.covid19_geotab_mobility_impact.us_border_wait_times"
)
env = "PROD"
urn = f"urn:li:dataset:({platform},{dataset_name},{env})"
response = requests.get(
f"{GMS_ENDPOINT}/entities/{urllib.parse.quote(urn)}",
headers={
**restli_default_headers,
"X-RestLi-Method": "get",
},
)
response.raise_for_status()
res_data = response.json()
assert res_data["value"]
assert res_data["value"]["com.linkedin.metadata.snapshot.DatasetSnapshot"]
assert (
res_data["value"]["com.linkedin.metadata.snapshot.DatasetSnapshot"]["urn"]
== urn
)
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_gms_batch_get_v2():
platform = "urn:li:dataPlatform:bigquery"
env = "PROD"
name_1 = "bigquery-public-data.covid19_geotab_mobility_impact.us_border_wait_times"
name_2 = "bigquery-public-data.covid19_geotab_mobility_impact.ca_border_wait_times"
urn1 = f"urn:li:dataset:({platform},{name_1},{env})"
urn2 = f"urn:li:dataset:({platform},{name_2},{env})"
response = requests.get(
f"{GMS_ENDPOINT}/entitiesV2?ids=List({urllib.parse.quote(urn1)},{urllib.parse.quote(urn2)})&aspects=List(datasetProperties,ownership)",
headers={
**restli_default_headers,
"X-RestLi-Method": "batch_get",
},
)
response.raise_for_status()
res_data = response.json()
# Verify both urns exist and have correct aspects
assert res_data["results"]
assert res_data["results"][urn1]
assert res_data["results"][urn1]["aspects"]["datasetProperties"]
assert res_data["results"][urn1]["aspects"]["ownership"]
assert res_data["results"][urn2]
assert res_data["results"][urn2]["aspects"]["datasetProperties"]
assert (
"ownership" not in res_data["results"][urn2]["aspects"]
) # Aspect does not exist.
@pytest.mark.parametrize(
"query,min_expected_results",
[
("covid", 1),
("sample", 3),
],
)
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_gms_search_dataset(query, min_expected_results):
json = {"input": f"{query}", "entity": "dataset", "start": 0, "count": 10}
print(json)
response = requests.post(
f"{GMS_ENDPOINT}/entities?action=search",
headers=restli_default_headers,
json=json,
)
response.raise_for_status()
res_data = response.json()
assert res_data["value"]
assert res_data["value"]["numEntities"] >= min_expected_results
assert len(res_data["value"]["entities"]) >= min_expected_results
@pytest.mark.parametrize(
"query,min_expected_results",
[
("covid", 1),
("sample", 3),
],
)
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_gms_search_across_entities(query, min_expected_results):
json = {"input": f"{query}", "entities": [], "start": 0, "count": 10}
print(json)
response = requests.post(
f"{GMS_ENDPOINT}/entities?action=searchAcrossEntities",
headers=restli_default_headers,
json=json,
)
response.raise_for_status()
res_data = response.json()
assert res_data["value"]
assert res_data["value"]["numEntities"] >= min_expected_results
assert len(res_data["value"]["entities"]) >= min_expected_results
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_gms_usage_fetch():
response = requests.post(
f"{GMS_ENDPOINT}/usageStats?action=queryRange",
headers=restli_default_headers,
json={
"resource": "urn:li:dataset:(urn:li:dataPlatform:bigquery,harshal-playground-306419.test_schema.excess_deaths_derived,PROD)",
"duration": "DAY",
"rangeFromEnd": "ALL",
},
)
response.raise_for_status()
data = response.json()["value"]
assert len(data["buckets"]) == 6
assert data["buckets"][0]["metrics"]["topSqlQueries"]
fields = data["aggregations"].pop("fields")
assert len(fields) == 12
assert fields[0]["count"] == 7
users = data["aggregations"].pop("users")
assert len(users) == 1
assert users[0]["count"] == 7
assert data["aggregations"] == {
# "fields" and "users" already popped out
"totalSqlQueries": 7,
"uniqueUserCount": 1,
}
@pytest.mark.dependency(depends=["test_healthchecks"])
def test_frontend_auth(frontend_session):
pass
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_frontend_browse_datasets(frontend_session):
json = {
"query": """query browse($input: BrowseInput!) {\n
browse(input: $input) {\n
start\n
count\n
total\n
groups {
name
}
entities {\n
... on Dataset {\n
urn\n
name\n
}\n
}\n
}\n
}""",
"variables": {"input": {"type": "DATASET", "path": ["prod"]}},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["browse"]
assert len(res_data["data"]["browse"]["entities"]) == 0
assert len(res_data["data"]["browse"]["groups"]) > 0
@pytest.mark.parametrize(
"query,min_expected_results",
[
("covid", 1),
("sample", 3),
("", 1),
],
)
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_frontend_search_datasets(frontend_session, query, min_expected_results):
json = {
"query": """query search($input: SearchInput!) {\n
search(input: $input) {\n
start\n
count\n
total\n
searchResults {\n
entity {\n
... on Dataset {\n
urn\n
name\n
}\n
}\n
}\n
}\n
}""",
"variables": {
"input": {"type": "DATASET", "query": f"{query}", "start": 0, "count": 10}
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["search"]
assert res_data["data"]["search"]["total"] >= min_expected_results
assert len(res_data["data"]["search"]["searchResults"]) >= min_expected_results
@pytest.mark.parametrize(
"query,min_expected_results",
[
("covid", 1),
("sample", 3),
("", 1),
],
)
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_frontend_search_across_entities(frontend_session, query, min_expected_results):
json = {
"query": """query searchAcrossEntities($input: SearchAcrossEntitiesInput!) {\n
searchAcrossEntities(input: $input) {\n
start\n
count\n
total\n
searchResults {\n
entity {\n
... on Dataset {\n
urn\n
name\n
}\n
}\n
}\n
}\n
}""",
"variables": {
"input": {"types": [], "query": f"{query}", "start": 0, "count": 10}
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["searchAcrossEntities"]
assert res_data["data"]["searchAcrossEntities"]["total"] >= min_expected_results
assert (
len(res_data["data"]["searchAcrossEntities"]["searchResults"])
>= min_expected_results
)
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_frontend_user_info(frontend_session):
urn = "urn:li:corpuser:datahub"
json = {
"query": """query corpUser($urn: String!) {\n
corpUser(urn: $urn) {\n
urn\n
username\n
editableInfo {\n
pictureLink\n
}\n
info {\n
firstName\n
fullName\n
title\n
email\n
}\n
}\n
}""",
"variables": {"urn": urn},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["corpUser"]
assert res_data["data"]["corpUser"]["urn"] == urn
@pytest.mark.parametrize(
"platform,dataset_name,env",
[
(
# This one tests the bootstrap sample data.
"urn:li:dataPlatform:kafka",
"SampleKafkaDataset",
"PROD",
),
(
# This one tests BigQuery ingestion.
"urn:li:dataPlatform:bigquery",
"bigquery-public-data.covid19_geotab_mobility_impact.us_border_wait_times",
"PROD",
),
],
)
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_frontend_datasets(frontend_session, platform, dataset_name, env):
urn = f"urn:li:dataset:({platform},{dataset_name},{env})"
json = {
"query": """query getDataset($urn: String!) {\n
dataset(urn: $urn) {\n
urn\n
name\n
description\n
platform {\n
urn\n
}\n
schemaMetadata {\n
name\n
version\n
createdAt\n
}\n
}\n
}""",
"variables": {"urn": urn},
}
# Basic dataset info.
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["dataset"]
assert res_data["data"]["dataset"]["urn"] == urn
assert res_data["data"]["dataset"]["name"] == dataset_name
assert res_data["data"]["dataset"]["platform"]["urn"] == platform
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_ingest_with_system_metadata():
response = requests.post(
f"{GMS_ENDPOINT}/entities?action=ingest",
headers=restli_default_headers,
json={
"entity": {
"value": {
"com.linkedin.metadata.snapshot.CorpUserSnapshot": {
"urn": "urn:li:corpuser:datahub",
"aspects": [
{
"com.linkedin.identity.CorpUserInfo": {
"active": True,
"displayName": "Data Hub",
"email": "<EMAIL>",
"title": "CEO",
"fullName": "Data Hub",
}
}
],
}
}
},
"systemMetadata": {
"lastObserved": 1628097379571,
"runId": "af0fe6e4-f547-11eb-81b2-acde48001122",
},
},
)
response.raise_for_status()
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_ingest_with_blank_system_metadata():
response = requests.post(
f"{GMS_ENDPOINT}/entities?action=ingest",
headers=restli_default_headers,
json={
"entity": {
"value": {
"com.linkedin.metadata.snapshot.CorpUserSnapshot": {
"urn": "urn:li:corpuser:datahub",
"aspects": [
{
"com.linkedin.identity.CorpUserInfo": {
"active": True,
"displayName": "Data Hub",
"email": "<EMAIL>",
"title": "CEO",
"fullName": "Data Hub",
}
}
],
}
}
},
"systemMetadata": {},
},
)
response.raise_for_status()
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_ingest_without_system_metadata():
response = requests.post(
f"{GMS_ENDPOINT}/entities?action=ingest",
headers=restli_default_headers,
json={
"entity": {
"value": {
"com.linkedin.metadata.snapshot.CorpUserSnapshot": {
"urn": "urn:li:corpuser:datahub",
"aspects": [
{
"com.linkedin.identity.CorpUserInfo": {
"active": True,
"displayName": "Data Hub",
"email": "<EMAIL>",
"title": "CEO",
"fullName": "Data Hub",
}
}
],
}
}
},
},
)
response.raise_for_status()
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_frontend_list_policies(frontend_session):
json = {
"query": """query listPolicies($input: ListPoliciesInput!) {\n
listPolicies(input: $input) {\n
start\n
count\n
total\n
policies {\n
urn\n
type\n
name\n
description\n
state\n
resources {\n
type\n
allResources\n
resources\n
}\n
privileges\n
actors {\n
users\n
groups\n
allUsers\n
allGroups\n
resourceOwners\n
}\n
editable\n
}\n
}\n
}""",
"variables": {
"input": {
"start": "0",
"count": "20",
}
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["listPolicies"]
assert res_data["data"]["listPolicies"]["start"] == 0
assert res_data["data"]["listPolicies"]["count"] > 0
assert len(res_data["data"]["listPolicies"]["policies"]) > 0
@pytest.mark.dependency(
depends=["test_healthchecks", "test_run_ingestion", "test_frontend_list_policies"]
)
def test_frontend_update_policy(frontend_session):
json = {
"query": """mutation updatePolicy($urn: String!, $input: PolicyUpdateInput!) {\n
updatePolicy(urn: $urn, input: $input) }""",
"variables": {
"urn": "urn:li:dataHubPolicy:7",
"input": {
"type": "PLATFORM",
"state": "INACTIVE",
"name": "Updated Platform Policy",
"description": "My Metadaata Policy",
"privileges": ["MANAGE_POLICIES"],
"actors": {
"users": ["urn:li:corpuser:datahub"],
"resourceOwners": False,
"allUsers": False,
"allGroups": False,
},
},
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["updatePolicy"]
assert res_data["data"]["updatePolicy"] == "urn:li:dataHubPolicy:7"
@pytest.mark.dependency(
depends=[
"test_healthchecks",
"test_run_ingestion",
"test_frontend_list_policies",
"test_frontend_update_policy",
]
)
def test_frontend_delete_policy(frontend_session):
json = {
"query": """mutation deletePolicy($urn: String!) {\n
deletePolicy(urn: $urn) }""",
"variables": {"urn": "urn:li:dataHubPolicy:7"},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
# Now verify the policy has been removed.
json = {
"query": """query listPolicies($input: ListPoliciesInput!) {\n
listPolicies(input: $input) {\n
start\n
count\n
total\n
policies {\n
urn\n
}\n
}\n
}""",
"variables": {
"input": {
"start": "0",
"count": "20",
}
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["listPolicies"]
# Verify that the URN is no longer in the list
result = filter(
lambda x: x["urn"] == "urn:li:dataHubPolicy:7",
res_data["data"]["listPolicies"]["policies"],
)
assert len(list(result)) == 0
@pytest.mark.dependency(
depends=[
"test_healthchecks",
"test_run_ingestion",
"test_frontend_list_policies",
"test_frontend_delete_policy",
]
)
def test_frontend_create_policy(frontend_session):
# Policy tests are not idempotent. If you rerun this test it will be wrong.
json = {
"query": """mutation createPolicy($input: PolicyUpdateInput!) {\n
createPolicy(input: $input) }""",
"variables": {
"input": {
"type": "METADATA",
"name": "Test Metadata Policy",
"description": "My Metadaata Policy",
"state": "ACTIVE",
"resources": {"type": "dataset", "allResources": True},
"privileges": ["EDIT_ENTITY_TAGS"],
"actors": {
"users": ["urn:li:corpuser:datahub"],
"resourceOwners": False,
"allUsers": False,
"allGroups": False,
},
}
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["createPolicy"]
new_urn = res_data["data"]["createPolicy"]
# Sleep for eventual consistency
time.sleep(3)
# Now verify the policy has been added.
json = {
"query": """query listPolicies($input: ListPoliciesInput!) {\n
listPolicies(input: $input) {\n
start\n
count\n
total\n
policies {\n
urn\n
}\n
}\n
}""",
"variables": {
"input": {
"start": "0",
"count": "20",
}
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["listPolicies"]
# Verify that the URN appears in the list
result = filter(
lambda x: x["urn"] == new_urn, res_data["data"]["listPolicies"]["policies"]
)
assert len(list(result)) == 1
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_frontend_app_config(frontend_session):
json = {
"query": """query appConfig {\n
appConfig {\n
analyticsConfig {\n
enabled\n
}\n
policiesConfig {\n
enabled\n
platformPrivileges {\n
type\n
displayName\n
description\n
}\n
resourcePrivileges {\n
resourceType\n
resourceTypeDisplayName\n
entityType\n
privileges {\n
type\n
displayName\n
description\n
}\n
}\n
}\n
}\n
}"""
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["appConfig"]
assert res_data["data"]["appConfig"]["analyticsConfig"]["enabled"] is True
assert res_data["data"]["appConfig"]["policiesConfig"]["enabled"] is True
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_frontend_me_query(frontend_session):
json = {
"query": """query me {\n
me {\n
corpUser {\n
urn\n
username\n
editableInfo {\n
pictureLink\n
}\n
info {\n
firstName\n
fullName\n
title\n
email\n
}\n
}\n
platformPrivileges {\n
viewAnalytics
managePolicies
manageIdentities
generatePersonalAccessTokens
}\n
}\n
}"""
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["me"]["corpUser"]["urn"] == "urn:li:corpuser:datahub"
assert res_data["data"]["me"]["platformPrivileges"]["viewAnalytics"] is True
assert res_data["data"]["me"]["platformPrivileges"]["managePolicies"] is True
assert res_data["data"]["me"]["platformPrivileges"]["manageIdentities"] is True
assert (
res_data["data"]["me"]["platformPrivileges"]["generatePersonalAccessTokens"]
is True
)
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_list_users(frontend_session):
json = {
"query": """query listUsers($input: ListUsersInput!) {\n
listUsers(input: $input) {\n
start\n
count\n
total\n
users {\n
urn\n
type\n
username\n
properties {\n
firstName
}\n
}\n
}\n
}""",
"variables": {
"input": {
"start": "0",
"count": "2",
}
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["listUsers"]
assert res_data["data"]["listUsers"]["start"] == 0
assert res_data["data"]["listUsers"]["count"] == 2
assert (
len(res_data["data"]["listUsers"]["users"]) >= 2
) # Length of default user set.
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_list_groups(frontend_session):
json = {
"query": """query listGroups($input: ListGroupsInput!) {\n
listGroups(input: $input) {\n
start\n
count\n
total\n
groups {\n
urn\n
type\n
name\n
properties {\n
displayName
}\n
}\n
}\n
}""",
"variables": {
"input": {
"start": "0",
"count": "2",
}
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["listGroups"]
assert res_data["data"]["listGroups"]["start"] == 0
assert res_data["data"]["listGroups"]["count"] == 2
assert (
len(res_data["data"]["listGroups"]["groups"]) >= 2
) # Length of default group set.
@pytest.mark.dependency(
depends=["test_healthchecks", "test_run_ingestion", "test_list_groups"]
)
def test_add_remove_members_from_group(frontend_session):
# Assert no group edges for user jdoe
json = {
"query": """query corpUser($urn: String!) {\n
corpUser(urn: $urn) {\n
urn\n
relationships(input: { types: ["IsMemberOfGroup"], direction: OUTGOING, start: 0, count: 1 }) {\n
total\n
}\n
}\n
}""",
"variables": {"urn": "urn:li:corpuser:jdoe"},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["corpUser"]
assert res_data["data"]["corpUser"]["relationships"]["total"] == 0
# Add jdoe to group
json = {
"query": """mutation addGroupMembers($input: AddGroupMembersInput!) {\n
addGroupMembers(input: $input) }""",
"variables": {
"input": {
"groupUrn": "urn:li:corpGroup:bfoo",
"userUrns": ["urn:li:corpuser:jdoe"],
}
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
# Sleep for edge store to be updated. Not ideal!
time.sleep(3)
# Verify the member has been added
json = {
"query": """query corpUser($urn: String!) {\n
corpUser(urn: $urn) {\n
urn\n
relationships(input: { types: ["IsMemberOfGroup"], direction: OUTGOING, start: 0, count: 1 }) {\n
total\n
}\n
}\n
}""",
"variables": {"urn": "urn:li:corpuser:jdoe"},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["corpUser"]
assert res_data["data"]["corpUser"]["relationships"]
assert res_data["data"]["corpUser"]["relationships"]["total"] == 1
# Now remove jdoe from the group
json = {
"query": """mutation removeGroupMembers($input: RemoveGroupMembersInput!) {\n
removeGroupMembers(input: $input) }""",
"variables": {
"input": {
"groupUrn": "urn:li:corpGroup:bfoo",
"userUrns": ["urn:li:corpuser:jdoe"],
}
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
# Sleep for edge store to be updated. Not ideal!
time.sleep(3)
# Verify the member has been removed
json = {
"query": """query corpUser($urn: String!) {\n
corpUser(urn: $urn) {\n
urn\n
relationships(input: { types: ["IsMemberOfGroup"], direction: OUTGOING, start: 0, count: 1 }) {\n
total\n
}\n
}\n
}""",
"variables": {"urn": "urn:li:corpuser:jdoe"},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["corpUser"]
assert res_data["data"]["corpUser"]["relationships"]["total"] == 0
@pytest.mark.dependency(
depends=["test_healthchecks", "test_run_ingestion"]
)
def test_update_corp_group_properties(frontend_session):
group_urn = "urn:li:corpGroup:bfoo"
# Update Corp Group Description
json = {
"query": """mutation updateCorpGroupProperties($urn: String!, $input: CorpGroupUpdateInput!) {\n
updateCorpGroupProperties(urn: $urn, input: $input) { urn } }""",
"variables": {
"urn": group_urn,
"input": {
"description": "My test description",
"slack": "test_group_slack",
"email": "<EMAIL>",
},
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
print(res_data)
assert "error" not in res_data
assert res_data["data"]["updateCorpGroupProperties"] is not None
# Verify the description has been updated
json = {
"query": """query corpGroup($urn: String!) {\n
corpGroup(urn: $urn) {\n
urn\n
editableProperties {\n
description\n
slack\n
email\n
}\n
}\n
}""",
"variables": {"urn": group_urn},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert "error" not in res_data
assert res_data["data"]
assert res_data["data"]["corpGroup"]
assert res_data["data"]["corpGroup"]["editableProperties"]
assert res_data["data"]["corpGroup"]["editableProperties"] == {
"description": "My test description",
"slack": "test_group_slack",
"email": "<EMAIL>"
}
# Reset the editable properties
json = {
"query": """mutation updateCorpGroupProperties($urn: String!, $input: UpdateCorpGroupPropertiesInput!) {\n
updateCorpGroupProperties(urn: $urn, input: $input) }""",
"variables": {
"urn": group_urn,
"input": {
"description": "",
"slack": "",
"email": ""
},
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
@pytest.mark.dependency(
depends=["test_healthchecks", "test_run_ingestion", "test_update_corp_group_properties"]
)
def test_update_corp_group_description(frontend_session):
group_urn = "urn:li:corpGroup:bfoo"
# Update Corp Group Description
json = {
"query": """mutation updateDescription($input: DescriptionUpdateInput!) {\n
updateDescription(input: $input) }""",
"variables": {
"input": {
"description": "My test description",
"resourceUrn": group_urn
},
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
print(res_data)
assert "error" not in res_data
assert res_data["data"]["updateDescription"] is True
# Verify the description has been updated
json = {
"query": """query corpGroup($urn: String!) {\n
corpGroup(urn: $urn) {\n
urn\n
editableProperties {\n
description\n
}\n
}\n
}""",
"variables": {"urn": group_urn},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert "error" not in res_data
assert res_data["data"]
assert res_data["data"]["corpGroup"]
assert res_data["data"]["corpGroup"]["editableProperties"]
assert res_data["data"]["corpGroup"]["editableProperties"]["description"] == "My test description"
# Reset Corp Group Description
json = {
"query": """mutation updateDescription($input: DescriptionUpdateInput!) {\n
updateDescription(input: $input) }""",
"variables": {
"input": {
"description": "",
"resourceUrn": group_urn
},
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
@pytest.mark.dependency(
depends=[
"test_healthchecks",
"test_run_ingestion",
"test_list_groups",
"test_add_remove_members_from_group",
]
)
def test_remove_user(frontend_session):
json = {
"query": """mutation removeUser($urn: String!) {\n
removeUser(urn: $urn) }""",
"variables": {"urn": "urn:li:corpuser:jdoe"},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
json = {
"query": """query corpUser($urn: String!) {\n
corpUser(urn: $urn) {\n
urn\n
properties {\n
firstName\n
}\n
}\n
}""",
"variables": {"urn": "urn:li:corpuser:jdoe"},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert "error" not in res_data
assert res_data["data"]
assert res_data["data"]["corpUser"]
assert res_data["data"]["corpUser"]["properties"] is None
@pytest.mark.dependency(
depends=[
"test_healthchecks",
"test_run_ingestion",
"test_list_groups",
"test_add_remove_members_from_group",
]
)
def test_remove_group(frontend_session):
json = {
"query": """mutation removeGroup($urn: String!) {\n
removeGroup(urn: $urn) }""",
"variables": {"urn": "urn:li:corpGroup:bfoo"},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
json = {
"query": """query corpGroup($urn: String!) {\n
corpGroup(urn: $urn) {\n
urn\n
properties {\n
displayName\n
}\n
}\n
}""",
"variables": {"urn": "urn:li:corpGroup:bfoo"},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["corpGroup"]
assert res_data["data"]["corpGroup"]["properties"] is None
@pytest.mark.dependency(
depends=[
"test_healthchecks",
"test_run_ingestion",
"test_list_groups",
"test_remove_group",
]
)
def test_create_group(frontend_session):
json = {
"query": """mutation createGroup($input: CreateGroupInput!) {\n
createGroup(input: $input) }""",
"variables": {
"input": {
"id": "test-id",
"name": "Test Group",
"description": "My test group",
}
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
json = {
"query": """query corpGroup($urn: String!) {\n
corpGroup(urn: $urn) {\n
urn\n
properties {\n
displayName\n
}\n
}\n
}""",
"variables": {"urn": "urn:li:corpGroup:test-id"},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["corpGroup"]
assert res_data["data"]["corpGroup"]["properties"]["displayName"] == "Test Group"
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_home_page_recommendations(frontend_session):
min_expected_recommendation_modules = 0
json = {
"query": """query listRecommendations($input: ListRecommendationsInput!) {\n
listRecommendations(input: $input) { modules { title } } }""",
"variables": {
"input": {
"userUrn": "urn:li:corpuser:datahub",
"requestContext": {"scenario": "HOME"},
"limit": 5,
}
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
print(res_data)
assert res_data
assert res_data["data"]
assert res_data["data"]["listRecommendations"]
assert "error" not in res_data
assert (
len(res_data["data"]["listRecommendations"]["modules"])
> min_expected_recommendation_modules
)
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_search_results_recommendations(frontend_session):
# This test simply ensures that the recommendations endpoint does not return an error.
json = {
"query": """query listRecommendations($input: ListRecommendationsInput!) {\n
        listRecommendations(input: $input) { modules { title } } }""",
"variables": {
"input": {
"userUrn": "urn:li:corpuser:datahub",
"requestContext": {
"scenario": "SEARCH_RESULTS",
"searchRequestContext": {"query": "asdsdsdds", "filters": []},
},
"limit": 5,
}
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert "error" not in res_data
@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_generate_personal_access_token(frontend_session):
# Test success case
json = {
"query": """query getAccessToken($input: GetAccessTokenInput!) {\n
getAccessToken(input: $input) {\n
accessToken\n
}\n
}""",
"variables": {
"input": {
"type": "PERSONAL",
"actorUrn": "urn:li:corpuser:datahub",
"duration": "ONE_MONTH",
}
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert res_data["data"]
assert res_data["data"]["getAccessToken"]["accessToken"] is not None
assert "error" not in res_data
# Test unauthenticated case
json = {
"query": """query getAccessToken($input: GetAccessTokenInput!) {\n
            getAccessToken(input: $input) {\n
              accessToken\n
            }\n
          }""",
"variables": {
"input": {
"type": "PERSONAL",
"actorUrn": "urn:li:corpuser:jsmith",
"duration": "ONE_DAY",
}
},
}
response = frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql", json=json)
response.raise_for_status()
res_data = response.json()
assert res_data
assert "errors" in res_data # Assert the request fails
|
modules/rtm2org.py | hwiorn/orger | 241 | 12742048 | <filename>modules/rtm2org.py
#!/usr/bin/env python3
from orger import StaticView
from orger.inorganic import node, link
from orger.common import dt_heading
from my.rtm import active_tasks
class RtmView(StaticView):
def get_items(self):
for t in active_tasks():
yield t.uid, node(
dt_heading(t.time, t.title),
tags=t.tags,
body='\n'.join(t.notes),
)
if __name__ == '__main__':
RtmView.main()
|
algorithms/maths/primes_sieve_of_eratosthenes.py | zhengli0817/algorithms | 128 | 12742074 | <reponame>zhengli0817/algorithms
'''
Using the sieve of Eratosthenes, primes(x) returns the list of all primes less than or equal to x.
Modification:
We don't need to check even numbers at all: the sieve covers only odd numbers,
and 2 is added to the primes list by default.
The sieve array has x / 2 - 1 entries if x is even, else x / 2
(the -1 for even x excludes x itself),
because we only need the odd numbers from 3 up to x.
The value represented at index i is (i*2 + 3).
For example, for x = 10, we start with an array of x / 2 - 1 = 4
[1, 1, 1, 1]
 3  5  7  9
For x = 11:
[1, 1, 1, 1, 1]
 3  5  7  9  11 # 11 is odd, so it's included in the list
With this, we have halved the array size,
and the amount of work is also roughly halved.
'''
def primes(x):
assert(x >= 0)
# If x is even, exclude x from list (-1):
sieve_size = (x//2 - 1) if x % 2 == 0 else (x//2)
sieve = [1 for v in range(sieve_size)] # Sieve
primes = [] # List of Primes
if x >= 2:
primes.append(2) # Add 2 by default
for i in range(sieve_size):
if sieve[i] == 1:
value_at_i = i*2 + 3
primes.append(value_at_i)
for j in range(i, sieve_size, value_at_i):
sieve[j] = 0
return primes
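# Hedged usage sketch (added for illustration; not part of the original module).
# The expected outputs follow directly from the docstring examples above.
if __name__ == '__main__':
    assert primes(0) == []
    assert primes(2) == [2]
    assert primes(10) == [2, 3, 5, 7]
    assert primes(11) == [2, 3, 5, 7, 11]  # x itself is kept when it is an odd prime
    print(primes(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]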
|
codalab/scripts/compute.py | AIMultimediaLab/AI4Media-EaaS-prototype-Py2-public | 333 | 12742078 | <filename>codalab/scripts/compute.py
#!/usr/bin/env python
#
#
import web
urls = ('/api/computation/(.+)', 'computation')
class computation:
def GET(self, id):
print "ID: %s" % id
if __name__ == "__main__":
app = web.application(urls, globals())
app.internalerror = web.debugerror
app.run()
|
ad_examples/timeseries/word2vec_custom.py | matwey/ad_examples | 773 | 12742079 | <filename>ad_examples/timeseries/word2vec_custom.py
import numpy as np
import numpy.random as rnd
import tensorflow as tf
from ..common.utils import Timer, logger
"""
This is a simplified version of TensorFlow's word2vec_basic.py.
The primary purpose is pedagogical: instead of calling some tensorflow functions
such as tf.nn.nce_loss, we directly sample uniformly at random for negative samples.
The other reason to use this is the activity modeling example in activity_word2vec.py,
where the 'vocabulary' is limited to the small number of sensors, so a customized
implementation can be more efficient.
"""
class CustomWord2vec(object):
def __init__(self,
sensors=None, sensor2code=None, code2sensor=None,
dims=100, window_size=3, neg_samples=3, n_epochs=1,
learning_rate=0.001, debug=False):
self.sensors = sensors
self.sensor2code = sensor2code
self.code2sensor = code2sensor
self.dims = dims
self.window_size = window_size
self.neg_samples = neg_samples
self.n_epochs = n_epochs
self.learning_rate = learning_rate
self.debug = debug
self.X = self.Y = self.Z = self.W = self.embedding = self.weights = None
self.normalized_embeddings = None
self.similarity = None
self.training_op = None
def fit(self, seq):
tf.set_random_seed(42)
self.X = tf.placeholder(tf.int32, shape=[None]) # input 'word'
self.Y = tf.placeholder(tf.int32, shape=[None]) # predicted 'word'
self.Z = tf.placeholder(tf.float32, shape=[None]) # multiplier {1, -1}
self.W = tf.placeholder(tf.float32, shape=[None]) # weight [0, 1.0]
vocab_size = len(self.sensors)
valid_examples = np.arange(0, vocab_size)
valid_size = len(valid_examples)
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
top_k = 4 # number of nearest neighbors for validation of similarity
init = tf.random_uniform((vocab_size, self.dims),
minval=-1.0, maxval=1.0, dtype=tf.float32)
# the encoding matrix
self.embedding = tf.Variable(init, name="embedding")
norm = tf.sqrt(tf.reduce_sum(tf.square(self.embedding), 1, keepdims=True))
self.normalized_embeddings = self.embedding / norm
self.valid_embeddings = tf.nn.embedding_lookup(self.normalized_embeddings,
valid_dataset)
self.similarity = tf.matmul(
self.valid_embeddings, self.normalized_embeddings, transpose_b=True)
w_i = tf.nn.embedding_lookup(self.embedding, self.X)
# the 'output' matrix, or the coefficients of logistic regression
# for each class (words). This will be ignored once the embeddings
# have been computed
self.weights = tf.Variable(init, name="weights") # weights
self.b = tf.Variable(tf.zeros(vocab_size), name="b", dtype=tf.float32) # biases
w_o = tf.nn.embedding_lookup(self.weights, self.Y)
w_b = tf.nn.embedding_lookup(self.b, self.Y)
with tf.name_scope("loss"):
"""
Refer to Equation 4 in:
Distributed Representations of Words and Phrases and their Compositionality,
                by Mikolov et al., 2013
loss = log(sigmoid(W_i.W_pos)) + E[log(sigmoid(-W_i.W_neg))]
Note: The second term above (E[.]) is an 'expectation'.
                To approximate the expectation, we weight each sample by self.W.
                To distinguish between positive/negative examples, we multiply by self.Z.
"""
sim = tf.reduce_sum(tf.multiply(w_i, w_o), axis=1) + w_b
sim_sigmoids = tf.log(tf.nn.sigmoid(tf.multiply(sim, self.Z)))
log_lik_loss = -tf.reduce_mean(tf.multiply(sim_sigmoids, self.W))
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
self.training_op = optimizer.minimize(log_lik_loss)
init = tf.global_variables_initializer()
self.session = tf.Session()
self.session.run(init)
timer = Timer()
i = 0
for epoch in range(self.n_epochs):
for x, y, z, w in self.get_batches_skip_gram(seq, window_size=self.window_size,
neg_samples=self.neg_samples):
# logger.debug(np.hstack([y, x, z, w]))
sim_v, log_lik_loss_v, _ = self.session.run([sim, log_lik_loss, self.training_op],
feed_dict={self.X: x, self.Y: y, self.Z: z, self.W: w})
if self.debug and (i == 0 or (i + 1) % 5000 == 0):
# the original word2vec code for logging the most similar
# words for a particular word
logger.debug("i: %d, log_lik_loss_v: %f" % (i, log_lik_loss_v))
sim_valid = self.session.run(self.similarity)
for j in range(valid_size):
valid_word = self.code2sensor[valid_examples[j]]
nearest = (-sim_valid[j, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = self.code2sensor[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
logger.debug(log_str)
if (i + 1) % 5000 == 0:
# logger.debug("sim_v: %s\n%s" % (str(sim_v.shape), str(sim_v)))
logger.debug("processed %d" % (i + 1))
# break # early terminate for DEBUG only
i += 1
logger.debug(timer.message("Completed epoch %d in" % epoch))
def get_embeddings(self, normalized=True):
return self.session.run(self.normalized_embeddings) if normalized \
else self.session.run(self.embedding)
def get_batches_skip_gram(self, seq, window_size=3, skip_size=1, n_contexts=10, neg_samples=3):
""" Skip-gram model for word2vec
The max #samples per batch will be:
n_contexts x ((window_size - 1) + neg_samples)
:param window_size: int
length of each context window. Must be > 1 and must be an odd number.
:param skip_size: int
:param n_contexts: int
Number of context windows per batch.
:param neg_samples: int
Number of negative samples per window
:return:
"""
if window_size <= 1 or window_size % 2 == 0:
raise ValueError("window_size must be greater than 1 and must be odd")
n = len(seq)
s = window_size // 2
all_sensors = set(self.code2sensor.keys())
st = 0
sz = (window_size - 1) + neg_samples # number of samples per context window
batch_size = n_contexts * sz
x = y = z = w = None
for i in range(s, n - s, skip_size):
if i + skip_size >= n:
logger.debug("i: %d, n: %d, s: %d, sz: %d" % (i, n, s, sz))
if st == 0:
x = np.zeros(batch_size, dtype=np.int32)
y = np.zeros(batch_size, dtype=np.int32)
z = np.zeros(batch_size, dtype=np.float32)
w = np.zeros(batch_size, dtype=np.float32)
w_in = seq[i]
# w_in will be same for both positive and negative samples
x[st:(st + sz)] = w_in
z[st:(st + 2 * s)] = 1
z[(st + 2 * s):(st + sz)] = -1
w[st:(st + 2 * s)] = 1 # weights for positive samples
w[(st + 2 * s):(st + sz)] = 1. / neg_samples # weights for negative samples
# first, populate the positive examples
y[st:(st + s)] = seq[(i - s):i]
y[(st + s):(st + 2 * s)] = seq[(i + 1):(i + s + 1)]
# Now, sample a few negative examples...
# sample a few sensor ids uniformly at random from those
# which do not occur in the current context
curr = set(seq[(i - s):(i + s)]) # sensors in current context window
non_context = list(all_sensors - curr) # sensors *not* in current context window
np.random.shuffle(non_context) # random subsample
y[(st + 2 * s):(st + sz)] = non_context[0:neg_samples]
st += sz
if st >= batch_size:
yield x, y, z, w
st = 0
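# Hedged usage sketch (added for illustration; not part of the original module). The
# sensor names, the toy sequence and the tiny hyper-parameters below are invented, and
# the snippet assumes this file is imported as part of the ad_examples package, since
# the relative import of Timer/logger above prevents running it as a standalone script.
def _toy_word2vec_example():
    sensors = ['kitchen', 'bedroom', 'bathroom', 'frontdoor']
    sensor2code = {s: i for i, s in enumerate(sensors)}
    code2sensor = {i: s for i, s in enumerate(sensors)}
    # a toy 'activity' stream: 500 sensor codes drawn uniformly at random
    seq = list(rnd.RandomState(42).randint(0, len(sensors), size=500))
    w2v = CustomWord2vec(sensors=sensors, sensor2code=sensor2code, code2sensor=code2sensor,
                         dims=8, window_size=3, neg_samples=2, n_epochs=1,
                         learning_rate=0.01, debug=False)
    # each skip-gram batch packs n_contexts * ((window_size - 1) + neg_samples) samples
    x, y, z, w = next(w2v.get_batches_skip_gram(seq, window_size=3, neg_samples=2))
    logger.debug("one batch: x %s, y %s, z %s, w %s" % (x.shape, y.shape, z.shape, w.shape))
    w2v.fit(seq)
    return w2v.get_embeddings(normalized=True)  # one row per sensor, shape (len(sensors), dims)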
|
removestar/tests/test_removestar.py | asmeurer/removestar | 102 | 12742089 | from pyflakes.checker import Checker
import sys
import ast
import os
from pathlib import Path
from filecmp import dircmp
import subprocess
from pytest import raises
import pytest
from ..removestar import (names_to_replace, star_imports, get_names,
get_names_from_dir, get_names_dynamically, fix_code,
get_mod_filename, replace_imports,
is_noqa_comment_allowing_star_import,
ExternalModuleError)
code_mod1 = """\
a = 1
aa = 2
b = 3
"""
mod1_names = {'a', 'aa', 'b'}
code_mod2 = """\
b = 1
c = 2
cc = 3
"""
mod2_names = {'b', 'c', 'cc'}
code_mod3 = """\
name = 0
"""
mod3_names = {'name'}
code_mod4 = """\
from .mod1 import *
from .mod2 import *
from .mod3 import name
def func():
return a + b + c + d + d + name
"""
mod4_names = {'a', 'aa', 'b', 'c', 'cc', 'name', 'func'}
code_mod4_fixed = """\
from .mod1 import a
from .mod2 import b, c
from .mod3 import name
def func():
return a + b + c + d + d + name
"""
code_mod5 = """\
from module.mod1 import *
from module.mod2 import *
from module.mod3 import name
def func():
return a + b + c + d + d + name
"""
mod5_names = {'a', 'aa', 'b', 'c', 'cc', 'name', 'func'}
code_mod5_fixed = """\
from module.mod1 import a
from module.mod2 import b, c
from module.mod3 import name
def func():
return a + b + c + d + d + name
"""
code_mod6 = """\
from os.path import *
isfile(join('a', 'b'))
"""
code_mod6_fixed = """\
from os.path import isfile, join
isfile(join('a', 'b'))
"""
code_mod7 = """\
from .mod6 import *
"""
code_mod7_fixed = ""
mod7_names = {'isfile', 'join'}
code_mod8 = """\
a = 1
b = 2
c = 3
__all__ = ['a']
__all__ += ['b']
"""
mod8_names = {'a', 'b'}
code_mod9 = """\
from .mod8 import *
def func():
return a + b
"""
code_mod9_fixed = """\
from .mod8 import a, b
def func():
return a + b
"""
mod9_names = {'a', 'b', 'func'}
code_submod1 = """\
from ..mod1 import *
from ..mod2 import *
from ..mod3 import name
from .submod3 import *
def func():
return a + b + c + d + d + e + name
"""
submod1_names = {'a', 'aa', 'b', 'c', 'cc', 'e', 'name', 'func'}
code_submod1_fixed = """\
from ..mod1 import a
from ..mod2 import b, c
from ..mod3 import name
from .submod3 import e
def func():
return a + b + c + d + d + e + name
"""
code_submod2 = """\
from module.mod1 import *
from module.mod2 import *
from module.mod3 import name
from module.submod.submod3 import *
def func():
return a + b + c + d + d + e + name
"""
submod2_names = {'a', 'aa', 'b', 'c', 'cc', 'e', 'name', 'func'}
code_submod2_fixed = """\
from module.mod1 import a
from module.mod2 import b, c
from module.mod3 import name
from module.submod.submod3 import e
def func():
return a + b + c + d + d + e + name
"""
code_submod3 = """\
e = 1
"""
submod3_names = {'e'}
code_submod4 = """\
from . import *
func()
"""
submod4_names = {'func'}
code_submod4_fixed = """\
from . import func
func()
"""
code_submod_init = """\
from .submod1 import func
"""
submod_names = {'func'}
# An actual import adds submod1 and submod3 to the submod namespace, since
# they are imported submodule names. The static code does not yet support
# these. If any other imports happen first, like 'import submod.submod2',
# those would be included as well.
submod_dynamic_names = {'submod1', 'submod3', 'func'}
code_bad_syntax = """\
from mod
"""
code_mod_unfixable = """\
from .mod1 import *;
from .mod2 import\t*
def func():
return a + c
"""
mod_unfixable_names = {'a', 'aa', 'b', 'c', 'cc', 'func'}
code_mod_commented_unused_star = """\
from .mod1 import * # comment about mod1
from .mod2 import * # noqa
"""
mod_commented_unused_star_names = {'a', 'aa', 'b', 'c', 'cc'}
code_mod_commented_unused_star_fixed = """\
# comment about mod1
from .mod2 import * # noqa
"""
code_mod_commented_star = """\
from .mod1 import * # noqa
from .mod2 import * # noqa: F401
from .mod3 import * # generic comment
def func():
return a + c + name
"""
mod_commented_star_names = {'a', 'aa', 'b', 'c', 'cc', 'name', 'func'}
code_mod_commented_star_fixed = """\
from .mod1 import * # noqa
from .mod2 import * # noqa: F401
from .mod3 import name # generic comment
def func():
return a + c + name
"""
code_submod_recursive_init = """\
from .submod1 import *
"""
submod_recursive_names = {'a', 'b'}
submod_recursive_dynamic_names = {'submod1', 'a', 'b'}
code_submod_recursive_submod1 = """\
a = 1
b = 2
"""
submod_recursive_submod1_names = {'a', 'b'}
code_submod_recursive_submod2 = """\
from . import *
def func():
return a + 1
"""
submod_recursive_submod2_names = {'a', 'b', 'func'}
submod_recursive_submod2_dynamic_names = {'a', 'b', 'func', 'submod1'}
code_submod_recursive_submod2_fixed = """\
from . import a
def func():
return a + 1
"""
def create_module(module):
os.makedirs(module)
with open(module/'mod1.py', 'w') as f:
f.write(code_mod1)
with open(module/'mod2.py', 'w') as f:
f.write(code_mod2)
with open(module/'mod3.py', 'w') as f:
f.write(code_mod3)
with open(module/'mod4.py', 'w') as f:
f.write(code_mod4)
with open(module/'mod5.py', 'w') as f:
f.write(code_mod5)
with open(module/'mod6.py', 'w') as f:
f.write(code_mod6)
with open(module/'mod7.py', 'w') as f:
f.write(code_mod7)
with open(module/'mod8.py', 'w') as f:
f.write(code_mod8)
with open(module/'mod9.py', 'w') as f:
f.write(code_mod9)
with open(module/'__init__.py', 'w') as f:
pass
with open(module/'mod_bad.py', 'w') as f:
f.write(code_bad_syntax)
with open(module/'mod_unfixable.py', 'w') as f:
f.write(code_mod_unfixable)
with open(module/'mod_commented_unused_star.py', 'w') as f:
f.write(code_mod_commented_unused_star)
with open(module/'mod_commented_star.py', 'w') as f:
f.write(code_mod_commented_star)
submod = module/'submod'
os.makedirs(submod)
with open(submod/'__init__.py', 'w') as f:
f.write(code_submod_init)
with open(submod/'submod1.py', 'w') as f:
f.write(code_submod1)
with open(submod/'submod2.py', 'w') as f:
f.write(code_submod2)
with open(submod/'submod3.py', 'w') as f:
f.write(code_submod3)
with open(submod/'submod4.py', 'w') as f:
f.write(code_submod4)
submod_recursive = module/'submod_recursive'
os.makedirs(submod_recursive)
with open(submod_recursive/'__init__.py', 'w') as f:
f.write(code_submod_recursive_init)
with open(submod_recursive/'submod1.py', 'w') as f:
f.write(code_submod_recursive_submod1)
with open(submod_recursive/'submod2.py', 'w') as f:
f.write(code_submod_recursive_submod2)
def test_names_to_replace():
for code in [code_mod1, code_mod2, code_mod3, code_mod7, code_mod8,
code_submod3, code_submod_init, code_submod_recursive_init,
code_submod_recursive_submod1]:
names = names_to_replace(Checker(ast.parse(code)))
assert names == set()
for code in [code_mod4, code_mod5]:
names = names_to_replace(Checker(ast.parse(code)))
assert names == {'a', 'b', 'c', 'd'}
for code in [code_submod1, code_submod2]:
names = names_to_replace(Checker(ast.parse(code)))
assert names == {'a', 'b', 'c', 'd', 'e'}
names = names_to_replace(Checker(ast.parse(code_submod4)))
assert names == {'func'}
names = names_to_replace(Checker(ast.parse(code_mod6)))
assert names == {'isfile', 'join'}
names = names_to_replace(Checker(ast.parse(code_submod_recursive_submod2)))
assert names == {'a'}
names = names_to_replace(Checker(ast.parse(code_mod9)))
assert names == {'a', 'b'}
names = names_to_replace(Checker(ast.parse(code_mod_unfixable)))
assert names == {'a', 'c'}
names = names_to_replace(Checker(ast.parse(code_mod_commented_unused_star)))
assert names == set()
names = names_to_replace(Checker(ast.parse(code_mod_commented_star)))
assert names == {'a', 'c', 'name'}
def test_star_imports():
for code in [code_mod1, code_mod2, code_mod3, code_mod8, code_submod3,
code_submod_init, code_submod_recursive_submod1]:
stars = star_imports(Checker(ast.parse(code)))
assert stars == []
stars = star_imports(Checker(ast.parse(code_mod4)))
assert stars == ['.mod1', '.mod2']
stars = star_imports(Checker(ast.parse(code_mod5)))
assert stars == ['module.mod1', 'module.mod2']
stars = star_imports(Checker(ast.parse(code_mod6)))
assert stars == ['os.path']
stars = star_imports(Checker(ast.parse(code_mod7)))
assert stars == ['.mod6']
stars = star_imports(Checker(ast.parse(code_mod9)))
assert stars == ['.mod8']
stars = star_imports(Checker(ast.parse(code_submod1)))
assert stars == ['..mod1', '..mod2', '.submod3']
stars = star_imports(Checker(ast.parse(code_submod2)))
assert stars == ['module.mod1', 'module.mod2', 'module.submod.submod3']
for code in [code_submod4, code_submod_recursive_submod2]:
stars = star_imports(Checker(ast.parse(code)))
assert stars == ['.']
stars = star_imports(Checker(ast.parse(code_submod_recursive_init)))
assert stars == ['.submod1']
stars = star_imports(Checker(ast.parse(code_mod_unfixable)))
assert stars == ['.mod1', '.mod2']
stars = star_imports(Checker(ast.parse(code_mod_commented_unused_star)))
assert stars == ['.mod1', '.mod2']
stars = star_imports(Checker(ast.parse(code_mod_commented_star)))
assert stars == ['.mod1', '.mod2', '.mod3']
def test_get_names():
names = get_names(code_mod1)
assert names == {'a', 'aa', 'b'}
names = get_names(code_mod2)
assert names == {'b', 'c', 'cc'}
names = get_names(code_mod3)
assert names == {'name'}
names = get_names(code_mod4)
# TODO: Remove the imported name 'name'
assert names == {'.mod1.*', '.mod2.*', 'name', 'func'}
names = get_names(code_mod5)
# TODO: Remove the imported name 'name'
assert names == {'module.mod1.*', 'module.mod2.*', 'name', 'func'}
names = get_names(code_mod6)
assert names == {'os.path.*'}
names = get_names(code_submod_init)
assert names == {'func'}
names = get_names(code_submod1)
# TODO: Remove the imported name 'name'
assert names == {'..mod1.*', '..mod2.*', '.submod3.*', 'name', 'func'}
names = get_names(code_submod2)
# TODO: Remove the imported name 'name'
assert names == {'module.mod1.*', 'module.mod2.*',
'module.submod.submod3.*', 'name', 'func'}
names = get_names(code_submod3)
assert names == {'e'}
names = get_names(code_submod4)
assert names == {'..*'}
raises(SyntaxError, lambda: get_names(code_bad_syntax))
names = get_names(code_mod_unfixable)
assert names == {'.mod1.*', '.mod2.*', 'func'}
names = get_names(code_mod_commented_unused_star)
assert names == {'.mod1.*', '.mod2.*'}
names = get_names(code_mod_commented_star)
assert names == {'.mod1.*', '.mod2.*', '.mod3.*', 'func'}
names = get_names(code_submod_recursive_init)
assert names == {'.submod1.*'}
names = get_names(code_submod_recursive_submod1)
assert names == {'a', 'b'}
names = get_names(code_submod_recursive_submod2)
assert names == {'..*', 'func'}
@pytest.mark.parametrize('relative', [True, False])
def test_get_names_from_dir(tmpdir, relative):
directory = tmpdir/'module'
create_module(directory)
if relative:
chdir = tmpdir
directory = Path('module')
else:
chdir = '.'
curdir = os.path.abspath('.')
try:
os.chdir(chdir)
assert get_names_from_dir('.mod1', directory) == mod1_names
assert get_names_from_dir('.mod2', directory) == mod2_names
assert get_names_from_dir('.mod3', directory) == mod3_names
assert get_names_from_dir('.mod4', directory) == mod4_names
assert get_names_from_dir('.mod5', directory) == mod5_names
assert get_names_from_dir('.mod6', directory) == get_names_dynamically('os.path')
raises(NotImplementedError, lambda: get_names_from_dir('.mod6', directory, allow_dynamic=False))
assert get_names_from_dir('.mod7', directory) == get_names_dynamically('os.path')
raises(NotImplementedError, lambda: get_names_from_dir('.mod7', directory, allow_dynamic=False))
assert get_names_from_dir('.mod8', directory) == mod8_names
assert get_names_from_dir('.mod9', directory) == mod9_names
assert get_names_from_dir('.mod_unfixable', directory) == mod_unfixable_names
assert get_names_from_dir('.mod_commented_unused_star', directory) == mod_commented_unused_star_names
assert get_names_from_dir('.mod_commented_star', directory) == mod_commented_star_names
assert get_names_from_dir('.submod', directory) == submod_names
assert get_names_from_dir('.submod.submod1', directory) == submod1_names
assert get_names_from_dir('.submod.submod2', directory) == submod2_names
assert get_names_from_dir('.submod.submod3', directory) == submod3_names
assert get_names_from_dir('.submod.submod4', directory) == submod4_names
assert get_names_from_dir('.submod_recursive', directory) == submod_recursive_names
assert get_names_from_dir('.submod_recursive.submod1', directory) == submod_recursive_submod1_names
assert get_names_from_dir('.submod_recursive.submod2', directory) == submod_recursive_submod2_names
assert get_names_from_dir('module.mod1', directory) == mod1_names
assert get_names_from_dir('module.mod2', directory) == mod2_names
assert get_names_from_dir('module.mod3', directory) == mod3_names
assert get_names_from_dir('module.mod4', directory) == mod4_names
assert get_names_from_dir('module.mod5', directory) == mod5_names
assert get_names_from_dir('module.mod6', directory) == get_names_dynamically('os.path')
raises(NotImplementedError, lambda: get_names_from_dir('module.mod6', directory, allow_dynamic=False))
assert get_names_from_dir('module.mod7', directory) == get_names_dynamically('os.path')
raises(NotImplementedError, lambda: get_names_from_dir('module.mod7', directory, allow_dynamic=False))
assert get_names_from_dir('module.mod8', directory) == mod8_names
assert get_names_from_dir('module.mod9', directory) == mod9_names
assert get_names_from_dir('module.mod_unfixable', directory) == mod_unfixable_names
assert get_names_from_dir('module.mod_commented_unused_star', directory) == mod_commented_unused_star_names
assert get_names_from_dir('module.mod_commented_star', directory) == mod_commented_star_names
assert get_names_from_dir('module.submod', directory) == submod_names
assert get_names_from_dir('module.submod.submod1', directory) == submod1_names
assert get_names_from_dir('module.submod.submod2', directory) == submod2_names
assert get_names_from_dir('module.submod.submod3', directory) == submod3_names
assert get_names_from_dir('module.submod.submod4', directory) == submod4_names
assert get_names_from_dir('module.submod_recursive', directory) == submod_recursive_names
assert get_names_from_dir('module.submod_recursive.submod1', directory) == submod_recursive_submod1_names
assert get_names_from_dir('module.submod_recursive.submod2', directory) == submod_recursive_submod2_names
submod = directory/'submod'
assert get_names_from_dir('..submod', submod) == submod_names
assert get_names_from_dir('.', submod) == submod_names
assert get_names_from_dir('.submod1', submod) == submod1_names
assert get_names_from_dir('.submod2', submod) == submod2_names
assert get_names_from_dir('.submod3', submod) == submod3_names
assert get_names_from_dir('.submod4', submod) == submod4_names
assert get_names_from_dir('..mod1', submod) == mod1_names
assert get_names_from_dir('..mod2', submod) == mod2_names
assert get_names_from_dir('..mod3', submod) == mod3_names
assert get_names_from_dir('..mod4', submod) == mod4_names
assert get_names_from_dir('..mod5', submod) == mod5_names
assert get_names_from_dir('..mod6', submod) == get_names_dynamically('os.path')
raises(NotImplementedError, lambda: get_names_from_dir('..mod6', submod, allow_dynamic=False))
assert get_names_from_dir('..mod7', submod) == get_names_dynamically('os.path')
raises(NotImplementedError, lambda: get_names_from_dir('..mod7', submod, allow_dynamic=False))
assert get_names_from_dir('..mod8', submod) == mod8_names
assert get_names_from_dir('..mod9', submod) == mod9_names
assert get_names_from_dir('..mod_unfixable', submod) == mod_unfixable_names
assert get_names_from_dir('..mod_commented_unused_star', submod) == mod_commented_unused_star_names
assert get_names_from_dir('..mod_commented_star', submod) == mod_commented_star_names
assert get_names_from_dir('..submod_recursive', submod) == submod_recursive_names
assert get_names_from_dir('..submod_recursive.submod1', submod) == submod_recursive_submod1_names
assert get_names_from_dir('..submod_recursive.submod2', submod) == submod_recursive_submod2_names
assert get_names_from_dir('module.mod1', submod) == mod1_names
assert get_names_from_dir('module.mod2', submod) == mod2_names
assert get_names_from_dir('module.mod3', submod) == mod3_names
assert get_names_from_dir('module.mod4', submod) == mod4_names
assert get_names_from_dir('module.mod5', submod) == mod5_names
assert get_names_from_dir('module.mod6', submod) == get_names_dynamically('os.path')
raises(NotImplementedError, lambda: get_names_from_dir('module.mod6', submod, allow_dynamic=False))
assert get_names_from_dir('module.mod7', submod) == get_names_dynamically('os.path')
raises(NotImplementedError, lambda: get_names_from_dir('module.mod7', submod, allow_dynamic=False))
assert get_names_from_dir('module.mod8', submod) == mod8_names
assert get_names_from_dir('module.mod9', submod) == mod9_names
assert get_names_from_dir('module.mod_unfixable', submod) == mod_unfixable_names
assert get_names_from_dir('module.mod_commented_unused_star', submod) == mod_commented_unused_star_names
assert get_names_from_dir('module.mod_commented_star', submod) == mod_commented_star_names
assert get_names_from_dir('module.submod', submod) == submod_names
assert get_names_from_dir('module.submod.submod1', submod) == submod1_names
assert get_names_from_dir('module.submod.submod2', submod) == submod2_names
assert get_names_from_dir('module.submod.submod3', submod) == submod3_names
assert get_names_from_dir('module.submod.submod4', submod) == submod4_names
assert get_names_from_dir('module.submod_recursive', submod) == submod_recursive_names
assert get_names_from_dir('module.submod_recursive.submod1', submod) == submod_recursive_submod1_names
assert get_names_from_dir('module.submod_recursive.submod2', submod) == submod_recursive_submod2_names
submod_recursive = directory/'submod_recursive'
assert get_names_from_dir('..submod', submod_recursive) == submod_names
assert get_names_from_dir('..submod.submod1', submod_recursive) == submod1_names
assert get_names_from_dir('..submod.submod2', submod_recursive) == submod2_names
assert get_names_from_dir('..submod.submod3', submod_recursive) == submod3_names
assert get_names_from_dir('..submod.submod4', submod_recursive) == submod4_names
assert get_names_from_dir('..mod1', submod_recursive) == mod1_names
assert get_names_from_dir('..mod2', submod_recursive) == mod2_names
assert get_names_from_dir('..mod3', submod_recursive) == mod3_names
assert get_names_from_dir('..mod4', submod_recursive) == mod4_names
assert get_names_from_dir('..mod5', submod_recursive) == mod5_names
assert get_names_from_dir('..mod6', submod_recursive) == get_names_dynamically('os.path')
raises(NotImplementedError, lambda: get_names_from_dir('..mod6', submod_recursive, allow_dynamic=False))
assert get_names_from_dir('..mod7', submod_recursive) == get_names_dynamically('os.path')
raises(NotImplementedError, lambda: get_names_from_dir('..mod7', submod_recursive, allow_dynamic=False))
assert get_names_from_dir('..mod8', submod_recursive) == mod8_names
assert get_names_from_dir('..mod9', submod_recursive) == mod9_names
assert get_names_from_dir('..mod_unfixable', submod_recursive) == mod_unfixable_names
assert get_names_from_dir('..mod_commented_unused_star', submod_recursive) == mod_commented_unused_star_names
assert get_names_from_dir('..mod_commented_star', submod_recursive) == mod_commented_star_names
assert get_names_from_dir('.', submod_recursive) == submod_recursive_names
assert get_names_from_dir('..submod_recursive', submod_recursive) == submod_recursive_names
assert get_names_from_dir('.submod1', submod_recursive) == submod_recursive_submod1_names
assert get_names_from_dir('.submod2', submod_recursive) == submod_recursive_submod2_names
assert get_names_from_dir('module.mod1', submod_recursive) == mod1_names
assert get_names_from_dir('module.mod2', submod_recursive) == mod2_names
assert get_names_from_dir('module.mod3', submod_recursive) == mod3_names
assert get_names_from_dir('module.mod4', submod_recursive) == mod4_names
assert get_names_from_dir('module.mod5', submod_recursive) == mod5_names
assert get_names_from_dir('module.mod6', submod_recursive) == get_names_dynamically('os.path')
        raises(NotImplementedError, lambda: get_names_from_dir('module.mod6', submod_recursive, allow_dynamic=False))
assert get_names_from_dir('module.mod7', submod_recursive) == get_names_dynamically('os.path')
        raises(NotImplementedError, lambda: get_names_from_dir('module.mod7', submod_recursive, allow_dynamic=False))
assert get_names_from_dir('module.mod8', submod_recursive) == mod8_names
assert get_names_from_dir('module.mod9', submod_recursive) == mod9_names
assert get_names_from_dir('module.mod_unfixable', submod_recursive) == mod_unfixable_names
        assert get_names_from_dir('module.mod_commented_unused_star', submod_recursive) == mod_commented_unused_star_names
        assert get_names_from_dir('module.mod_commented_star', submod_recursive) == mod_commented_star_names
assert get_names_from_dir('module.submod', submod_recursive) == submod_names
assert get_names_from_dir('module.submod.submod1', submod_recursive) == submod1_names
assert get_names_from_dir('module.submod.submod2', submod_recursive) == submod2_names
assert get_names_from_dir('module.submod.submod3', submod_recursive) == submod3_names
assert get_names_from_dir('module.submod.submod4', submod_recursive) == submod4_names
assert get_names_from_dir('module.submod_recursive', submod_recursive) == submod_recursive_names
assert get_names_from_dir('module.submod_recursive.submod1', submod_recursive) == submod_recursive_submod1_names
assert get_names_from_dir('module.submod_recursive.submod2', submod_recursive) == submod_recursive_submod2_names
raises(ExternalModuleError, lambda: get_names_from_dir('os.path', directory))
raises(ExternalModuleError, lambda: get_names_from_dir('os.path', submod))
raises(RuntimeError, lambda: get_names_from_dir('.mod_bad', directory))
raises(RuntimeError, lambda: get_names_from_dir('module.mod_bad', directory))
raises(RuntimeError, lambda: get_names_from_dir('.mod_doesnt_exist', directory))
raises(RuntimeError, lambda: get_names_from_dir('module.mod_doesnt_exist', directory))
finally:
os.chdir(curdir)
def test_get_names_dynamically(tmpdir):
os_path = get_names_dynamically('os.path')
assert 'isfile' in os_path
assert 'join' in os_path
directory = tmpdir/'module'
create_module(directory)
sys_path = sys.path
try:
sys.path.insert(0, str(tmpdir))
assert get_names_dynamically('module.mod1') == mod1_names
assert get_names_dynamically('module.mod2') == mod2_names
assert get_names_dynamically('module.mod3') == mod3_names
assert get_names_dynamically('module.mod4') == mod4_names
assert get_names_dynamically('module.mod5') == mod5_names
assert get_names_dynamically('module.mod6') == os_path
assert get_names_dynamically('module.mod7') == os_path
assert get_names_dynamically('module.mod8') == mod8_names
assert get_names_dynamically('module.mod9') == mod9_names
assert get_names_dynamically('module.mod_unfixable') == mod_unfixable_names
assert get_names_dynamically('module.mod_commented_unused_star') == mod_commented_unused_star_names
assert get_names_dynamically('module.mod_commented_star') == mod_commented_star_names
assert get_names_dynamically('module.submod') == submod_dynamic_names
assert get_names_dynamically('module.submod.submod1') == submod1_names
assert get_names_dynamically('module.submod.submod2') == submod2_names
assert get_names_dynamically('module.submod.submod3') == submod3_names
raises(RuntimeError, lambda: get_names_dynamically('module.submod.submod4'))
assert get_names_dynamically('module.submod_recursive') == submod_recursive_dynamic_names
assert get_names_dynamically('module.submod_recursive.submod1') == submod_recursive_submod1_names
assert get_names_dynamically('module.submod_recursive.submod2') == submod_recursive_submod2_dynamic_names
# Doesn't actually import because of the undefined name 'd'
# assert get_names_dynamically('module.submod.submod4') == submod4_names
finally:
sys.path = sys_path
raises(RuntimeError, lambda: get_names_dynamically('notarealmodule'))
def test_fix_code(tmpdir, capsys):
# TODO: Test the verbose and quiet flags
directory = tmpdir/'module'
create_module(directory)
assert fix_code(code_mod1, file=directory/'mod1.py') == code_mod1
out, err = capsys.readouterr()
assert not out
assert not err
assert fix_code(code_mod2, file=directory/'mod2.py') == code_mod2
out, err = capsys.readouterr()
assert not out
assert not err
assert fix_code(code_mod3, file=directory/'mod3.py') == code_mod3
out, err = capsys.readouterr()
assert not out
assert not err
assert fix_code(code_mod4, file=directory/'mod4.py') == code_mod4_fixed
out, err = capsys.readouterr()
assert not out
assert 'Warning' in err
assert str(directory/'mod4.py') in err
assert "'b'" in err
assert "'a'" not in err
assert "'.mod1'" in err
assert "'.mod2'" in err
assert "Using '.mod2'" in err
assert "could not find import for 'd'" in err
assert fix_code(code_mod5, file=directory/'mod5.py') == code_mod5_fixed
out, err = capsys.readouterr()
assert not out
assert 'Warning' in err
assert str(directory/'mod5.py') in err
assert "'b'" in err
assert "'a'" not in err
assert "'module.mod1'" in err
assert "'module.mod2'" in err
assert "Using 'module.mod2'" in err
assert "could not find import for 'd'" in err
assert fix_code(code_mod6, file=directory/'mod6.py') == code_mod6_fixed
out, err = capsys.readouterr()
assert not out
assert not err
assert raises(NotImplementedError, lambda: fix_code(code_mod6, file=directory/'mod6.py', allow_dynamic=False))
assert fix_code(code_mod7, file=directory/'mod7.py') == code_mod7_fixed
out, err = capsys.readouterr()
assert not out
assert not err
assert raises(NotImplementedError, lambda: fix_code(code_mod7, file=directory/'mod7.py', allow_dynamic=False))
assert fix_code(code_mod8, file=directory/'mod8.py') == code_mod8
out, err = capsys.readouterr()
assert not out
assert not err
assert fix_code(code_mod9, file=directory/'mod9.py') == code_mod9_fixed
out, err = capsys.readouterr()
assert not out
assert not err
assert fix_code(code_mod_unfixable, file=directory/'mod_unfixable.py') == code_mod_unfixable
out, err = capsys.readouterr()
assert not out
assert 'Warning' in err
assert 'Could not find the star imports for' in err
for mod in ["'.mod1'", "'.mod2'"]:
assert mod in err
assert fix_code(code_mod_commented_unused_star, file=directory/'mod_commented_unused_star.py') == code_mod_commented_unused_star_fixed
out, err = capsys.readouterr()
assert not out
assert 'Warning' in err
assert ("The removed star import statement for '.mod1' had an inline "
"comment which may not make sense without the import") in err
assert fix_code(code_mod_commented_star, file=directory/'mod_commented_star.py') == code_mod_commented_star_fixed
out, err = capsys.readouterr()
assert not out
assert not err
submod = directory/'submod'
assert fix_code(code_submod_init, file=submod/'__init__.py') == code_submod_init
out, err = capsys.readouterr()
assert not out
assert not err
assert fix_code(code_submod1, file=submod/'submod1.py') == code_submod1_fixed
out, err = capsys.readouterr()
assert not out
assert 'Warning' in err
assert str(submod/'submod1.py') in err
assert "'b'" in err
assert "'a'" not in err
assert "'..mod1'" in err
assert "'..mod2'" in err
assert "'.mod1'" not in err
assert "'.mod2'" not in err
assert "Using '..mod2'" in err
assert "could not find import for 'd'" in err
assert fix_code(code_submod2, file=submod/'submod2.py') == code_submod2_fixed
out, err = capsys.readouterr()
assert not out
assert 'Warning' in err
assert str(submod/'submod2.py') in err
assert "'b'" in err
assert "'a'" not in err
assert "'module.mod1'" in err
assert "'module.mod2'" in err
assert "'module.submod.submod3'" not in err
assert "'module.submod.mod1'" not in err
assert "'module.submod.mod2'" not in err
assert "Using 'module.mod2'" in err
assert "could not find import for 'd'" in err
assert fix_code(code_submod3, file=submod/'submod3.py') == code_submod3
out, err = capsys.readouterr()
assert not out
assert not err
assert fix_code(code_submod4, file=submod/'submod4.py') == code_submod4_fixed
out, err = capsys.readouterr()
assert not out
assert not err
submod_recursive = directory/'submod_recursive'
# TODO: It's not actually useful to test this
assert fix_code(code_submod_recursive_init, file=submod_recursive/'__init__.py') == ""
out, err = capsys.readouterr()
assert not out
assert not err
assert fix_code(code_submod_recursive_submod1, file=submod_recursive/'submod1.py') == code_submod_recursive_submod1
out, err = capsys.readouterr()
assert not out
assert not err
assert fix_code(code_submod_recursive_submod2, file=submod_recursive/'submod2.py') == code_submod_recursive_submod2_fixed
out, err = capsys.readouterr()
assert not out
assert not err
raises(RuntimeError, lambda: fix_code(code_bad_syntax, file=directory/'mod_bad.py'))
out, err = capsys.readouterr()
assert not out
assert not err
def touch(f):
with open(f, 'w'):
pass
@pytest.mark.parametrize('relative', [True, False])
def test_get_mod_filename(tmpdir, relative):
if relative:
chdir = tmpdir
tmpdir = Path('.')
else:
chdir = '.'
curdir = os.path.abspath('.')
try:
os.chdir(chdir)
module = tmpdir/'module'
os.makedirs(module)
touch(module/'__init__.py')
touch(module/'mod1.py')
submod = module/'submod'
os.makedirs(submod)
touch(submod/'__init__.py')
touch(submod/'mod1.py')
subsubmod = submod/'submod'
os.makedirs(subsubmod)
touch(subsubmod/'__init__.py')
touch(subsubmod/'mod1.py')
def _test(mod, directory, expected):
result = os.path.abspath(get_mod_filename(mod, directory))
assert result == os.path.abspath(expected)
_test('.', module, module/'__init__.py')
_test('.mod1', module, module/'mod1.py')
_test('.submod', module, submod/'__init__.py')
_test('.submod.mod1', module, submod/'mod1.py')
_test('.submod.submod', module, subsubmod/'__init__.py')
_test('.submod.submod.mod1', module, subsubmod/'mod1.py')
raises(RuntimeError, lambda: get_mod_filename('.notreal', module))
_test('module', module, module/'__init__.py')
_test('module.mod1', module, module/'mod1.py')
_test('module.submod', module, submod/'__init__.py')
_test('module.submod.mod1', module, submod/'mod1.py')
_test('module.submod.submod', module, subsubmod/'__init__.py')
_test('module.submod.submod.mod1', module, subsubmod/'mod1.py')
raises(RuntimeError, lambda: get_mod_filename('module.notreal', module))
raises(RuntimeError, lambda: get_mod_filename('module.submod.notreal', module))
raises(ExternalModuleError, lambda: get_mod_filename('notreal.notreal', module))
_test('..', submod, module/'__init__.py')
_test('..mod1', submod, module/'mod1.py')
_test('.', submod, submod/'__init__.py')
_test('.mod1', submod, submod/'mod1.py')
_test('..submod', submod, submod/'__init__.py')
_test('..submod.mod1', submod, submod/'mod1.py')
_test('.submod', submod, subsubmod/'__init__.py')
_test('.submod.mod1', submod, subsubmod/'mod1.py')
_test('..submod.submod', submod, subsubmod/'__init__.py')
_test('..submod.submod.mod1', submod, subsubmod/'mod1.py')
raises(RuntimeError, lambda: get_mod_filename('.notreal', submod))
raises(RuntimeError, lambda: get_mod_filename('..notreal', submod))
_test('module', submod, module/'__init__.py')
_test('module.mod1', submod, module/'mod1.py')
_test('module.submod', submod, submod/'__init__.py')
_test('module.submod.mod1', submod, submod/'mod1.py')
_test('module.submod.submod', submod, subsubmod/'__init__.py')
_test('module.submod.submod.mod1', submod, subsubmod/'mod1.py')
raises(RuntimeError, lambda: get_mod_filename('module.notreal', submod))
raises(RuntimeError, lambda: get_mod_filename('module.submod.notreal', submod))
raises(ExternalModuleError, lambda: get_mod_filename('notreal.notreal', submod))
_test('...', subsubmod, module/'__init__.py')
_test('...mod1', subsubmod, module/'mod1.py')
_test('..', subsubmod, submod/'__init__.py')
_test('..mod1', subsubmod, submod/'mod1.py')
_test('...submod', subsubmod, submod/'__init__.py')
_test('...submod.mod1', subsubmod, submod/'mod1.py')
_test('.', subsubmod, subsubmod/'__init__.py')
_test('.mod1', subsubmod, subsubmod/'mod1.py')
_test('...submod.submod', subsubmod, subsubmod/'__init__.py')
_test('...submod.submod.mod1', subsubmod, subsubmod/'mod1.py')
_test('..submod', subsubmod, subsubmod/'__init__.py')
_test('..submod.mod1', subsubmod, subsubmod/'mod1.py')
raises(RuntimeError, lambda: get_mod_filename('.notreal', subsubmod))
raises(RuntimeError, lambda: get_mod_filename('..notreal', subsubmod))
        raises(RuntimeError, lambda: get_mod_filename('...notreal', subsubmod))
_test('module', subsubmod, module/'__init__.py')
_test('module.mod1', subsubmod, module/'mod1.py')
_test('module.submod', subsubmod, submod/'__init__.py')
_test('module.submod.mod1', subsubmod, submod/'mod1.py')
_test('module.submod.submod', subsubmod, subsubmod/'__init__.py')
_test('module.submod.submod.mod1', subsubmod, subsubmod/'mod1.py')
raises(RuntimeError, lambda: get_mod_filename('module.notreal', subsubmod))
raises(RuntimeError, lambda: get_mod_filename('module.submod.notreal', subsubmod))
raises(ExternalModuleError, lambda: get_mod_filename('notreal.notreal', subsubmod))
finally:
os.chdir(curdir)
def test_replace_imports():
# The verbose and quiet flags are already tested in test_fix_code
for code in [code_mod1, code_mod2, code_mod3, code_mod8, code_submod3,
code_submod_init, code_submod_recursive_submod1, code_mod_unfixable]:
assert replace_imports(code, repls={}, verbose=False, quiet=True) == code
assert replace_imports(code_mod4, repls={'.mod1': ['a'], '.mod2': ['b', 'c']}, verbose=False, quiet=True) == code_mod4_fixed
assert replace_imports(code_mod5, repls={'module.mod1': ['a'], 'module.mod2': ['b', 'c']}, verbose=False, quiet=True) == code_mod5_fixed
assert replace_imports(code_mod6, repls={'os.path': ['isfile', 'join']}, verbose=False, quiet=False) == code_mod6_fixed
assert replace_imports(code_mod7, repls={'.mod6': []}, verbose=False, quiet=False) == code_mod7_fixed
assert replace_imports(code_mod9, repls={'.mod8': ['a', 'b']}, verbose=False, quiet=False) == code_mod9_fixed
assert replace_imports(code_submod1, repls={'..mod1': ['a'], '..mod2':
['b', 'c'], '.submod3': ['e']}, verbose=False, quiet=True) == code_submod1_fixed
assert replace_imports(code_submod2, repls={'module.mod1': ['a'],
'module.mod2': ['b', 'c'], 'module.submod.submod3': ['e']}, verbose=False, quiet=True) == code_submod2_fixed
assert replace_imports(code_submod4, repls={'.': ['func']}, verbose=False, quiet=True) == code_submod4_fixed
assert replace_imports(code_submod_recursive_submod2, repls={'.': ['a']}) == code_submod_recursive_submod2_fixed
assert replace_imports(code_mod_unfixable, repls={'.mod1': ['a'], '.mod2': ['c'], '.mod3': ['name']}) == code_mod_unfixable
assert replace_imports(code_mod_commented_unused_star, repls={'.mod1': [], '.mod2': []}) == code_mod_commented_unused_star_fixed
assert replace_imports(code_mod_commented_star, repls={'.mod1': ['a'], '.mod2': ['c'], '.mod3': ['name']}) == code_mod_commented_star_fixed
@pytest.mark.parametrize('verbose_enabled, verbose_kwarg', [
(False, {}), # Default is False
(False, {'verbose': False}),
(True, {'verbose': True}),
], ids=['implicit no verbose', 'explicit no verbose', 'explicit verbose'])
@pytest.mark.parametrize('kwargs, fixed_code, verbose_messages', [
(dict(code=code_mod4, repls={'.mod1': ['a'], '.mod2': ['b', 'c']}),
code_mod4_fixed, [
"Replacing 'from .mod1 import *' with 'from .mod1 import a'",
"Replacing 'from .mod2 import *' with 'from .mod2 import b, c'"
]),
(dict(code=code_mod4, repls={'.mod1': ['a'], '.mod2': ['b', 'c']}, file='directory/mod4.py'),
code_mod4_fixed, [
"directory/mod4.py: Replacing 'from .mod1 import *' with 'from .mod1 import a'",
"directory/mod4.py: Replacing 'from .mod2 import *' with 'from .mod2 import b, c'"
]),
(dict(code=code_mod_commented_star, repls={'.mod1': ['a'], '.mod2': ['c'], '.mod3': ['name']}),
code_mod_commented_star_fixed, [
"Replacing 'from .mod3 import *' with 'from .mod3 import name'",
"Retaining 'from .mod1 import *' due to noqa comment",
"Retaining 'from .mod2 import *' due to noqa comment"
]),
(dict(code=code_mod_commented_star, repls={'.mod1': ['a'], '.mod2': ['c'], '.mod3': ['name']}, file='directory/mod_commented_star.py'),
code_mod_commented_star_fixed, [
"directory/mod_commented_star.py: Replacing 'from .mod3 import *' with 'from .mod3 import name'",
"directory/mod_commented_star.py: Retaining 'from .mod1 import *' due to noqa comment",
"directory/mod_commented_star.py: Retaining 'from .mod2 import *' due to noqa comment"
]),
], ids=[
'mod4 without file',
'mod4 with file',
'mod_commented_star without file',
'mod_commented_star with file'
])
def test_replace_imports_verbose_messages(kwargs, fixed_code, verbose_messages, verbose_enabled, verbose_kwarg, capsys):
assert replace_imports(**kwargs, **verbose_kwarg) == fixed_code
_, err = capsys.readouterr()
if verbose_enabled:
assert sorted(err.splitlines()) == verbose_messages
else:
assert err == ''
def test_replace_imports_warnings(capsys):
assert replace_imports(code_mod_unfixable, file='module/mod_unfixable.py', repls={'.mod1': ['a'], '.mod2': ['c']}) == code_mod_unfixable
out, err = capsys.readouterr()
assert set(err.splitlines()) == {
"Warning: module/mod_unfixable.py: Could not find the star imports for '.mod1'",
"Warning: module/mod_unfixable.py: Could not find the star imports for '.mod2'"
}
assert replace_imports(code_mod_unfixable, file=None, repls={'.mod1': ['a'], '.mod2': ['c']}) == code_mod_unfixable
out, err = capsys.readouterr()
assert set(err.splitlines()) == {
"Warning: Could not find the star imports for '.mod1'",
"Warning: Could not find the star imports for '.mod2'"
}
assert replace_imports(code_mod_unfixable, quiet=True, repls={'.mod1': ['a'], '.mod2': ['c']}) == code_mod_unfixable
out, err = capsys.readouterr()
assert err == ''
assert replace_imports(code_mod_commented_unused_star, file='module/mod_commented_unused_star.py', repls={'.mod1': [], '.mod2': []}) == code_mod_commented_unused_star_fixed
out, err = capsys.readouterr()
assert set(err.splitlines()) == {
"Warning: module/mod_commented_unused_star.py: The removed star import statement for '.mod1' had an inline comment which may not make sense without the import",
}
assert replace_imports(code_mod_commented_unused_star, file=None, repls={'.mod1': [], '.mod2': []}) == code_mod_commented_unused_star_fixed
out, err = capsys.readouterr()
assert set(err.splitlines()) == {
"Warning: The removed star import statement for '.mod1' had an inline comment which may not make sense without the import",
}
assert replace_imports(code_mod_commented_unused_star, quiet=True, repls={'.mod1': [], '.mod2': []}) == code_mod_commented_unused_star_fixed
out, err = capsys.readouterr()
assert err == ''
def test_replace_imports_line_wrapping():
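    # The expected strings below pin down how replace_imports wraps a long
    # import line at different max_line_length values, aligning continuation
    # lines with the opening parenthesis.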
code = """\
from reallyreallylongmodulename import *
print(longname1, longname2, longname3, longname4, longname5, longname6,
longname7, longname8, longname9)
"""
code_fixed = """\
{imp}
print(longname1, longname2, longname3, longname4, longname5, longname6,
longname7, longname8, longname9)
"""
repls = {'reallyreallylongmodulename': ['longname1', 'longname2', 'longname3', 'longname4', 'longname5', 'longname6', 'longname7', 'longname8', 'longname9']}
assert replace_imports(code, repls) == code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1, longname2, longname3, longname4, longname5,
longname6, longname7, longname8, longname9)''')
# Make sure the first line has at least one imported name.
# There's no point to doing
#
# from mod import (
# name,
#
# if we are aligning the names to the opening parenthesis anyway.
assert replace_imports(code, repls, max_line_length=49) == code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1,
longname2,
longname3,
longname4,
longname5,
longname6,
longname7,
longname8,
longname9)''')
assert replace_imports(code, repls, max_line_length=50) == code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1,
longname2,
longname3,
longname4,
longname5,
longname6,
longname7,
longname8,
longname9)''')
assert replace_imports(code, repls, max_line_length=51) == code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1,
longname2,
longname3,
longname4,
longname5,
longname6,
longname7,
longname8,
longname9)''')
assert replace_imports(code, repls, max_line_length=120) == code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1, longname2, longname3, longname4, longname5, longname6, longname7,
longname8, longname9)''')
assert len("from reallyreallylongmodulename import longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8, longname9") == 136
assert replace_imports(code, repls, max_line_length=137) == code_fixed.format(imp='''\
from reallyreallylongmodulename import longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8, longname9''')
assert replace_imports(code, repls, max_line_length=136) == code_fixed.format(imp='''\
from reallyreallylongmodulename import longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8, longname9''')
assert replace_imports(code, repls, max_line_length=135) == code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8,
longname9)''')
assert replace_imports(code, repls, max_line_length=200) == code_fixed.format(imp='''\
from reallyreallylongmodulename import longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8, longname9''')
assert replace_imports(code, repls, max_line_length=float('inf')) == code_fixed.format(imp='''\
from reallyreallylongmodulename import longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8, longname9''')
@pytest.mark.parametrize('case_permutation', [
lambda s: s,
lambda s: s.upper(),
lambda s: s.lower()
], ids=['same case', 'upper case', 'lower case'])
@pytest.mark.parametrize('allows_star, comment', [
(True, '# noqa'),
(True, '#noqa'),
(True, '# noqa '),
(False, '# noqa foo bar'),
(False, '# noqa:'),
(False, '# noqa :'),
(True, '# noqa: F401'),
(True, '#noqa:F401'),
(True, '# noqa: F401 '),
(True, '#\tnoqa:\tF401\t'),
(True, '# noqa: F403'),
(True, '# noqa: A1,F403,A1'),
(True, '# noqa: A1 F401 A1'),
(True, '# noqa: A1, F401, A1'),
(True, '# noqa: A1 , F401 , A1'),
(False, '# generic comment'),
(False, '#'),
(False, ''),
(False, '# foo: F401'),
(False, '# F401'),
(False, '# noqa F401'), # missing : after noqa
])
def test_is_noqa_comment_allowing_star_import(case_permutation, allows_star, comment):
assert is_noqa_comment_allowing_star_import(case_permutation(comment)) is allows_star
def _dirs_equal(cmp):
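    # Recursively check a filecmp.dircmp result: True only when no files
    # differ at this level or in any common subdirectory.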
if cmp.diff_files:
return False
if not cmp.subdirs:
return True
return all(_dirs_equal(c) for c in cmp.subdirs.values())
def test_cli(tmpdir):
from ..__main__ import __file__
# TODO: Test the verbose and quiet flags
directory_orig = tmpdir/'orig'/'module'
directory = tmpdir/'module'
create_module(directory)
create_module(directory_orig)
cmp = dircmp(directory, directory_orig)
assert _dirs_equal(cmp)
# Make sure we are running the command for the right file
p = subprocess.run([sys.executable, '-m', 'removestar', '--_this-file', 'none'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
assert p.stderr == ''
assert p.stdout == __file__
p = subprocess.run([sys.executable, '-m', 'removestar', directory],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
warnings = set(f"""\
Warning: {directory}/submod/submod1.py: 'b' comes from multiple modules: '..mod1', '..mod2'. Using '..mod2'.
Warning: {directory}/submod/submod1.py: could not find import for 'd'
Warning: {directory}/submod/submod2.py: 'b' comes from multiple modules: 'module.mod1', 'module.mod2'. Using 'module.mod2'.
Warning: {directory}/submod/submod2.py: could not find import for 'd'
Warning: {directory}/mod4.py: 'b' comes from multiple modules: '.mod1', '.mod2'. Using '.mod2'.
Warning: {directory}/mod4.py: could not find import for 'd'
Warning: {directory}/mod5.py: 'b' comes from multiple modules: 'module.mod1', 'module.mod2'. Using 'module.mod2'.
Warning: {directory}/mod5.py: could not find import for 'd'
Warning: {directory}/mod_unfixable.py: Could not find the star imports for '.mod1'
Warning: {directory}/mod_unfixable.py: Could not find the star imports for '.mod2'
Warning: {directory}/mod_commented_unused_star.py: The removed star import statement for '.mod1' had an inline comment which may not make sense without the import
""".splitlines())
error = f"Error with {directory}/mod_bad.py: SyntaxError: invalid syntax (mod_bad.py, line 1)"
assert set(p.stderr.splitlines()) == warnings.union({error})
diffs = [
f"""\
--- original/{directory}/mod4.py
+++ fixed/{directory}/mod4.py
@@ -1,5 +1,5 @@
-from .mod1 import *
-from .mod2 import *
+from .mod1 import a
+from .mod2 import b, c
from .mod3 import name
\n\
def func():\
""",
f"""\
--- original/{directory}/mod5.py
+++ fixed/{directory}/mod5.py
@@ -1,5 +1,5 @@
-from module.mod1 import *
-from module.mod2 import *
+from module.mod1 import a
+from module.mod2 import b, c
from module.mod3 import name
\n\
def func():\
""",
f"""\
--- original/{directory}/mod6.py
+++ fixed/{directory}/mod6.py
@@ -1,2 +1,2 @@
-from os.path import *
+from os.path import isfile, join
isfile(join('a', 'b'))\
""",
f"""\
--- original/{directory}/mod7.py
+++ fixed/{directory}/mod7.py
@@ -1 +0,0 @@
-from .mod6 import *\
""",
f"""\
--- original/{directory}/mod9.py
+++ fixed/{directory}/mod9.py
@@ -1,4 +1,4 @@
-from .mod8 import *
+from .mod8 import a, b
\n\
def func():
return a + b\
""",
f"""\
--- original/{directory}/mod_commented_unused_star.py
+++ fixed/{directory}/mod_commented_unused_star.py
@@ -1,2 +1,2 @@
-from .mod1 import * # comment about mod1
+# comment about mod1
from .mod2 import * # noqa\
""",
f"""\
--- original/{directory}/mod_commented_star.py
+++ fixed/{directory}/mod_commented_star.py
@@ -1,6 +1,6 @@
from .mod1 import * # noqa
from .mod2 import * # noqa: F401
-from .mod3 import * # generic comment
+from .mod3 import name # generic comment
\n\
def func():\
""",
f"""\
--- original/{directory}/submod/submod1.py
+++ fixed/{directory}/submod/submod1.py
@@ -1,7 +1,7 @@
-from ..mod1 import *
-from ..mod2 import *
+from ..mod1 import a
+from ..mod2 import b, c
from ..mod3 import name
-from .submod3 import *
+from .submod3 import e
\n\
def func():
return a + b + c + d + d + e + name\
""",
f"""\
--- original/{directory}/submod/submod2.py
+++ fixed/{directory}/submod/submod2.py
@@ -1,7 +1,7 @@
-from module.mod1 import *
-from module.mod2 import *
+from module.mod1 import a
+from module.mod2 import b, c
from module.mod3 import name
-from module.submod.submod3 import *
+from module.submod.submod3 import e
\n\
def func():
return a + b + c + d + d + e + name\
""",
f"""\
--- original/{directory}/submod/submod4.py
+++ fixed/{directory}/submod/submod4.py
@@ -1,3 +1,3 @@
-from . import *
+from . import func
\n\
func()\
""",
f"""\
--- original/{directory}/submod_recursive/submod2.py
+++ fixed/{directory}/submod_recursive/submod2.py
@@ -1,4 +1,4 @@
-from . import *
+from . import a
\n\
def func():
return a + 1\
""",
]
unchanged = ['__init__.py', 'mod_bad.py', 'mod_unfixable.py']
for d in diffs:
assert d in p.stdout, p.stdout
for mod_path in unchanged:
        assert f'--- original/{directory}/{mod_path}' not in p.stdout
cmp = dircmp(directory, directory_orig)
assert _dirs_equal(cmp)
p = subprocess.run([sys.executable, '-m', 'removestar', '--quiet', directory],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
assert p.stderr == ''
for d in diffs:
assert d in p.stdout
cmp = dircmp(directory, directory_orig)
assert _dirs_equal(cmp)
p = subprocess.run([sys.executable, '-m', 'removestar', '--verbose', directory],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
encoding='utf-8')
changes = set(f"""\
{directory}/mod4.py: Replacing 'from .mod1 import *' with 'from .mod1 import a'
{directory}/mod4.py: Replacing 'from .mod2 import *' with 'from .mod2 import b, c'
{directory}/mod5.py: Replacing 'from module.mod1 import *' with 'from module.mod1 import a'
{directory}/mod5.py: Replacing 'from module.mod2 import *' with 'from module.mod2 import b, c'
{directory}/mod6.py: Replacing 'from os.path import *' with 'from os.path import isfile, join'
{directory}/mod7.py: Replacing 'from .mod6 import *' with ''
{directory}/mod9.py: Replacing 'from .mod8 import *' with 'from .mod8 import a, b'
{directory}/mod_commented_unused_star.py: Replacing 'from .mod1 import *' with ''
{directory}/mod_commented_unused_star.py: Retaining 'from .mod2 import *' due to noqa comment
{directory}/mod_commented_star.py: Replacing 'from .mod3 import *' with 'from .mod3 import name'
{directory}/mod_commented_star.py: Retaining 'from .mod1 import *' due to noqa comment
{directory}/mod_commented_star.py: Retaining 'from .mod2 import *' due to noqa comment
{directory}/submod/submod1.py: Replacing 'from ..mod1 import *' with 'from ..mod1 import a'
{directory}/submod/submod1.py: Replacing 'from ..mod2 import *' with 'from ..mod2 import b, c'
{directory}/submod/submod1.py: Replacing 'from .submod3 import *' with 'from .submod3 import e'
{directory}/submod/submod4.py: Replacing 'from . import *' with 'from . import func'
{directory}/submod/submod2.py: Replacing 'from module.mod1 import *' with 'from module.mod1 import a'
{directory}/submod/submod2.py: Replacing 'from module.mod2 import *' with 'from module.mod2 import b, c'
{directory}/submod/submod2.py: Replacing 'from module.submod.submod3 import *' with 'from module.submod.submod3 import e'
{directory}/submod_recursive/submod2.py: Replacing 'from . import *' with 'from . import a'
""".splitlines())
assert set(p.stderr.splitlines()) == changes.union({error}).union(warnings)
for d in diffs:
assert d in p.stdout, p.stdout
cmp = dircmp(directory, directory_orig)
assert _dirs_equal(cmp)
p = subprocess.run([sys.executable, '-m', 'removestar', '--no-dynamic-importing', directory],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
encoding='utf-8')
static_error = set(f"""\
Error with {directory}/mod6.py: Static determination of external module imports is not supported.
Error with {directory}/mod7.py: Static determination of external module imports is not supported.
""".splitlines())
assert set(p.stderr.splitlines()) == {error}.union(static_error).union(warnings)
for d in diffs:
if 'mod6' in d:
assert d not in p.stdout
else:
assert d in p.stdout, p.stdout
cmp = dircmp(directory, directory_orig)
assert _dirs_equal(cmp)
# Test --quiet hides both errors
p = subprocess.run([sys.executable, '-m', 'removestar', '--quiet', '--no-dynamic-importing', directory],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
encoding='utf-8')
assert p.stderr == ''
for d in diffs:
if 'mod6' in d:
assert d not in p.stdout
else:
assert d in p.stdout, p.stdout
cmp = dircmp(directory, directory_orig)
assert _dirs_equal(cmp)
# XXX: This modifies directory, so keep it at the end of the test
p = subprocess.run([sys.executable, '-m', 'removestar', '--quiet', '-i', directory],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
assert p.stderr == ''
assert p.stdout == ''
cmp = dircmp(directory, directory_orig)
assert not _dirs_equal(cmp)
assert cmp.diff_files == ['mod4.py', 'mod5.py', 'mod6.py', 'mod7.py', 'mod9.py', 'mod_commented_star.py', 'mod_commented_unused_star.py']
assert cmp.subdirs['submod'].diff_files == ['submod1.py', 'submod2.py', 'submod4.py']
assert cmp.subdirs['submod_recursive'].diff_files == ['submod2.py']
with open(directory/'mod4.py') as f:
assert f.read() == code_mod4_fixed
with open(directory/'mod5.py') as f:
assert f.read() == code_mod5_fixed
with open(directory/'mod6.py') as f:
assert f.read() == code_mod6_fixed
with open(directory/'mod7.py') as f:
assert f.read() == code_mod7_fixed
with open(directory/'mod9.py') as f:
assert f.read() == code_mod9_fixed
with open(directory/'mod_commented_unused_star.py') as f:
assert f.read() == code_mod_commented_unused_star_fixed
with open(directory/'mod_commented_star.py') as f:
assert f.read() == code_mod_commented_star_fixed
with open(directory/'submod'/'submod1.py') as f:
assert f.read() == code_submod1_fixed
with open(directory/'submod'/'submod2.py') as f:
assert f.read() == code_submod2_fixed
with open(directory/'submod'/'submod4.py') as f:
assert f.read() == code_submod4_fixed
with open(directory/'submod_recursive'/'submod2.py') as f:
assert f.read() == code_submod_recursive_submod2_fixed
with open(directory/'mod_bad.py') as f:
assert f.read() == code_bad_syntax
with open(directory/'mod_unfixable.py') as f:
assert f.read() == code_mod_unfixable
# Test error on nonexistent file
p = subprocess.run([sys.executable, '-m', 'removestar', directory/'notarealfile.py'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
encoding='utf-8')
assert p.stderr == f'Error: {directory}/notarealfile.py: no such file or directory\n'
assert p.stdout == ''
|
tests/builtins/test_list.py | jacebrowning/voc | 850 | 12742093 | from .. utils import TranspileTestCase, BuiltinFunctionTestCase, SAMPLE_SUBSTITUTIONS
class ListTests(TranspileTestCase):
pass
class BuiltinListFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["list"]
substitutions = dict(SAMPLE_SUBSTITUTIONS)
substitutions.update({
"[1, 2.3456, 'another']": [
"[1, 'another', 2.3456]",
"[2.3456, 1, 'another']",
"[2.3456, 'another', 1]",
"['another', 1, 2.3456]",
"['another', 2.3456, 1]",
],
"['a', 'c', 'd']": [
"['a', 'd', 'c']",
"['c', 'a', 'd']",
"['c', 'd', 'a']",
"['d', 'a', 'c']",
"['d', 'c', 'a']",
]
})
|
src/tests/test_sites/test_utils.py | winkidney/PickTrue | 118 | 12742110 | <reponame>winkidney/PickTrue<filename>src/tests/test_sites/test_utils.py
from picktrue.sites import utils
def test_get_name_ext_from_url():
assert utils.get_filename_fom_url(
"https://img9.doubanio.com/view/photo/l/public/p2208623414.jpg"
) == "p2208623414.jpg"
assert utils.get_filename_fom_url(
"https://img9.doubanio.com/view/photo/l/public/p2208623414.jpg?hello=world"
) == "p2208623414.jpg"
|
CPAC/func_preproc/func_preproc.py | FCP-INDI/C-PAC | 125 | 12742174 | <filename>CPAC/func_preproc/func_preproc.py
from nipype import logging
from nipype.interfaces import ants
logger = logging.getLogger('workflow')
from CPAC.pipeline import nipype_pipeline_engine as pe
import nipype.interfaces.fsl as fsl
import nipype.interfaces.utility as util
from nipype.interfaces import afni
from nipype.interfaces.afni import preprocess
from nipype.interfaces.afni import utils as afni_utils
from CPAC.func_preproc.utils import add_afni_prefix, nullify, chunk_ts, \
split_ts_chunks, oned_text_concat, notch_filter_motion
from CPAC.utils.interfaces.function import Function
from CPAC.generate_motion_statistics import motion_power_statistics
from CPAC.utils.utils import check_prov_for_motion_tool
# niworkflows
from ..utils.interfaces.ants import AI
def collect_arguments(*args):
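    # Join the optional flag args[1] (included only when args[0] is truthy)
    # with the remaining arguments into a single command string, e.g.
    # collect_arguments(True, '-d', '1') -> '-d 1' and
    # collect_arguments(False, '-d', '1') -> '1'.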
command_args = []
if args[0]:
command_args += [args[1]]
command_args += args[2:]
return ' '.join(command_args)
def anat_refined_mask(init_bold_mask=True, wf_name='init_bold_mask'):
wf = pe.Workflow(name=wf_name)
input_node = pe.Node(util.IdentityInterface(fields=['func',
'anatomical_brain_mask',
'anat_brain',
'init_func_brain_mask']),
name='inputspec')
output_node = pe.Node(util.IdentityInterface(fields=['func_brain_mask']),
name='outputspec')
# 1 Take single volume of func
func_single_volume = pe.Node(interface=afni.Calc(),
name='func_single_volume')
# TODO add an option to select volume
func_single_volume.inputs.set(
expr='a',
single_idx=1,
outputtype='NIFTI_GZ'
)
wf.connect(input_node, 'func',
func_single_volume, 'in_file_a')
# 2 get temporary func brain
func_tmp_brain = pe.Node(interface=afni_utils.Calc(),
name='func_tmp_brain')
func_tmp_brain.inputs.expr = 'a*b'
func_tmp_brain.inputs.outputtype = 'NIFTI_GZ'
wf.connect(func_single_volume, 'out_file',
func_tmp_brain, 'in_file_a')
# 2.1 get a tmp func brain mask
    if init_bold_mask:
# 2.1.1 N4BiasFieldCorrection single volume of raw_func
func_single_volume_n4_corrected = pe.Node(
interface=ants.N4BiasFieldCorrection(dimension=3,
copy_header=True,
bspline_fitting_distance=200),
shrink_factor=2,
name='func_single_volume_n4_corrected')
func_single_volume_n4_corrected.inputs.args = '-r True'
wf.connect(func_single_volume, 'out_file',
func_single_volume_n4_corrected, 'input_image')
# 2.1.2 bet n4 corrected image - generate tmp func brain mask
func_tmp_brain_mask = pe.Node(interface=fsl.BET(),
name='func_tmp_brain_mask_pre')
func_tmp_brain_mask.inputs.mask = True
wf.connect(func_single_volume_n4_corrected, 'output_image',
func_tmp_brain_mask, 'in_file')
# 2.1.3 dilate func tmp brain mask
func_tmp_brain_mask_dil = pe.Node(interface=fsl.ImageMaths(),
name='func_tmp_brain_mask_dil')
func_tmp_brain_mask_dil.inputs.op_string = '-dilM'
wf.connect(func_tmp_brain_mask, 'mask_file',
func_tmp_brain_mask_dil, 'in_file')
wf.connect(func_tmp_brain_mask_dil, 'out_file',
func_tmp_brain, 'in_file_b')
else:
# 2.1.1 connect dilated init func brain mask
wf.connect(input_node, 'init_func_brain_mask',
func_tmp_brain, 'in_file_b')
# 3. get transformation of anat to func
# 3.1 Register func tmp brain to anat brain to get func2anat matrix
linear_reg_func_to_anat = pe.Node(interface=fsl.FLIRT(),
name='func_to_anat_linear_reg')
linear_reg_func_to_anat.inputs.cost = 'mutualinfo'
linear_reg_func_to_anat.inputs.dof = 6
wf.connect(func_tmp_brain, 'out_file',
linear_reg_func_to_anat, 'in_file')
wf.connect(input_node, 'anat_brain',
linear_reg_func_to_anat, 'reference')
# 3.2 Inverse func to anat affine
inv_func_to_anat_affine = pe.Node(interface=fsl.ConvertXFM(),
name='inv_func2anat_affine')
inv_func_to_anat_affine.inputs.invert_xfm = True
wf.connect(linear_reg_func_to_anat, 'out_matrix_file',
inv_func_to_anat_affine, 'in_file')
# 4. anat mask to func space
# Transform anatomical mask to functional space to get BOLD mask
reg_anat_mask_to_func = pe.Node(interface=fsl.FLIRT(),
name='reg_anat_mask_to_func')
reg_anat_mask_to_func.inputs.apply_xfm = True
reg_anat_mask_to_func.inputs.cost = 'mutualinfo'
reg_anat_mask_to_func.inputs.dof = 6
reg_anat_mask_to_func.inputs.interp = 'nearestneighbour'
wf.connect(input_node, 'anatomical_brain_mask',
reg_anat_mask_to_func, 'in_file')
wf.connect(func_tmp_brain, 'out_file',
reg_anat_mask_to_func, 'reference')
wf.connect(inv_func_to_anat_affine, 'out_file',
reg_anat_mask_to_func, 'in_matrix_file')
# 5. get final func mask: refine func tmp mask with anat_mask_in_func mask
func_mask = pe.Node(interface=fsl.MultiImageMaths(), name='func_mask')
func_mask.inputs.op_string = "-mul %s"
wf.connect(reg_anat_mask_to_func, 'out_file',
func_mask, 'operand_files')
    if init_bold_mask:
wf.connect(func_tmp_brain_mask_dil, 'out_file',
func_mask, 'in_file')
else:
wf.connect(input_node, 'init_func_brain_mask',
func_mask, 'in_file')
wf.connect(func_mask, 'out_file',
output_node, 'func_brain_mask')
return wf
def anat_based_mask(wf_name='bold_mask'):
# reference DCAN lab BOLD mask
# https://github.com/DCAN-Labs/DCAN-HCP/blob/master/fMRIVolume/scripts/DistortionCorrectionAndEPIToT1wReg_FLIRTBBRAndFreeSurferBBRbased.sh
wf = pe.Workflow(name=wf_name)
input_node = pe.Node(util.IdentityInterface(fields=['func',
'anat_brain',
'anat_head']),
name='inputspec')
output_node = pe.Node(util.IdentityInterface(fields=['func_brain_mask']),
name='outputspec')
# 0. Take single volume of func
func_single_volume = pe.Node(interface=afni.Calc(),
name='func_single_volume')
func_single_volume.inputs.set(
expr='a',
single_idx=1,
outputtype='NIFTI_GZ'
)
wf.connect(input_node, 'func',
func_single_volume, 'in_file_a')
# 1. Register func head to anat head to get func2anat matrix
linear_reg_func_to_anat = pe.Node(interface=fsl.FLIRT(),
name='func_to_anat_linear_reg')
linear_reg_func_to_anat.inputs.dof = 6
linear_reg_func_to_anat.inputs.interp = 'spline'
linear_reg_func_to_anat.inputs.searchr_x = [30, 30]
linear_reg_func_to_anat.inputs.searchr_y = [30, 30]
linear_reg_func_to_anat.inputs.searchr_z = [30, 30]
wf.connect(func_single_volume, 'out_file',
linear_reg_func_to_anat, 'in_file')
wf.connect(input_node, 'anat_head',
linear_reg_func_to_anat, 'reference')
# 2. Inverse func to anat affine, to get anat-to-func transform
inv_func_to_anat_affine = pe.Node(interface=fsl.ConvertXFM(),
name='inv_func2anat_affine')
inv_func_to_anat_affine.inputs.invert_xfm = True
wf.connect(linear_reg_func_to_anat, 'out_matrix_file',
inv_func_to_anat_affine, 'in_file')
# 3. get BOLD mask
# 3.1 Apply anat-to-func transform to transfer anatomical brain to functional space
reg_anat_brain_to_func = pe.Node(interface=fsl.ApplyWarp(),
name='reg_anat_brain_to_func')
reg_anat_brain_to_func.inputs.interp = 'nn'
reg_anat_brain_to_func.inputs.relwarp = True
wf.connect(input_node, 'anat_brain',
reg_anat_brain_to_func, 'in_file')
wf.connect(input_node, 'func',
reg_anat_brain_to_func, 'ref_file')
wf.connect(inv_func_to_anat_affine, 'out_file',
reg_anat_brain_to_func, 'premat')
    # 3.2 Binarize transferred image and fill holes to get BOLD mask.
# Binarize
func_mask_bin = pe.Node(interface=fsl.ImageMaths(),
name='func_mask')
func_mask_bin.inputs.op_string = '-bin'
wf.connect(reg_anat_brain_to_func, 'out_file',
func_mask_bin, 'in_file')
wf.connect(func_mask_bin, 'out_file',
output_node, 'func_brain_mask')
return wf
def normalize_motion_parameters(in_file):
"""
Convert FSL mcflirt motion parameters to AFNI space
"""
import os
import numpy as np
motion_params = np.genfromtxt(in_file).T
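    # mcflirt writes three rotations (radians) followed by three translations
    # (mm); reorder the columns and convert the rotations to degrees to match
    # the AFNI 3dvolreg parameter convention used downstream.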
motion_params = np.vstack((motion_params[2, :] * 180 / np.pi,
motion_params[0, :] * 180 / np.pi,
-motion_params[1, :] * 180 / np.pi,
motion_params[5, :],
motion_params[3, :],
-motion_params[4, :]))
motion_params = np.transpose(motion_params)
out_file = os.path.join(os.getcwd(), 'motion_params.1D')
np.savetxt(out_file, motion_params)
return out_file
def get_mcflirt_rms_abs(rms_files):
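    # mcflirt's save_rms output yields absolute (*_abs.rms) and relative
    # (*_rel.rms) RMS displacement files; pick each one out of the list
    # regardless of ordering.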
for path in rms_files:
if 'abs.rms' in path:
abs_file = path
if 'rel.rms' in path:
rels_file = path
return (abs_file, rels_file)
def estimate_reference_image(in_file):
# fMRIPrep-style BOLD reference
# Ref: https://github.com/nipreps/niworkflows/blob/maint/1.3.x/niworkflows/interfaces/registration.py#L446-L549
import os
import numpy as np
import nibabel as nb
ref_input = [in_file]
mc_out_file = 'bold_mc.nii.gz'
# Build the nibabel spatial image we will work with
ref_im = []
for im_i in ref_input:
max_new_volumes = 50 - len(ref_im)
if max_new_volumes <= 0:
break
nib_i = nb.squeeze_image(nb.load(im_i))
if nib_i.dataobj.ndim == 3:
ref_im.append(nib_i)
elif nib_i.dataobj.ndim == 4:
ref_im += nb.four_to_three(nib_i.slicer[..., :max_new_volumes])
ref_im = nb.squeeze_image(nb.concat_images(ref_im))
out_file = os.path.join(os.getcwd(), "ref_bold.nii.gz")
# Slicing may induce inconsistencies with shape-dependent values in extensions.
# For now, remove all. If this turns out to be a mistake, we can select extensions
# that don't break pipeline stages.
ref_im.header.extensions.clear()
if ref_im.shape[-1] > 40:
ref_im = nb.Nifti1Image(
ref_im.dataobj[:, :, :, 20:40], ref_im.affine, ref_im.header
)
ref_name = os.path.join(os.getcwd(), "slice.nii.gz")
ref_im.to_filename(ref_name)
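    # Rigid-body register the selected volumes with AFNI 3dvolreg, then take
    # the voxel-wise median across time as the BOLD reference image.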
    cmd = '3dvolreg -Fourier -twopass -zpad 4 -prefix %s %s' % (mc_out_file, ref_name)
os.system(cmd)
mc_slice_nii = nb.load(mc_out_file)
median_image_data = np.median(mc_slice_nii.get_fdata(), axis=3)
nb.Nifti1Image(median_image_data, ref_im.affine, ref_im.header).to_filename(
out_file
)
return out_file
def create_scale_func_wf(scaling_factor, wf_name='scale_func'):
"""Workflow to scale func data.
Parameters
----------
scaling_factor : float
Scale the size of the dataset voxels by the factor.
wf_name : string
name of the workflow
Workflow Inputs::
inputspec.func : func file or a list of func/rest nifti file
            User input functional (T2*) Image
Workflow Outputs::
outputspec.scaled_func : string (nifti file)
Path to Output image with scaled data
Order of commands:
    - Scale the size of the dataset voxels by the factor 'fac'. For details see `3drefit <https://afni.nimh.nih.gov/pub/dist/doc/program_help/3drefit.html>`_::
3drefit -xyzscale fac rest.nii.gz
"""
# allocate a workflow object
preproc = pe.Workflow(name=wf_name)
# configure the workflow's input spec
inputNode = pe.Node(util.IdentityInterface(fields=['func']),
name='inputspec')
# configure the workflow's output spec
outputNode = pe.Node(util.IdentityInterface(fields=['scaled_func']),
name='outputspec')
# allocate a node to edit the functional file
func_scale = pe.Node(interface=afni_utils.Refit(),
name='func_scale')
func_scale.inputs.xyzscale = scaling_factor
    # wire in the func_scale node
preproc.connect(inputNode, 'func',
func_scale, 'in_file')
# wire the output
preproc.connect(func_scale, 'out_file',
outputNode, 'scaled_func')
return preproc
def create_wf_edit_func(wf_name="edit_func"):
    """Workflow to edit the scan to the prescribed TRs.
Workflow Inputs::
inputspec.func : func file or a list of func/rest nifti file
            User input functional (T2*) Image
inputspec.start_idx : string
Starting volume/slice of the functional image (optional)
inputspec.stop_idx : string
Last volume/slice of the functional image (optional)
Workflow Outputs::
outputspec.edited_func : string (nifti file)
Path to Output image with the initial few slices dropped
Order of commands:
- Get the start and the end volume index of the functional run. If not defined by the user, return the first and last volume.
get_idx(in_files, stop_idx, start_idx)
- Dropping the initial TRs. For details see `3dcalc <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcalc.html>`_::
3dcalc -a rest.nii.gz[4..299]
-expr 'a'
-prefix rest_3dc.nii.gz
"""
# allocate a workflow object
preproc = pe.Workflow(name=wf_name)
# configure the workflow's input spec
inputNode = pe.Node(util.IdentityInterface(fields=['func',
'start_idx',
'stop_idx']),
name='inputspec')
# configure the workflow's output spec
outputNode = pe.Node(util.IdentityInterface(fields=['edited_func']),
name='outputspec')
# allocate a node to check that the requested edits are
# reasonable given the data
func_get_idx = pe.Node(util.Function(input_names=['in_files',
'stop_idx',
'start_idx'],
output_names=['stopidx',
'startidx'],
function=get_idx),
name='func_get_idx')
# wire in the func_get_idx node
preproc.connect(inputNode, 'func',
func_get_idx, 'in_files')
preproc.connect(inputNode, 'start_idx',
func_get_idx, 'start_idx')
preproc.connect(inputNode, 'stop_idx',
func_get_idx, 'stop_idx')
# allocate a node to edit the functional file
func_drop_trs = pe.Node(interface=afni_utils.Calc(),
name='func_drop_trs',
mem_gb=0.37,
mem_x=(739971956005215 / 151115727451828646838272,
'in_file_a'))
func_drop_trs.inputs.expr = 'a'
func_drop_trs.inputs.outputtype = 'NIFTI_GZ'
# wire in the inputs
preproc.connect(inputNode, 'func',
func_drop_trs, 'in_file_a')
preproc.connect(func_get_idx, 'startidx',
func_drop_trs, 'start_idx')
preproc.connect(func_get_idx, 'stopidx',
func_drop_trs, 'stop_idx')
# wire the output
preproc.connect(func_drop_trs, 'out_file',
outputNode, 'edited_func')
return preproc
def slice_timing_wf(name='slice_timing', tpattern=None, tzero=None):
# allocate a workflow object
wf = pe.Workflow(name=name)
# configure the workflow's input spec
inputNode = pe.Node(util.IdentityInterface(fields=['func_ts',
'tr',
'tpattern']),
name='inputspec')
# configure the workflow's output spec
outputNode = pe.Node(
util.IdentityInterface(fields=['slice_time_corrected']),
name='outputspec')
# create TShift AFNI node
func_slice_timing_correction = pe.Node(interface=preprocess.TShift(),
name='slice_timing',
mem_gb=0.45,
mem_x=(5247073869855161 /
604462909807314587353088,
'in_file'))
func_slice_timing_correction.inputs.outputtype = 'NIFTI_GZ'
if tzero is not None:
func_slice_timing_correction.inputs.tzero = tzero
wf.connect([
(
inputNode,
func_slice_timing_correction,
[
(
'func_ts',
'in_file'
),
# (
# # add the @ prefix to the tpattern file going into
# # AFNI 3dTshift - needed this so the tpattern file
# # output from get_scan_params would be tied downstream
# # via a connection (to avoid poofing)
# ('tpattern', nullify, add_afni_prefix),
# 'tpattern'
# ),
(
('tr', nullify),
'tr'
),
]
),
])
if tpattern is not None:
func_slice_timing_correction.inputs.tpattern = tpattern
else:
wf.connect(inputNode, ('tpattern', nullify, add_afni_prefix),
func_slice_timing_correction, 'tpattern')
wf.connect(func_slice_timing_correction, 'out_file',
outputNode, 'slice_time_corrected')
return wf
def get_idx(in_files, stop_idx=None, start_idx=None):
"""
    Get the first and the last volume indices for the functional run.
    It verifies the user-specified first and last volumes; if the values
    are not valid, it calculates and returns the very first and the last
    volume indices instead.
    Parameters
    ----------
    in_files : string (nifti file)
        Path to the input functional run
    stop_idx : int
        Last volume to be considered, specified by user
        in the configuration file
    start_idx : int
        First volume to be considered, specified by user
        in the configuration file
    Returns
    -------
    stop_idx : int
        Value of the last volume to consider for the functional run
    start_idx : int
        Value of the first volume to consider for the functional run
"""
# Import packages
from nibabel import load
# Init variables
img = load(in_files)
hdr = img.get_header()
shape = hdr.get_data_shape()
# Check to make sure the input file is 4-dimensional
if len(shape) != 4:
raise TypeError('Input nifti file: %s is not a 4D file' % in_files)
# Grab the number of volumes
nvols = int(hdr.get_data_shape()[3])
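    # e.g. for a 300-volume run with start_idx=4 and stop_idx=None, the
    # checks below yield (stopidx, startidx) == (299, 4), i.e. volumes
    # 4..299 are kept.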
    if (start_idx is None) or (int(start_idx) < 0) or (
            int(start_idx) > (nvols - 1)):
startidx = 0
else:
startidx = int(start_idx)
    if (stop_idx is None) or (int(stop_idx) > (nvols - 1)):
stopidx = nvols - 1
else:
stopidx = int(stop_idx)
return stopidx, startidx
def motion_correct_connections(wf, cfg, strat_pool, pipe_num, opt):
    if opt not in ('3dvolreg', 'mcflirt'):
raise Exception("\n\n[!] Error: The 'tool' parameter of the "
"'motion_correction' workflow must be either "
"'3dvolreg' or 'mcflirt'.\n\nTool input: "
"{0}\n\n".format(opt))
if cfg:
if int(cfg.pipeline_setup['system_config'][
'max_cores_per_participant']) > 1:
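            # Split the functional time series into chunks so 3dvolreg can be
            # run on each chunk in parallel (MapNode below); the chunk outputs
            # are concatenated back into a single series afterwards.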
chunk_imports = ['import nibabel as nb']
chunk = pe.Node(Function(input_names=['func_file',
'n_chunks',
'chunk_size'],
output_names=['TR_ranges'],
function=chunk_ts,
imports=chunk_imports),
name=f'chunk_{pipe_num}')
#chunk.inputs.n_chunks = int(cfg.pipeline_setup['system_config'][
# 'max_cores_per_participant'])
# 10-TR sized chunks
chunk.inputs.chunk_size = 10
node, out = strat_pool.get_data(["desc-preproc_bold", "bold"])
wf.connect(node, out, chunk, 'func_file')
split_imports = ['import os', 'import subprocess']
split = pe.Node(Function(input_names=['func_file',
'tr_ranges'],
output_names=['split_funcs'],
function=split_ts_chunks,
imports=split_imports),
name=f'split_{pipe_num}')
node, out = strat_pool.get_data(['desc-preproc_bold', 'bold'])
wf.connect(node, out, split, 'func_file')
wf.connect(chunk, 'TR_ranges', split, 'tr_ranges')
out_split_func = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_split_func_{pipe_num}')
wf.connect(split, 'split_funcs', out_split_func, 'out_file')
func_motion_correct = pe.MapNode(interface=preprocess.Volreg(),
name=f'func_generate_ref_{pipe_num}',
iterfield=['in_file'])
wf.connect(out_split_func, 'out_file',
func_motion_correct, 'in_file')
func_concat = pe.Node(interface=afni_utils.TCat(),
name=f'func_concat_{pipe_num}')
func_concat.inputs.outputtype = 'NIFTI_GZ'
wf.connect(func_motion_correct, 'out_file',
func_concat, 'in_files')
out_motion = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_motion_{pipe_num}')
wf.connect(func_concat, 'out_file', out_motion, 'out_file')
else:
out_split_func = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_split_func_{pipe_num}')
node, out = strat_pool.get_data(['desc-preproc_bold', 'bold'])
wf.connect(node, out, out_split_func, 'out_file')
func_motion_correct = pe.Node(interface=preprocess.Volreg(),
name=f'func_generate_ref_{pipe_num}')
wf.connect(out_split_func, 'out_file',
func_motion_correct, 'in_file')
out_motion = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_motion_{pipe_num}')
wf.connect(func_motion_correct, 'out_file',
out_motion, 'out_file')
else:
out_split_func = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_split_func_{pipe_num}')
node, out = strat_pool.get_data(['desc-preproc_bold', 'bold'])
wf.connect(node, out, out_split_func, 'out_file')
func_motion_correct = pe.Node(interface=preprocess.Volreg(),
name=f'func_generate_ref_{pipe_num}')
wf.connect(out_split_func, 'out_file',
func_motion_correct, 'in_file')
out_motion = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_motion_{pipe_num}')
wf.connect(func_motion_correct, 'out_file', out_motion, 'out_file')
func_motion_correct.inputs.zpad = 4
func_motion_correct.inputs.outputtype = 'NIFTI_GZ'
    args = '-Fourier'
if cfg.functional_preproc['motion_estimates_and_correction'][
'motion_correction']['AFNI-3dvolreg']['functional_volreg_twopass']:
args = f'-twopass {args}'
func_motion_correct.inputs.args = args
# Calculate motion parameters
if opt == '3dvolreg':
func_motion_correct_A = func_motion_correct.clone(
f'func_motion_correct_3dvolreg_{pipe_num}')
func_motion_correct_A.inputs.md1d_file = 'max_displacement.1D'
func_motion_correct_A.inputs.args = args
wf.connect(out_split_func, 'out_file',
func_motion_correct_A, 'in_file')
node, out = strat_pool.get_data('motion-basefile')
wf.connect(node, out, func_motion_correct_A, 'basefile')
if cfg:
if int(cfg.pipeline_setup['system_config'][
'max_cores_per_participant']) > 1:
motion_concat = pe.Node(interface=afni_utils.TCat(),
name=f'motion_concat_{pipe_num}')
motion_concat.inputs.outputtype = 'NIFTI_GZ'
wf.connect(func_motion_correct_A, 'out_file',
motion_concat, 'in_files')
out_motion_A = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_motion_A_{pipe_num}')
wf.connect(motion_concat, 'out_file',
out_motion_A, 'out_file')
concat_imports = ['import os']
md1d_concat = pe.Node(Function(input_names=['in_files'],
output_names=['out_file'],
function=oned_text_concat,
imports=concat_imports),
name=f'md1d_concat_{pipe_num}')
wf.connect(func_motion_correct_A, 'md1d_file',
md1d_concat, 'in_files')
oned_concat = pe.Node(Function(input_names=['in_files'],
output_names=['out_file'],
function=oned_text_concat,
imports=concat_imports),
name=f'oned_concat_{pipe_num}')
wf.connect(func_motion_correct_A, 'oned_file',
oned_concat, 'in_files')
oned_matrix_concat = pe.Node(
Function(input_names=['in_files'],
output_names=['out_file'],
function=oned_text_concat,
imports=concat_imports),
name=f'oned_matrix_concat_{pipe_num}')
wf.connect(func_motion_correct_A, 'oned_matrix_save',
oned_matrix_concat, 'in_files')
out_md1d = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_md1d_{pipe_num}')
wf.connect(md1d_concat, 'out_file',
out_md1d, 'out_file')
out_oned = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_oned_{pipe_num}')
wf.connect(oned_concat, 'out_file',
out_oned, 'out_file')
out_oned_matrix = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_oned_matrix_{pipe_num}')
wf.connect(oned_matrix_concat, 'out_file',
out_oned_matrix, 'out_file')
else:
out_motion_A = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_motion_A_{pipe_num}')
wf.connect(func_motion_correct_A, 'out_file',
out_motion_A, 'out_file')
out_md1d = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_md1d_{pipe_num}')
wf.connect(func_motion_correct_A, 'md1d_file',
out_md1d, 'out_file')
out_oned = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_oned_{pipe_num}')
wf.connect(func_motion_correct_A, 'oned_file',
out_oned, 'out_file')
out_oned_matrix = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_oned_matrix_{pipe_num}')
wf.connect(func_motion_correct_A, 'oned_matrix_save',
out_oned_matrix, 'out_file')
else:
out_motion_A = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_motion_A_{pipe_num}')
wf.connect(func_motion_correct_A, 'out_file',
out_motion_A, 'out_file')
out_md1d = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_md1d_{pipe_num}')
wf.connect(func_motion_correct_A, 'md1d_file',
out_md1d, 'out_file')
out_oned = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_oned_{pipe_num}')
wf.connect(func_motion_correct_A, 'oned_file',
out_oned, 'out_file')
out_oned_matrix = pe.Node(
interface=util.IdentityInterface(fields=['out_file']),
name=f'out_oned_matrix_{pipe_num}')
wf.connect(func_motion_correct_A, 'oned_matrix_save',
out_oned_matrix, 'out_file')
outputs = {
'desc-preproc_bold': (out_motion_A, 'out_file'),
'desc-motion_bold': (out_motion_A, 'out_file'),
'max-displacement': (out_md1d, 'out_file'),
'movement-parameters': (out_oned, 'out_file'),
'coordinate-transformation': (out_oned_matrix, 'out_file')
}
elif opt == 'mcflirt':
func_motion_correct_A = pe.Node(
interface=fsl.MCFLIRT(save_mats=True, save_plots=True),
name=f'func_motion_correct_mcflirt_{pipe_num}', mem_gb=2.5)
func_motion_correct_A.inputs.save_mats = True
func_motion_correct_A.inputs.save_plots = True
func_motion_correct_A.inputs.save_rms = True
node, out = strat_pool.get_data(['desc-preproc_bold', 'bold'])
wf.connect(node, out, func_motion_correct_A, 'in_file')
node, out = strat_pool.get_data('motion-basefile')
wf.connect(node, out, func_motion_correct_A, 'ref_file')
normalize_motion_params = pe.Node(Function(input_names=['in_file'],
output_names=['out_file'],
function=normalize_motion_parameters),
name=f'norm_motion_params_{pipe_num}')
wf.connect(func_motion_correct_A, 'par_file',
normalize_motion_params, 'in_file')
get_rms_abs = pe.Node(Function(input_names=['rms_files'],
output_names=['abs_file',
'rels_file'],
function=get_mcflirt_rms_abs),
name=f'get_mcflirt_rms_abs_{pipe_num}')
wf.connect(func_motion_correct_A, 'rms_files',
get_rms_abs, 'rms_files')
outputs = {
'desc-preproc_bold': (func_motion_correct_A, 'out_file'),
'desc-motion_bold': (func_motion_correct_A, 'out_file'),
'max-displacement': (get_rms_abs, 'abs_file'),
'rels-displacement': (get_rms_abs, 'rels_file'),
'movement-parameters': (normalize_motion_params, 'out_file'),
'coordinate-transformation': (func_motion_correct_A, 'mat_file')
}
return (wf, outputs)
def func_scaling(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "func_scaling",
"config": ["functional_preproc", "scaling"],
"switch": ["run"],
"option_key": "None",
"option_val": "None",
"inputs": [["desc-preproc_bold", "bold"]],
"outputs": ["desc-preproc_bold"]}
'''
scale_func_wf = create_scale_func_wf(
scaling_factor=cfg.scaling_factor,
wf_name=f"scale_func_{pipe_num}"
)
node, out = strat_pool.get_data(["desc-preproc_bold", "bold"])
wf.connect(node, out, scale_func_wf, 'inputspec.func')
outputs = {
'desc-preproc_bold': (scale_func_wf, 'outputspec.scaled_func')
}
return (wf, outputs)
def func_truncate(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "func_truncate",
"config": ["functional_preproc", "truncation"],
"switch": "None",
"option_key": "None",
"option_val": "None",
"inputs": [["desc-preproc_bold", "bold"]],
"outputs": {
"desc-preproc_bold": {
"Description": "Truncated functional time-series BOLD data."
}}
}
'''
# if cfg.functional_preproc['truncation']['start_tr'] == 0 and \
# cfg.functional_preproc['truncation']['stop_tr'] == None:
# data, key = strat_pool.get_data(["desc-preproc_bold", "bold"],
# True)
# outputs = {key: data}
# return (wf, outputs)
trunc_wf = create_wf_edit_func(
wf_name=f"edit_func_{pipe_num}"
)
trunc_wf.inputs.inputspec.start_idx = cfg.functional_preproc[
'truncation']['start_tr']
trunc_wf.inputs.inputspec.stop_idx = cfg.functional_preproc['truncation'][
'stop_tr']
node, out = strat_pool.get_data(["desc-preproc_bold", "bold"])
wf.connect(node, out, trunc_wf, 'inputspec.func')
outputs = {
'desc-preproc_bold': (trunc_wf, 'outputspec.edited_func')
}
return (wf, outputs)
def func_despike(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "func_despike",
"config": ["functional_preproc", "despiking"],
"switch": ["run"],
"option_key": "None",
"option_val": "None",
"inputs": [["desc-preproc_bold", "bold"]],
"outputs": {
"desc-preproc_bold": {
"Description": "De-spiked BOLD time-series via AFNI 3dDespike."
}}
}
'''
despike = pe.Node(interface=preprocess.Despike(),
name=f'func_despiked_{pipe_num}',
mem_gb=0.66,
mem_x=(8251808479088459 / 1208925819614629174706176,
'in_file'))
despike.inputs.outputtype = 'NIFTI_GZ'
node, out = strat_pool.get_data(["desc-preproc_bold", "bold"])
wf.connect(node, out, despike, 'in_file')
outputs = {
'desc-preproc_bold': (despike, 'out_file')
}
return (wf, outputs)
def func_slice_time(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "func_slice_time",
"config": ["functional_preproc", "slice_timing_correction"],
"switch": ["run"],
"option_key": "None",
"option_val": "None",
"inputs": [["desc-preproc_bold", "bold"],
"TR",
"tpattern"],
"outputs": {
"desc-preproc_bold": {
"Description": "Slice-time corrected BOLD time-series via AFNI 3dTShift."
},
"desc-stc_bold": {
"Description": "Slice-time corrected BOLD time-series via AFNI 3dTShift."}}
}
'''
slice_time = slice_timing_wf(name='func_slice_timing_correction_'
f'{pipe_num}',
tpattern=cfg.functional_preproc[
'slice_timing_correction']['tpattern'],
tzero=cfg.functional_preproc[
'slice_timing_correction']['tzero'])
node, out = strat_pool.get_data(["desc-preproc_bold", "bold"])
wf.connect(node, out, slice_time, 'inputspec.func_ts')
node, out = strat_pool.get_data('TR')
wf.connect(node, out, slice_time, 'inputspec.tr')
node, out = strat_pool.get_data('tpattern')
wf.connect(node, out, slice_time, 'inputspec.tpattern')
outputs = {
'desc-preproc_bold': (slice_time, 'outputspec.slice_time_corrected'),
'desc-stc_bold': (slice_time, 'outputspec.slice_time_corrected')
}
return (wf, outputs)
def func_reorient(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "func_reorient",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": "None",
"option_val": "None",
"inputs": [["desc-preproc_bold", "bold"]],
"outputs": ["desc-preproc_bold", "desc-reorient_bold"]}
'''
func_deoblique = pe.Node(interface=afni_utils.Refit(),
name=f'func_deoblique_{pipe_num}',
mem_gb=0.68,
mem_x=(4664065662093477 /
1208925819614629174706176,
'in_file'))
func_deoblique.inputs.deoblique = True
node, out = strat_pool.get_data(['desc-preproc_bold', 'bold'])
wf.connect(node, out, func_deoblique, 'in_file')
func_reorient = pe.Node(interface=afni_utils.Resample(),
name=f'func_reorient_{pipe_num}',
mem_gb=0.68,
mem_x=(9005234470657405 /
1208925819614629174706176,
'in_file'))
func_reorient.inputs.orientation = 'RPI'
func_reorient.inputs.outputtype = 'NIFTI_GZ'
wf.connect(func_deoblique, 'out_file', func_reorient, 'in_file')
outputs = {
'desc-preproc_bold': (func_reorient, 'out_file'),
'desc-reorient_bold': (func_reorient, 'out_file')
}
return (wf, outputs)
def get_motion_ref(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "get_motion_ref",
"config": ["functional_preproc", "motion_estimates_and_correction",
"motion_correction"],
"switch": "None",
"option_key": "motion_correction_reference",
"option_val": ["mean", "median", "selected_volume", "fmriprep_reference"],
"inputs": [["desc-preproc_bold", "bold"],
"bold"],
"outputs": ["motion-basefile"]}
'''
    if opt not in ('mean', 'median', 'selected_volume', 'fmriprep_reference'):
raise Exception("\n\n[!] Error: The 'tool' parameter of the "
"'motion_correction_reference' workflow must be either "
"'mean' or 'median' or 'selected_volume' or 'fmriprep_reference'.\n\nTool input: "
"{0}\n\n".format(opt))
if opt == 'mean':
func_get_RPI = pe.Node(interface=afni_utils.TStat(),
name=f'func_get_mean_RPI_{pipe_num}',
mem_gb=0.48,
mem_x=(1435097126797993 /
302231454903657293676544,
'in_file'))
func_get_RPI.inputs.options = '-mean'
func_get_RPI.inputs.outputtype = 'NIFTI_GZ'
node, out = strat_pool.get_data(['desc-preproc_bold', 'bold'])
wf.connect(node, out, func_get_RPI, 'in_file')
elif opt == 'median':
func_get_RPI = pe.Node(interface=afni_utils.TStat(),
name=f'func_get_median_RPI_{pipe_num}')
func_get_RPI.inputs.options = '-median'
func_get_RPI.inputs.outputtype = 'NIFTI_GZ'
node, out = strat_pool.get_data(['desc-preproc_bold', 'bold'])
wf.connect(node, out, func_get_RPI, 'in_file')
elif opt == 'selected_volume':
func_get_RPI = pe.Node(interface=afni.Calc(),
name=f'func_get_selected_RPI_{pipe_num}')
func_get_RPI.inputs.set(
expr='a',
single_idx=cfg.functional_preproc['motion_estimates_and_correction'][
'motion_correction']['motion_correction_reference_volume'],
outputtype='NIFTI_GZ'
)
node, out = strat_pool.get_data(['desc-preproc_bold', 'bold'])
wf.connect(node, out, func_get_RPI, 'in_file_a')
elif opt == 'fmriprep_reference':
func_get_RPI = pe.Node(util.Function(input_names=['in_file'],
output_names=['out_file'],
function=estimate_reference_image),
name=f'func_get_fmriprep_ref_{pipe_num}')
node, out = strat_pool.get_data('bold')
wf.connect(node, out, func_get_RPI, 'in_file')
outputs = {
'motion-basefile': (func_get_RPI, 'out_file')
}
return (wf, outputs)
def func_motion_correct(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "motion_correction",
"config": ["functional_preproc", "motion_estimates_and_correction",
"motion_correction"],
"switch": "None",
"option_key": "using",
"option_val": ["3dvolreg", "mcflirt"],
"inputs": [(["desc-preproc_bold", "bold"],
"motion-basefile")],
"outputs": ["desc-preproc_bold",
"desc-motion_bold",
"max-displacement",
"rels-displacement",
"movement-parameters",
"coordinate-transformation"]}
'''
wf, outputs = motion_correct_connections(wf, cfg, strat_pool, pipe_num,
opt)
return (wf, outputs)
def func_motion_estimates(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "motion_estimates",
"config": ["functional_preproc", "motion_estimates_and_correction",
"motion_correction"],
"switch": "None",
"option_key": "using",
"option_val": ["3dvolreg", "mcflirt"],
"inputs": [(["desc-preproc_bold", "bold"],
"motion-basefile")],
"outputs": ["max-displacement",
"rels-displacement",
"movement-parameters",
"coordinate-transformation"]}
'''
wf, wf_outputs = motion_correct_connections(wf, cfg, strat_pool, pipe_num,
opt)
outputs = {
'max-displacement': wf_outputs['max-displacement'],
'movement-parameters': wf_outputs['movement-parameters']
}
if 'coordinate-transformation' in wf_outputs:
outputs['coordinate-transformation'] = \
wf_outputs['coordinate-transformation']
if 'rels-displacement' in wf_outputs:
outputs['rels-displacement'] = wf_outputs['rels-displacement']
return (wf, outputs)
def func_motion_correct_only(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "motion_correction_only",
"config": ["functional_preproc", "motion_estimates_and_correction",
"motion_correction"],
"switch": "None",
"option_key": "using",
"option_val": ["3dvolreg", "mcflirt"],
"inputs": [(["desc-preproc_bold", "bold"],
"motion-basefile")],
"outputs": ["desc-preproc_bold",
"desc-motion_bold"]}
'''
wf, wf_outputs = motion_correct_connections(wf, cfg, strat_pool, pipe_num,
opt)
outputs = {
'desc-motion_bold': wf_outputs['desc-motion_bold']
}
return (wf, outputs)
def motion_estimate_filter(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "motion_estimate_filter",
"config": ["functional_preproc", "motion_estimates_and_correction",
"motion_estimate_filter"],
"switch": ["run"],
"option_key": "filter_type",
"option_val": ["notch", "lowpass"],
"inputs": ["movement-parameters",
"TR"],
"outputs": ["movement-parameters",
"motion-filter-info",
"motion-filter-plot"]}
'''
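    # Function node that applies the configured notch or low-pass filter to
    # the motion parameter time series (implemented in
    # CPAC.func_preproc.utils.notch_filter_motion).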
notch_imports = ['import os', 'import numpy as np',
'from scipy.signal import iirnotch, lfilter, firwin, freqz',
'from matplotlib import pyplot as plt',
'from CPAC.func_preproc.utils import degrees_to_mm, mm_to_degrees']
notch = pe.Node(Function(input_names=['motion_params',
'filter_type',
'TR',
'fc_RR_min',
'fc_RR_max',
'center_freq',
'freq_bw',
'lowpass_cutoff',
'filter_order'],
output_names=[
'filtered_motion_params',
'filter_info',
'filter_plot'],
function=notch_filter_motion,
imports=notch_imports),
name=f'filter_motion_params_{pipe_num}')
notch.inputs.filter_type = cfg.functional_preproc[
"motion_estimates_and_correction"][
"motion_estimate_filter"]['filter_type']
notch.inputs.fc_RR_min = cfg.functional_preproc[
"motion_estimates_and_correction"][
"motion_estimate_filter"]['breathing_rate_min']
notch.inputs.fc_RR_max = cfg.functional_preproc[
"motion_estimates_and_correction"][
"motion_estimate_filter"]['breathing_rate_max']
notch.inputs.center_freq = cfg.functional_preproc[
"motion_estimates_and_correction"][
"motion_estimate_filter"]['center_frequency']
notch.inputs.freq_bw = cfg.functional_preproc[
"motion_estimates_and_correction"][
"motion_estimate_filter"]['filter_bandwidth']
notch.inputs.lowpass_cutoff = cfg.functional_preproc[
"motion_estimates_and_correction"][
"motion_estimate_filter"]['lowpass_cutoff']
notch.inputs.filter_order = cfg.functional_preproc[
"motion_estimates_and_correction"][
"motion_estimate_filter"]['filter_order']
node, out = strat_pool.get_data('movement-parameters')
wf.connect(node, out, notch, 'motion_params')
node, out = strat_pool.get_data('TR')
wf.connect(node, out, notch, 'TR')
outputs = {
'motion-filter-info': (notch, 'filter_info'),
'motion-filter-plot': (notch, 'filter_plot'),
'movement-parameters': (notch, 'filtered_motion_params')
}
return (wf, outputs)
def calc_motion_stats(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "calc_motion_stats",
"config": "None",
"switch": [["functional_preproc", "run"],
["functional_preproc", "motion_estimates_and_correction",
"motion_estimates", "calculate_motion_after"]],
"option_key": "None",
"option_val": "None",
"inputs": [("desc-motion_bold",
"space-bold_desc-brain_mask",
"movement-parameters",
"max-displacement",
"rels-displacement",
"coordinate-transformation"),
"subject",
"scan"],
"outputs": ["framewise-displacement-power",
"framewise-displacement-jenkinson",
"dvars",
"power-params",
"motion-params"]}
'''
motion_prov = strat_pool.get_cpac_provenance('movement-parameters')
motion_correct_tool = check_prov_for_motion_tool(motion_prov)
gen_motion_stats = motion_power_statistics(
name=f'gen_motion_stats_{pipe_num}',
motion_correct_tool=motion_correct_tool)
# Special case where the workflow is not getting outputs from
# resource pool but is connected to functional datasource
node, out_file = strat_pool.get_data('subject')
wf.connect(node, out_file,
gen_motion_stats, 'inputspec.subject_id')
node, out_file = strat_pool.get_data('scan')
wf.connect(node, out_file,
gen_motion_stats, 'inputspec.scan_id')
node, out_file = strat_pool.get_data("desc-motion_bold")
wf.connect(node, out_file,
gen_motion_stats, 'inputspec.motion_correct')
node, out_file = strat_pool.get_data('space-bold_desc-brain_mask')
wf.connect(node, out_file,
gen_motion_stats, 'inputspec.mask')
node, out_file = strat_pool.get_data('movement-parameters')
wf.connect(node, out_file,
gen_motion_stats,
'inputspec.movement_parameters')
node, out_file = strat_pool.get_data('max-displacement')
wf.connect(node, out_file,
gen_motion_stats,
'inputspec.max_displacement')
if strat_pool.check_rpool('rels-displacement'):
node, out_file = strat_pool.get_data('rels-displacement')
wf.connect(node, out_file, gen_motion_stats,
'inputspec.rels_displacement')
if strat_pool.check_rpool('coordinate-transformation'):
node, out_file = strat_pool.get_data('coordinate-transformation')
wf.connect(node, out_file, gen_motion_stats,
'inputspec.transformations')
outputs = {
'framewise-displacement-power':
(gen_motion_stats, 'outputspec.FDP_1D'),
'framewise-displacement-jenkinson':
(gen_motion_stats, 'outputspec.FDJ_1D'),
'dvars': (gen_motion_stats, 'outputspec.DVARS_1D'),
'power-params': (gen_motion_stats, 'outputspec.power_params'),
'motion-params': (gen_motion_stats, 'outputspec.motion_params')
}
return (wf, outputs)
def bold_mask_afni(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "bold_mask_afni",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": ["func_masking", "using"],
"option_val": "AFNI",
"inputs": [["desc-preproc_bold", "bold"]],
"outputs": {
"space-bold_desc-brain_mask": {
"Description": "Binary brain mask of the BOLD functional time-series
created by AFNI 3dAutomask."}}
}
'''
func_get_brain_mask = pe.Node(interface=preprocess.Automask(),
name=f'func_get_brain_mask_AFNI_{pipe_num}')
func_get_brain_mask.inputs.outputtype = 'NIFTI_GZ'
node, out = strat_pool.get_data(["desc-preproc_bold",
"bold"])
wf.connect(node, out, func_get_brain_mask, 'in_file')
outputs = {
'space-bold_desc-brain_mask': (func_get_brain_mask, 'out_file')
}
return (wf, outputs)
def bold_mask_fsl(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "bold_mask_fsl",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": ["func_masking", "using"],
"option_val": "FSL",
"inputs": [["desc-preproc_bold", "bold"]],
"outputs": ["space-bold_desc-brain_mask"]}
'''
inputnode_bet = pe.Node(
util.IdentityInterface(fields=['frac',
'mesh_boolean',
'outline',
'padding',
'radius',
'reduce_bias',
'remove_eyes',
'robust',
'skull',
'surfaces',
'threshold',
'vertical_gradient']),
name=f'BET_options_{pipe_num}')
func_get_brain_mask = pe.Node(interface=fsl.BET(),
name=f'func_get_brain_mask_BET_{pipe_num}')
func_get_brain_mask.inputs.output_type = 'NIFTI_GZ'
func_get_brain_mask.inputs.mask = True
inputnode_bet.inputs.set(
frac=cfg.functional_preproc['func_masking']['FSL-BET']['frac'],
mesh_boolean=cfg.functional_preproc['func_masking']['FSL-BET'][
'mesh_boolean'],
outline=cfg.functional_preproc['func_masking']['FSL-BET'][
'outline'],
padding=cfg.functional_preproc['func_masking']['FSL-BET'][
'padding'],
radius=cfg.functional_preproc['func_masking']['FSL-BET']['radius'],
reduce_bias=cfg.functional_preproc['func_masking']['FSL-BET'][
'reduce_bias'],
remove_eyes=cfg.functional_preproc['func_masking']['FSL-BET'][
'remove_eyes'],
robust=cfg.functional_preproc['func_masking']['FSL-BET']['robust'],
skull=cfg.functional_preproc['func_masking']['FSL-BET']['skull'],
surfaces=cfg.functional_preproc['func_masking']['FSL-BET'][
'surfaces'],
threshold=cfg.functional_preproc['func_masking']['FSL-BET'][
'threshold'],
vertical_gradient=
cfg.functional_preproc['func_masking']['FSL-BET'][
'vertical_gradient'],
)
wf.connect([
(inputnode_bet, func_get_brain_mask, [
('frac', 'frac'),
('mesh_boolean', 'mesh'),
('outline', 'outline'),
('padding', 'padding'),
('radius', 'radius'),
('reduce_bias', 'reduce_bias'),
('remove_eyes', 'remove_eyes'),
('robust', 'robust'),
('skull', 'skull'),
('surfaces', 'surfaces'),
('threshold', 'threshold'),
('vertical_gradient', 'vertical_gradient'),
])
])
if cfg.functional_preproc['func_masking']['FSL-BET'][
'functional_mean_boolean']:
func_skull_mean = pe.Node(interface=afni_utils.TStat(),
name=f'func_mean_skull_{pipe_num}')
func_skull_mean.inputs.options = '-mean'
func_skull_mean.inputs.outputtype = 'NIFTI_GZ'
node, out = strat_pool.get_data(["desc-preproc_bold", "bold"])
wf.connect(node, out, func_skull_mean, 'in_file')
out_node, out_file = (func_skull_mean, 'out_file')
if cfg.functional_preproc['func_masking']['FSL-BET'][
'functional_mean_thr']['run']:
# T=$(fslstats ${subject}_tmean.nii.gz -p 98)
threshold_T = pe.Node(interface=fsl.ImageStats(),
name=f'func_mean_skull_thr_value_{pipe_num}',
iterfield=['in_file'])
threshold_T.inputs.op_string = "-p %f " % (cfg.functional_preproc['func_masking']['FSL-BET']['functional_mean_thr']['threshold_value'])
wf.connect(func_skull_mean, 'out_file', threshold_T, 'in_file')
# z=$(echo "$T / 10" | bc -l)
def form_thr_string(thr):
threshold_z = str(float(thr/10))
return '-thr %s' % (threshold_z)
form_thr_string = pe.Node(util.Function(input_names=['thr'],
output_names=['out_str'],
function=form_thr_string),
name=f'form_thr_string_{pipe_num}')
wf.connect(threshold_T, 'out_stat', form_thr_string, 'thr')
# fslmaths ${subject}_tmean.nii.gz -thr ${z} ${subject}_tmean_thr.nii.gz
func_skull_mean_thr = pe.Node(interface=fsl.ImageMaths(),
name=f'func_mean_skull_thr_{pipe_num}')
wf.connect(func_skull_mean, 'out_file', func_skull_mean_thr, 'in_file')
wf.connect(form_thr_string, 'out_str', func_skull_mean_thr, 'op_string')
out_node, out_file = (func_skull_mean_thr, 'out_file')
if cfg.functional_preproc['func_masking']['FSL-BET'][
'functional_mean_bias_correction']:
# fast --nopve -B ${subject}_tmean_thr.nii.gz
func_mean_skull_fast = pe.Node(interface=fsl.FAST(),
name=f'func_mean_skull_fast_{pipe_num}')
func_mean_skull_fast.inputs.no_pve = True
func_mean_skull_fast.inputs.output_biascorrected = True
wf.connect(out_node, out_file, func_mean_skull_fast, 'in_files')
out_node, out_file = (func_mean_skull_fast, 'restored_image')
wf.connect(out_node, out_file, func_get_brain_mask, 'in_file')
else:
func_get_brain_mask.inputs.functional = True
node, out = strat_pool.get_data(["desc-preproc_bold", "bold"])
wf.connect(node, out, func_get_brain_mask, 'in_file')
    # erode one voxel of functional brain mask
erode_one_voxel = pe.Node(interface=fsl.ErodeImage(),
name=f'erode_one_voxel_{pipe_num}')
erode_one_voxel.inputs.kernel_shape = 'box'
erode_one_voxel.inputs.kernel_size = 1.0
wf.connect(func_get_brain_mask, 'mask_file',
erode_one_voxel, 'in_file')
outputs = {
'space-bold_desc-brain_mask': (erode_one_voxel, 'out_file')
}
return (wf, outputs)
def bold_mask_fsl_afni(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "bold_mask_fsl_afni",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": ["func_masking", "using"],
"option_val": "FSL_AFNI",
"inputs": [["desc-motion_bold", "desc-preproc_bold", "bold"],
"motion-basefile"],
"outputs": ["space-bold_desc-brain_mask",
"desc-ref_bold"]}
'''
# fMRIPrep-style BOLD mask
# Ref: https://github.com/nipreps/niworkflows/blob/maint/1.3.x/niworkflows/func/util.py#L246-L514
# Initialize transforms with antsAI
init_aff = pe.Node(
AI(
metric=("Mattes", 32, "Regular", 0.2),
transform=("Affine", 0.1),
search_factor=(20, 0.12),
principal_axes=False,
convergence=(10, 1e-6, 10),
verbose=True,
),
name=f"init_aff_{pipe_num}",
n_procs=cfg.pipeline_setup['system_config']['num_OMP_threads'],
)
init_aff.inputs.fixed_image = cfg.functional_preproc[
'func_masking']['FSL_AFNI']['bold_ref']
init_aff.inputs.fixed_image_mask = cfg.functional_preproc[
'func_masking']['FSL_AFNI']['brain_mask']
init_aff.inputs.search_grid = (40, (0, 40, 40))
# Set up spatial normalization
norm = pe.Node(
ants.Registration(
winsorize_upper_quantile=0.98,
winsorize_lower_quantile=0.05,
float=True,
metric=['Mattes'],
metric_weight=[1],
radius_or_number_of_bins=[64],
transforms=['Affine'],
transform_parameters=[[0.1]],
number_of_iterations=[[200]],
convergence_window_size=[10],
convergence_threshold=[1.e-9],
sampling_strategy=['Random', 'Random'],
smoothing_sigmas=[[2]],
sigma_units=['mm', 'mm', 'mm'],
shrink_factors=[[2]],
sampling_percentage=[0.2],
use_histogram_matching=[True],
use_estimate_learning_rate_once=[True]
),
name=f"norm_{pipe_num}",
n_procs=cfg.pipeline_setup['system_config']['num_OMP_threads'],
)
norm.inputs.fixed_image = cfg.functional_preproc[
'func_masking']['FSL_AFNI']['bold_ref']
map_brainmask = pe.Node(
ants.ApplyTransforms(
interpolation="BSpline",
float=True,
),
name=f"map_brainmask_{pipe_num}",
)
# Use the higher resolution and probseg for numerical stability in rounding
map_brainmask.inputs.input_image = cfg.functional_preproc[
'func_masking']['FSL_AFNI']['brain_probseg']
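    # Threshold the warped probabilistic segmentation at 0.85 and binarize it
    # to obtain the template-derived pre-mask.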
binarize_mask = pe.Node(interface=fsl.maths.MathsCommand(),
name=f'binarize_mask_{pipe_num}')
binarize_mask.inputs.args = '-thr 0.85 -bin'
# Dilate pre_mask
pre_dilate = pe.Node(
fsl.DilateImage(
operation="max",
kernel_shape="sphere",
kernel_size=3.0,
internal_datatype="char",
),
name=f"pre_mask_dilate_{pipe_num}",
)
# Run N4 normally, force num_threads=1 for stability (images are small, no need for >1)
    n4_correct = pe.Node(
        ants.N4BiasFieldCorrection(
            dimension=3, copy_header=True, bspline_fitting_distance=200,
            shrink_factor=2, rescale_intensities=True
        ),
        name=f"n4_correct_{pipe_num}",
        n_procs=1,
    )
skullstrip_first_pass = pe.Node(
fsl.BET(frac=0.2, mask=True, functional=False),
name=f'skullstrip_first_pass_{pipe_num}')
bet_dilate = pe.Node(
fsl.DilateImage(operation='max', kernel_shape='sphere',
kernel_size=6.0, internal_datatype='char'),
name=f'skullstrip_first_dilate_{pipe_num}')
bet_mask = pe.Node(fsl.ApplyMask(), name=f'skullstrip_first_mask_'
f'{pipe_num}')
unifize = pe.Node(afni_utils.Unifize(t2=True, outputtype='NIFTI_GZ',
args='-clfrac 0.2 -rbt 18.3 65.0 90.0',
out_file="uni.nii.gz"),
name=f'unifize_{pipe_num}')
skullstrip_second_pass = pe.Node(
preprocess.Automask(dilate=1, outputtype='NIFTI_GZ'),
name=f'skullstrip_second_pass_{pipe_num}')
combine_masks = pe.Node(fsl.BinaryMaths(operation='mul'),
name=f'combine_masks_{pipe_num}')
apply_mask = pe.Node(fsl.ApplyMask(),
name=f'extract_ref_brain_bold_{pipe_num}')
node, out = strat_pool.get_data(["motion-basefile"])
wf.connect([(node, init_aff, [(out, "moving_image")]),
(node, map_brainmask, [(out, "reference_image")]),
(node, norm, [(out, "moving_image")]),
(init_aff, norm, [("output_transform", "initial_moving_transform")]),
(norm, map_brainmask, [
("reverse_invert_flags", "invert_transform_flags"),
("reverse_transforms", "transforms"),
]),
(map_brainmask, binarize_mask, [("output_image", "in_file")]),
(binarize_mask, pre_dilate, [("out_file", "in_file")]),
(pre_dilate, n4_correct, [("out_file", "mask_image")]),
(node, n4_correct, [(out, "input_image")]),
(n4_correct, skullstrip_first_pass,
[('output_image', 'in_file')]),
(skullstrip_first_pass, bet_dilate,
[('mask_file', 'in_file')]),
(bet_dilate, bet_mask, [('out_file', 'mask_file')]),
(skullstrip_first_pass, bet_mask, [('out_file', 'in_file')]),
(bet_mask, unifize, [('out_file', 'in_file')]),
(unifize, skullstrip_second_pass, [('out_file', 'in_file')]),
(skullstrip_first_pass, combine_masks,
[('mask_file', 'in_file')]),
(skullstrip_second_pass, combine_masks,
[('out_file', 'operand_file')]),
(unifize, apply_mask, [('out_file', 'in_file')]),
(combine_masks, apply_mask, [('out_file', 'mask_file')]),
])
outputs = {
'space-bold_desc-brain_mask': (combine_masks, 'out_file'),
'desc-ref_bold': (apply_mask, 'out_file')
}
return (wf, outputs)
def bold_mask_anatomical_refined(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "bold_mask_anatomical_refined",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": ["func_masking", "using"],
"option_val": "Anatomical_Refined",
"inputs": ["bold",
["desc-preproc_bold", "bold"],
"desc-brain_T1w",
"space-T1w_desc-brain_mask"],
"outputs": ["space-bold_desc-brain_mask"]}
'''
# binarize anat mask, in case it is not a binary mask.
anat_brain_mask_bin = pe.Node(interface=fsl.ImageMaths(),
name=f'anat_brain_mask_bin_{pipe_num}')
anat_brain_mask_bin.inputs.op_string = '-bin'
node, out = strat_pool.get_data('space-T1w_desc-brain_mask')
wf.connect(node, out, anat_brain_mask_bin, 'in_file')
# fill holes of anat mask
anat_mask_filled = pe.Node(interface=afni.MaskTool(),
name=f'anat_brain_mask_filled_{pipe_num}')
anat_mask_filled.inputs.fill_holes = True
anat_mask_filled.inputs.outputtype = 'NIFTI_GZ'
wf.connect(anat_brain_mask_bin, 'out_file',
anat_mask_filled, 'in_file')
# init_bold_mask : input raw func
init_bold_mask = anat_refined_mask(init_bold_mask=True,
wf_name=f'init_bold_mask_{pipe_num}')
func_deoblique = pe.Node(interface=afni_utils.Refit(),
name=f'raw_func_deoblique_{pipe_num}')
func_deoblique.inputs.deoblique = True
node, out = strat_pool.get_data('bold')
wf.connect(node, out, func_deoblique, 'in_file')
func_reorient = pe.Node(interface=afni_utils.Resample(),
name=f'raw_func_reorient_{pipe_num}')
func_reorient.inputs.orientation = 'RPI'
func_reorient.inputs.outputtype = 'NIFTI_GZ'
wf.connect(func_deoblique, 'out_file',
func_reorient, 'in_file')
wf.connect(func_reorient, 'out_file',
init_bold_mask, 'inputspec.func')
wf.connect(anat_mask_filled, 'out_file',
init_bold_mask, 'inputspec.anatomical_brain_mask')
node, out = strat_pool.get_data('desc-brain_T1w')
wf.connect(node, out, init_bold_mask, 'inputspec.anat_brain')
# dilate init func brain mask
func_tmp_brain_mask = pe.Node(interface=fsl.ImageMaths(),
name=f'func_tmp_brain_mask_dil_{pipe_num}')
func_tmp_brain_mask.inputs.op_string = '-dilM'
wf.connect(init_bold_mask, 'outputspec.func_brain_mask',
func_tmp_brain_mask, 'in_file')
# refined_bold_mask : input motion corrected func
refined_bold_mask = anat_refined_mask(init_bold_mask=False,
wf_name='refined_bold_mask'
f'_{pipe_num}')
node, out = strat_pool.get_data(["desc-preproc_bold",
"bold"])
wf.connect(node, out, refined_bold_mask, 'inputspec.func')
node, out = strat_pool.get_data('desc-brain_T1w')
wf.connect(node, out, refined_bold_mask, 'inputspec.anat_brain')
wf.connect(func_tmp_brain_mask, 'out_file',
refined_bold_mask, 'inputspec.init_func_brain_mask')
# dilate anatomical mask
if cfg.functional_preproc['func_masking']['Anatomical_Refined'][
'anatomical_mask_dilation']:
anat_mask_dilate = pe.Node(interface=afni.MaskTool(),
name=f'anat_mask_dilate_{pipe_num}')
anat_mask_dilate.inputs.dilate_inputs = '1'
anat_mask_dilate.inputs.outputtype = 'NIFTI_GZ'
wf.connect(anat_mask_filled, 'out_file',
anat_mask_dilate, 'in_file')
wf.connect(anat_mask_dilate, 'out_file',
refined_bold_mask, 'inputspec.anatomical_brain_mask')
else:
wf.connect(anat_mask_filled, 'out_file',
refined_bold_mask, 'inputspec.anatomical_brain_mask')
# get final func mask
func_mask_final = pe.Node(interface=fsl.MultiImageMaths(),
name=f'func_mask_final_{pipe_num}')
func_mask_final.inputs.op_string = "-mul %s"
wf.connect(func_tmp_brain_mask, 'out_file',
func_mask_final, 'in_file')
wf.connect(refined_bold_mask, 'outputspec.func_brain_mask',
func_mask_final, 'operand_files')
outputs = {
'space-bold_desc-brain_mask': (func_mask_final, 'out_file')
}
return (wf, outputs)
def bold_mask_anatomical_based(wf, cfg, strat_pool, pipe_num, opt=None):
'''Generate the BOLD mask by basing it off of the anatomical brain mask.
Adapted from DCAN Lab's BOLD mask method from the ABCD pipeline.
https://github.com/DCAN-Labs/DCAN-HCP/blob/master/fMRIVolume/scripts/DistortionCorrectionAndEPIToT1wReg_FLIRTBBRAndFreeSurferBBRbased.sh
Node Block:
{"name": "bold_mask_anatomical_based",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": ["func_masking", "using"],
"option_val": "Anatomical_Based",
"inputs": [["desc-preproc_bold", "bold"],
"desc-brain_T1w",
["desc-preproc_T1w", "desc-reorient_T1w", "T1w"]],
"outputs": ["space-bold_desc-brain_mask"]}
'''
# 0. Take single volume of func
func_single_volume = pe.Node(interface=afni.Calc(),
name='func_single_volume')
func_single_volume.inputs.set(
expr='a',
single_idx=1,
outputtype='NIFTI_GZ'
)
node, out = strat_pool.get_data(["desc-preproc_bold",
"bold"])
wf.connect(node, out, func_single_volume, 'in_file_a')
# 1. Register func head to anat head to get func2anat matrix
linear_reg_func_to_anat = pe.Node(interface=fsl.FLIRT(),
name='func_to_anat_linear_reg')
linear_reg_func_to_anat.inputs.dof = 6
linear_reg_func_to_anat.inputs.interp = 'spline'
linear_reg_func_to_anat.inputs.searchr_x = [30, 30]
linear_reg_func_to_anat.inputs.searchr_y = [30, 30]
linear_reg_func_to_anat.inputs.searchr_z = [30, 30]
wf.connect(func_single_volume, 'out_file',
linear_reg_func_to_anat, 'in_file')
node, out = strat_pool.get_data(["desc-preproc_T1w", "desc-reorient_T1w",
"T1w"])
wf.connect(node, out, linear_reg_func_to_anat, 'reference')
# 2. Inverse func to anat affine, to get anat-to-func transform
inv_func_to_anat_affine = pe.Node(interface=fsl.ConvertXFM(),
name='inv_func2anat_affine')
inv_func_to_anat_affine.inputs.invert_xfm = True
wf.connect(linear_reg_func_to_anat, 'out_matrix_file',
inv_func_to_anat_affine, 'in_file')
# 3. get BOLD mask
# 3.1 Apply anat-to-func transform to transfer anatomical brain to functional space
reg_anat_brain_to_func = pe.Node(interface=fsl.ApplyWarp(),
name='reg_anat_brain_to_func')
reg_anat_brain_to_func.inputs.interp = 'nn'
reg_anat_brain_to_func.inputs.relwarp = True
node, out = strat_pool.get_data("desc-brain_T1w")
wf.connect(node, out, reg_anat_brain_to_func, 'in_file')
node, out = strat_pool.get_data(["desc-preproc_bold",
"bold"])
wf.connect(node, out, reg_anat_brain_to_func, 'ref_file')
wf.connect(inv_func_to_anat_affine, 'out_file',
reg_anat_brain_to_func, 'premat')
    # 3.2 Binarize the transferred image
func_mask_bin = pe.Node(interface=fsl.ImageMaths(),
name='func_mask_bin')
func_mask_bin.inputs.op_string = '-abs -bin'
wf.connect(reg_anat_brain_to_func, 'out_file',
func_mask_bin, 'in_file')
# 3.3 Fill holes to get BOLD mask
func_mask_fill_holes = pe.Node(interface=afni.MaskTool(),
name='func_mask_fill_holes')
func_mask_fill_holes.inputs.fill_holes = True
func_mask_fill_holes.inputs.outputtype = 'NIFTI_GZ'
wf.connect(func_mask_bin, 'out_file',
func_mask_fill_holes, 'in_file')
outputs = {
'space-bold_desc-brain_mask': (func_mask_fill_holes, 'out_file')
}
return (wf, outputs)
def bold_mask_anatomical_resampled(wf, cfg, strat_pool, pipe_num, opt=None):
'''Resample anatomical brain mask in standard space to get BOLD brain mask in standard space
Adapted from DCAN Lab's BOLD mask method from the ABCD pipeline.
https://github.com/DCAN-Labs/DCAN-HCP/blob/master/fMRIVolume/scripts/OneStepResampling.sh#L121-L132
Node Block:
{"name": "bold_mask_anatomical_resampled",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": ["func_masking", "using"],
"option_val": "Anatomical_Resampled",
"inputs": [["desc-preproc_bold", "bold"],
"T1w-template-funcreg",
"space-template_desc-brain_T1w",
"space-template_desc-T1w_mask"],
"outputs": ["space-template_res-bold_desc-brain_T1w",
"space-template_desc-bold_mask",
"space-bold_desc-brain_mask"]}
'''
# applywarp --rel --interp=spline -i ${T1wImage} -r ${ResampRefIm} --premat=$FSLDIR/etc/flirtsch/ident.mat -o ${WD}/${T1wImageFile}.${FinalfMRIResolution}
anat_brain_to_func_res = pe.Node(interface=fsl.ApplyWarp(),
name=f'resample_anat_brain_in_standard_{pipe_num}')
anat_brain_to_func_res.inputs.interp = 'spline'
anat_brain_to_func_res.inputs.premat = cfg.registration_workflows[
'anatomical_registration']['registration']['FSL-FNIRT']['identity_matrix']
node, out = strat_pool.get_data('space-template_desc-brain_T1w')
wf.connect(node, out, anat_brain_to_func_res, 'in_file')
node, out = strat_pool.get_data('T1w-template-funcreg')
wf.connect(node, out, anat_brain_to_func_res, 'ref_file')
# Create brain masks in this space from the FreeSurfer output (changing resolution)
# applywarp --rel --interp=nn -i ${FreeSurferBrainMask}.nii.gz -r ${WD}/${T1wImageFile}.${FinalfMRIResolution} --premat=$FSLDIR/etc/flirtsch/ident.mat -o ${WD}/${FreeSurferBrainMaskFile}.${FinalfMRIResolution}.nii.gz
anat_brain_mask_to_func_res = pe.Node(interface=fsl.ApplyWarp(),
name=f'resample_anat_brain_mask_in_standard_{pipe_num}')
anat_brain_mask_to_func_res.inputs.interp = 'nn'
anat_brain_mask_to_func_res.inputs.premat = cfg.registration_workflows[
'anatomical_registration']['registration']['FSL-FNIRT']['identity_matrix']
node, out = strat_pool.get_data('space-template_desc-T1w_mask')
wf.connect(node, out, anat_brain_mask_to_func_res, 'in_file')
wf.connect(anat_brain_to_func_res, 'out_file',
anat_brain_mask_to_func_res, 'ref_file')
# Resample func mask in template space back to native space
func_mask_template_to_native = pe.Node(interface=afni.Resample(),
name=f'resample_func_mask_to_native_{pipe_num}')
func_mask_template_to_native.inputs.resample_mode = 'NN'
func_mask_template_to_native.inputs.outputtype = 'NIFTI_GZ'
wf.connect(anat_brain_mask_to_func_res, 'out_file',
func_mask_template_to_native, 'in_file')
node, out = strat_pool.get_data(["desc-preproc_bold", "bold"])
wf.connect(node, out, func_mask_template_to_native, 'master')
outputs = {
'space-template_res-bold_desc-brain_T1w': (anat_brain_to_func_res, 'out_file'),
'space-template_desc-bold_mask': (anat_brain_mask_to_func_res, 'out_file'),
"space-bold_desc-brain_mask": (func_mask_template_to_native, 'out_file')
}
return (wf, outputs)
def bold_mask_ccs(wf, cfg, strat_pool, pipe_num, opt=None):
'''Generate the BOLD mask by basing it off of the anatomical brain.
Adapted from the BOLD mask method from the CCS pipeline.
https://github.com/TingsterX/CCS/blob/master/ccs_01_funcpreproc.sh#L89-L110
Node Block:
{"name": "bold_mask_ccs",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": ["func_masking", "using"],
"option_val": "CCS_Anatomical_Refined",
"inputs": [["desc-motion_bold", "desc-preproc_bold", "bold"],
"desc-brain_T1w",
["desc-preproc_T1w", "desc-reorient_T1w", "T1w"]],
"outputs": ["space-bold_desc-brain_mask",
"desc-ROIbrain_bold"]}
'''
# Run 3dAutomask to generate func initial mask
func_tmp_brain_mask = pe.Node(interface=preprocess.Automask(),
name=f'func_tmp_brain_mask_AFNI_{pipe_num}')
func_tmp_brain_mask.inputs.dilate = 1
func_tmp_brain_mask.inputs.outputtype = 'NIFTI_GZ'
node, out = strat_pool.get_data(["desc-motion_bold",
"desc-preproc_bold",
"bold"])
wf.connect(node, out, func_tmp_brain_mask, 'in_file')
# Extract 8th volume as func ROI
func_roi = pe.Node(interface=fsl.ExtractROI(),
name=f'extract_func_roi_{pipe_num}')
func_roi.inputs.t_min = 7
func_roi.inputs.t_size = 1
node, out = strat_pool.get_data(["desc-motion_bold",
"desc-preproc_bold",
"bold"])
wf.connect(node, out, func_roi, 'in_file')
# Apply func initial mask on func ROI volume
func_tmp_brain = pe.Node(interface=fsl.maths.ApplyMask(),
name=f'get_func_tmp_brain_{pipe_num}')
wf.connect(func_roi, 'roi_file',
func_tmp_brain, 'in_file')
wf.connect(func_tmp_brain_mask, 'out_file',
func_tmp_brain, 'mask_file')
# Register func tmp brain to anat brain to get func2anat matrix
reg_func_to_anat = pe.Node(interface=fsl.FLIRT(),
name=f'func_to_anat_linear_reg_{pipe_num}')
reg_func_to_anat.inputs.interp = 'trilinear'
reg_func_to_anat.inputs.cost = 'corratio'
reg_func_to_anat.inputs.dof = 6
wf.connect(func_tmp_brain, 'out_file',
reg_func_to_anat, 'in_file')
node, out = strat_pool.get_data("desc-brain_T1w")
wf.connect(node, out, reg_func_to_anat, 'reference')
# Inverse func2anat matrix
inv_func_to_anat_affine = pe.Node(interface=fsl.ConvertXFM(),
name=f'inv_func2anat_affine_{pipe_num}')
inv_func_to_anat_affine.inputs.invert_xfm = True
wf.connect(reg_func_to_anat, 'out_matrix_file',
inv_func_to_anat_affine, 'in_file')
# Transform anat brain to func space
reg_anat_brain_to_func = pe.Node(interface=fsl.FLIRT(),
name=f'reg_anat_brain_to_func_{pipe_num}')
reg_anat_brain_to_func.inputs.apply_xfm = True
reg_anat_brain_to_func.inputs.interp = 'trilinear'
node, out = strat_pool.get_data("desc-brain_T1w")
wf.connect(node, out, reg_anat_brain_to_func, 'in_file')
wf.connect(func_roi, 'roi_file',
reg_anat_brain_to_func, 'reference')
wf.connect(inv_func_to_anat_affine, 'out_file',
reg_anat_brain_to_func, 'in_matrix_file')
# Binarize and dilate anat brain in func space
bin_anat_brain_in_func = pe.Node(interface=fsl.ImageMaths(),
name=f'bin_anat_brain_in_func_{pipe_num}')
bin_anat_brain_in_func.inputs.op_string = '-bin -dilM'
wf.connect(reg_anat_brain_to_func, 'out_file',
bin_anat_brain_in_func, 'in_file')
# Binarize detectable func signals
bin_func = pe.Node(interface=fsl.ImageMaths(),
name=f'bin_func_{pipe_num}')
bin_func.inputs.op_string = '-Tstd -bin'
node, out = strat_pool.get_data(["desc-motion_bold",
"desc-preproc_bold",
"bold"])
wf.connect(node, out, bin_func, 'in_file')
# Take intersection of masks
merge_func_mask = pe.Node(util.Merge(2),
name=f'merge_func_mask_{pipe_num}')
wf.connect(func_tmp_brain_mask, 'out_file',
merge_func_mask, 'in1')
wf.connect(bin_anat_brain_in_func, 'out_file',
merge_func_mask, 'in2')
intersect_mask = pe.Node(interface=fsl.MultiImageMaths(),
name=f'intersect_mask_{pipe_num}')
intersect_mask.inputs.op_string = '-mul %s -mul %s'
intersect_mask.inputs.output_datatype = 'char'
wf.connect(bin_func, 'out_file',
intersect_mask, 'in_file')
wf.connect(merge_func_mask, 'out',
intersect_mask, 'operand_files')
# this is the func input for coreg in ccs
# TODO evaluate if it's necessary to use this brain
example_func_brain = pe.Node(interface=fsl.maths.ApplyMask(),
name=f'get_example_func_brain_{pipe_num}')
wf.connect(func_roi, 'roi_file',
example_func_brain, 'in_file')
wf.connect(intersect_mask, 'out_file',
example_func_brain, 'mask_file')
outputs = {
'space-bold_desc-brain_mask': (intersect_mask, 'out_file'),
'desc-ROIbrain_bold': (example_func_brain, 'out_file')
}
return (wf, outputs)
def bold_masking(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "bold_masking",
"config": None,
"switch": [["functional_preproc", "run"],
["functional_preproc", "func_masking", "apply_func_mask_in_native_space"]],
"option_key": "None",
"option_val": "None",
"inputs": [(["desc-preproc_bold", "bold"],
"space-bold_desc-brain_mask")],
"outputs": {
"desc-preproc_bold": {
"Description": "The skull-stripped BOLD time-series.",
"SkullStripped": True},
"desc-brain_bold": {
"Description": "The skull-stripped BOLD time-series.",
"SkullStripped": True}}
}
'''
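    # Multiply the BOLD time-series (a) by the brain mask (b) with AFNI 3dcalc
    # to produce the skull-stripped functional image.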
func_edge_detect = pe.Node(interface=afni_utils.Calc(),
name=f'func_extract_brain_{pipe_num}')
func_edge_detect.inputs.expr = 'a*b'
func_edge_detect.inputs.outputtype = 'NIFTI_GZ'
node, out = strat_pool.get_data(["desc-preproc_bold",
"bold"])
wf.connect(node, out, func_edge_detect, 'in_file_a')
node, out = strat_pool.get_data("space-bold_desc-brain_mask")
wf.connect(node, out, func_edge_detect, 'in_file_b')
outputs = {
'desc-preproc_bold': (func_edge_detect, 'out_file'),
'desc-brain_bold': (func_edge_detect, 'out_file')
}
return (wf, outputs)
def func_mean(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "func_mean",
"config": "None",
"switch": [["functional_preproc", "run"],
["functional_preproc", "generate_func_mean", "run"]],
"option_key": "None",
"option_val": "None",
"inputs": [["desc-preproc_bold", "bold"]],
"outputs": ["desc-mean_bold"]
}
'''
func_mean = pe.Node(interface=afni_utils.TStat(),
name=f'func_mean_{pipe_num}')
func_mean.inputs.options = '-mean'
func_mean.inputs.outputtype = 'NIFTI_GZ'
node, out = strat_pool.get_data(["desc-preproc_bold", "bold"])
wf.connect(node, out, func_mean, 'in_file')
outputs = {
'desc-mean_bold': (func_mean, 'out_file')
}
return (wf, outputs)
def func_normalize(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "func_normalize",
"config": "None",
"switch": [["functional_preproc", "run"],
["functional_preproc", "normalize_func", "run"]],
"option_key": "None",
"option_val": "None",
"inputs": [["desc-preproc_bold", "bold"]],
"outputs": ["desc-preproc_bold"]}
'''
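    # mem_gb / mem_x are C-PAC resource hints; mem_x appears to be a
    # (coefficient, input-name) pair used to scale the memory estimate with the
    # size of 'in_file' (interpretation assumed, not verified here).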
func_normalize = pe.Node(interface=fsl.ImageMaths(),
name=f'func_normalize_{pipe_num}',
mem_gb=0.7,
mem_x=(4538494663498653 /
604462909807314587353088, 'in_file'))
func_normalize.inputs.op_string = '-ing 10000'
func_normalize.inputs.out_data_type = 'float'
node, out = strat_pool.get_data(["desc-preproc_bold", "bold"])
wf.connect(node, out, func_normalize, 'in_file')
outputs = {
'desc-preproc_bold': (func_normalize, 'out_file')
}
return (wf, outputs)
def func_mask_normalize(wf, cfg, strat_pool, pipe_num, opt=None):
'''
{"name": "func_mask_normalize",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": "None",
"option_val": "None",
"inputs": [(["desc-preproc_bold", "bold"],
"space-bold_desc-brain_mask")],
"outputs": ["space-bold_desc-brain_mask"]}
'''
func_mask_normalize = pe.Node(interface=fsl.ImageMaths(),
name=f'func_mask_normalize_{pipe_num}',
mem_gb=0.7,
mem_x=(4538494663498653 /
604462909807314587353088, 'in_file'))
func_mask_normalize.inputs.op_string = '-Tmin -bin'
func_mask_normalize.inputs.out_data_type = 'char'
node, out = strat_pool.get_data(["desc-preproc_bold", "bold"])
wf.connect(node, out, func_mask_normalize, 'in_file')
outputs = {
'space-bold_desc-brain_mask': (func_mask_normalize, 'out_file')
}
return (wf, outputs)
|
scripts/ipa_to_sql.py | btrungchi/English-to-IPA | 195 | 12742188 | from eng_to_ipa import transcribe
import sqlite3
import re
from os.path import join, abspath, dirname
conn = sqlite3.connect(join(abspath(dirname(__file__)),
"../eng_to_ipa/resources/CMU_dict.db"))
c = conn.cursor()
def create_dictionary_table():
try:
c.execute("""CREATE TABLE eng_ipa
(id INTEGER PRIMARY KEY,
word text NOT NULL,
phonemes text NOT NULL,
ipa text NOT NULL
)""")
conn.commit()
except sqlite3.OperationalError:
c.execute("DROP TABLE eng_ipa;")
conn.commit()
create_dictionary_table()
def insert_dictionary_values():
"""takes the prepared data and places it into the database"""
dictionary_data = []
    with open(join(abspath(dirname(__file__)), '..', 'eng_to_ipa', 'resources',
                   'CMU_source_files', 'cmudict-0.7b.txt'),
              encoding="UTF-8") as source_file:
for line in source_file.readlines():
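            # Each CMU dict line holds the word followed by its phoneme string;
            # the regex strips the "(1)"-style suffixes that mark alternate
            # pronunciations.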
word = re.sub(r"\(\d\)", "", line.split(" ")[0]).lower()
phonemes = line.split(" ")[1].replace("\n", "").lower()
ipa = transcribe.cmu_to_ipa([[phonemes]], stress_marking="both")[0][0]
dictionary_data.append((str(word), str(phonemes), str(ipa)))
c.executemany("INSERT INTO eng_ipa(word, phonemes, ipa) VALUES (?, ?, ?)", dictionary_data)
conn.commit()
if __name__ == "__main__":
# create_dictionary_table()
# insert_dictionary_values()
# test
c.execute("SELECT * FROM eng_ipa WHERE "
"REPLACE(REPLACE(ipa, 'ˌ', ''), 'ˈ', '') "
"LIKE \"%nstr%\"")
for r in c.fetchall():
print(str(r))
|
utest/resources/robotdata/libs/sub/libsi.py | ludovicurbain/SWIFT-RIDE | 775 | 12742200 | <filename>utest/resources/robotdata/libs/sub/libsi.py
def libsi_keyword():
print('libsi keyword')
|
babyai/rl/utils/supervised_losses.py | m-smith/babyai | 411 | 12742213 | import torch
import torch.nn.functional as F
import numpy
from babyai.rl.utils import DictList
# dictionary that defines what head is required for each extra info used for auxiliary supervision
required_heads = {'seen_state': 'binary',
'see_door': 'binary',
'see_obj': 'binary',
'obj_in_instr': 'binary',
                  'in_front_of_what': 'multiclass9', # multi-class classifier with 9 possible classes
                  'visit_proportion': 'continuous01', # continuous regressor with outputs in [0, 1]
'bot_action': 'binary'
}
class ExtraInfoCollector:
'''
This class, used in rl.algos.base, allows connecting the extra information from the environment, and the
corresponding predictions using the specific heads in the model. It transforms them so that they are easy to use
to evaluate losses
'''
def __init__(self, aux_info, shape, device):
self.aux_info = aux_info
self.shape = shape
self.device = device
self.collected_info = dict()
self.extra_predictions = dict()
for info in self.aux_info:
self.collected_info[info] = torch.zeros(*shape, device=self.device)
if required_heads[info] == 'binary' or required_heads[info].startswith('continuous'):
# we predict one number only
self.extra_predictions[info] = torch.zeros(*shape, 1, device=self.device)
elif required_heads[info].startswith('multiclass'):
# means that this is a multi-class classification and we need to predict the whole proba distr
n_classes = int(required_heads[info].replace('multiclass', ''))
self.extra_predictions[info] = torch.zeros(*shape, n_classes, device=self.device)
else:
raise ValueError("{} not supported".format(required_heads[info]))
def process(self, env_info):
# env_info is now a tuple of dicts
env_info = [{k: v for k, v in dic.items() if k in self.aux_info} for dic in env_info]
env_info = {k: [env_info[_][k] for _ in range(len(env_info))] for k in env_info[0].keys()}
# env_info is now a dict of lists
return env_info
def fill_dictionaries(self, index, env_info, extra_predictions):
for info in self.aux_info:
dtype = torch.long if required_heads[info].startswith('multiclass') else torch.float
self.collected_info[info][index] = torch.tensor(env_info[info], dtype=dtype, device=self.device)
self.extra_predictions[info][index] = extra_predictions[info]
def end_collection(self, exps):
collected_info = dict()
extra_predictions = dict()
for info in self.aux_info:
# T x P -> P x T -> P * T
collected_info[info] = self.collected_info[info].transpose(0, 1).reshape(-1)
if required_heads[info] == 'binary' or required_heads[info].startswith('continuous'):
# T x P x 1 -> P x T x 1 -> P * T
extra_predictions[info] = self.extra_predictions[info].transpose(0, 1).reshape(-1)
            elif required_heads[info].startswith('multiclass'):
                # T x P x k -> P x T x k -> (P * T) x k
                # number of classes, parsed from e.g. 'multiclass9'
                k = int(required_heads[info].replace('multiclass', ''))
                extra_predictions[info] = self.extra_predictions[info].transpose(0, 1).reshape(-1, k)
# convert the dicts to DictLists, and add them to the exps DictList.
exps.collected_info = DictList(collected_info)
exps.extra_predictions = DictList(extra_predictions)
return exps
class SupervisedLossUpdater:
'''
This class, used by PPO, allows the evaluation of the supervised loss when using extra information from the
environment. It also handles logging accuracies/L2 distances/etc...
'''
def __init__(self, aux_info, supervised_loss_coef, recurrence, device):
self.aux_info = aux_info
self.supervised_loss_coef = supervised_loss_coef
self.recurrence = recurrence
self.device = device
self.log_supervised_losses = []
self.log_supervised_accuracies = []
self.log_supervised_L2_losses = []
self.log_supervised_prevalences = []
self.batch_supervised_loss = 0
self.batch_supervised_accuracy = 0
self.batch_supervised_L2_loss = 0
self.batch_supervised_prevalence = 0
def init_epoch(self):
self.log_supervised_losses = []
self.log_supervised_accuracies = []
self.log_supervised_L2_losses = []
self.log_supervised_prevalences = []
def init_batch(self):
self.batch_supervised_loss = 0
self.batch_supervised_accuracy = 0
self.batch_supervised_L2_loss = 0
self.batch_supervised_prevalence = 0
def eval_subbatch(self, extra_predictions, sb):
supervised_loss = torch.tensor(0., device=self.device)
supervised_accuracy = torch.tensor(0., device=self.device)
supervised_L2_loss = torch.tensor(0., device=self.device)
supervised_prevalence = torch.tensor(0., device=self.device)
binary_classification_tasks = 0
classification_tasks = 0
regression_tasks = 0
for pos, info in enumerate(self.aux_info):
coef = self.supervised_loss_coef[pos]
pred = extra_predictions[info]
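            # dict.__getitem__ is called directly here, presumably to bypass
            # DictList's own __getitem__ (which indexes batches) and fetch the
            # stored tensor for this info key.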
target = dict.__getitem__(sb.collected_info, info)
if required_heads[info] == 'binary':
binary_classification_tasks += 1
classification_tasks += 1
supervised_loss += coef * F.binary_cross_entropy_with_logits(pred.reshape(-1), target)
supervised_accuracy += ((pred.reshape(-1) > 0).float() == target).float().mean()
supervised_prevalence += target.mean()
elif required_heads[info].startswith('continuous'):
regression_tasks += 1
mse = F.mse_loss(pred.reshape(-1), target)
supervised_loss += coef * mse
supervised_L2_loss += mse
elif required_heads[info].startswith('multiclass'):
classification_tasks += 1
supervised_accuracy += (pred.argmax(1).float() == target).float().mean()
supervised_loss += coef * F.cross_entropy(pred, target.long())
else:
raise ValueError("{} not supported".format(required_heads[info]))
if binary_classification_tasks > 0:
supervised_prevalence /= binary_classification_tasks
else:
supervised_prevalence = torch.tensor(-1)
if classification_tasks > 0:
supervised_accuracy /= classification_tasks
else:
supervised_accuracy = torch.tensor(-1)
if regression_tasks > 0:
supervised_L2_loss /= regression_tasks
else:
supervised_L2_loss = torch.tensor(-1)
self.batch_supervised_loss += supervised_loss.item()
self.batch_supervised_accuracy += supervised_accuracy.item()
self.batch_supervised_L2_loss += supervised_L2_loss.item()
self.batch_supervised_prevalence += supervised_prevalence.item()
return supervised_loss
def update_batch_values(self):
self.batch_supervised_loss /= self.recurrence
self.batch_supervised_accuracy /= self.recurrence
self.batch_supervised_L2_loss /= self.recurrence
self.batch_supervised_prevalence /= self.recurrence
def update_epoch_logs(self):
self.log_supervised_losses.append(self.batch_supervised_loss)
self.log_supervised_accuracies.append(self.batch_supervised_accuracy)
self.log_supervised_L2_losses.append(self.batch_supervised_L2_loss)
self.log_supervised_prevalences.append(self.batch_supervised_prevalence)
def end_training(self, logs):
logs["supervised_loss"] = numpy.mean(self.log_supervised_losses)
logs["supervised_accuracy"] = numpy.mean(self.log_supervised_accuracies)
logs["supervised_L2_loss"] = numpy.mean(self.log_supervised_L2_losses)
logs["supervised_prevalence"] = numpy.mean(self.log_supervised_prevalences)
return logs
|
tests/RunTests/PythonTests/test2011_036.py | maurizioabba/rose | 488 | 12742214 | <gh_stars>100-1000
class num():
def __init__(self, n):
self.n = n
print "init", self.n
def __enter__(self):
print "enter", self.n
return (self.n, self.n**2)
def __exit__(self, type, value, traceback):
print "exit", self.n
pass
# simple (non-targetted) with-stmt
with num(0):
print "simple"
# targetted with-stmt
with num(1) as (one, one_squared):
print one, one_squared
# multiple, targetted with-stmt
with num(2) as (two, two_squared), num(3) as three:
print two, two_squared, three
# mixed with-stmt
with num(4) as four, num(5), num(6) as six:
print four, six
|
apps/bible/utils/translations.py | goztrk/django-htk | 206 | 12742239 | <reponame>goztrk/django-htk
# HTK Imports
from htk.utils import htk_setting
from htk.utils.general import resolve_model_dynamically
def get_translation_model(translation):
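    # HTK_BIBLE_TRANSLATIONS_MAP is assumed to map translation codes
    # (e.g. 'KJV') to dotted model paths that resolve_model_dynamically imports.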
translations_map = htk_setting('HTK_BIBLE_TRANSLATIONS_MAP')
translation_model_class = translations_map.get(translation.upper())
translation_model = (
resolve_model_dynamically(translation_model_class)
if translation_model_class
else None
)
return translation_model
|
h2o-py/tests/testdir_misc/pyunit_pop.py | ahmedengu/h2o-3 | 6,098 | 12742241 | <gh_stars>1000+
from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def pyunit_pop():
pros = h2o.import_file(pyunit_utils.locate("smalldata/prostate/prostate.csv"))
nc = pros.ncol
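    # pop() should remove the named column from the frame and return it as a
    # single-column frame (verified by the assertions below).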
popped_col = pros.pop(pros.names[0])
print(pros.dim)
print(popped_col.dim)
assert popped_col.ncol==1
assert pros.ncol==nc-1
if __name__ == "__main__":
pyunit_utils.standalone_test(pyunit_pop)
else:
pyunit_pop()
|
ortools/sat/samples/nurses_sat.py | magneticflux-/or-tools | 8,273 | 12742243 | #!/usr/bin/env python3
# Copyright 2010-2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of a simple nurse scheduling problem."""
# [START program]
# [START import]
from ortools.sat.python import cp_model
# [END import]
# [START solution_printer]
class NursesPartialSolutionPrinter(cp_model.CpSolverSolutionCallback):
"""Print intermediate solutions."""
def __init__(self, shifts, num_nurses, num_days, num_shifts, sols):
cp_model.CpSolverSolutionCallback.__init__(self)
self._shifts = shifts
self._num_nurses = num_nurses
self._num_days = num_days
self._num_shifts = num_shifts
self._solutions = set(sols)
self._solution_count = 0
def on_solution_callback(self):
if self._solution_count in self._solutions:
print('Solution %i' % self._solution_count)
for d in range(self._num_days):
print('Day %i' % d)
for n in range(self._num_nurses):
is_working = False
for s in range(self._num_shifts):
if self.Value(self._shifts[(n, d, s)]):
is_working = True
print(' Nurse %i works shift %i' % (n, s))
if not is_working:
print(' Nurse {} does not work'.format(n))
print()
self._solution_count += 1
def solution_count(self):
return self._solution_count
# [END solution_printer]
def main():
# Data.
# [START data]
num_nurses = 4
num_shifts = 3
num_days = 3
all_nurses = range(num_nurses)
all_shifts = range(num_shifts)
all_days = range(num_days)
# [END data]
# Creates the model.
# [START model]
model = cp_model.CpModel()
# [END model]
# Creates shift variables.
# shifts[(n, d, s)]: nurse 'n' works shift 's' on day 'd'.
# [START variables]
shifts = {}
for n in all_nurses:
for d in all_days:
for s in all_shifts:
shifts[(n, d,
s)] = model.NewBoolVar('shift_n%id%is%i' % (n, d, s))
# [END variables]
# Each shift is assigned to exactly one nurse in the schedule period.
# [START exactly_one_nurse]
for d in all_days:
for s in all_shifts:
model.Add(sum(shifts[(n, d, s)] for n in all_nurses) == 1)
# [END exactly_one_nurse]
# Each nurse works at most one shift per day.
# [START at_most_one_shift]
for n in all_nurses:
for d in all_days:
model.Add(sum(shifts[(n, d, s)] for s in all_shifts) <= 1)
# [END at_most_one_shift]
# [START assign_nurses_evenly]
# Try to distribute the shifts evenly, so that each nurse works
# min_shifts_per_nurse shifts. If this is not possible, because the total
# number of shifts is not divisible by the number of nurses, some nurses will
# be assigned one more shift.
min_shifts_per_nurse = (num_shifts * num_days) // num_nurses
if num_shifts * num_days % num_nurses == 0:
max_shifts_per_nurse = min_shifts_per_nurse
else:
max_shifts_per_nurse = min_shifts_per_nurse + 1
for n in all_nurses:
num_shifts_worked = 0
for d in all_days:
for s in all_shifts:
num_shifts_worked += shifts[(n, d, s)]
model.Add(min_shifts_per_nurse <= num_shifts_worked)
model.Add(num_shifts_worked <= max_shifts_per_nurse)
# [END assign_nurses_evenly]
# Creates the solver and solve.
# [START solve]
solver = cp_model.CpSolver()
solver.parameters.linearization_level = 0
# Enumerate all solutions.
solver.parameters.enumerate_all_solutions = True
# Display the first five solutions.
a_few_solutions = range(5)
solution_printer = NursesPartialSolutionPrinter(shifts, num_nurses,
num_days, num_shifts,
a_few_solutions)
solver.Solve(model, solution_printer)
# [END solve]
# Statistics.
print()
print('Statistics')
print(' - conflicts : %i' % solver.NumConflicts())
print(' - branches : %i' % solver.NumBranches())
print(' - wall time : %f s' % solver.WallTime())
print(' - solutions found : %i' % solution_printer.solution_count())
if __name__ == '__main__':
main()
# [END program]
|
galileo/platform/path_helper.py | YaoPu2021/galileo | 115 | 12742282 | <gh_stars>100-1000
# Copyright 2020 JD.com, Inc. Galileo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from distutils.sysconfig import get_config_var
if __file__ == 'setup.py':
# when setup
project_root_dir = os.path.dirname(os.path.abspath(__file__))
else:
project_root_dir = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
galileo_src_dir = os.path.join(project_root_dir, 'galileo')
engine_src_dir = os.path.join(project_root_dir, 'engine')
engine_build_dir = os.path.join(project_root_dir, 'build', 'engine')
engine_client_dir = os.path.join(engine_build_dir, 'client')
engine_proto_dir = os.path.join(engine_build_dir, 'proto')
engine_python_dir = os.path.join(engine_build_dir, 'python')
libs_dir = os.path.join(project_root_dir, 'galileo', 'framework', 'libs')
pywrap_dir = os.path.join(project_root_dir, 'galileo', 'framework', 'pywrap')
def get_tf_ops():
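    # EXT_SUFFIX is the platform-specific extension-module suffix,
    # e.g. '.cpython-39-x86_64-linux-gnu.so' on CPython/Linux.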
suffix = get_config_var('EXT_SUFFIX')
return os.path.join(pywrap_dir, 'tf_ops' + suffix)
def get_cpp_targets():
return [
os.path.join(engine_client_dir, 'libclient.so'),
os.path.join(engine_proto_dir, 'libproto.so'),
]
def get_py_targets():
suffix = get_config_var('EXT_SUFFIX')
return [
os.path.join(engine_python_dir, 'py_client' + suffix),
os.path.join(engine_python_dir, 'py_service' + suffix),
os.path.join(engine_python_dir, 'py_convertor' + suffix),
]
def get_all_targets():
return get_cpp_targets() + get_py_targets()
def is_targets_exists():
return all([os.path.isfile(f) for f in get_all_targets()])
|
chapter03/3-2.py | alberthao/Python-Crash-Course-Homework | 138 | 12742285 | names = ['David','Herry','Army']
message1 = "hello " + names[0]
print(message1)
message1 = "hello " + names[1]
print(message1)
message1 = "hello " + names[2]
print(message1) |
quetz/authentication/jupyterhub.py | maresb/quetz | 108 | 12742286 | <reponame>maresb/quetz<gh_stars>100-1000
# Copyright 2020 QuantStack, Codethink Ltd
# Distributed under the terms of the Modified BSD License.
import json
from typing import Any, List, overload
from urllib.parse import quote
from quetz.config import Config, ConfigEntry, ConfigSection
from .oauth2 import OAuthAuthenticator
class JupyterConfigEntry:
config_section = "jupyterhubauthenticator"
registered_entries: List[ConfigEntry] = []
config = None
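    # Descriptor: each attribute declared as a JupyterConfigEntry reads its
    # value from the shared Config object under the "jupyterhubauthenticator"
    # section (see __get__ / __set_name__ below).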
def __init__(self, dtype, default=None, required=True):
self.dtype = dtype
self.default = default
self.required = required
# these type annotations dont work yet, but I leave them for now
# maybe someone will find a solution later
# https://github.com/python/mypy/issues/2566#issuecomment-703998877
@overload
def __get__(self, instance: None, owner: Any) -> "JupyterConfigEntry":
...
@overload
def __get__(self, instance: object, owner: Any) -> str:
...
def __get__(self, obj, objtype) -> str:
return getattr(self.config, self.config_attr_name)
def __set_name__(self, owner, name):
self.attr_name = name
self.config_attr_name = f"{self.config_section}_{name}"
entry = ConfigEntry(
name, self.dtype, default=self.default, required=self.required
)
self.registered_entries.append(entry)
@classmethod
def _make_config(cls):
section = ConfigSection(
cls.config_section,
cls.registered_entries,
required=False,
)
return [section]
@classmethod
def register(cls, config: Config):
cls.config = config
config_options = cls._make_config()
config.register(config_options)
return config.configured_section(cls.config_section)
class JupyterhubAuthenticator(OAuthAuthenticator):
"""Use Oauth2 protcol to authenticate with jupyterhub server, which acts
as identity provider.
To activate add the following section to the ``config.toml`` (see :ref:`configfile`):
.. code::
[jupyterhubauthenticator]
# client credentials, they need to be registered with
# jupyterhub by adding an external service
client_id = "quetz_client"
client_secret = "<PASSWORD>-secret"
        # token endpoint of JupyterHub, needs to be accessible from Quetz server
access_token_url = "http://JUPYTERHUB_HOST:PORT/hub/api/oauth2/token"
# authorize endpoint of JupyterHub, needs to be accessible from users' browser
authorize_url = "http://JUPYTERHUB_HOST:PORT/hub/api/oauth2/authorize"
        # API root, needs to be accessible from Quetz server
api_base_url = "http://JUPYTERHUB_HOST:PORT/hub/api/"
To configure quetz as an oauth client in JupyterHub, you will need to define
a `JupyterHub service <https://jupyterhub.readthedocs.io/en/stable/reference/services.html#externally-managed-services>`_. You can achieve it by adding the following to the
``jupyterhub_config.py`` file of your JupyterHub instance:
.. code::
c.JupyterHub.services = [
{
# service name, it will be used to setup routers
'name': 'quetz',
# quetz URL to setup redirections, only required if you use
# JupyterHub url scheme
'url': 'http://QUETZ_HOST:PORT',
# any secret >8 characters, you will also need to set
# the client_secret in the authenticator config with this
# string
'api_token': '<PASSWORD>',
# client_id in the authenticator config
'oauth_client_id': 'quetz_client',
# URL of the callback endpoint on the quetz server
'oauth_redirect_uri': 'http://QUETZ_HOST:PORT/auth/jupyterhub/authorize',
}
]
""" # noqa
provider = 'jupyterhub'
# TODO: need to figure out how to use type annotations with descriptors
# see also: https://github.com/python/mypy/pull/2266
client_id = JupyterConfigEntry(str, required=True) # type: ignore
client_secret = JupyterConfigEntry(str, required=True) # type: ignore
access_token_url = JupyterConfigEntry(str, required=True) # type: ignore
validate_token_url = "authorizations/token/{}"
authorize_url = JupyterConfigEntry(str, required=True) # type: ignore
api_base_url = JupyterConfigEntry(str, required=True) # type: ignore
client_kwargs = {
"token_endpoint_auth_method": "client_secret_post",
"token_placement": "uri",
}
async def userinfo(self, request, token):
response = await self._get_user_for_token(token)
profile = response.json()
github_profile = {
"id": profile["name"] + '_id',
"name": profile["name"],
"avatar_url": "",
"login": profile["name"],
}
return github_profile
async def _get_user_for_token(self, token):
headers = {'Authorization': 'token {}'.format(self.client_secret)}
access_token = quote(token['access_token'], safe='')
# authlib client will be place token in query params
# which are ignored by jupyterhub
# this workaround is required to implement jupyterhub API
# which puts the token as path parameter
# https://jupyterhub.readthedocs.io/en/stable/_static/rest-api/index.html#path--authorizations-token--token- # noqa
resp = await self.client.get(
f'authorizations/token/{access_token}', token=token, headers=headers
)
return resp
async def validate_token(self, token):
# access_token = json.loads(token)["access_token"]
token = json.loads(token)
resp = await self._get_user_for_token(token)
return resp.status_code == 200
def configure(self, config: Config):
self.is_enabled = JupyterConfigEntry.register(config)
super().configure(config)
|
custom_components/hacs/constrains.py | svkowalski/HAcore_QNAP | 167 | 12742340 | """HACS Startup constraints."""
# pylint: disable=bad-continuation
import os
from .const import CUSTOM_UPDATER_LOCATIONS, CUSTOM_UPDATER_WARNING
from .helpers.misc import version_left_higher_then_right
from custom_components.hacs.globals import get_hacs
MINIMUM_HA_VERSION = "0.110.0"
def check_constrains():
"""Check HACS constrains."""
if not constrain_translations():
return False
if not constrain_custom_updater():
return False
if not constrain_version():
return False
return True
def constrain_custom_updater():
"""Check if custom_updater exist."""
hacs = get_hacs()
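    # custom_updater is a legacy updater that conflicts with HACS; startup is
    # aborted if any of its known install locations is present.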
for location in CUSTOM_UPDATER_LOCATIONS:
if os.path.exists(location.format(hacs.system.config_path)):
msg = CUSTOM_UPDATER_WARNING.format(
location.format(hacs.system.config_path)
)
hacs.logger.critical(msg)
return False
return True
def constrain_version():
"""Check if the version is valid."""
hacs = get_hacs()
if not version_left_higher_then_right(hacs.system.ha_version, MINIMUM_HA_VERSION):
hacs.logger.critical(
f"You need HA version {MINIMUM_HA_VERSION} or newer to use this integration."
)
return False
return True
def constrain_translations():
"""Check if traslations exist."""
hacs = get_hacs()
if not os.path.exists(
f"{hacs.system.config_path}/custom_components/hacs/translations"
):
hacs.logger.critical("You are missing the translations directory.")
return False
return True
|
river/compose/func.py | fox-ds/river | 2,184 | 12742345 | <filename>river/compose/func.py
import typing
from river import base
__all__ = ["FuncTransformer"]
class FuncTransformer(base.Transformer):
"""Wraps a function to make it usable in a pipeline.
There is often a need to apply an arbitrary transformation to a set of features. For instance,
this could involve parsing a date and then extracting the hour from said date. If you're
processing a stream of data, then you can do this yourself by calling the necessary code at
your leisure. On the other hand, if you want to do this as part of a pipeline, then you need to
follow a simple convention.
To use a function as part of a pipeline, take as input a `dict` of features and output a `dict`.
Once you have initialized this class with your function, then you can use it like you would use
any other (unsupervised) transformer.
It is up to you if you want your function to be pure or not. By pure we refer to a function
that doesn't modify its input. However, we recommend writing pure functions because this
reduces the chances of inserting bugs into your pipeline.
Parameters
----------
func
A function that takes as input a `dict` and outputs a `dict`.
Examples
--------
>>> from pprint import pprint
>>> import datetime as dt
>>> from river import compose
>>> x = {'date': '2019-02-14'}
>>> def parse_date(x):
... date = dt.datetime.strptime(x['date'], '%Y-%m-%d')
... x['is_weekend'] = date.day in (5, 6)
... x['hour'] = date.hour
... return x
>>> t = compose.FuncTransformer(parse_date)
>>> pprint(t.transform_one(x))
{'date': '2019-02-14', 'hour': 0, 'is_weekend': False}
The above example is not pure because it modifies the input. The following example is pure
and produces the same output:
>>> def parse_date(x):
... date = dt.datetime.strptime(x['date'], '%Y-%m-%d')
... return {'is_weekend': date.day in (5, 6), 'hour': date.hour}
>>> t = compose.FuncTransformer(parse_date)
>>> pprint(t.transform_one(x))
{'hour': 0, 'is_weekend': False}
The previous example doesn't include the `date` feature because it returns a new `dict`.
    However, a common use case is to add a feature to an existing set of features. You can do
this in a pure way by unpacking the input `dict` into the output `dict`:
>>> def parse_date(x):
... date = dt.datetime.strptime(x['date'], '%Y-%m-%d')
... return {'is_weekend': date.day in (5, 6), 'hour': date.hour, **x}
>>> t = compose.FuncTransformer(parse_date)
>>> pprint(t.transform_one(x))
{'date': '2019-02-14', 'hour': 0, 'is_weekend': False}
You can add `FuncTransformer` to a pipeline just like you would with any other transformer.
>>> from river import naive_bayes
>>> pipeline = compose.FuncTransformer(parse_date) | naive_bayes.MultinomialNB()
>>> pipeline
Pipeline (
FuncTransformer (
func="parse_date"
),
MultinomialNB (
alpha=1.
)
)
If you provide a function without wrapping it, then the pipeline will do it for you:
>>> pipeline = parse_date | naive_bayes.MultinomialNB()
"""
def __init__(self, func: typing.Callable[[dict], dict]):
self.func = func
def transform_one(self, x):
return self.func(x)
def __str__(self):
return self.func.__name__
|
green/test/test_loader.py | jwaschkau/green | 686 | 12742384 | <gh_stars>100-1000
from __future__ import unicode_literals
import os
from os.path import dirname
import platform
import shutil
import sys
import tempfile
from textwrap import dedent
import unittest
try:
from unittest.mock import MagicMock, patch
except ImportError:
from mock import MagicMock, patch
from green import loader
from green.loader import GreenTestLoader, flattenTestSuite
class TestToProtoTestList(unittest.TestCase):
def test_moduleImportFailure(self):
"""
toProtoTestList() raises import errors normally
"""
suite = MagicMock()
suite.__class__.__name__ = str("ModuleImportFailure")
suite.__str__.return_value = "exception_method (other_stuff)"
suite.exception_method.side_effect = AttributeError
self.assertRaises(AttributeError, loader.toProtoTestList, (suite,))
def test_moduleImportFailureIgnored(self):
"""
toProtoTestList() does not raise errors when doing completions
"""
suite = MagicMock()
suite.__class__.__name__ = str("ModuleImportFailure")
suite.__str__.return_value = "exception_method other_stuff"
suite.exception_method.side_effect = AttributeError
self.assertEqual(loader.toProtoTestList(suite, doing_completions=True), [])
class TestToParallelTargets(unittest.TestCase):
def setUp(self):
super(TestToParallelTargets, self).setUp()
class FakeModule(object):
pass
class FakeModule2(object):
pass
self._fake_module_name = "my_test_module"
self._fake_module_name2 = "my_test_module2"
sys.modules[self._fake_module_name] = FakeModule
sys.modules[self._fake_module_name2] = FakeModule2
def tearDown(self):
del sys.modules[self._fake_module_name]
del sys.modules[self._fake_module_name2]
super(TestToParallelTargets, self).tearDown()
def test_methods_with_no_constraints(self):
"""
toParallelTargets() returns only module names.
"""
class NormalTestCase(unittest.TestCase):
def runTest(self):
pass
NormalTestCase.__module__ = self._fake_module_name
targets = loader.toParallelTargets(NormalTestCase(), [])
self.assertEqual(targets, [self._fake_module_name])
def test_methods_with_constraints(self):
"""
toParallelTargets() returns test names when constrained.
"""
class NormalTestCase(unittest.TestCase):
def runTest(self):
pass
NormalTestCase.__module__ = self._fake_module_name
full_name = "my_test_module.NormalTestCase.runTest"
targets = loader.toParallelTargets(NormalTestCase(), [full_name])
self.assertEqual(targets, [full_name])
def test_filter_out_dot(self):
"""
toParallelTargets() correctly returns modules when '.' is in target list
"""
class NormalTestCase(unittest.TestCase):
def runTest(self):
pass
class NormalTestCase2(unittest.TestCase):
def runTest(self):
pass
NormalTestCase.__module__ = self._fake_module_name
NormalTestCase2.__module__ = self._fake_module_name2
targets = loader.toParallelTargets([NormalTestCase(), NormalTestCase2()], ["."])
self.assertEqual(targets, ["my_test_module", "my_test_module2"])
def test_ignore_doctest(self):
"""
toParallelTargets() ignores"""
class TestCompletions(unittest.TestCase):
def test_completionBad(self):
"""
Bad match generates no completions
"""
self.assertEqual("", loader.getCompletions("garbage.in"))
def test_completionExact(self):
"""
Correct completions are generated for an exact match.
"""
c = set(loader.getCompletions("green").split("\n"))
self.assertIn("green", c)
self.assertIn("green.test", c)
self.assertIn("green.test.test_loader", c)
self.assertIn("green.test.test_loader.TestCompletions", c)
self.assertIn("green.test.test_loader.TestCompletions.test_completionExact", c)
def test_completionPartialShort(self):
"""
Correct completions generated for short partial match.
"""
cwd = os.getcwd()
green_parent = dirname(dirname(dirname(os.path.abspath(__file__))))
os.chdir(green_parent)
self.addCleanup(os.chdir, cwd)
c = set(loader.getCompletions("gre").split("\n"))
self.assertIn("green", c)
self.assertIn("green.test", c)
self.assertIn("green.test.test_loader", c)
self.assertIn("green.test.test_loader.TestCompletions", c)
self.assertIn(
"green.test.test_loader.TestCompletions.test_completionPartialShort", c
)
def test_completionPartial(self):
"""
Correct completions generated for partial match. 2nd target ignored.
"""
c = set(loader.getCompletions(["green.te", "green"]).split("\n"))
self.assertIn("green.test", c)
self.assertIn("green.test.test_loader", c)
self.assertIn("green.test.test_loader.TestCompletions", c)
self.assertIn(
"green.test.test_loader.TestCompletions.test_completionPartial", c
)
self.assertNotIn("green", c)
def test_completionEmpty(self):
"""
An empty target generates completions for the whole directory
"""
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
os.chdir(tmpdir)
self.addCleanup(os.chdir, cwd)
os.mkdir("the_pkg")
fh = open(os.path.join("the_pkg", "__init__.py"), "w")
fh.write("")
fh.close()
fh = open(os.path.join("the_pkg", "test_things.py"), "w")
fh.write(
dedent(
"""
import unittest
class A(unittest.TestCase):
def testOne(self):
pass
def testTwo(self):
pass
"""
)
)
fh.close()
c = set(loader.getCompletions("").split("\n"))
self.assertIn("the_pkg", c)
self.assertIn("the_pkg.test_things", c)
self.assertIn("the_pkg.test_things.A.testOne", c)
self.assertIn("the_pkg.test_things.A.testTwo", c)
def test_completionDot(self):
"""
A '.' target generates completions for the whole directory
"""
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
os.chdir(tmpdir)
self.addCleanup(os.chdir, cwd)
os.mkdir("my_pkg")
fh = open(os.path.join("my_pkg", "__init__.py"), "w")
fh.write("")
fh.close()
fh = open(os.path.join("my_pkg", "test_things.py"), "w")
fh.write(
dedent(
"""
import unittest
class A(unittest.TestCase):
def testOne(self):
pass
def testTwo(self):
pass
"""
)
)
fh.close()
c = set(loader.getCompletions(".").split("\n"))
self.assertIn("my_pkg", c)
self.assertIn("my_pkg.test_things", c)
self.assertIn("my_pkg.test_things.A.testOne", c)
self.assertIn("my_pkg.test_things.A.testTwo", c)
def test_completionIgnoresErrors(self):
"""
Errors in one module don't block the remaining completions
"""
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
os.chdir(tmpdir)
self.addCleanup(os.chdir, cwd)
os.mkdir("my_pkg2")
fh = open(os.path.join("my_pkg2", "__init__.py"), "w")
fh.write("")
fh.close()
fh = open(os.path.join("my_pkg2", "test_crash01.py"), "w")
contents = dedent(
"""
import unittest
class A(unittest.TestCase):
def testOne(self):
pass
def testTwo(self):
pass
"""
)
fh.write(contents)
fh.close()
fh = open(os.path.join("my_pkg2", "test_crash02.py"), "w")
fh.write("import moocow")
fh.close()
fh = open(os.path.join("my_pkg2", "test_crash03.py"), "w")
fh.write(contents)
fh.close()
c = set(loader.getCompletions(".").split("\n"))
self.assertIn("my_pkg2", c)
self.assertIn("my_pkg2.test_crash01", c)
self.assertIn("my_pkg2.test_crash01.A.testOne", c)
self.assertIn("my_pkg2.test_crash01.A.testTwo", c)
self.assertIn("my_pkg2.test_crash03", c)
self.assertIn("my_pkg2.test_crash03.A.testOne", c)
self.assertIn("my_pkg2.test_crash03.A.testTwo", c)
class TestIsPackage(unittest.TestCase):
def test_yes(self):
"""
A package is identified.
"""
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
fh = open(os.path.join(tmpdir, "__init__.py"), "w")
fh.write("pass\n")
fh.close()
self.assertTrue(loader.isPackage(tmpdir))
def test_no(self):
"""
A non-package is identified
"""
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
self.assertFalse(loader.isPackage(tmpdir))
class TestDottedModule(unittest.TestCase):
def test_bad_path(self):
"""
A bad path causes an exception
"""
self.assertRaises(
ValueError, loader.findDottedModuleAndParentDir, tempfile.tempdir
)
def test_good_path(self):
"""
A good path gets (dotted_module, parent) properly returned
"""
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, "a", "b", "c", "d"))
package_init = os.path.join(tmpdir, "a", "b", "c", "__init__.py")
subpkg_init = os.path.join(tmpdir, "a", "b", "c", "d", "__init__.py")
module_name = "stuff.py"
module = os.path.join(tmpdir, "a", "b", "c", "d", module_name)
for filename in [package_init, subpkg_init, module]:
fh = open(filename, "w")
fh.write("pass\n")
fh.close()
self.assertEqual(
loader.findDottedModuleAndParentDir(module),
("c.d.stuff", os.path.join(tmpdir, "a", "b")),
)
class TestLoadTestsFromTestCase(unittest.TestCase):
def setUp(self):
self.loader = GreenTestLoader()
def test_runTest(self):
"""
When a testcase has no matching method names, but does have a runTest,
use that instead.
"""
class MyTestCase(unittest.TestCase):
def helper1(self):
pass
def helper2(self):
pass
def runTest(self):
pass
suite = self.loader.loadTestsFromTestCase(MyTestCase)
self.assertEqual(suite.countTestCases(), 1)
self.assertEqual(suite._tests[0]._testMethodName, "runTest")
def test_normal(self):
"""
Normal test methods get loaded
"""
class Normal(unittest.TestCase):
def test_method1(self):
pass
def test_method2(self):
pass
suite = self.loader.loadTestsFromTestCase(Normal)
self.assertEqual(suite.countTestCases(), 2)
self.assertEqual(
set([x._testMethodName for x in suite._tests]),
set(["test_method1", "test_method2"]),
)
def test_isTestCaseDisabled(self):
"""
TestCases disabled by nose generators don't get loaded
"""
class HasDisabled(unittest.TestCase):
def test_method(self):
pass
test_method.__test__ = False
suite = self.loader.loadTestsFromTestCase(HasDisabled)
self.assertEqual(suite.countTestCases(), 0)
class TestLoadFromModuleFilename(unittest.TestCase):
def setUp(self):
self.loader = GreenTestLoader()
def test_skipped_module(self):
"""
A module that wants to be skipped gets skipped
"""
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
filename = os.path.join(tmpdir, "skipped_module.py")
fh = open(filename, "w")
fh.write(
dedent(
"""
import unittest
raise unittest.case.SkipTest
class NotReached(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
"""
)
)
fh.close()
suite = self.loader.loadFromModuleFilename(filename)
self.assertEqual(suite.countTestCases(), 1)
self.assertRaises(
unittest.case.SkipTest,
getattr(suite._tests[0], suite._tests[0]._testMethodName),
)
class TestDiscover(unittest.TestCase):
def setUp(self):
self.loader = GreenTestLoader()
@patch("green.loader.os.path.isdir")
@patch("green.loader.debug")
@patch("green.loader.os.listdir")
def test_oserror(self, mock_listdir, mock_debug, mock_isdir):
"""
        discover() prints a debug message and moves on when encountering an OSError
"""
mock_isdir.return_value = True
mock_listdir.side_effect = OSError()
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
self.loader.discover(os.path.join(tmpdir, "garbage_in"))
self.assertEqual(len(mock_debug.mock_calls), 1)
def test_bad_input(self):
"""
discover() raises ImportError when passed a non-directory
"""
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
self.assertRaises(
ImportError, self.loader.discover, os.path.join(tmpdir, "garbage_in")
)
filename = os.path.join(tmpdir, "some_file.py")
fh = open(filename, "w")
fh.write("pass\n")
fh.close()
self.assertRaises(ImportError, self.loader.discover, filename)
def test_bad_pkg_name(self):
"""
If the directory is an invalid package name, don't bother looking in
it.
"""
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
startdir = os.getcwd()
os.chdir(tmpdir)
self.addCleanup(os.chdir, startdir)
bad_pkg_name = "1badname"
os.mkdir(bad_pkg_name)
tmp_subdir = os.path.join(tmpdir, bad_pkg_name)
fh = open(os.path.join(tmp_subdir, "__init__.py"), "w")
fh.write("\n")
fh.close()
named_module = os.path.join(os.path.basename(tmp_subdir), "named_module.py")
fh = open(named_module, "w")
fh.write(
dedent(
"""
import unittest
class A(unittest.TestCase):
def testPass(self):
pass
"""
)
)
fh.close()
self.assertEqual(self.loader.discover(tmpdir), None)
def test_symlink(self):
"""
If the directory is a symlink, it should be skipped.
"""
if platform.system() == "Windows": # pragma: no cover
self.skipTest("This test is for posix-specific behavior")
tmpdir = tempfile.mkdtemp()
tmpdir2 = tempfile.mkdtemp()
os.symlink(tmpdir, os.path.join(tmpdir2, "link"))
self.addCleanup(shutil.rmtree, tmpdir)
startdir = os.getcwd()
os.chdir(tmpdir)
self.addCleanup(os.chdir, startdir)
pkg_name = "realpkg"
os.mkdir(pkg_name)
tmp_subdir = os.path.join(tmpdir, pkg_name)
fh = open(os.path.join(tmp_subdir, "__init__.py"), "w")
fh.write("\n")
fh.close()
named_module = os.path.join(os.path.basename(tmp_subdir), "test_module.py")
fh = open(named_module, "w")
fh.write(
dedent(
"""
import unittest
class A(unittest.TestCase):
def testPass(self):
pass
"""
)
)
fh.close()
self.assertEqual(self.loader.discover(tmpdir2), None)
class TestLoadTargets(unittest.TestCase):
# Setup
@classmethod
def setUpClass(cls):
cls.startdir = os.getcwd()
cls.container_dir = tempfile.mkdtemp()
@classmethod
def tearDownClass(cls):
if os.getcwd() != cls.startdir:
os.chdir(cls.startdir)
cls.startdir = None
shutil.rmtree(cls.container_dir)
def setUp(self):
os.chdir(self.container_dir)
self.tmpdir = tempfile.mkdtemp(dir=self.container_dir)
self.loader = GreenTestLoader()
def tearDown(self):
os.chdir(self.container_dir)
shutil.rmtree(self.tmpdir)
# Tests
def test_returnIsLoadable(self):
"""
Results returned by toParallelTargets should be loadable by
loadTargets(), even if they aren't directly loadable through a package
relative to the current working directory.
"""
tests_dir = tempfile.mkdtemp(dir=self.tmpdir)
# No __init__.py in the directory!
fh = open(os.path.join(tests_dir, "test_not_in_pkg.py"), "w")
fh.write(
dedent(
"""
import unittest
class A(unittest.TestCase):
def testPass(self):
pass
"""
)
)
fh.close()
# Discover stuff
suite = self.loader.loadTargets(".")
# This should resolve it to the module that's not importable from here
test = loader.toParallelTargets(suite, [])[0]
self.loader.loadTargets(test)
def test_emptyDirAbsolute(self):
"""
Absolute path to empty directory returns None
"""
tests = self.loader.loadTargets(self.tmpdir)
self.assertTrue(tests is None)
def test_emptyDirRelative(self):
"""
Relative path to empty directory returns None
"""
os.chdir(self.tmpdir)
os.chdir("..")
tests = self.loader.loadTargets(os.path.dirname(self.tmpdir))
self.assertEqual(tests, None)
def test_emptyDirDot(self):
"""
'.' while in an empty directory returns None
"""
os.chdir(self.tmpdir)
tests = self.loader.loadTargets(".")
self.assertTrue(tests is None)
def test_relativeDotDir(self):
"""
Dotted relative path to empty directory returns None
"""
os.chdir(self.tmpdir)
os.chdir("..")
target = os.path.join(".", os.path.basename(self.tmpdir))
tests = self.loader.loadTargets(target)
self.assertTrue(tests is None)
def test_BigDirWithAbsoluteImports(self):
"""
Big dir discovers tests and doesn't crash on absolute import
"""
sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
pkg_name = os.path.basename(sub_tmpdir)
# Child setup
# pkg/__init__.py
fh = open(os.path.join(sub_tmpdir, "__init__.py"), "w")
fh.write("\n")
fh.close()
# pkg/target_module.py
fh = open(os.path.join(sub_tmpdir, "target_module.py"), "w")
fh.write("a = 1\n")
fh.close()
# pkg/test/__init__.py
os.mkdir(os.path.join(sub_tmpdir, "test"))
fh = open(os.path.join(sub_tmpdir, "test", "__init__.py"), "w")
fh.write("\n")
fh.close()
# pkg/test/test_target_module.py
fh = open(os.path.join(sub_tmpdir, "test", "test_target_module.py"), "w")
fh.write(
dedent(
"""
import unittest
import {}.target_module
class A(unittest.TestCase):
def testPass(self):
pass
""".format(
pkg_name
)
)
)
fh.close()
# Load the tests
os.chdir(self.tmpdir)
test_suite = self.loader.loadTargets(pkg_name)
self.assertEqual(test_suite.countTestCases(), 1)
# Dotted name should start with the package!
self.assertEqual(
pkg_name + ".test.test_target_module.A.testPass",
loader.toProtoTestList(test_suite)[0].dotted_name,
)
def test_DirWithInit(self):
"""
Dir empty other than blank __init__.py returns None
"""
# Parent directory setup
os.chdir(self.tmpdir)
os.chdir("..")
# Child setup
target = os.path.join(self.tmpdir, "__init__.py")
fh = open(target, "w")
fh.write("\n")
fh.close()
fh = open(os.path.join(self.tmpdir, "test_module_with_init.py"), "w")
fh.write(
dedent(
"""
import unittest
class A(unittest.TestCase):
def testPass(self):
pass
"""
)
)
fh.close()
# Load the tests
module_name = os.path.basename(self.tmpdir)
tests = self.loader.loadTargets(module_name)
self.assertEqual(tests.countTestCases(), 1)
def test_DottedName(self):
"""
Importing a module via dotted name loads the tests.
"""
# Parent directory setup
os.chdir(self.tmpdir)
sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
basename = os.path.basename(sub_tmpdir)
# Child setup
fh = open(os.path.join(basename, "__init__.py"), "w")
fh.write("\n")
fh.close()
fh = open(os.path.join(basename, "test_module_dotted_name.py"), "w")
fh.write(
dedent(
"""
import unittest
class A(unittest.TestCase):
def testPass(self):
pass
"""
)
)
fh.close()
# Load the tests
module_name = basename + ".test_module_dotted_name"
tests = self.loader.loadTargets(module_name)
self.assertEqual(tests.countTestCases(), 1)
def test_DottedNamePackageFromPath(self):
"""
Importing a package from path loads the tests.
"""
# Child setup
tmp_subdir = tempfile.mkdtemp(dir=self.tmpdir)
fh = open(os.path.join(tmp_subdir, "__init__.py"), "w")
fh.write("\n")
fh.close()
fh = open(os.path.join(tmp_subdir, "test_module.py"), "w")
fh.write(
dedent(
"""
import unittest
class A(unittest.TestCase):
def testPass(self):
pass
"""
)
)
fh.close()
# Go somewhere else, but setup the path
os.chdir(self.startdir)
sys.path.insert(0, self.tmpdir)
# Load the tests
tests = self.loader.loadTargets(os.path.basename(tmp_subdir))
sys.path.remove(self.tmpdir)
self.assertTrue(tests.countTestCases(), 1)
def test_ModuleByName(self):
"""
A module in a package can be loaded by filename.
"""
os.chdir(self.tmpdir)
tmp_subdir = tempfile.mkdtemp(dir=self.tmpdir)
fh = open(os.path.join(tmp_subdir, "__init__.py"), "w")
fh.write("\n")
fh.close()
named_module = os.path.join(os.path.basename(tmp_subdir), "named_module.py")
fh = open(named_module, "w")
fh.write(
dedent(
"""
import unittest
class A(unittest.TestCase):
def testPass(self):
pass
"""
)
)
fh.close()
# Load the tests
tests = self.loader.loadTargets(named_module)
try:
self.assertEqual(tests.countTestCases(), 1)
except:
raise
finally:
shutil.rmtree(tmp_subdir)
def test_MalformedModuleByName(self):
"""
        Importing a malformed module by name creates a test that raises
        ImportError.
"""
fh = open(os.path.join(self.tmpdir, "__init__.py"), "w")
fh.write("\n")
fh.close()
malformed_module = os.path.join(
os.path.basename(self.tmpdir), "malformed_module.py"
)
fh = open(malformed_module, "w")
fh.write("This is a malformed module.")
fh.close()
# Load the tests
tests = self.loader.loadTargets(malformed_module)
self.assertEqual(tests.countTestCases(), 1)
test = tests._tests[0]
test_method = getattr(test, test._testMethodName)
self.assertRaises(ImportError, test_method)
def test_partiallyGoodName(self):
"""
Don't crash loading module.object with existing module but not object.
"""
# Parent directory setup
os.chdir(self.tmpdir)
sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
basename = os.path.basename(sub_tmpdir)
# Child setup
fh = open(os.path.join(basename, "__init__.py"), "w")
fh.write("\n")
fh.close()
fh = open(os.path.join(basename, "existing_module.py"), "w")
fh.write(
dedent(
"""
import unittest
class A(unittest.TestCase):
def testPass(self):
pass
"""
)
)
fh.close()
# Load the tests
module_name = basename + ".existing_module.nonexistant_object"
tests = self.loader.loadTargets(module_name)
self.assertEqual(tests, None)
def test_multiple_targets(self):
"""
Specifying multiple targets causes them all to be tested.
"""
sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
# pkg/__init__.py
fh = open(os.path.join(sub_tmpdir, "__init__.py"), "w")
fh.write("\n")
fh.close()
# pkg/test/test_target1.py
fh = open(os.path.join(sub_tmpdir, "test_target1.py"), "w")
fh.write(
dedent(
"""
import unittest
class A(unittest.TestCase):
def testPasses(self):
pass
"""
)
)
fh.close()
# pkg/test/test_target2.py
fh = open(os.path.join(sub_tmpdir, "test_target2.py"), "w")
fh.write(
dedent(
"""
import unittest
class A(unittest.TestCase):
def testPasses(self):
pass
"""
)
)
fh.close()
# Load the tests
os.chdir(self.tmpdir)
pkg = os.path.basename(sub_tmpdir)
tests = self.loader.loadTargets(
[pkg + "." + "test_target1", pkg + "." + "test_target2"]
)
self.assertEqual(tests.countTestCases(), 2)
def test_duplicate_targets(self):
"""
Specifying duplicate targets does not cause duplicate loading.
"""
sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
fh = open(os.path.join(sub_tmpdir, "__init__.py"), "w")
fh.write("\n")
fh.close()
fh = open(os.path.join(sub_tmpdir, "test_dupe_target.py"), "w")
fh.write(
dedent(
"""
import unittest
class A(unittest.TestCase):
def testPasses(self):
pass
"""
)
)
fh.close()
os.chdir(self.tmpdir)
pkg = os.path.basename(sub_tmpdir)
tests = self.loader.loadTargets(
[
pkg + "." + "test_dupe_target",
pkg + "." + "test_dupe_target",
pkg + "." + "test_dupe_target",
]
)
self.assertEqual(tests.countTestCases(), 1)
def test_explicit_filename_error(self):
"""
Loading a module by name with a syntax error produces a failure, not a
silent absence of its tests.
"""
sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
fh = open(os.path.join(sub_tmpdir, "mod_with_import_error.py"), "w")
fh.write("this is a syntax error")
fh.close()
os.chdir(sub_tmpdir)
tests = self.loader.loadTargets("mod_with_import_error.py")
self.assertEqual(tests.countTestCases(), 1)
def test_file_pattern(self):
"""
Specifying a file pattern causes only matching files to be loaded
"""
sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
# pkg/__init__.py
fh = open(os.path.join(sub_tmpdir, "__init__.py"), "w")
fh.write("\n")
fh.close()
# pkg/test/target1_tests.py
fh = open(os.path.join(sub_tmpdir, "target1_tests.py"), "w")
fh.write(
dedent(
"""
import unittest
class A(unittest.TestCase):
def testPasses(self):
pass
"""
)
)
fh.close()
# pkg/test/target2_tests.py
fh = open(os.path.join(sub_tmpdir, "target2_tests.py"), "w")
fh.write(
dedent(
"""
import unittest
class A(unittest.TestCase):
def testPasses(self):
pass
"""
)
)
fh.close()
# pkg/test/test_target999.py: NOT a match.
fh = open(os.path.join(sub_tmpdir, "test_target999.py"), "w")
fh.write(
dedent(
"""
import unittest
class A(unittest.TestCase):
def testPasses(self):
pass
"""
)
)
fh.close()
# Load the tests
os.chdir(self.tmpdir)
pkg = os.path.basename(sub_tmpdir)
tests = self.loader.loadTargets(pkg, file_pattern="*_tests.py")
self.assertEqual(tests.countTestCases(), 2)
class TestFlattenTestSuite(unittest.TestCase):
# Setup
@classmethod
def setUpClass(cls):
cls.startdir = os.getcwd()
cls.container_dir = tempfile.mkdtemp()
@classmethod
def tearDownClass(cls):
if os.getcwd() != cls.startdir:
os.chdir(cls.startdir)
cls.startdir = None
shutil.rmtree(cls.container_dir)
def setUp(self):
os.chdir(self.container_dir)
self.tmpdir = tempfile.mkdtemp(dir=self.container_dir)
self.loader = GreenTestLoader()
def tearDown(self):
os.chdir(self.container_dir)
shutil.rmtree(self.tmpdir)
@patch("green.loader.GreenTestLoader.suiteClass")
@patch("green.loader.DocTestSuite")
def test_docTests(self, mock_doc_test_suite, mock_suite_class):
"""
flattenTestSuite injects the test module name into the doctest's .__module__
"""
mock_test = MagicMock()
mock_iter = MagicMock(return_value=iter([mock_test]))
mock_suite = MagicMock()
mock_suite.__iter__ = mock_iter
mock_doc_test_suite.return_value = mock_suite
module = MagicMock()
test_module_name = "test.module"
module.__name__ = test_module_name
module.doctest_modules = ["real.module"]
flattenTestSuite((), module)
self.assertEqual(mock_test.__module__, test_module_name)
|
source/utils/bp5dbg/adios2/bp5dbg/utils.py | gregorweiss/ADIOS2 | 190 | 12742415 | <reponame>gregorweiss/ADIOS2
LocalValueDim = 18446744073709551613
dataTypes = {
-1: 'unknown',
0: 'byte',
1: 'short',
2: 'integer',
4: 'long',
50: 'unsigned_byte',
51: 'unsigned_short',
52: 'unsigned_integer',
54: 'unsigned_long',
5: 'real',
6: 'double',
7: 'long_double',
9: 'string',
10: 'complex',
11: 'double_complex',
12: 'string_array',
55: 'char'
}
dataTypeSize = {
-1: 0,
0: 1,
1: 2,
2: 4,
4: 8,
50: 1,
51: 2,
52: 4,
54: 8,
5: 4,
6: 8,
7: 16,
9: 0,
10: 8,
11: 16,
12: 0,
55: 1
}
def GetTypeName(typeID):
name = dataTypes.get(typeID)
if name is None:
name = "unknown type"
return name
def GetTypeSize(typeID):
size = dataTypeSize.get(typeID)
if size is None:
size = 0
return size
CharacteristicNames = {
0: 'value',
1: 'min',
2: 'max',
3: 'offset',
4: 'dimensions',
5: 'var_id',
6: 'payload_offset',
7: 'file_index',
8: 'time_index',
9: 'bitmap',
10: 'stat',
11: 'transform_type',
12: 'minmax'
}
def GetCharacteristicName(cID):
name = CharacteristicNames.get(cID)
if name is None:
name = "unknown characteristic"
return name
def GetCharacteristicDataLength(cID, typeID):
name = CharacteristicNames.get(cID)
if (name == 'value' or name == 'min' or
name == 'max' or name == 'minmax'):
return dataTypeSize[typeID]
elif (name == 'offset' or name == 'payload_offset'):
return 8
elif (name == 'file_index' or name == 'time_index'):
return 4
else:
return 0
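# Illustrative lookups (hedged; the values follow directly from the tables above):
#   GetTypeName(6)                     -> 'double'
#   GetTypeSize(6)                     -> 8
#   GetCharacteristicName(1)           -> 'min'
#   GetCharacteristicDataLength(1, 6)  -> 8   (a 'min' is stored as one double)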
# Read Header info 64 bytes
# fileType: Data, Metadata, Index Table
def ReadHeader(f, fileSize, fileType):
status = True
if fileSize < 64:
print("ERROR: Invalid " + fileType + ". File is smaller "
"than the header (64 bytes)")
return False
header = f.read(64)
hStr = header.decode('ascii')
versionStr = hStr[0:32].replace('\0', ' ')
major = hStr[32]
minor = hStr[33]
micro = hStr[34]
# unused = hStr[35]
endianValue = header[36]
if endianValue == 0:
endian = 'yes'
elif endianValue == 1:
endian = ' no'
else:
        print("ERROR: byte 36 must be 0 or 1 to indicate endianness of "
"the data. It is however {0} in this file".format(
endianValue))
status = False
bpversion = int(header[37])
active = int(header[38])
if active == 0:
activeStr = ' no'
else:
activeStr = 'yes'
# unused = hStr[39]
WriterCount = int(header[40])
aggregatorcount = int(header[44])
iscolumnmajor = header[49]
# 45..63 unused
print("-----------------------------------------------------------"
"-----------------------------------------------------------")
print("| Version string | Major | Minor | Patch "
"| unused | Endian | BP version | Active | WriterCount | AggCount" +
" | ColumnMajor | unused |")
print("| 32 bytes | 1B | 1B | 1B "
"| 1B | 1B | 1B | 1B | 4b | 4b " +
"| 1b | 16B |")
print("+----------------------------------------------------------"
"----------------------------------------------------------+")
    print("| {0} | {1} | {2} | {3} | | {4} "
          "| {5} | {6} | {7:d} | {8:d} | "
          "{9} | |".format(
              versionStr, major, minor, micro, endian, bpversion, activeStr,
              WriterCount, aggregatorcount, iscolumnmajor))
print("-----------------------------------------------------------"
"-----------------------------------------------------------")
return [status, WriterCount]
if __name__ == "__main__":
print("ERROR: Utility main program is bp5dbg.py")
|
imagenet/optim/qhm.py | batuozt/transformer-ls | 177 | 12742437 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
from torch.optim import Optimizer
class QHM(Optimizer):
r"""
Stochastic gradient method with Quasi-Hyperbolic Momentum (QHM):
h(k) = (1 - \beta) * g(k) + \beta * h(k-1)
d(k) = (1 - \nu) * g(k) + \nu * h(k)
x(k+1) = x(k) - \alpha * d(k)
"Quasi-hyperbolic momentum and Adam for deep learning"
by <NAME> and <NAME>, ICLR 2019
optimizer = QHM(params, lr=-1, momentum=0, qhm_nu=1, weight_decay=0)
Args:
params (iterable): iterable params to optimize or dict of param groups
lr (float): learning rate, \alpha in QHM update (default:-1 need input)
momentum (float, optional): \beta in QHM update, range[0,1) (default:0)
qhm_nu (float, optional): \nu in QHM update, range[0,1] (default: 1)
\nu = 0: SGD without momentum (\beta is ignored)
\nu = 1: SGD with momentum \beta and dampened gradient (1-\beta)
\nu = \beta: SGD with "Nesterov momentum" \beta
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
Example:
>>> optimizer = torch.optim.QHM(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
"""
def __init__(self, params, lr=-1, momentum=0, qhm_nu=1, weight_decay=0):
# nu can take values outside of the interval [0,1], but no guarantee of convergence?
if lr <= 0:
raise ValueError("Invalid value for learning rate (>0): {}".format(lr))
if momentum < 0 or momentum > 1:
raise ValueError("Invalid value for momentum [0,1): {}".format(momentum))
if weight_decay < 0:
raise ValueError("Invalid value for weight_decay (>=0): {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, qhm_nu=qhm_nu, weight_decay=weight_decay)
super(QHM, self).__init__(params, defaults)
# extra_buffer == True only in SSLS with momentum > 0 and nu != 1
self.state['allocate_step_buffer'] = False
def step(self, closure=None):
"""
Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates model and returns loss.
"""
loss = None
if closure is not None:
loss = closure()
self.add_weight_decay()
self.qhm_direction()
self.qhm_update()
return loss
def add_weight_decay(self):
# weight_decay is the same as adding L2 regularization
for group in self.param_groups:
weight_decay = group['weight_decay']
for p in group['params']:
if p.grad is None:
continue
if weight_decay > 0:
p.grad.data.add_(p.data, alpha=weight_decay)
def qhm_direction(self):
for group in self.param_groups:
momentum = group['momentum']
qhm_nu = group['qhm_nu']
for p in group['params']:
if p.grad is None:
continue
x = p.data # Optimization parameters
g = p.grad.data # Stochastic gradient
                # Compute the (negative) step direction d and the necessary momentum
state = self.state[p]
if abs(momentum) < 1e-12 or abs(qhm_nu) < 1e-12: # simply SGD if beta=0 or nu=0
d = state['step_buffer'] = g
else:
if 'momentum_buffer' not in state:
h = state['momentum_buffer'] = torch.zeros_like(x)
else:
h = state['momentum_buffer']
# Update momentum buffer: h(k) = (1 - \beta) * g(k) + \beta * h(k-1)
h.mul_(momentum).add_(g, alpha=1 - momentum)
if abs(qhm_nu - 1) < 1e-12: # if nu=1, then same as SGD with momentum
d = state['step_buffer'] = h
else:
if self.state['allocate_step_buffer']: # copy from gradient
if 'step_buffer' not in state:
state['step_buffer'] = torch.zeros_like(g)
d = state['step_buffer'].copy_(g)
else: # use gradient buffer
d = state['step_buffer'] = g
# Compute QHM momentum: d(k) = (1 - \nu) * g(k) + \nu * h(k)
d.mul_(1 - qhm_nu).add_(h, alpha=qhm_nu)
def qhm_update(self):
"""
Perform QHM update, need to call compute_qhm_direction() before calling this.
"""
for group in self.param_groups:
for p in group['params']:
if p.grad is not None:
p.data.add_(self.state[p]['step_buffer'], alpha=-group['lr'])
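# Hedged usage sketch (not part of the original module): a minimal check of the
# QHM update on a single scalar parameter, assuming only that torch is
# installed. Minimizing 0.5 * w**2 should drive w toward zero.
if __name__ == "__main__":
    w = torch.nn.Parameter(torch.tensor([1.0]))
    optimizer = QHM([w], lr=0.1, momentum=0.9, qhm_nu=0.7)
    for _ in range(50):
        optimizer.zero_grad()
        loss = 0.5 * (w ** 2).sum()
        loss.backward()
        optimizer.step()
    print(w.data)  # expected to be close to zero after 50 steps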
|
mmdet/utils/__init__.py | Joanna0123/QueryInst | 326 | 12742449 | from .collect_env import collect_env
from .logger import get_root_logger
from .optimizer import OptimizerHook
__all__ = ['get_root_logger', 'collect_env', 'OptimizerHook']
|
integration/tests/follow_redirect.py | youhavethewrong/hurl | 1,013 | 12742490 | <reponame>youhavethewrong/hurl<filename>integration/tests/follow_redirect.py
from tests import app
from flask import redirect
@app.route('/follow-redirect')
def follow_redirect():
return redirect('http://localhost:8000/following-redirect')
@app.route('/following-redirect')
def following_redirect():
return redirect('http://localhost:8000/followed-redirect')
@app.route('/followed-redirect')
def followed_redirect():
return 'Followed redirect!'
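# Hedged note (not part of the original test app): the three routes above form
# a two-hop redirect chain, so a client that follows redirects sees
#   GET /follow-redirect -> 302 /following-redirect -> 302 /followed-redirect
#   -> 200 "Followed redirect!"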
|
heat/tests/convergence/scenarios/update_replace_missed_cleanup.py | noironetworks/heat | 265 | 12742532 | <gh_stars>100-1000
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def check_c_count(expected_count):
test.assertEqual(expected_count,
len(reality.resources_by_logical_name('C')))
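# Hedged reading of this scenario (added commentary, not in the original file):
# C's '!a' property mirrors A's 'a' attribute, and the '!' prefix appears to
# mark a property whose change forces replacement, so updating A.a in the
# second template makes C get replaced. A third template is applied before the
# old C has been cleaned up, which is why check_c_count(2) below expects two C
# resources to coexist mid-update.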
example_template = Template({
'A': RsrcDef({'a': 'initial'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.create_stack('foo', example_template)
engine.noop(5)
engine.call(verify, example_template)
example_template_shrunk = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.update_stack('foo', example_template_shrunk)
engine.noop(7)
example_template_long = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
'F': RsrcDef({}, ['D', 'E']),
})
engine.update_stack('foo', example_template_long)
engine.call(check_c_count, 2)
engine.noop(11)
engine.call(verify, example_template_long)
engine.delete_stack('foo')
engine.noop(12)
engine.call(verify, Template({}))
|
third_party/blink/tools/blinkpy/common/net/git_cl_mock.py | zealoussnow/chromium | 14,668 | 12742535 | <gh_stars>1000+
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from blinkpy.common.net.git_cl import CLStatus, GitCL
from blinkpy.common.system.executive import ScriptError
# pylint: disable=unused-argument
class MockGitCL(object):
def __init__(self,
host,
try_job_results=None,
status='closed',
issue_number='1234',
time_out=False,
git_error_output=None):
"""Constructs a fake GitCL with canned return values.
Args:
host: Host object, used for builder names.
try_job_results: A dict of Build to TryJobStatus.
status: CL status string.
issue_number: CL issue number as a string.
time_out: Whether to simulate timing out while waiting.
git_error_output: A dict of git-cl args to exception output.
"""
self._builders = host.builders.all_try_builder_names()
self._status = status
self._try_job_results = try_job_results
self._issue_number = issue_number
self._time_out = time_out
self._git_error_output = git_error_output
self.calls = []
def run(self, args):
self.calls.append(['git', 'cl'] + args)
arg_key = "".join(args)
if self._git_error_output and arg_key in self._git_error_output.keys():
raise ScriptError(output=self._git_error_output[arg_key])
return 'mock output'
def trigger_try_jobs(self, builders, bucket=None):
bucket = bucket or 'luci.chromium.try'
command = ['try', '-B', bucket]
for builder in sorted(builders):
command.extend(['-b', builder])
self.run(command)
def get_issue_number(self):
return self._issue_number
def try_job_results(self, **_):
return self._try_job_results
def wait_for_try_jobs(self, **_):
if self._time_out:
return None
return CLStatus(self._status,
self.filter_latest(self._try_job_results))
def wait_for_closed_status(self, **_):
if self._time_out:
return None
return 'closed'
def latest_try_jobs(self, builder_names=None, **_):
return self.filter_latest(self._try_job_results)
@staticmethod
def filter_latest(try_results):
return GitCL.filter_latest(try_results)
@staticmethod
def all_finished(try_results):
return GitCL.all_finished(try_results)
@staticmethod
def all_success(try_results):
return GitCL.all_success(try_results)
@staticmethod
def some_failed(try_results):
return GitCL.some_failed(try_results)
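# Hedged usage sketch (illustrative only; 'host' and the builder name are
# assumptions): the mock records every invocation in .calls so tests can
# assert on the exact commands issued, e.g.
#   git_cl = MockGitCL(host, status='lgtm', issue_number='42')
#   git_cl.trigger_try_jobs({'linux-rel'})
#   assert git_cl.calls == [
#       ['git', 'cl', 'try', '-B', 'luci.chromium.try', '-b', 'linux-rel']]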
|
insights/parsers/ipaupgrade_log.py | mglantz/insights-core | 121 | 12742541 | <gh_stars>100-1000
"""
IpaupgradeLog - file ``/var/log/ipaupgrade.log``
================================================
This file records the information of IPA server upgrade process while
executing command ``ipa-server-upgrade``
"""
from .. import LogFileOutput, parser
from insights.specs import Specs
@parser(Specs.ipaupgrade_log)
class IpaupgradeLog(LogFileOutput):
"""
This parser is used to parse the content of file `/var/log/ipaupgrade.log`.
.. note::
Please refer to its super-class :class:`insights.core.LogFileOutput`
Typical content of ``ipaupgrade.log`` file is::
2017-08-07T07:36:50Z DEBUG Starting external process
2017-08-07T07:36:50Z DEBUG args=/bin/systemctl is-active [email protected]
2017-08-07T07:36:50Z DEBUG Process finished, return code=0
2017-08-07T07:36:50Z DEBUG stdout=active
2017-08-07T07:41:50Z ERROR IPA server upgrade failed: Inspect /var/log/ipaupgrade.log and run command ipa-server-upgrade manually.
Example:
        >>> log = shared[IpaupgradeLog]
>>> len(list(log.get('DEBUG')))
4
>>> from datetime import datetime
>>> len(log.get_after(datetime(2017, 8, 7, 7, 37, 30)))
1
"""
time_format = '%Y-%m-%dT%H:%M:%SZ'
|
dreamcoder/domains/regex/main.py | Dragon-hxl/LARC | 290 | 12742550 | <reponame>Dragon-hxl/LARC
# analog of list.py for regex tasks. Responsible for actually running the task.
from dreamcoder.domains.regex.makeRegexTasks import makeOldTasks, makeLongTasks, makeShortTasks, makeWordTasks, makeNumberTasks, makeHandPickedTasks, makeNewTasks, makeNewNumberTasks
from dreamcoder.domains.regex.regexPrimitives import basePrimitives, altPrimitives, easyWordsPrimitives, alt2Primitives, concatPrimitives, reducedConcatPrimitives, strConstConcatPrimitives, PRC
from dreamcoder.dreamcoder import explorationCompression, Task
from dreamcoder.grammar import Grammar
from dreamcoder.likelihoodModel import add_cutoff_values, add_string_constants
from dreamcoder.program import Abstraction, Application
from dreamcoder.type import tpregex
from dreamcoder.utilities import eprint, flatten, testTrainSplit, POSITIVEINFINITY
import random
import math
import pregex as pre
import os
try:
from dreamcoder.recognition import RecurrentFeatureExtractor, JSONFeatureExtractor
class LearnedFeatureExtractor(RecurrentFeatureExtractor):
H = 64
special = 'regex'
def tokenize(self, examples):
def sanitize(l): return [z if z in self.lexicon else "?"
for z_ in l
for z in (z_ if isinstance(z_, list) else [z_])]
tokenized = []
for xs, y in examples:
if isinstance(y, list):
y = ["LIST_START"] + y + ["LIST_END"]
else:
y = [y]
y = sanitize(y)
if len(y) > self.maximumLength:
return None
serializedInputs = []
for xi, x in enumerate(xs):
if isinstance(x, list):
x = ["LIST_START"] + x + ["LIST_END"]
else:
x = [x]
x = sanitize(x)
if len(x) > self.maximumLength:
return None
serializedInputs.append(x)
tokenized.append((tuple(serializedInputs), y))
return tokenized
def __init__(self, tasks, testingTasks=[], cuda=False):
self.lexicon = set(flatten((t.examples for t in tasks + testingTasks), abort=lambda x: isinstance(
x, str))).union({"LIST_START", "LIST_END", "?"})
self.num_examples_list = [len(t.examples) for t in tasks]
# Calculate the maximum length
self.maximumLength = POSITIVEINFINITY
self.maximumLength = max(len(l)
for t in tasks + testingTasks
for xs, y in self.tokenize(t.examples)
for l in [y] + [x for x in xs])
super(
LearnedFeatureExtractor,
self).__init__(
lexicon=list(
self.lexicon),
tasks=tasks,
cuda=cuda,
H=self.H,
bidirectional=True)
self.parallelTaskOfProgram = False
def taskOfProgram(self, p, t):
#raise NotImplementedError
num_examples = random.choice(self.num_examples_list)
p = p.visit(ConstantInstantiateVisitor.SINGLE)
preg = p.evaluate([])(pre.String(""))
t = Task("Helm", t, [((), list(preg.sample())) for _ in range(num_examples) ])
return t
except: pass
#in init: loop over tasks, save lengths,
class ConstantInstantiateVisitor(object):
def __init__(self):
self.regexes = [
pre.create(".+"),
pre.create("\d+"),
pre.create("\w+"),
pre.create("\s+"),
pre.create("\\u+"),
pre.create("\l+")]
def primitive(self, e):
if e.name == "r_const":
#return Primitive("STRING", e.tp, random.choice(self.words))
s = random.choice(self.regexes).sample() #random string const
s = pre.String(s)
e.value = PRC(s,arity=0)
return e
def invented(self, e): return e.body.visit(self)
def index(self, e): return e
def application(self, e):
return Application(e.f.visit(self), e.x.visit(self))
def abstraction(self, e):
return Abstraction(e.body.visit(self))
#TODO fix
class MyJSONFeatureExtractor(JSONFeatureExtractor):
N_EXAMPLES = 5
def _featuresOfProgram(self, program, tp):
try:
preg = program.evaluate([])
# if 'left_paren' in program.show(False):
#eprint("string_pregex:", string_pregex)
#eprint("string_pregex:", string_pregex)
except IndexError:
# free variable
return None
except Exception as e:
eprint("Exception during evaluation:", e)
            if "Attempt to evaluate fragment variable" in str(e):
eprint("program (bc fragment error)", program)
return None
examples = []
for _ in range(self.N_EXAMPLES * 5): # oh this is arbitrary ig
try:
y = preg.sample() # TODO
#this line should keep inputs short, so that helmholtzbatch can be large
#allows it to try other samples
#(Could also return None off the bat... idk which is better)
#if len(y) > 20:
# continue
#eprint(tp, program, x, y)
examples.append(y)
except BaseException:
                    continue
if len(examples) >= self.N_EXAMPLES:
break
else:
return None
return examples # changed to list_features(examples) from examples
def regex_options(parser):
parser.add_argument("--maxTasks", type=int,
default=500,
help="truncate tasks to fit within this boundary")
parser.add_argument(
"--maxExamples",
type=int,
default=10,
help="truncate number of examples per task to fit within this boundary")
parser.add_argument("--tasks",
default="long",
help="which tasks to use",
choices=["old", "short", "long", "words", "number", "handpicked", "new", "newNumber"])
parser.add_argument("--primitives",
default="concat",
help="Which primitive set to use",
choices=["base", "alt1", "easyWords", "alt2", "concat", "reduced", "strConst"])
parser.add_argument("--extractor", type=str,
choices=["hand", "deep", "learned", "json"],
default="learned") # if i switch to json it breaks
parser.add_argument("--split", metavar="TRAIN_RATIO",
type=float,
default=0.8,
help="split test/train")
parser.add_argument("-H", "--hidden", type=int,
default=256,
help="number of hidden units")
parser.add_argument("--likelihoodModel",
default="probabilistic",
help="likelihood Model",
choices=["probabilistic", "all-or-nothing"])
parser.add_argument("--topk_use_map",
dest="topk_use_only_likelihood",
action="store_false")
parser.add_argument("--debug",
dest="debug",
action="store_true")
parser.add_argument("--ll_cutoff",
dest="use_ll_cutoff",
nargs='*',
default=False,
help="use ll cutoff for training tasks (for probabilistic likelihood model only). default is False,")
parser.add_argument("--use_str_const",
action="store_true",
help="use string constants")
"""parser.add_argument("--stardecay",
type=float,
dest="stardecay",
default=0.5,
help="p value for kleenestar and plus")"""
# Lucas recommends putting a struct with the definitions of the primitives here.
# TODO:
# Build likelihood funciton
# modify NN
# make primitives
# make tasks
def main(args):
"""
Takes the return value of the `commandlineArguments()` function as input and
trains/tests the model on regular expressions.
"""
#for dreaming
#parse use_ll_cutoff
use_ll_cutoff = args.pop('use_ll_cutoff')
if not use_ll_cutoff is False:
#if use_ll_cutoff is a list of strings, then train_ll_cutoff and train_ll_cutoff
#will be tuples of that string followed by the actual model
if len(use_ll_cutoff) == 1:
train_ll_cutoff = use_ll_cutoff[0] # make_cutoff_model(use_ll_cutoff[0], tasks))
test_ll_cutoff = use_ll_cutoff[0] # make_cutoff_model(use_ll_cutoff[0], tasks))
else:
assert len(use_ll_cutoff) == 2
train_ll_cutoff = use_ll_cutoff[0] #make_cutoff_model(use_ll_cutoff[0], tasks))
test_ll_cutoff = use_ll_cutoff[1] #make_cutoff_model(use_ll_cutoff[1], tasks))
else:
train_ll_cutoff = None
test_ll_cutoff = None
regexTasks = {"old": makeOldTasks,
"short": makeShortTasks,
"long": makeLongTasks,
"words": makeWordTasks,
"number": makeNumberTasks,
"handpicked": makeHandPickedTasks,
"new": makeNewTasks,
"newNumber": makeNewNumberTasks
}[args.pop("tasks")]
tasks = regexTasks() # TODO
eprint("Generated", len(tasks), "tasks")
maxTasks = args.pop("maxTasks")
if len(tasks) > maxTasks:
eprint("Unwilling to handle {} tasks, truncating..".format(len(tasks)))
seed = 42 # previously this was hardcoded and never changed
random.seed(seed)
random.shuffle(tasks)
del tasks[maxTasks:]
maxExamples = args.pop("maxExamples")
split = args.pop("split")
test, train = testTrainSplit(tasks, split)
eprint("Split tasks into %d/%d test/train" % (len(test), len(train)))
test = add_cutoff_values(test, test_ll_cutoff)
train = add_cutoff_values(train, train_ll_cutoff)
eprint("added cutoff values to tasks, train: ", train_ll_cutoff, ", test:", test_ll_cutoff )
if args.pop("use_str_const"):
assert args["primitives"] == "strConst" or args["primitives"] == "reduced"
ConstantInstantiateVisitor.SINGLE = \
ConstantInstantiateVisitor()
test = add_string_constants(test)
train = add_string_constants(train)
eprint("added string constants to test and train")
for task in test + train:
if len(task.examples) > maxExamples:
task.examples = task.examples[:maxExamples]
task.specialTask = ("regex", {"cutoff": task.ll_cutoff, "str_const": task.str_const})
task.examples = [(xs, [y for y in ys ])
for xs,ys in task.examples ]
task.maxParameters = 1
# from list stuff
primtype = args.pop("primitives")
prims = {"base": basePrimitives,
"alt1": altPrimitives,
"alt2": alt2Primitives,
"easyWords": easyWordsPrimitives,
"concat": concatPrimitives,
"reduced": reducedConcatPrimitives,
"strConst": strConstConcatPrimitives
}[primtype]
extractor = {
"learned": LearnedFeatureExtractor,
"json": MyJSONFeatureExtractor
}[args.pop("extractor")]
extractor.H = args.pop("hidden")
#stardecay = args.stardecay
#stardecay = args.pop('stardecay')
#decaystr = 'd' + str(stardecay)
import datetime
timestamp = datetime.datetime.now().isoformat()
outputDirectory = "experimentOutputs/regex/%s"%timestamp
os.system("mkdir -p %s"%outputDirectory)
args.update({
"featureExtractor": extractor,
"outputPrefix": "%s/regex"%(outputDirectory),
"evaluationTimeout": 0.005,
"topk_use_only_likelihood": True,
"maximumFrontier": 10,
"compressor": args.get("compressor","ocaml")
})
####
# use the
#prim_list = prims(stardecay)
prim_list = prims()
specials = ["r_kleene", "r_plus", "r_maybe", "r_alt", "r_concat"]
n_base_prim = len(prim_list) - len(specials)
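    # Weighting sketch: the five "special" combinators get probability 0.10
    # each (0.5 in total) and the remaining n_base_prim base primitives split
    # the other 0.5 uniformly; both are expressed below as log-probabilities.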
productions = [
(math.log(0.5 / float(n_base_prim)),
prim) if prim.name not in specials else (
math.log(0.10),
prim) for prim in prim_list]
baseGrammar = Grammar.fromProductions(productions, continuationType=tpregex)
#baseGrammar = Grammar.uniform(prims())
#for i in range(100):
# eprint(baseGrammar.sample(tpregex))
#eprint(baseGrammar)
#explore
test_stuff = args.pop("debug")
if test_stuff:
eprint(baseGrammar)
eprint("sampled programs from prior:")
for i in range(100): #100
eprint(baseGrammar.sample(test[0].request,maximumDepth=1000))
eprint("""half the probability mass is on higher-order primitives.
Therefore half of enumerated programs should have more than one node.
However, we do not observe this.
Instead we see a very small fraction of programs have more than one node.
So something seems to be wrong with grammar.sample.
Furthermore: observe the large print statement above.
This prints the candidates for sampleDistribution in grammar.sample.
the first element of each tuple is the probability passed into sampleDistribution.
Half of the probability mass should be on the functions, but instead they are equally
weighted with the constants. If you look at the grammar above, this is an error!!!!
""")
assert False
del args["likelihoodModel"]
explorationCompression(baseGrammar, train,
testingTasks = test,
**args)
|
ufora/networking/DemuxedTransport.py | ufora/ufora | 571 | 12742553 | <reponame>ufora/ufora
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ufora.networking.ChannelDemuxer import Message, ChannelDemuxer
class DemuxedTransport(object):
def __init__(self):
self.clients = {}
def onMessageReceived_(self, content, channelId):
try:
channel = self.clients[channelId]
message = Message(
channel.channelGroup,
channelId,
channel.hostId,
channel.outgoingSequenceNumber,
content
)
channel.outgoingSequenceNumber += 1
self.onMessageReceived(ChannelDemuxer.encodeMessage(message))
except Exception:
import traceback
logging.error('ERROR: failed to dispatch received message\n%s' % traceback.format_exc())
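# Hedged note (added commentary): each entry in self.clients is expected to
# expose channelGroup, hostId and outgoingSequenceNumber attributes; the
# method above wraps raw content in a Message, bumps the sequence number, and
# hands the ChannelDemuxer-encoded bytes to onMessageReceived, which a
# concrete transport subclass is expected to provide.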
|
hgtk/__init__.py | bluedisk/hangul-tools | 271 | 12742576 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from . import text
from . import letter
from . import checker
from . import josa |
V2RaycSpider1225/src/BusinessLogicLayer/apis/cluster_api/sspanel_parser.py | QIN2DIM/V2RayCloudSpider | 891 | 12742590 | # -*- coding: utf-8 -*-
# Time : 2021/7/25 13:59
# Author : QIN2DIM
# Github : https://github.com/QIN2DIM
# Description:
import json
import os
from datetime import datetime
from bs4 import BeautifulSoup
from selenium.common.exceptions import (
StaleElementReferenceException,
WebDriverException,
)
from selenium.webdriver import Chrome
from src.BusinessCentralLayer.setting import logger, SERVER_DIR_DATABASE, TIME_ZONE_CN
from src.BusinessLogicLayer.cluster.master import ActionMasterGeneral
class SSPanelParser(ActionMasterGeneral):
def __init__(self, url, silence=False, assault=True, anti_slider=True):
super(SSPanelParser, self).__init__(
url,
silence,
assault,
anti_slider=anti_slider,
)
self.obj_parser = {}
self.cache_db_name = "parser_cache"
self.cache_db_path = self.create_cache_db(database_dir=SERVER_DIR_DATABASE)
def create_cache_db(self, database_dir=None):
database_dir = "database" if database_dir is None else database_dir
if not os.path.exists(database_dir):
os.mkdir(database_dir)
cache_db = os.path.join(database_dir, self.cache_db_name)
if not os.path.exists(cache_db):
os.mkdir(cache_db)
return cache_db
def capture_cache(self, signs, flow):
output_path = os.path.join(self.cache_db_path, signs)
with open(output_path, "w", encoding="utf8") as f:
f.write(flow)
def parse(self, **kwargs):
"""
:return:
"""
api: Chrome = kwargs.get("api")
self.obj_parser.update({"parse_url": self.register_url})
# ----------------------------------------
        # Parse the available traffic and the remaining validity time.
        # Run this first and wait for the fluid animation to finish loading
        # (a time-consuming task) so that later parsing does not have to wait.
# ----------------------------------------
fluid = set()
fluid_density = []
i = 0
while True:
try:
i += 1
card_body = api.find_elements_by_xpath("//div[@class='card-body']")[:2]
card_body = [tag.text.strip() for tag in card_body]
fluid.update(card_body)
fluid_density.append(len(fluid))
                # The fluid is still being released -- keep sampling
if len(fluid_density) < 10 or len(fluid) < 3:
continue
                # The fluid readings are relatively stable
if max(fluid_density[:10]) == min(fluid_density[:10]):
self.obj_parser.update(
{"time": card_body[0], "flow": card_body[-1]}
)
break
except StaleElementReferenceException:
pass
        # Save the cookies
with open("123.json", "w", encoding="utf8") as f:
f.write(json.dumps(api.get_cookies()))
        # Read the cookies
# cookie_json = " ".join([f"{i['name']}={i['value']};" for i in json.loads(f.read())])
# ----------------------------------------
        # Parse the site name
# ----------------------------------------
try:
parse_name = api.find_element_by_xpath(
"//aside//div[@class='sidebar-brand']"
).text.strip()
self.obj_parser.update({"parse_name": parse_name})
except WebDriverException:
logger.error(
f"<SSPanelParserError> Site name resolution failed -- {self.register_url}"
)
# ----------------------------------------
        # Parse the site announcement
# ----------------------------------------
reference_links = {}
try:
card_body = api.find_elements_by_xpath("//div[@class='card-body']")[4]
self.obj_parser.update({"desc": card_body.text.strip()})
related_href = card_body.find_elements_by_tag_name("a")
for tag in related_href:
href = tag.get_attribute("href")
if href:
href = href.strip()
if "https" not in href:
href = f"{self.register_url}{href}"
href_desc = tag.text.strip() if tag.text else href
reference_links.update({href: href_desc})
self.obj_parser.update({"reference_links": reference_links})
except WebDriverException:
logger.error(
f"<SSPanelParserError> Site announcement parsing error -- {self.register_url}"
)
# ----------------------------------------
        # Parse [subscription link import]
# ----------------------------------------
subscribes = {}
support = []
try:
            # Clean up the subscription links
soup = BeautifulSoup(api.page_source, "html.parser")
for i in soup.find_all("a"):
if i.get("data-clipboard-text"):
subscribes.update({i.get("data-clipboard-text"): i.text.strip()})
            # Identify the supported subscription types
buttons = api.find_elements_by_xpath("//div[@class='card'][2]//a")
for tag in buttons:
support_ = tag.get_attribute("class")
if support_:
support_ = [
i
for i in [i for i in support_.split() if i.startswith("btn-")]
if i
not in [
"btn-icon",
"btn-primary",
"btn-lg",
"btn-round",
"btn-progress",
]
]
if len(support_) == 1:
class_name = support_[0].replace("btn-", "")
support.append(class_name)
            # Fill in subscription types missed by the button scan
for tag in subscribes.values():
if "surge" in tag.lower():
support.append("surge")
if "ssr" in tag.lower():
support.append("ssr")
self.obj_parser.update(
{"subscribes": subscribes, "support": list(set(support))}
)
except WebDriverException:
logger.error(
f"<SSPanelParserError> Site subscription resolution failed -- {self.register_url}"
)
self.obj_parser.update(
{
"email": self.email,
"password": self.password,
"recently_login": datetime.now(tz=TIME_ZONE_CN),
}
)
return self.obj_parser
def parse_by_login(self, **kwargs) -> dict:
return self.seep("login", self.parse, **kwargs)
def parse_by_register(self, **kwargs):
return self.seep("register", self.parse, **kwargs)
def refresh_cookie(self, **kwargs):
def get_cookie():
cookies = kwargs.get("api")
return json.dumps(cookies.get_cookies()) if cookies else {}
return self.seep("login", get_cookie, **kwargs)
def seep(self, method, business, **kwargs):
        # Get the spider options
api = self.set_spider_option()
        # Execute the core business logic
try:
self.get_html_handle(api=api, url=self.register_url, wait_seconds=45)
if method == "login":
self.sign_in(api, **kwargs)
elif method == "register":
self.sign_up(api)
self.wait(api, 40, "//div[@class='card-body']")
kwargs.setdefault("api", api)
return business(**kwargs)
finally:
api.quit()
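# Hedged usage sketch (illustrative only; the URL is an assumption): drive a
# full register-and-parse pass against a panel site and cache the result.
#   parser = SSPanelParser("https://demo-panel.example.com", silence=True)
#   info = parser.parse_by_register()
#   parser.capture_cache("demo-panel.json", json.dumps(info, default=str))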
|
virl/cli/search/commands.py | tombry/virlutils | 133 | 12742619 | <filename>virl/cli/search/commands.py
import click
from virl.api import ViewerPlugin, NoPluginError
from virl.api.github import get_repos
from virl.cli.views.search import repo_table
@click.command()
@click.argument("query", required=False)
@click.option("--org", default="virlfiles", required=False, help="GitHub organization to search (default: virlfiles)")
def search(query=None, **kwargs):
"""
list topologies available via github
"""
repos = get_repos(org=kwargs["org"], query=query)
if query is not None:
click.secho("Displaying {} Results For {}".format(len(repos), query))
else:
click.secho("Displaying {} Results".format(len(repos)))
try:
pl = ViewerPlugin(viewer="search")
pl.visualize(repos=repos)
except NoPluginError:
repo_table(repos)
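# Hedged CLI examples (assuming this command is registered as `virl search`):
#   virl search                # list repos in the default virlfiles org
#   virl search ios            # filter repos matching "ios"
#   virl search --org myorg    # search a different GitHub organization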
|
test/qa-tests/buildscripts/resmokelib/testing/hooks.py | tfogo/mongo-tools | 828 | 12742626 | """
Customize the behavior of a fixture by allowing special code to be
executed before or after each test, and before or after each suite.
"""
from __future__ import absolute_import
import os
import sys
import bson
import pymongo
from . import fixtures
from . import testcases
from .. import errors
from .. import logging
from .. import utils
def make_custom_behavior(class_name, *args, **kwargs):
"""
Factory function for creating CustomBehavior instances.
"""
if class_name not in _CUSTOM_BEHAVIORS:
raise ValueError("Unknown custom behavior class '%s'" % (class_name))
return _CUSTOM_BEHAVIORS[class_name](*args, **kwargs)
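# Hedged note: _CUSTOM_BEHAVIORS is expected to map class-name strings to the
# behavior classes defined below, e.g. {"CleanEveryN": CleanEveryN,
# "CheckReplDBHash": CheckReplDBHash}, so a suite configuration might request
#   make_custom_behavior("CleanEveryN", logger, fixture, n=10)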
class CustomBehavior(object):
"""
The common interface all CustomBehaviors will inherit from.
"""
@staticmethod
def start_dynamic_test(test_case, test_report):
"""
If a CustomBehavior wants to add a test case that will show up
in the test report, it should use this method to add it to the
report, since we will need to count it as a dynamic test to get
the stats in the summary information right.
"""
test_report.startTest(test_case, dynamic=True)
def __init__(self, logger, fixture):
"""
Initializes the CustomBehavior with the specified fixture.
"""
if not isinstance(logger, logging.Logger):
raise TypeError("logger must be a Logger instance")
self.logger = logger
self.fixture = fixture
def before_suite(self, test_report):
"""
The test runner calls this exactly once before they start
running the suite.
"""
pass
def after_suite(self, test_report):
"""
The test runner calls this exactly once after all tests have
finished executing. Be sure to reset the behavior back to its
original state so that it can be run again.
"""
pass
def before_test(self, test_report):
"""
Each test will call this before it executes.
Raises a TestFailure if the test should be marked as a failure,
or a ServerFailure if the fixture exits uncleanly or
unexpectedly.
"""
pass
def after_test(self, test_report):
"""
Each test will call this after it executes.
Raises a TestFailure if the test should be marked as a failure,
or a ServerFailure if the fixture exits uncleanly or
unexpectedly.
"""
pass
class CleanEveryN(CustomBehavior):
"""
Restarts the fixture after it has ran 'n' tests.
On mongod-related fixtures, this will clear the dbpath.
"""
DEFAULT_N = 20
def __init__(self, logger, fixture, n=DEFAULT_N):
CustomBehavior.__init__(self, logger, fixture)
# Try to isolate what test triggers the leak by restarting the fixture each time.
if "detect_leaks=1" in os.getenv("ASAN_OPTIONS", ""):
self.logger.info("ASAN_OPTIONS environment variable set to detect leaks, so restarting"
" the fixture after each test instead of after every %d.", n)
n = 1
self.n = n
self.tests_run = 0
def after_test(self, test_report):
self.tests_run += 1
if self.tests_run >= self.n:
self.logger.info("%d tests have been run against the fixture, stopping it...",
self.tests_run)
self.tests_run = 0
teardown_success = self.fixture.teardown()
self.logger.info("Starting the fixture back up again...")
self.fixture.setup()
self.fixture.await_ready()
# Raise this after calling setup in case --continueOnFailure was specified.
if not teardown_success:
raise errors.TestFailure("%s did not exit cleanly" % (self.fixture))
class CheckReplDBHash(CustomBehavior):
"""
    Waits for replication after each test, then checks that the dbhashes
of all databases other than "local" match on the primary and all of
the secondaries. If any dbhashes do not match, logs information
about what was different (e.g. Different numbers of collections,
missing documents in a collection, mismatching documents, etc).
Compatible only with ReplFixture subclasses.
"""
def __init__(self, logger, fixture):
if not isinstance(fixture, fixtures.ReplFixture):
raise TypeError("%s does not support replication" % (fixture.__class__.__name__))
CustomBehavior.__init__(self, logger, fixture)
self.test_case = testcases.TestCase(self.logger, "Hook", "#dbhash#")
self.started = False
def after_test(self, test_report):
"""
After each test, check that the dbhash of the test database is
the same on all nodes in the replica set or master/slave
fixture.
"""
try:
if not self.started:
CustomBehavior.start_dynamic_test(self.test_case, test_report)
self.started = True
# Wait until all operations have replicated.
self.fixture.await_repl()
success = True
sb = [] # String builder.
primary = self.fixture.get_primary()
primary_conn = utils.new_mongo_client(port=primary.port)
for secondary in self.fixture.get_secondaries():
read_preference = pymongo.ReadPreference.SECONDARY
secondary_conn = utils.new_mongo_client(port=secondary.port,
read_preference=read_preference)
# Skip arbiters.
if secondary_conn.admin.command("isMaster").get("arbiterOnly", False):
continue
all_matched = CheckReplDBHash._check_all_db_hashes(primary_conn,
secondary_conn,
sb)
if not all_matched:
sb.insert(0,
"One or more databases were different between the primary on port %d"
" and the secondary on port %d:"
% (primary.port, secondary.port))
success = all_matched and success
if not success:
# Adding failures to a TestReport requires traceback information, so we raise
# a 'self.test_case.failureException' that we will catch ourselves.
self.test_case.logger.info("\n ".join(sb))
raise self.test_case.failureException("The dbhashes did not match")
except self.test_case.failureException as err:
self.test_case.logger.exception("The dbhashes did not match.")
self.test_case.return_code = 1
test_report.addFailure(self.test_case, sys.exc_info())
test_report.stopTest(self.test_case)
raise errors.ServerFailure(err.args[0])
except pymongo.errors.WTimeoutError:
self.test_case.logger.exception("Awaiting replication timed out.")
self.test_case.return_code = 2
test_report.addError(self.test_case, sys.exc_info())
test_report.stopTest(self.test_case)
raise errors.StopExecution("Awaiting replication timed out")
def after_suite(self, test_report):
"""
If we get to this point, the #dbhash# test must have been
successful, so add it to the test report.
"""
if self.started:
self.test_case.logger.info("The dbhashes matched for all tests.")
self.test_case.return_code = 0
test_report.addSuccess(self.test_case)
# TestReport.stopTest() has already been called if there was a failure.
test_report.stopTest(self.test_case)
self.started = False
@staticmethod
def _check_all_db_hashes(primary_conn, secondary_conn, sb):
"""
Returns true if for each non-local database, the dbhash command
returns the same MD5 hash on the primary as it does on the
secondary. Returns false otherwise.
Logs a message describing the differences if any database's
dbhash did not match.
"""
# Overview of how we'll check that everything replicated correctly between these two nodes:
#
# - Check whether they have the same databases.
# - If not, log which databases are missing where, and dump the contents of any that are
# missing.
#
# - Check whether each database besides "local" gives the same md5 field as the result of
# running the dbhash command.
# - If not, check whether they have the same collections.
# - If not, log which collections are missing where, and dump the contents of any
# that are missing.
# - If so, check that the hash of each non-capped collection matches.
# - If any do not match, log the diff of the collection between the two nodes.
success = True
if not CheckReplDBHash._check_dbs_present(primary_conn, secondary_conn, sb):
return False
for db_name in primary_conn.database_names():
if db_name == "local":
continue # We don't expect this to match across different nodes.
matched = CheckReplDBHash._check_db_hash(primary_conn, secondary_conn, db_name, sb)
success = matched and success
return success
@staticmethod
def _check_dbs_present(primary_conn, secondary_conn, sb):
"""
Returns true if the list of databases on the primary is
identical to the list of databases on the secondary, and false
otherwise.
"""
success = True
primary_dbs = primary_conn.database_names()
# Can't run database_names() on secondary, so instead use the listDatabases command.
# TODO: Use database_names() once PYTHON-921 is resolved.
list_db_output = secondary_conn.admin.command("listDatabases")
secondary_dbs = [db["name"] for db in list_db_output["databases"]]
# There may be a difference in databases which is not considered an error, when
# the database only contains system collections. This difference is only logged
# when others are encountered, i.e., success = False.
missing_on_primary, missing_on_secondary = CheckReplDBHash._check_difference(
set(primary_dbs), set(secondary_dbs), "database")
for missing_db in missing_on_secondary:
db = primary_conn[missing_db]
coll_names = db.collection_names()
non_system_colls = [name for name in coll_names if not name.startswith("system.")]
# It is only an error if there are any non-system collections in the database,
# otherwise it's not well defined whether they should exist or not.
if non_system_colls:
sb.append("Database %s present on primary but not on secondary." % (missing_db))
CheckReplDBHash._dump_all_collections(db, non_system_colls, sb)
success = False
for missing_db in missing_on_primary:
db = secondary_conn[missing_db]
# Can't run collection_names() on secondary, so instead use the listCollections command.
# TODO: Always use collection_names() once PYTHON-921 is resolved. Then much of the
# logic that is duplicated here can be consolidated.
list_coll_output = db.command("listCollections")["cursor"]["firstBatch"]
coll_names = [coll["name"] for coll in list_coll_output]
non_system_colls = [name for name in coll_names if not name.startswith("system.")]
# It is only an error if there are any non-system collections in the database,
# otherwise it's not well defined if it should exist or not.
if non_system_colls:
sb.append("Database %s present on secondary but not on primary." % (missing_db))
CheckReplDBHash._dump_all_collections(db, non_system_colls, sb)
success = False
return success
@staticmethod
def _check_db_hash(primary_conn, secondary_conn, db_name, sb):
"""
Returns true if the dbhash for 'db_name' matches on the primary
and the secondary, and false otherwise.
Appends a message to 'sb' describing the differences if the
dbhashes do not match.
"""
primary_hash = primary_conn[db_name].command("dbhash")
secondary_hash = secondary_conn[db_name].command("dbhash")
if primary_hash["md5"] == secondary_hash["md5"]:
return True
success = CheckReplDBHash._check_dbs_eq(
primary_conn, secondary_conn, primary_hash, secondary_hash, db_name, sb)
if not success:
sb.append("Database %s has a different hash on the primary and the secondary"
" ([ %s ] != [ %s ]):"
% (db_name, primary_hash["md5"], secondary_hash["md5"]))
return success
@staticmethod
def _check_dbs_eq(primary_conn, secondary_conn, primary_hash, secondary_hash, db_name, sb):
"""
Returns true if all non-capped collections had the same hash in
the dbhash response, and false otherwise.
Appends information to 'sb' about the differences between the
'db_name' database on the primary and the 'db_name' database on
the secondary, if any.
"""
success = True
primary_db = primary_conn[db_name]
secondary_db = secondary_conn[db_name]
primary_coll_hashes = primary_hash["collections"]
secondary_coll_hashes = secondary_hash["collections"]
primary_coll_names = set(primary_coll_hashes.keys())
secondary_coll_names = set(secondary_coll_hashes.keys())
missing_on_primary, missing_on_secondary = CheckReplDBHash._check_difference(
primary_coll_names, secondary_coll_names, "collection", sb=sb)
if missing_on_primary or missing_on_secondary:
# 'sb' already describes which collections are missing where.
for coll_name in missing_on_primary:
CheckReplDBHash._dump_all_documents(secondary_db, coll_name, sb)
for coll_name in missing_on_secondary:
CheckReplDBHash._dump_all_documents(primary_db, coll_name, sb)
            return False
for coll_name in primary_coll_names & secondary_coll_names:
primary_coll_hash = primary_coll_hashes[coll_name]
secondary_coll_hash = secondary_coll_hashes[coll_name]
if primary_coll_hash == secondary_coll_hash:
continue
# Ignore capped collections because they are not expected to match on all nodes.
if primary_db.command({"collStats": coll_name})["capped"]:
# Still fail if the collection is not capped on the secondary.
if not secondary_db.command({"collStats": coll_name})["capped"]:
success = False
sb.append("%s.%s collection is capped on primary but not on secondary."
% (primary_db.name, coll_name))
sb.append("%s.%s collection is capped, ignoring." % (primary_db.name, coll_name))
continue
# Still fail if the collection is capped on the secondary, but not on the primary.
elif secondary_db.command({"collStats": coll_name})["capped"]:
success = False
sb.append("%s.%s collection is capped on secondary but not on primary."
% (primary_db.name, coll_name))
continue
success = False
sb.append("Collection %s.%s has a different hash on the primary and the secondary"
" ([ %s ] != [ %s ]):"
% (db_name, coll_name, primary_coll_hash, secondary_coll_hash))
CheckReplDBHash._check_colls_eq(primary_db, secondary_db, coll_name, sb)
if success:
sb.append("All collections that were expected to match did.")
return success
@staticmethod
def _check_colls_eq(primary_db, secondary_db, coll_name, sb):
"""
        Appends information to 'sb' about the differences, if any, between
        the 'coll_name' collection on the primary and the 'coll_name'
        collection on the secondary.
"""
codec_options = bson.CodecOptions(document_class=TypeSensitiveSON)
primary_coll = primary_db.get_collection(coll_name, codec_options=codec_options)
secondary_coll = secondary_db.get_collection(coll_name, codec_options=codec_options)
primary_docs = CheckReplDBHash._extract_documents(primary_coll)
secondary_docs = CheckReplDBHash._extract_documents(secondary_coll)
CheckReplDBHash._get_collection_diff(primary_docs, secondary_docs, sb)
@staticmethod
def _extract_documents(collection):
"""
Returns a list of all documents in the collection, sorted by
their _id.
"""
return [doc for doc in collection.find().sort("_id", pymongo.ASCENDING)]
@staticmethod
def _get_collection_diff(primary_docs, secondary_docs, sb):
"""
        Compares the documents in 'primary_docs' against the documents in
        'secondary_docs', assuming both lists are sorted by _id.
        Appends information to 'sb' about what matched or did not match.
"""
matched = True
# These need to be lists instead of sets because documents aren't hashable.
missing_on_primary = []
missing_on_secondary = []
p_idx = 0 # Keep track of our position in 'primary_docs'.
s_idx = 0 # Keep track of our position in 'secondary_docs'.
while p_idx < len(primary_docs) and s_idx < len(secondary_docs):
primary_doc = primary_docs[p_idx]
secondary_doc = secondary_docs[s_idx]
if primary_doc == secondary_doc:
p_idx += 1
s_idx += 1
continue
# We have mismatching documents.
matched = False
if primary_doc["_id"] == secondary_doc["_id"]:
sb.append("Mismatching document:")
sb.append(" primary: %s" % (primary_doc))
sb.append(" secondary: %s" % (secondary_doc))
p_idx += 1
s_idx += 1
# One node was missing a document. Since the documents are sorted by _id, the doc with
# the smaller _id was the one that was skipped.
elif primary_doc["_id"] < secondary_doc["_id"]:
missing_on_secondary.append(primary_doc)
# Only move past the doc that we know was skipped.
p_idx += 1
else: # primary_doc["_id"] > secondary_doc["_id"]
missing_on_primary.append(secondary_doc)
# Only move past the doc that we know was skipped.
s_idx += 1
# Check if there are any unmatched documents left.
while p_idx < len(primary_docs):
matched = False
missing_on_secondary.append(primary_docs[p_idx])
p_idx += 1
while s_idx < len(secondary_docs):
matched = False
missing_on_primary.append(secondary_docs[s_idx])
s_idx += 1
if not matched:
CheckReplDBHash._append_differences(
missing_on_primary, missing_on_secondary, "document", sb)
else:
sb.append("All documents matched.")
@staticmethod
def _check_difference(primary_set, secondary_set, item_type_name, sb=None):
"""
        Returns a (missing_on_primary, missing_on_secondary) tuple of the
        items that are absent from 'primary_set' and 'secondary_set',
        respectively. The sets contain information about the primary and
        secondary, respectively, e.g. the database names that exist on each
        node.
        If 'sb' is provided, appends information about anything that differed.
"""
missing_on_primary = set()
missing_on_secondary = set()
for item in primary_set - secondary_set:
missing_on_secondary.add(item)
for item in secondary_set - primary_set:
missing_on_primary.add(item)
if sb is not None:
CheckReplDBHash._append_differences(
missing_on_primary, missing_on_secondary, item_type_name, sb)
return (missing_on_primary, missing_on_secondary)
@staticmethod
def _append_differences(missing_on_primary, missing_on_secondary, item_type_name, sb):
"""
Given two iterables representing items that were missing on the
primary or the secondary respectively, append the information
about which items were missing to 'sb', if any.
"""
if missing_on_primary:
sb.append("The following %ss were present on the secondary, but not on the"
" primary:" % (item_type_name))
for item in missing_on_primary:
sb.append(str(item))
if missing_on_secondary:
sb.append("The following %ss were present on the primary, but not on the"
" secondary:" % (item_type_name))
for item in missing_on_secondary:
sb.append(str(item))
@staticmethod
def _dump_all_collections(database, coll_names, sb):
"""
Appends the contents of each of the collections in 'coll_names'
to 'sb'.
"""
if coll_names:
sb.append("Database %s contains the following collections: %s"
% (database.name, coll_names))
for coll_name in coll_names:
CheckReplDBHash._dump_all_documents(database, coll_name, sb)
else:
sb.append("No collections in database %s." % (database.name))
@staticmethod
def _dump_all_documents(database, coll_name, sb):
"""
Appends the contents of 'coll_name' to 'sb'.
"""
docs = CheckReplDBHash._extract_documents(database[coll_name])
if docs:
sb.append("Documents in %s.%s:" % (database.name, coll_name))
for doc in docs:
sb.append(" %s" % (doc))
else:
sb.append("No documents in %s.%s." % (database.name, coll_name))
class TypeSensitiveSON(bson.SON):
"""
Extends bson.SON to perform additional type-checking of document values
to differentiate BSON types.
"""
def items_with_types(self):
"""
Returns a list of triples. Each triple consists of a field name, a
field value, and a field type for each field in the document.
"""
return [(key, self[key], type(self[key])) for key in self]
def __eq__(self, other):
"""
Comparison to another TypeSensitiveSON is order-sensitive and
type-sensitive while comparison to a regular dictionary ignores order
and type mismatches.
"""
if isinstance(other, TypeSensitiveSON):
return (len(self) == len(other) and
self.items_with_types() == other.items_with_types())
raise TypeError("TypeSensitiveSON objects cannot be compared to other types")
class ValidateCollections(CustomBehavior):
"""
    Runs full validation (db.collection.validate(true)) on all collections
    in all databases on every standalone or primary mongod. If validation
    fails (i.e. validate.valid is false), then the validate return object is
    logged.
    Compatible with all fixture subclasses.
"""
DEFAULT_FULL = True
DEFAULT_SCANDATA = True
def __init__(self, logger, fixture, full=DEFAULT_FULL, scandata=DEFAULT_SCANDATA):
CustomBehavior.__init__(self, logger, fixture)
if not isinstance(full, bool):
raise TypeError("Fixture option full is not specified as type bool")
if not isinstance(scandata, bool):
raise TypeError("Fixture option scandata is not specified as type bool")
self.test_case = testcases.TestCase(self.logger, "Hook", "#validate#")
self.started = False
self.full = full
self.scandata = scandata
def after_test(self, test_report):
"""
After each test, run a full validation on all collections.
"""
try:
if not self.started:
CustomBehavior.start_dynamic_test(self.test_case, test_report)
self.started = True
sb = [] # String builder.
# The self.fixture.port can be used for client connection to a
# standalone mongod, a replica-set primary, or mongos.
# TODO: Run collection validation on all nodes in a replica-set.
port = self.fixture.port
conn = utils.new_mongo_client(port=port)
success = ValidateCollections._check_all_collections(
conn, sb, self.full, self.scandata)
if not success:
# Adding failures to a TestReport requires traceback information, so we raise
# a 'self.test_case.failureException' that we will catch ourselves.
self.test_case.logger.info("\n ".join(sb))
raise self.test_case.failureException("Collection validation failed")
except self.test_case.failureException as err:
self.test_case.logger.exception("Collection validation failed")
self.test_case.return_code = 1
test_report.addFailure(self.test_case, sys.exc_info())
test_report.stopTest(self.test_case)
raise errors.ServerFailure(err.args[0])
def after_suite(self, test_report):
"""
If we get to this point, the #validate# test must have been
successful, so add it to the test report.
"""
if self.started:
self.test_case.logger.info("Collection validation passed for all tests.")
self.test_case.return_code = 0
test_report.addSuccess(self.test_case)
# TestReport.stopTest() has already been called if there was a failure.
test_report.stopTest(self.test_case)
self.started = False
@staticmethod
def _check_all_collections(conn, sb, full, scandata):
"""
Returns true if for all databases and collections validate_collection
succeeds. Returns false otherwise.
Logs a message if any database's collection fails validate_collection.
"""
success = True
for db_name in conn.database_names():
for coll_name in conn[db_name].collection_names():
try:
conn[db_name].validate_collection(coll_name, full=full, scandata=scandata)
except pymongo.errors.CollectionInvalid as err:
sb.append("Database %s, collection %s failed to validate:\n%s"
% (db_name, coll_name, err.args[0]))
success = False
return success
_CUSTOM_BEHAVIORS = {
"CleanEveryN": CleanEveryN,
"CheckReplDBHash": CheckReplDBHash,
"ValidateCollections": ValidateCollections,
}
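# Minimal usage sketch (assumption; the real wiring lives elsewhere in resmoke):
# the test runner looks a hook up by name in _CUSTOM_BEHAVIORS and drives it
# around each test, roughly:
#
#   behavior = _CUSTOM_BEHAVIORS["CleanEveryN"](logger, fixture, n=10)
#   behavior.after_test(test_report)   # after every test
#   behavior.after_suite(test_report)  # once at the end of the suite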
|
configs/mmdet/detection/detection_openvino_dynamic-800x1344.py | zhiqwang/mmdeploy | 746 | 12742646 | _base_ = ['../_base_/base_openvino_dynamic-800x1344.py']
|
test_elasticsearch/test_client/__init__.py | jkbak/elasticsearch-py | 3,353 | 12742669 | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from elasticsearch import Elasticsearch
from ..test_cases import DummyTransportTestCase
class TestClient(DummyTransportTestCase):
def test_request_timeout_is_passed_through_unescaped(self):
self.client.ping(request_timeout=0.1)
calls = self.assert_url_called("HEAD", "/")
assert [({"request_timeout": 0.1}, {}, None)] == calls
def test_params_is_copied_when(self):
rt = object()
params = dict(request_timeout=rt)
self.client.ping(params=params)
self.client.ping(params=params)
calls = self.assert_url_called("HEAD", "/", 2)
assert [
({"request_timeout": rt}, {}, None),
({"request_timeout": rt}, {}, None),
] == calls
assert not (calls[0][0] is calls[1][0])
def test_headers_is_copied_when(self):
hv = "value"
headers = dict(Authentication=hv)
self.client.ping(headers=headers)
self.client.ping(headers=headers)
calls = self.assert_url_called("HEAD", "/", 2)
assert [
({}, {"authentication": hv}, None),
({}, {"authentication": hv}, None),
] == calls
assert not (calls[0][0] is calls[1][0])
def test_from_in_search(self):
self.client.search(index="i", from_=10)
calls = self.assert_url_called("POST", "/i/_search")
assert [({"from": "10"}, {}, None)] == calls
def test_repr_contains_hosts(self):
assert "<Elasticsearch([{}])>" == repr(self.client)
def test_repr_subclass(self):
class OtherElasticsearch(Elasticsearch):
pass
assert "<OtherElasticsearch([{}])>" == repr(OtherElasticsearch())
def test_repr_contains_hosts_passed_in(self):
assert "es.org" in repr(Elasticsearch(["es.org:123"]))
def test_repr_truncates_host_to_5(self):
hosts = [{"host": "es" + str(i)} for i in range(10)]
es = Elasticsearch(hosts)
assert "es5" not in repr(es)
assert "..." in repr(es)
def test_index_uses_post_if_id_is_empty(self):
self.client.index(index="my-index", id="", body={})
self.assert_url_called("POST", "/my-index/_doc")
def test_index_uses_put_if_id_is_not_empty(self):
self.client.index(index="my-index", id=0, body={})
self.assert_url_called("PUT", "/my-index/_doc/0")
|
backend/api/actions/compose.py | wickedyoda/Yacht | 1,452 | 12742690 | <gh_stars>1000+
from fastapi import HTTPException
from fastapi.responses import StreamingResponse
from sh import docker_compose
import os
import yaml
import pathlib
import shutil
import docker
import io
import zipfile
from api.settings import Settings
from api.utils.compose import find_yml_files
settings = Settings()
"""
Runs an action on the specified compose project.
"""
def compose_action(name, action):
files = find_yml_files(settings.COMPOSE_DIR)
compose = get_compose(name)
env = os.environ.copy()
if action == "up":
try:
_action = docker_compose(
action,
"-d",
_cwd=os.path.dirname(compose["path"]),
_env=check_dockerhost(env),
)
except Exception as exc:
if hasattr(exc, "stderr"):
raise HTTPException(400, exc.stderr.decode("UTF-8").rstrip())
else:
raise HTTPException(400, exc)
elif action == "create":
try:
_action = docker_compose(
"up",
"--no-start",
_cwd=os.path.dirname(compose["path"]),
_env=check_dockerhost(env),
)
except Exception as exc:
if hasattr(exc, "stderr"):
raise HTTPException(400, exc.stderr.decode("UTF-8").rstrip())
else:
raise HTTPException(400, exc)
else:
try:
_action = docker_compose(
action,
_cwd=os.path.dirname(compose["path"]),
_env=check_dockerhost(env),
)
except Exception as exc:
if hasattr(exc, "stderr"):
raise HTTPException(400, exc.stderr.decode("UTF-8").rstrip())
else:
raise HTTPException(400, exc)
if _action.stdout.decode("UTF-8").rstrip():
_output = _action.stdout.decode("UTF-8").rstrip()
elif _action.stderr.decode("UTF-8").rstrip():
_output = _action.stderr.decode("UTF-8").rstrip()
else:
_output = "No Output"
print(f"""Project {compose['name']} {action} successful.""")
print(f"""Output: """)
print(_output)
return get_compose_projects()
"""
Used to include DOCKER_HOST in the shell env when someone brings up a
compose project; otherwise it returns a placeholder var that simply
clears the shell env.
"""
def check_dockerhost(environment):
if environment.get("DOCKER_HOST"):
return {"DOCKER_HOST": environment["DOCKER_HOST"]}
else:
return {"clear_env": "true"}
"""
Used to run docker-compose commands on specific
apps in compose projects.
"""
def compose_app_action(
name,
action,
app,
):
files = find_yml_files(settings.COMPOSE_DIR)
compose = get_compose(name)
env = os.environ.copy()
print("RUNNING: " + compose["path"] + " docker-compose " + " " + action + " " + app)
if action == "up":
try:
_action = docker_compose(
"up",
"-d",
app,
_cwd=os.path.dirname(compose["path"]),
_env=check_dockerhost(env),
)
except Exception as exc:
if hasattr(exc, "stderr"):
raise HTTPException(400, exc.stderr.decode("UTF-8").rstrip())
else:
raise HTTPException(400, exc)
elif action == "create":
try:
_action = docker_compose(
"up",
"--no-start",
app,
_cwd=os.path.dirname(compose["path"]),
_env=check_dockerhost(env),
)
except Exception as exc:
if hasattr(exc, "stderr"):
raise HTTPException(400, exc.stderr.decode("UTF-8").rstrip())
else:
raise HTTPException(400, exc)
elif action == "rm":
try:
_action = docker_compose(
"rm",
"--force",
"--stop",
app,
_cwd=os.path.dirname(compose["path"]),
_env=check_dockerhost(env),
)
except Exception as exc:
if hasattr(exc, "stderr"):
raise HTTPException(400, exc.stderr.decode("UTF-8").rstrip())
else:
raise HTTPException(400, exc)
else:
try:
_action = docker_compose(
action,
app,
_cwd=os.path.dirname(compose["path"]),
_env=check_dockerhost(env),
)
except Exception as exc:
if hasattr(exc, "stderr"):
raise HTTPException(400, exc.stderr.decode("UTF-8").rstrip())
else:
raise HTTPException(400, exc)
if _action.stdout.decode("UTF-8").rstrip():
output = _action.stdout.decode("UTF-8").rstrip()
elif _action.stderr.decode("UTF-8").rstrip():
output = _action.stderr.decode("UTF-8").rstrip()
else:
output = "No Output"
print(f"""Project {compose['name']} App {name} {action} successful.""")
print(f"""Output: """)
print(output)
return get_compose_projects()
"""
Checks for compose projects in the COMPOSE_DIR and
returns most of the info inside them.
"""
def get_compose_projects():
files = find_yml_files(settings.COMPOSE_DIR)
projects = []
for project, file in files.items():
volumes = []
networks = []
services = {}
compose = open(file)
loaded_compose = yaml.load(compose, Loader=yaml.SafeLoader)
if loaded_compose:
if loaded_compose.get("volumes"):
for volume in loaded_compose.get("volumes"):
volumes.append(volume)
if loaded_compose.get("networks"):
for network in loaded_compose.get("networks"):
networks.append(network)
if loaded_compose.get("services"):
for service in loaded_compose.get("services"):
services[service] = loaded_compose["services"][service]
_project = {
"name": project,
"path": file,
"version": loaded_compose.get("version", "3.9"),
"services": services,
"volumes": volumes,
"networks": networks,
}
projects.append(_project)
else:
print("ERROR: " + file + " is invalid or empty!")
return projects
"""
Returns detailed information on a specific compose
project.
"""
def get_compose(name):
try:
files = find_yml_files(settings.COMPOSE_DIR + name)
except Exception as exc:
raise HTTPException(exc.status_code, exc.detail)
for project, file in files.items():
if name == project:
networks = []
volumes = []
services = {}
compose = open(file)
try:
loaded_compose = yaml.load(compose, Loader=yaml.SafeLoader)
except yaml.scanner.ScannerError as exc:
raise HTTPException(422, f"{exc.problem_mark.line}:{exc.problem_mark.column} - {exc.problem}")
if loaded_compose.get("volumes"):
for volume in loaded_compose.get("volumes"):
volumes.append(volume)
if loaded_compose.get("networks"):
for network in loaded_compose.get("networks"):
networks.append(network)
if loaded_compose.get("services"):
for service in loaded_compose.get("services"):
services[service] = loaded_compose["services"][service]
_content = open(file)
content = _content.read()
compose_object = {
"name": project,
"path": file,
"version": loaded_compose.get("version", "-"),
"services": services,
"volumes": volumes,
"networks": networks,
"content": content,
}
return compose_object
else:
raise HTTPException(404, "Project " + name + " not found")
"""
Creates a compose directory (if one isn't there
already) with the name of the project. Then writes
the content of compose.content to it.
"""
def write_compose(compose):
if not os.path.exists(settings.COMPOSE_DIR + compose.name):
try:
pathlib.Path(settings.COMPOSE_DIR + compose.name).mkdir(parents=True)
except Exception as exc:
raise HTTPException(exc.status_code, exc.detail)
with open(settings.COMPOSE_DIR + compose.name + "/docker-compose.yml", "w") as f:
try:
f.write(compose.content)
f.close()
except TypeError as exc:
if exc.args[0] == "write() argument must be str, not None":
raise HTTPException(
status_code=422, detail="Compose file cannot be empty."
)
except Exception as exc:
raise HTTPException(exc.status_code, exc.detail)
return get_compose(name=compose.name)
"""
Deletes a compose project after checking to see if
it exists. This also deletes all files in the folder.
"""
def delete_compose(project_name):
if not os.path.exists("/" + settings.COMPOSE_DIR + project_name):
raise HTTPException(404, "Project directory not found.")
elif not os.path.exists(
"/" + settings.COMPOSE_DIR + project_name + "/docker-compose.yml"
):
raise HTTPException(404, "Project docker-compose.yml not found.")
else:
try:
with open(
"/" + settings.COMPOSE_DIR + project_name + "/docker-compose.yml"
):
pass
except OSError as exc:
raise HTTPException(400, exc.strerror)
try:
shutil.rmtree("/" + settings.COMPOSE_DIR + project_name)
except Exception as exc:
raise HTTPException(exc.status_code, exc.strerror)
return get_compose_projects()
def generate_support_bundle(project_name):
files = find_yml_files(settings.COMPOSE_DIR + project_name)
if project_name in files:
dclient = docker.from_env()
stream = io.BytesIO()
with zipfile.ZipFile(stream, "w") as zf, open(files[project_name], "r") as fp:
compose = yaml.load(fp, Loader=yaml.SafeLoader)
# print(compose)
# print(compose.get("services"))
for _service in compose.get("services"):
print()
if len(compose.get("services").keys()) < 2:
try:
if compose.get("services")[_service].get("container_name"):
service = dclient.containers.get(
compose.get("services")[_service].get("container_name")
)
else:
service = dclient.containers.get(_service)
except docker.errors.NotFound as exc:
raise HTTPException(
exc.status_code,
detail="container " + _service + " not found",
)
else:
try:
if compose.get("services")[_service].get("container_name"):
service = dclient.containers.get(
compose.get("services")[_service].get("container_name")
)
else:
service = dclient.containers.get(
project_name.lower() + "_" + _service + "_1"
)
except docker.errors.NotFound as exc:
raise HTTPException(
exc.status_code,
detail="container " + _service + " not found",
)
service_log = service.logs()
zf.writestr(f"{_service}.log", service_log)
fp.seek(0)
# It is possible that ".write(...)" has better memory management here.
zf.writestr("docker-compose.yml", fp.read())
stream.seek(0)
return StreamingResponse(
stream,
media_type="application/x-zip-compressed",
headers={
"Content-Disposition": f"attachment;filename={project_name}_bundle.zip"
},
)
else:
raise HTTPException(404, f"Project {project_name} not found.")
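# Wiring sketch (assumption; the actual router lives in a separate module and
# its paths may differ): the API layer is expected to call these helpers from
# FastAPI endpoints, e.g.
#
#   @router.get("/{name}/{action}")
#   def project_action(name: str, action: str):
#       return compose_action(name, action)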
|
grr/client/grr_response_client/client_actions/network_test.py | khanhgithead/grr | 4,238 | 12742719 | #!/usr/bin/env python
"""Tests the Netstat client action."""
from absl import app
from grr_response_client.client_actions import network
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
from grr.test_lib import client_test_lib
from grr.test_lib import test_lib
class NetstatActionTest(client_test_lib.EmptyActionTest):
"""Tests the Netstat client action."""
def testListNetworkConnections(self):
result = self.RunAction(
network.ListNetworkConnections,
arg=rdf_client_action.ListNetworkConnectionsArgs())
for r in result:
self.assertTrue(r.process_name)
self.assertTrue(r.local_address)
def testListNetworkConnectionsFilter(self):
result = self.RunAction(
network.ListNetworkConnections,
arg=rdf_client_action.ListNetworkConnectionsArgs(listening_only=True))
for r in result:
self.assertTrue(r.process_name)
self.assertTrue(r.local_address)
self.assertEqual(r.state, "LISTEN")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
gammapy/data/tests/test_observers.py | Rishank2610/gammapy | 155 | 12742733 | <reponame>Rishank2610/gammapy
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose
from astropy.coordinates import Angle
from gammapy.data import observatory_locations
def test_observatory_locations():
location = observatory_locations["hess"]
assert_allclose(location.lon.deg, Angle("16d30m00.8s").deg)
assert_allclose(location.lat.deg, Angle("-23d16m18.4s").deg)
assert_allclose(location.height.value, 1835)
assert str(location.height.unit) == "m"
|
autoregressive_diffusion/experiments/language/architectures/arm.py | xxdreck/google-research | 23,901 | 12742735 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains training and sampling functions an autoregressive model."""
import functools
from typing import Any, Callable
from absl import logging
from flax import linen as nn
from flax import struct
from flax.training import common_utils
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
from autoregressive_diffusion.model import distributions
from autoregressive_diffusion.model.autoregressive_diffusion import ardm_utils
from autoregressive_diffusion.utils import util_fns
def cross_entropy(logits, targets):
"""Compute weighted cross entropy and entropy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
vocab_size = logits.shape[-1]
onehot_targets = common_utils.onehot(targets, vocab_size)
loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1)
d = np.prod(targets.shape[1:])
loss = util_fns.sum_except_batch(loss) / d / np.log(2)
return loss
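# Shape sketch (comment only, not original code): for logits of shape
# [batch=2, length=5, num_classes=256] and integer targets of shape [2, 5],
# cross_entropy returns a [2]-shaped array of per-example losses, i.e. the
# summed negative log-likelihood divided by d=5 and by log(2) to give bits
# per dimension.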
def compute_accuracy(logits, targets):
"""Compute weighted accuracy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.shape[:-1] != targets.shape[:-1]:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
logits = logits[:, :, None, :] # Insert empty channel axis.
d = float(np.prod(logits.shape[1:-1]))
acc = jnp.equal(jnp.argmax(logits, axis=-1), targets) / d
acc = util_fns.sum_except_batch(acc)
return acc
class ARM(struct.PyTreeNode):
"""Static model object that wraps important model functions."""
config: ml_collections.config_dict.config_dict.ConfigDict
apply_fn: Callable[Ellipsis, Any]
logprob_fn: Callable[Ellipsis, Any]
sample_fn: Callable[Ellipsis, Any]
neural_net: Any
num_steps: int
policy_support: bool = False
num_stages: int = 1
absorbing_state: int = 0
random_order: bool = False
def log_px(self, rng, params, x, train, context=None):
batch_size = x.shape[0]
if self.random_order:
logging.info('Log-likelihood for a random-order ARM XLNet style.')
rng, rng_perm = jax.random.split(rng)
permutations = ardm_utils.get_batch_permutations(rng_perm, batch_size,
self.num_steps)
else:
logging.info('Log-likelihood for a standard ARM.')
permutations = None
net_out = self.apply_fn(
{'params': params}, x, t=None, mask=None, train=train, context=context,
permutations=permutations,
rngs={'dropout': rng} if train else None)
d = float(np.prod(net_out.shape[1:-1]))
log_px_elementwise = util_fns.sum_except_batch(self.logprob_fn(x, net_out))
log_px = log_px_elementwise / d / np.log(2)
neg_acc = -compute_accuracy(logits=net_out, targets=x)
t_batch_dummy = jnp.zeros((batch_size,), dtype=jnp.int32)
loss_components_dummy = jnp.zeros((batch_size,))
return log_px, loss_components_dummy, neg_acc, t_batch_dummy
def elbo(self, rng, params, x, train, context=None):
return self.log_px(rng, params, x, train, context)
def sample(self, rng, params, batch_size, context=None):
chain_sharded = self.p_sample(rng, params, batch_size, context)
chain = chain_sharded.reshape(
chain_sharded.shape[0], batch_size, *chain_sharded.shape[3:])
return chain
@functools.partial(jax.pmap, in_axes=(None, None, 0, None, 0),
out_axes=1,
static_broadcasted_argnums=(0, 3), axis_name='batch')
def p_sample(self, rng, params, batch_size, context):
"""Samples from the model, calls sample_step for every timestep."""
rng = jax.random.fold_in(rng, jax.lax.axis_index('batch'))
assert batch_size % jax.local_device_count() == 0
per_device_batch_size = batch_size // jax.local_device_count()
logging.info('Sampling from model, hope you are patient...')
if self.random_order:
rng, rng_perm = jax.random.split(rng)
orders = ardm_utils.get_batch_permutations(rng_perm,
per_device_batch_size,
self.num_steps)
else:
orders = jnp.arange(0, self.num_steps)[None, :]
orders = jnp.repeat(orders, repeats=per_device_batch_size, axis=0)
chain = []
x = jnp.full((per_device_batch_size, *self.config.data_shape),
fill_value=self.absorbing_state,
dtype=jnp.int32)
chain.append(x)
def next_sample_step(x, t):
x = self.sample_step(
jax.random.fold_in(rng, t), x,
t, orders, params, context)
return x, x
ts = jnp.arange(self.num_steps)
_, chain = jax.lax.scan(next_sample_step, init=x, xs=ts)
return chain
def get_naive_policy(self, budget=250):
assert budget <= self.num_steps
# We use budget+1 because a linspace contains the last step.
naive_policy = ardm_utils.integer_linspace(0, self.num_steps, budget+1)
# Last index does not need to be in policy.
naive_policy = naive_policy[:-1]
return naive_policy
def sample_with_naive_policy(self,
rng,
params,
batch_size,
budget=250):
logging.info('Sampling with naive policy.')
naive_policy = self.get_naive_policy(budget)
return self.sample_with_policy(rng, params, batch_size, naive_policy)
def sample_with_policy(self, rng, params, batch_size, policy):
"""Wrapper for p_sample_with_policy that takes care of unsharding."""
logging.info('Sampling from model (quickly)...')
chain_sharded = self.p_sample_with_policy(rng, params, batch_size, policy)
chain = chain_sharded.reshape(
chain_sharded.shape[0], batch_size, *chain_sharded.shape[3:])
return chain
@functools.partial(jax.pmap, in_axes=(None, None, 0, None, None),
out_axes=1,
static_broadcasted_argnums=(0, 3), axis_name='batch')
def p_sample_with_policy(self, rng, params, batch_size, policy):
"""Samples from the model, calls sample_step for every policy step."""
rng = jax.random.fold_in(rng, jax.lax.axis_index('batch'))
assert batch_size % jax.local_device_count() == 0
per_device_batch_size = batch_size // jax.local_device_count()
rng, rng_perm = jax.random.split(rng)
sigmas = ardm_utils.get_batch_permutations(rng_perm, per_device_batch_size,
self.num_steps)
policy_extended = jnp.concatenate(
[policy, jnp.array([self.num_steps], dtype=jnp.int32)], axis=0)
x = jnp.full((per_device_batch_size, *self.config.data_shape),
fill_value=self.absorbing_state,
dtype=jnp.int32)
def next_sample_step(x, idx):
left_t = policy_extended[idx]
right_t = policy_extended[idx + 1]
x = self.sample_step_with_policy(
jax.random.fold_in(rng, idx), x, left_t, right_t, sigmas, params)
return x, x
x, chain = jax.lax.scan(next_sample_step, x, jnp.arange(len(policy)))
return chain
def sample_step_with_policy(self, rng, x, left_t, right_t, sigmas, params):
"""Sampling code for a single step starting at left_t until right_t."""
batch_size = x.shape[0]
left_t = jnp.full(batch_size, fill_value=left_t)
right_t = jnp.full(batch_size, fill_value=right_t)
prev_selection, current_selection = ardm_utils.get_selections_for_sigma_and_range(
sigmas, left_t, right_t, self.config.data_shape)
params_px = self.apply_fn(
{'params': params},
x, left_t, prev_selection, train=False)
new_x = self.sample_fn(rng, params_px)
x = (1 - current_selection) * x + current_selection * new_x
x = jnp.asarray(x, jnp.int32)
return x
def sample_step(self, rng, x, t, sigmas, params, context):
"""Sampling code for a single step t."""
batch_size = x.shape[0]
t_batch = jnp.full(batch_size, fill_value=t)
prev_selection, current_selection = ardm_utils.get_selection_for_sigma_and_t(
sigmas, t_batch, self.config.data_shape)
if self.random_order:
permutations = sigmas
else:
permutations = None
params_px = self.apply_fn(
{'params': params},
x, t_batch, prev_selection, train=False, context=context,
permutations=permutations)
new_x = self.sample_fn(rng, params_px)
x = (1 - current_selection) * x + current_selection * new_x
x = jnp.asarray(x, jnp.int32)
return x
def init_architecture(self, init_rng, tmp_x, tmp_t, context=None):
tmp_mask = None
if context is None:
return self.neural_net.init(init_rng, tmp_x, tmp_t, tmp_mask, train=False)
else:
return self.neural_net.init(init_rng, tmp_x, tmp_t, tmp_mask,
train=False, context=context)
@classmethod
def create(cls, config, get_architecture, random_order):
"""Creates a new instance with `step=0` and initialized `opt_state`."""
required_num_outputs = config.num_classes
num_steps = int(np.prod(config.data_shape))
# We set num_steps=0 since this disables time conditioning, which is not
# necessary for ARMs.
neural_net = get_architecture(
config.num_classes, required_num_outputs, num_steps=0, is_causal=True)
out_dist = distributions.SoftmaxCategorical(config.data_shape[-1],
config.num_classes)
return cls(
config,
apply_fn=neural_net.apply,
logprob_fn=out_dist.log_prob,
sample_fn=out_dist.sample,
neural_net=neural_net,
num_steps=num_steps,
random_order=random_order
)
|
tests/test_notify.py | noi4eg/maigret | 1,156 | 12742761 | <reponame>noi4eg/maigret
from maigret.errors import CheckError
from maigret.notify import QueryNotifyPrint
from maigret.result import QueryStatus, QueryResult
def test_notify_illegal():
n = QueryNotifyPrint(color=False)
assert (
n.update(
QueryResult(
username="test",
status=QueryStatus.ILLEGAL,
site_name="TEST_SITE",
site_url_user="http://example.com/test",
)
)
== "[-] TEST_SITE: Illegal Username Format For This Site!"
)
def test_notify_claimed():
n = QueryNotifyPrint(color=False)
assert (
n.update(
QueryResult(
username="test",
status=QueryStatus.CLAIMED,
site_name="TEST_SITE",
site_url_user="http://example.com/test",
)
)
== "[+] TEST_SITE: http://example.com/test"
)
def test_notify_available():
n = QueryNotifyPrint(color=False)
assert (
n.update(
QueryResult(
username="test",
status=QueryStatus.AVAILABLE,
site_name="TEST_SITE",
site_url_user="http://example.com/test",
)
)
== "[-] TEST_SITE: Not found!"
)
def test_notify_unknown():
n = QueryNotifyPrint(color=False)
result = QueryResult(
username="test",
status=QueryStatus.UNKNOWN,
site_name="TEST_SITE",
site_url_user="http://example.com/test",
)
result.error = CheckError('Type', 'Reason')
assert n.update(result) == "[?] TEST_SITE: Type error: Reason"
|
mmtbx/command_line/fake_f_obs.py | dperl-sol/cctbx_project | 155 | 12742780 | from __future__ import absolute_import, division, print_function
# LIBTBX_SET_DISPATCHER_NAME phenix.fake_f_obs
from cctbx import adptbx
from cctbx.array_family import flex
import random, math, sys, os
import iotbx.pdb
import mmtbx.utils
from libtbx import easy_run
import mmtbx.dynamics.cartesian_dynamics as cartesian_dynamics
from mmtbx import monomer_library
import mmtbx.monomer_library.pdb_interpretation
import mmtbx.monomer_library.server
from mmtbx.tls import ladp
from mmtbx.utils import run_reduce_with_timeout
import mmtbx.tls.tools
import mmtbx.f_model
import iotbx.phil
import mmtbx.masks
from libtbx.utils import Sorry
from six.moves import range
import mmtbx.model
if(1):
random.seed(0)
flex.set_random_seed(0)
master_params_str="""\
f_obs {
high_resolution = 2.0
.type = float
low_resolution = 15.0
.type = float
scattering_table = wk1995 it1992 *n_gaussian neutron
f_calc {
atomic_model {
ensemble_size = 20
.type = int
add_hydrogens = False
.type = bool
tls {
max_tl = 2
.type = float
min_tl = 0
.type = float
}
apply_cartesian_dynamics = True
.type = bool
regularize_geometry {
rmsd_bonds_target = 0.025
.type = float
rmsd_angles_target = 2.5
.type = float
}
ladp_angle = 3.0
.type = float
switch_rotamers = True
.type = bool
shake_sites_rmsd = 0.01
.type = float
rigid_body_shift {
rotation_angle = 1.0
.type = float
translation_length = 0.1
.type = float
}
stop_cartesian_dynamics_at_diff = 0.5
.type = float
use_ramachandran_plot_restraints = True
.type = bool
output_file_name = fake_model.pdb
.type = str
}
accuracy {
include scope mmtbx.f_model.sf_and_grads_accuracy_master_params
}
}
f_bulk {
k_sol = 0.35
.type = float
b_sol = 50.0
.type = float
mask {
include scope mmtbx.masks.mask_master_params
}
}
overall_scale = 1.0
overall_anisotropic_scale_matrix_b_cart {
max = 10
.type = float
min = 0
.type = float
}
experimental_noise {
add_random_error_to_amplitudes_percent = 5
.type = float
}
output_file_name = fake_f_obs.mtz
.type = str
}
"""
class show(object):
def __init__(self,
xrs,
xrs_start,
grm,
prefix=""):
esg = grm.energies_sites(
sites_cart = xrs.sites_cart(), compute_gradients = False).geometry
self.bond_rmsd = esg.bond_deviations()[2]
self.angle_rmsd = esg.angle_deviations()[2]
self.error = flex.mean(xrs.distances(other = xrs_start))
print(" %s err=%8.3f rmsd: bonds=%6.3f angles=%6.3f"%(prefix, self.error,
self.bond_rmsd, self.angle_rmsd))
def switch_rotamers(xray_structure, pdb_hierarchy):
x = xray_structure.deep_copy_scatterers()
p = pdb_hierarchy.deep_copy()
p.atoms().reset_i_seq()
p = mmtbx.utils.switch_rotamers(
pdb_hierarchy = p,
mode = "min_distant")
x.set_sites_cart(sites_cart = p.atoms().extract_xyz())
return x, p
def set_ladp(xray_structure, pdb_hierarchy, angle):
axes_and_atoms_i_seqs = ladp.get_axes_and_atoms_i_seqs(
pdb_hierarchy = pdb_hierarchy,
mon_lib_srv = monomer_library.server.server())
xray_structure = xray_structure.set_b_iso(value=random.randrange(5,10))
xray_structure.convert_to_isotropic()
xray_structure = ladp.set_ladp(
xray_structure = xray_structure,
axes_and_atoms_i_seqs = axes_and_atoms_i_seqs,
value = angle,
enable_recursion = True,
depth = 0)
return xray_structure
def random_aniso_adp(space_group, unit_cell, u_scale=2, u_min=0):
return adptbx.u_star_as_u_cart(unit_cell, space_group.average_u_star(
u_star = adptbx.u_cart_as_u_star(unit_cell, adptbx.random_u_cart(
u_scale=u_scale, u_min=u_min))))
def apply_tls(xray_structure, params):
uc = xray_structure.unit_cell()
sg = xray_structure.space_group()
selections_1d = flex.bool(xray_structure.scatterers().size(),True)
selections = [selections_1d.iselection()]
T=random_aniso_adp(space_group=sg, unit_cell=uc, u_scale=params.max_tl,
u_min=params.min_tl)
L=random_aniso_adp(space_group=sg, unit_cell=uc, u_scale=params.max_tl,
u_min=params.min_tl)
print(" T: %s"%",".join([("%7.3f"%i).strip() for i in T]))
print(" L: %s"%",".join([("%7.3f"%i).strip() for i in L]))
tlsos = mmtbx.tls.tools.generate_tlsos(
selections = selections,
xray_structure = xray_structure,
T=[T],
L=[L],
S=[[0,0,0,0,0,0,0,0,0]])
u_cart_from_tls = mmtbx.tls.tools.u_cart_from_tls(
sites_cart = xray_structure.sites_cart(),
selections = selections,
tlsos = tlsos)
xray_structure.convert_to_anisotropic()
u_cart = xray_structure.scatterers().extract_u_cart(uc)
utot = u_cart_from_tls+u_cart
xray_structure.set_u_cart(u_cart=utot, selection = selections_1d.iselection())
xray_structure.tidy_us()
return xray_structure
def apply_rigid_body_shift(xray_structure, params):
import scitbx.matrix
mt = flex#.mersenne_twister(seed=0)
rot_axis = scitbx.matrix.col(mt.random_double_point_on_sphere())
rot_matrix = scitbx.math.r3_rotation_axis_and_angle_as_matrix(
axis=rot_axis, angle=params.rotation_angle, deg=True)
run_away_counter = 0
while True:
transl = mt.random_double_point_on_sphere()
transl_no_cont_sh = scitbx.matrix.col(xray_structure.crystal_symmetry()
.subtract_continuous_allowed_origin_shifts(translation_cart=transl))
l = abs(transl_no_cont_sh)
if(l > 0.1):
break
run_away_counter += 1
assert run_away_counter < 100
transl = transl_no_cont_sh * (params.translation_length/l)
sites_cart = xray_structure.sites_cart()
cm = xray_structure.center_of_mass()
ns = rot_matrix * (sites_cart-cm) + transl + cm
xray_structure.set_sites_cart(sites_cart =
rot_matrix * (sites_cart-cm) + transl + cm)
return xray_structure
def simulate_f_obs(root, crystal_symmetry, params):
f_calc_data = None
f_masks_data = []
for i_m, m in enumerate(root.models()):
raw_records = flex.std_string()
raw_records.append(
iotbx.pdb.format_cryst1_record(crystal_symmetry = crystal_symmetry))
for atom in m.atoms():
ra = atom.format_atom_record()
ru = atom.format_anisou_record()
raw_records.append(ra[:])
raw_records.append(ru[:])
xrs = iotbx.pdb.input(lines = raw_records,
source_info=None).xray_structure_simple()
if(i_m==0):
dummy = abs(xrs.structure_factors(
d_min=params.f_obs.high_resolution).f_calc())
dummy = dummy.resolution_filter(d_max = params.f_obs.low_resolution)
fmodel = mmtbx.f_model.manager(
f_obs = dummy,
xray_structure = xrs,
mask_params = params.f_obs.f_bulk.mask,
sf_and_grads_accuracy_params = params.f_obs.f_calc.accuracy)
fcd = fmodel.f_calc().data()
fms = fmodel.f_masks()
if(i_m==0):
f_calc_data = fcd
f_masks_data = []
for f in fms:
f_masks_data.append(f.data())
else:
f_calc_data += fcd
fmsks = fms
assert len(f_masks_data) == len(fmsks)
for ifmd in range(len(f_masks_data)):
f_masks_data[ifmd] += fmsks[ifmd].data()
fcalc_average = fmodel.f_obs().array(data = f_calc_data)
f_masks_data_average = []
for f in f_masks_data:
f_masks_data_average.append(fmodel.f_obs().array(data = f/len(root.models())))
b_cart = None
if([params.f_obs.overall_anisotropic_scale_matrix_b_cart.max,
params.f_obs.overall_anisotropic_scale_matrix_b_cart.min].count(None)==0):
b_cart = random_aniso_adp(
space_group=crystal_symmetry.space_group(),
unit_cell=crystal_symmetry.unit_cell(),
u_scale=params.f_obs.overall_anisotropic_scale_matrix_b_cart.max,
u_min=params.f_obs.overall_anisotropic_scale_matrix_b_cart.min)
print("\noverall_anisotropic_scale_matrix_b_cart: %s"%",".join(
[("%7.3f"%i).strip() for i in b_cart]))
fmodel = mmtbx.f_model.manager(
f_obs = dummy,
f_calc = fcalc_average,
f_mask = f_masks_data_average,
k_sol = params.f_obs.f_bulk.k_sol,
b_sol = params.f_obs.f_bulk.b_sol,
b_cart = b_cart)
#
f_obs = abs(fmodel.f_model())
f_obs.set_observation_type_xray_amplitude()
mtz_dataset = f_obs.as_mtz_dataset(column_root_label="F(ake)obs")
r_free_flags = f_obs.generate_r_free_flags()
mtz_dataset.add_miller_array(
miller_array=r_free_flags, column_root_label="R-free-flags")
mtz_object = mtz_dataset.mtz_object()
mtz_object.write(file_name=params.f_obs.output_file_name)
def regularize_geometry(xray_structure, restraints_manager, params):
from mmtbx.refinement import geometry_minimization as gm
import scitbx.lbfgs
sites_cart = xray_structure.sites_cart()
minimized = gm.lbfgs(
sites_cart = sites_cart,
correct_special_position_tolerance = 1.0,
geometry_restraints_manager = restraints_manager.geometry,
geometry_restraints_flags = gm.geometry_restraints.flags.flags(default=True),
rmsd_bonds_termination_cutoff=params.rmsd_bonds_target,
rmsd_angles_termination_cutoff=params.rmsd_angles_target,
lbfgs_termination_params=scitbx.lbfgs.termination_parameters(
max_iterations=500))
xray_structure = xray_structure.replace_sites_cart(new_sites = sites_cart)
return xray_structure
def cd(xray_structure, restraints_manager, params):
gradients_calculator=cartesian_dynamics.gradients_calculator_reciprocal_space(
restraints_manager = restraints_manager,
sites_cart = xray_structure.sites_cart(),
wc = 1)
cartesian_dynamics.run(
gradients_calculator = gradients_calculator,
xray_structure = xray_structure,
temperature = 3000,
n_steps = 500000,
time_step = 0.0005,
initial_velocities_zero_fraction = 0,
n_print = 100,
stop_cm_motion = True,
log = None,
stop_at_diff = params.stop_cartesian_dynamics_at_diff,
verbose = -1)
def loop_2(params, xray_structure, pdb_hierarchy, restraints_manager, root):
print("model:")
amp = params.f_obs.f_calc.atomic_model
grm = restraints_manager
xrs = xray_structure.deep_copy_scatterers()
show(xrs = xrs, xrs_start = xrs, grm = grm, prefix = "start:")
xrs_sh = xrs.deep_copy_scatterers()
if(amp.shake_sites_rmsd is not None):
xrs_sh.shake_sites_in_place(rms_difference = amp.shake_sites_rmsd)
if(amp.apply_cartesian_dynamics):
cd(xray_structure = xrs_sh, restraints_manager = grm, params = amp)
show(xrs = xrs_sh, xrs_start = xrs, grm = grm, prefix = "cd: ")
if([amp.regularize_geometry.rmsd_bonds_target,
amp.regularize_geometry.rmsd_angles_target].count(None)==0):
xrs_sh = regularize_geometry(xray_structure = xrs_sh,
restraints_manager = grm, params = amp.regularize_geometry)
show(xrs = xrs_sh, xrs_start = xrs, grm = grm, prefix = "min: ")
if(amp.ladp_angle is not None):
xrs_sh = set_ladp(xray_structure = xrs_sh, pdb_hierarchy = pdb_hierarchy,
angle = amp.ladp_angle)
if([amp.tls.max_tl, amp.tls.min_tl].count(None)==0):
xrs_sh = apply_tls(xray_structure = xrs_sh, params = amp.tls)
if([amp.rigid_body_shift.rotation_angle,
amp.rigid_body_shift.translation_length].count(None)==0):
xrs_sh = apply_rigid_body_shift(xray_structure = xrs_sh,
params = amp.rigid_body_shift)
show(xrs = xrs_sh, xrs_start = xrs, grm = grm, prefix = "rb: ")
#
h = pdb_hierarchy.deep_copy()
h.atoms().reset_i_seq() # XXX
h.atoms().set_xyz(xrs_sh.sites_cart().deep_copy())
h.atoms().set_uij(xrs_sh.scatterers().extract_u_cart(xrs_sh.unit_cell()))
h.atoms().set_b(xrs_sh.extract_u_iso_or_u_equiv()*adptbx.u_as_b(1.))
m = h.models()[0].detached_copy()
m.id = str(None)
root.append_model(m)
def loop_1(params, root, xray_structure, pdb_hierarchy, restraints_manager):
xh = [(xray_structure,pdb_hierarchy)]
if(params.f_obs.f_calc.atomic_model.switch_rotamers):
xh.append(switch_rotamers(
xray_structure = xray_structure.deep_copy_scatterers(),
pdb_hierarchy = pdb_hierarchy.deep_copy()))
counter = 0
size = int(math.ceil(params.f_obs.f_calc.atomic_model.ensemble_size/len(xh)))
for xh_ in xh:
x_, h_ = xh_
for mc in range(size):
loop_2(
params = params,
xray_structure = x_,
pdb_hierarchy = h_,
restraints_manager = restraints_manager,
root = root)
for i_model, model in enumerate(root.models()):
model.id = str(i_model)
root.atoms().set_occ(root.atoms().extract_occ()/len(root.models()))
def defaults(log):
print("Default params::\n", file=log)
parsed = iotbx.phil.parse(master_params_str, process_includes=True)
print(file=log)
return parsed
def run(args, log = sys.stdout):
if(len(args)==0):
parsed = defaults(log=log)
parsed.show(prefix=" ", out=log)
return
parsed = defaults(log=log)
processed_args = mmtbx.utils.process_command_line_args(args = args,
log = sys.stdout, master_params = parsed)
processed_args.params.show()
params = processed_args.params.extract()
if(len(processed_args.pdb_file_names)==0):
raise Sorry("No PDB file found.")
if(len(processed_args.pdb_file_names)>1):
raise Sorry("More than one PDB file found.")
pdb_file_name = processed_args.pdb_file_names[0]
if(params.f_obs.f_calc.atomic_model.add_hydrogens):
pdb_file_name_r = os.path.basename(pdb_file_name)+"_reduce"
# easy_run.go("phenix.reduce %s > %s"% (pdb_file_name, pdb_file_name_r))
run_reduce_with_timeout(file_name=pdb_file_name, parameters=" > %s" % pdb_file_name_r)
pdb_file_name = pdb_file_name_r
pdbi_params = mmtbx.model.manager.get_default_pdb_interpretation_params()
if(params.f_obs.f_calc.atomic_model.use_ramachandran_plot_restraints):
pdbi_params.pdb_interpretation.ramachandran_plot_restraints.enabled=True
model = mmtbx.model.manager(
model_input = iotbx.pdb.input(file_name = pdb_file_name))
model.process(make_restraints=True,
pdb_interpretation_params = pdbi_params)
root = iotbx.pdb.hierarchy.root()
loop_1(
params = params,
root = root,
xray_structure = model.get_xray_structure(),
pdb_hierarchy = model.get_hierarchy(),
restraints_manager = model.get_restraints_manager())
root.write_pdb_file(
file_name = params.f_obs.f_calc.atomic_model.output_file_name,
crystal_symmetry = model.crystal_symmetry())
simulate_f_obs(root=root, crystal_symmetry=model.crystal_symmetry(),
params = params)
if (__name__ == "__main__"):
run(sys.argv[1:])
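# Usage sketch (assumption: standard PHIL-style overrides as parsed by
# process_command_line_args above):
#   phenix.fake_f_obs model.pdb f_obs.high_resolution=2.0 f_obs.f_bulk.k_sol=0.35
# Running with no arguments just prints the default parameters shown above.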
|
tests/test_alpaca_crypto_data_loader.py | ksilo/LiuAlgoTrader | 369 | 12742811 | from datetime import date, datetime
import pandas as pd
import pytest
from alpaca_trade_api.rest import TimeFrame
from pytz import timezone
from liualgotrader.common import config
from liualgotrader.common.data_loader import DataLoader # type: ignore
from liualgotrader.common.types import DataConnectorType, TimeScale
from liualgotrader.data.alpaca import AlpacaData, AlpacaStream
nyc = timezone("America/New_York")
@pytest.mark.devtest
def test_crypto_get_symbol() -> bool:
alpaca_data = AlpacaData()
start = date(2021, 5, 1)
end = date(2021, 10, 1)
_start, _end = alpaca_data._localize_start_end(start, end)
df = alpaca_data.crypto_get_symbol_data(
symbol="BTCUSD", start=_start, end=_end, timeframe=TimeFrame.Day
)
print(df)
return True
@pytest.mark.devtest
def test_btc_data_loader_day() -> bool:
dl = DataLoader(TimeScale.day, connector=DataConnectorType.alpaca)
data = dl["BTCUSD"]["2021-05-01":"2021-10-01"] # type: ignore
print(data)
return True
@pytest.mark.devtest
def test_btc_data_loader_min() -> bool:
dl = DataLoader(connector=DataConnectorType.alpaca)
data = dl["BTCUSD"]["2021-05-01":"2021-10-01"] # type: ignore
print(data)
return True
@pytest.mark.devtest
def test_eth_data_loader_day() -> bool:
dl = DataLoader(TimeScale.day, connector=DataConnectorType.alpaca)
data = dl["ETHUSD"]["2021-05-01":"2021-10-01"] # type: ignore
print(data)
return True
|
core/polyaxon/polyflow/events/__init__.py | admariner/polyaxon | 3,200 | 12742815 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
from marshmallow import fields, validate
from polyaxon.contexts import refs as contexts_refs
from polyaxon.lifecycle import V1Statuses
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
class V1EventKind(polyaxon_sdk.V1EventKind):
events_statuses_mapping = {
polyaxon_sdk.V1EventKind.RUN_STATUS_CREATED: V1Statuses.CREATED,
polyaxon_sdk.V1EventKind.RUN_STATUS_RESUMING: V1Statuses.RESUMING,
polyaxon_sdk.V1EventKind.RUN_STATUS_ON_SCHEDULE: V1Statuses.ON_SCHEDULE,
polyaxon_sdk.V1EventKind.RUN_STATUS_COMPILED: V1Statuses.COMPILED,
polyaxon_sdk.V1EventKind.RUN_STATUS_QUEUED: V1Statuses.QUEUED,
polyaxon_sdk.V1EventKind.RUN_STATUS_SCHEDULED: V1Statuses.SCHEDULED,
polyaxon_sdk.V1EventKind.RUN_STATUS_STARTING: V1Statuses.STARTING,
polyaxon_sdk.V1EventKind.RUN_STATUS_RUNNING: V1Statuses.RUNNING,
polyaxon_sdk.V1EventKind.RUN_STATUS_PROCESSING: V1Statuses.PROCESSING,
polyaxon_sdk.V1EventKind.RUN_STATUS_STOPPING: V1Statuses.STOPPING,
polyaxon_sdk.V1EventKind.RUN_STATUS_FAILED: V1Statuses.FAILED,
polyaxon_sdk.V1EventKind.RUN_STATUS_STOPPED: V1Statuses.STOPPED,
polyaxon_sdk.V1EventKind.RUN_STATUS_SUCCEEDED: V1Statuses.SUCCEEDED,
polyaxon_sdk.V1EventKind.RUN_STATUS_SKIPPED: V1Statuses.SKIPPED,
polyaxon_sdk.V1EventKind.RUN_STATUS_WARNING: V1Statuses.WARNING,
polyaxon_sdk.V1EventKind.RUN_STATUS_UNSCHEDULABLE: V1Statuses.UNSCHEDULABLE,
polyaxon_sdk.V1EventKind.RUN_STATUS_UPSTREAM_FAILED: V1Statuses.UPSTREAM_FAILED,
polyaxon_sdk.V1EventKind.RUN_STATUS_RETRYING: V1Statuses.RETRYING,
polyaxon_sdk.V1EventKind.RUN_STATUS_UNKNOWN: V1Statuses.UNKNOWN,
polyaxon_sdk.V1EventKind.RUN_STATUS_DONE: V1Statuses.DONE,
}
class EventTriggerSchema(BaseCamelSchema):
kinds = fields.List(
fields.Str(validate=validate.OneOf(V1EventKind.allowable_values)),
required=True,
)
ref = fields.Str(required=True)
@staticmethod
def schema_config():
return V1EventTrigger
class V1EventTrigger(BaseConfig, contexts_refs.RefMixin, polyaxon_sdk.V1EventTrigger):
"""Events are an advanced triggering logic that users can take advantage of in addition to:
* Manual triggers via API/CLI/UI.
* Time-based triggers with schedules and crons.
* Upstream triggers with upstream runs or upstream ops in DAGs.
Events can be attached to an operation in the context of a DAG
to extend the simple trigger process,
this is generally important when the user defines a dependency between two operations
and needs a run to start as soon as
the upstream run generates an event instead of waiting until it reaches a final state.
For instance, a usual use-case is to start a tensorboard as soon as training starts.
In that case the downstream operation will watch for the `running` status.
Events can be attached as well to a single operation
to wait for an internal alert or external events,
for instance if a user integrates Polyaxon with Github,
they can trigger training as soon as Polyaxon is notified that a new git commit was created.
Polyaxon provides several internal and external events that users
can leverage to fully automate their usage of the platform:
* "run_status_created"
* "run_status_resuming"
* "run_status_compiled"
* "run_status_queued"
* "run_status_scheduled"
* "run_status_starting"
* "run_status_initializing"
* "run_status_running"
* "run_status_processing"
* "run_status_stopping"
* "run_status_failed"
* "run_status_stopped"
* "run_status_succeeded"
* "run_status_skipped"
* "run_status_warning"
* "run_status_unschedulable"
* "run_status_upstream_failed"
* "run_status_retrying"
* "run_status_unknown"
* "run_status_done"
* "run_approved_actor"
* "run_invalidated_actor"
* "run_new_artifacts"
* "connection_git_commit"
* "connection_dataset_version"
* "connection_registry_image"
* "alert_info"
* "alert_warning"
* "alert_critical"
* "model_version_new_metric"
* "project_custom_event"
* "org_custom_event"
Args:
kinds: List[str]
ref: str
> **Important**: Currently only events with prefix `run_status_*` are supported.
## YAML usage
```yaml
>>> events:
>>> ref: {{ ops.upstream-operation }}
>>> kinds: [run_status_running]
```
```yaml
>>> event:
>>> ref: {{ connections.git-repo-connection-name }}
>>> kinds: [connection_git_commit]
```
## Python usage
```python
>>> from polyaxon.polyflow import V1EventKind, V1EventTrigger
>>> event1 = V1EventTrigger(
>>> ref="{{ ops.upstream-operation }}",
>>> kinds=[V1EventTrigger.RUN_STATUS_RUNNING],
>>> )
>>> event2 = V1EventTrigger(
>>> ref="{{ connections.git-repo-connection-name }}",
>>> kinds=[V1EventTrigger.CONNECTION_GIT_COMMIT],
>>> )
```
## Fields
### kinds
The trigger event kinds to watch, if any event is detected the operation defining the `events`
section will be initiated.
```yaml
>>> event:
>>> kinds: [run_status_running, run_status_done]
```
> **Note**: Similar to trigger in DAGs, after an operation is initiated,
> it will still have to validate the rest of the Polyaxonfile,
> i.e. conditions, contexts, connections, ...
### ref
A valid reference that Polyaxon can resolve the objects that will send the events to watch for.
All supported events are prefixed with the object reference that can send such events.
The `run_*` events can be referenced both by `runs.UUID` or
`ops.OPERATION_NAME` if defined in the context of a DAG.
```yaml
>>> event:
>>> ref: ops.upstream_operation_name
```
"""
IDENTIFIER = "event_trigger"
SCHEMA = EventTriggerSchema
REDUCED_ATTRIBUTES = [
"ref",
]
|
uliweb/i18n/i18ntool.py | timgates42/uliweb | 202 | 12742817 | import os
from optparse import make_option
from uliweb.core import SimpleFrame
from uliweb.utils.common import pkg
from uliweb.core.commands import Command
#def getfiles(path):
# files_list = []
# if os.path.exists(os.path.abspath(os.path.normpath(path))):
# if os.path.isfile(path):
# files_list.append(path)
# else:
# for root, dirs, files in os.walk(path):
# for f in files:
# filename = os.path.join(root, f)
# if '.svn' in filename or (not filename.endswith('.py') and not filename.endswith('.html') and not filename.endswith('.ini')):
# continue
# files_list.append(filename)
# return files_list
def _get_outputfile(path, locale='en'):
output = os.path.normpath(os.path.join(path, 'locale', locale, 'LC_MESSAGES', 'uliweb.pot'))
return output
def _process(path, locale, options, output_dir=None):
from pygettext import extrace_files
from po_merge import merge
from uliweb.utils import pyini
output_dir = output_dir or path
output = _get_outputfile(output_dir, locale=locale)
try:
if options['template']:
x = pyini.Ini(options['template'])
else:
x = pyini.Ini()
vars = {}
vars['First_Author'] = x.get_var('I18N/First_Author', 'FIRST AUTHOR <EMAIL@ADDRESS>')
vars['Project_Id_Version'] = x.get_var('I18N/Project_Id_Version', 'PACKAGE VERSION')
vars['Last_Translator'] = x.get_var('I18N/Last_Translator', 'FULL NAME <EMAIL@ADDRESS>')
vars['Language_Team'] = x.get_var('I18N/Language_Team', 'LANGUAGE <<EMAIL>>')
vars['Content_Type_Charset'] = x.get_var('I18N/Content_Type_Charset', 'utf-8')
vars['Content_Transfer_Encoding'] = x.get_var('I18N/Content_Transfer_Encoding', '8bit')
vars['Plural_Forms'] = x.get_var('I18N/Plural_Forms', 'nplurals=1; plural=0;')
extrace_files(path, output, {'verbose':options['verbose']}, vars=vars)
print 'Success! output file is %s' % output
merge(output[:-4]+'.po', output, options['exact'])
except:
raise
class I18nCommand(Command):
name = 'i18n'
check_apps_dirs = False
args = '<appname, appname, ...>'
    help = 'Extract the i18n message catalog from an app or all apps. Please note that you cannot set -p, -d, --uliweb, --apps and <appname, ...> at the same time.'
has_options = True
option_list = (
make_option('--apps', dest='apps', action='store_true', default=False,
            help='If set, then extract translation messages from all apps located in the project directory, and save a .po file in each app directory.'),
make_option('-p', dest='project', action='store_true', default=False,
help='If set, then extract translation messages from project directory.'),
make_option('-d', dest='directory',
help='If set, then extract translation messages from directory.'),
make_option('--uliweb', dest='uliweb', action='store_true', default=False,
help='If set, then extract translation messages from uliweb.'),
make_option('-l', dest='locale', default='en',
help='Target locale. Default is "en".'),
make_option('--exact', dest='exact', action='store_true', default=False,
help='If set, then all entries existed in old .po file but not existed in new .pot will be removed.'),
make_option('-t', '--template', dest='template',
help='PO variables definition, such as: charset, translater, etc.'),
)
def handle(self, options, global_options, *args):
from uliweb.utils.common import check_apps_dir
opts = {'verbose':global_options.verbose, 'template':options.template,
'exact':options.exact}
if options.project:
check_apps_dir(global_options.apps_dir)
app = self.get_application(global_options)
_process(global_options.apps_dir, options.locale, opts, output_dir=global_options.project)
elif options.apps or args:
check_apps_dir(global_options.apps_dir)
app = self.get_application(global_options)
if options.apps:
_apps = SimpleFrame.get_apps(global_options.apps_dir)
else:
_apps = args
apps_dir = os.path.normpath(os.path.abspath(global_options.apps_dir))
for appname in _apps:
path = SimpleFrame.get_app_dir(appname)
if global_options.verbose:
print 'Processing... app=>[%s] path=>[%s]' % (appname, path)
_process(path, options.locale, opts)
elif options.uliweb:
path = pkg.resource_filename('uliweb', '')
_process(path, options.locale, opts)
elif options.directory:
_process(options.directory, options.locale, opts)
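# ---------------------------------------------------------------------------
# Illustrative command lines (not part of the original module), showing how the
# options handled above are typically combined; the app names and the "zh_CN"
# locale are placeholders, not values taken from this repository:
#
#   uliweb i18n -p -l zh_CN            # extract messages for the whole project
#   uliweb i18n --apps -l zh_CN        # one catalog per app under apps_dir
#   uliweb i18n blog forum -l zh_CN    # only the listed apps
#   uliweb i18n --uliweb               # uliweb's own messages
#   uliweb i18n -d some/dir --exact    # a plain directory, pruning stale entries
# ---------------------------------------------------------------------------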
|
convokit/text_processing/textToArcs.py | CornellNLP/Cornell-Conversational-Analysis-Toolkit | 371 | 12742825 | from .textProcessor import TextProcessor
def _use_text(tok, sent):
return tok['tok'].isalpha() or tok['tok'][1:].isalpha()
class TextToArcs(TextProcessor):
"""
Transformer that outputs a collection of arcs in the dependency parses of each sentence of an utterance. The returned collection is a list where each element corresponds to a sentence in the utterance. Each sentence is represented in terms of its arcs, in a space-separated string.
Each arc, in turn, can be read as follows:
* `x_y` means that `x` is the parent and `y` is the child token (e.g., `agree_does` = `agree --> does`)
* `x_*` means that `x` is a token with at least one descendant, which we do not resolve (this is analogous to bigrams backing off to unigrams)
* `x>y` means that `x` and `y` are the first two tokens in the sentence
* `x>*` means that `x` is the first token in the sentence.
:param output_field: name of attribute to write arcs to.
:param input_field: name of field to use as input. defaults to 'parsed', which stores dependency parses as returned by the TextParser transformer; otherwise expects similarly-formatted input.
:param use_start: whether to also return the first and first two tokens of the sentence. defaults to `True`.
:param root_only: whether to return only the arcs from the root of the dependency parse. defaults to `False`.
:param follow_deps: if root_only is set to `True`, will nonetheless examine subtrees coming out of a dependency listed in follow_deps; by default will follow 'conj' dependencies (hence examining the parts of a sentence following conjunctions like "and").
:param filter_fn: a boolean function determining which tokens to use. arcs will only be included if filter_fn returns True for all tokens in the arc. the function is of signature filter_fn(token, sent) where tokens and sents are formatted according to the output of TextParser. by default, will use tokens which only contain alphabet letters, or only contain letters after the first character (allowing for contractions like you 're): i.e.: `tok['tok'].isalpha() or tok['tok'][1:].isalpha()`.
:param input_filter: a boolean function of signature `input_filter(utterance, aux_input)`. parses will only be computed for utterances where `input_filter` returns `True`. By default, will always return `True`, meaning that arcs will be computed for all utterances.
:param verbosity: frequency of status messages.
"""
def __init__(self, output_field, input_field='parsed',
use_start=True, root_only=False, follow_deps=('conj',),
filter_fn=_use_text, input_filter=lambda utt, aux: True,
verbosity=0):
aux_input = {'root_only': root_only, 'use_start': use_start, 'follow_deps': follow_deps, 'filter_fn': filter_fn}
TextProcessor.__init__(self, proc_fn=self._get_arcs_per_message_wrapper, output_field=output_field, input_field=input_field, aux_input=aux_input, input_filter=input_filter, verbosity=verbosity)
def _get_arcs_per_message_wrapper(self, text_entry, aux_input={}):
return get_arcs_per_message(text_entry, aux_input['use_start'],
aux_input['root_only'], aux_input['follow_deps'],
aux_input['filter_fn'])
def _get_arcs_at_root(root, sent, use_start=True, root_only=False, follow_deps=('conj',), filter_fn=_use_text):
arcs = set()
if not filter_fn(root, sent): return arcs
arcs.add(root['tok'].lower() + '_*')
next_elems = []
for kid_idx in root['dn']:
kid = sent['toks'][kid_idx]
if kid['dep'] in ['cc']: continue
if filter_fn(kid, sent):
if (kid['dep'] not in follow_deps) and (root['tok'].lower() != kid['tok'].lower()):
arcs.add(root['tok'].lower() + '_' + kid['tok'].lower())
if (not root_only) or (kid['dep'] in follow_deps):
next_elems.append(kid)
if use_start:
first_elem = sent['toks'][0]
if filter_fn(first_elem, sent):
arcs.add(first_elem['tok'].lower() + '>*')
if (1 not in first_elem['dn']) and (len(sent['toks']) >= 2):
second_elem = sent['toks'][1]
if 0 not in second_elem['dn']:
if filter_fn(second_elem, sent) and (first_elem['tok'].lower() != second_elem['tok'].lower()): arcs.add(first_elem['tok'].lower() + '>' + second_elem['tok'].lower())
for next_elem in next_elems:
arcs.update(_get_arcs_at_root(next_elem, sent,
use_start=False, root_only=root_only, follow_deps=follow_deps, filter_fn=filter_fn))
return arcs
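# Illustrative output sketch (not part of the original module): for a toy parse
# of "i really like this movie" rooted at "like", the notation described in the
# TextToArcs docstring would produce, roughly, the single sorted string
#
#   "i>* i>really i_* like_* like_i like_movie like_really movie_* movie_this really_* this_*"
#
# for that sentence; the exact arcs depend on the dependency parse returned by
# TextParser, so treat this as a hypothetical example rather than a fixture.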
def get_arcs_per_message(message, use_start=True, root_only=False, follow_deps=('conj',), filter_fn=_use_text):
"""
Stand-alone function that returns the arcs of parsed text.
:param message: parse to extract arcs from
:param use_start: whether to also return the first and first two tokens of the sentence. defaults to `True`.
:param root_only: whether to return only the arcs from the root of the dependency parse. defaults to `False`.
:param follow_deps: if root_only is set to `True`, will nonetheless examine subtrees coming out of a dependency listed in follow_deps; by default will follow 'conj' dependencies (hence examining the parts of a sentence following conjunctions like "and").
:param filter_fn: a boolean function determining which tokens to use. arcs will only be included if filter_fn returns True for all tokens in the arc. the function is of signature filter_fn(token, sent) where tokens and sents are formatted according to the output of TextParser. by default, will use tokens which only contain alphabet letters, or only contain letters after the first character (allowing for contractions like you 're): i.e.: `tok['tok'].isalpha() or tok['tok'][1:].isalpha()`.
:return: a list where each element corresponds to a sentence in the input message. Each sentence is represented in terms of its arcs, in a space-separated string.
"""
return [' '.join(sorted(_get_arcs_at_root(sent['toks'][sent['rt']], sent, use_start=use_start, root_only=root_only,
follow_deps=follow_deps, filter_fn=filter_fn)))
for sent in message] |
tests/protocols/airplay/test_airplay_scan.py | Jacobs4/pyatv | 532 | 12742833 | """Scanning tests with fake mDNS responder.."""
from ipaddress import ip_address
import pytest
from pyatv.const import Protocol
from tests import fake_udns
from tests.conftest import Scanner
from tests.utils import assert_device
IP_1 = "10.0.0.1"
AIRPLAY_NAME = "AirPlay ATV"
AIRPLAY_ID = "AA:BB:CC:DD:EE:FF"
pytestmark = pytest.mark.asyncio
async def test_multicast_scan_airplay_device(udns_server, multicast_scan: Scanner):
udns_server.add_service(
fake_udns.airplay_service(AIRPLAY_NAME, AIRPLAY_ID, addresses=[IP_1])
)
atvs = await multicast_scan()
assert len(atvs) == 1
assert atvs[0].name == AIRPLAY_NAME
assert atvs[0].identifier == AIRPLAY_ID
assert atvs[0].address == ip_address(IP_1)
async def test_unicast_scan_airplay(udns_server, unicast_scan: Scanner):
udns_server.add_service(
fake_udns.airplay_service(AIRPLAY_NAME, AIRPLAY_ID, addresses=[IP_1], port=7000)
)
atvs = await unicast_scan()
assert len(atvs) == 1
assert_device(
atvs[0],
AIRPLAY_NAME,
ip_address(IP_1),
AIRPLAY_ID,
Protocol.AirPlay,
7000,
)
|
python/examples/tic-tac-toe/tictactoebot.py | Vindexus/boardgame.io | 7,080 | 12742836 | #!/usr/bin/python
#
# Copyright 2018 The boardgame.io Authors
#
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
#
# pylint: disable=invalid-name,multiple-imports,global-statement
# To play against this bot, start the tictactoe server from http://boardgame.io/#/multiplayer
# and start the bot with:
# $ python tictactoebot.py
# (will play player '1' by default)
"""
Boardgame.io python client example: starts a bot with player id '0'
that plays randomly against the other player '1'.
"""
import signal, random, logging
from boardgameio import Bot
class TicTacToeBot(Bot):
"""
Example of use of base class boardgameio.Bot:
- the bot connects to the multiplayer server at construction
- each time it is the bot's turn to play, method 'think' is called
- when game is over, method 'gameover' is called.
"""
log = logging.getLogger('tictactoebot')
def __init__(self):
Bot.__init__(self, server='localhost', port=8000,
options={'game_name': 'default',
'num_players': 2,
'player_id': '1'})
def think(self, G, _ctx):
""" Called when it is this bot's turn to play. """
cells = G['cells']
# choose a random empty cell
idx = -1
        while None in cells:
idx = random.randint(0, len(cells)-1)
if not cells[idx]:
break
self.log.debug('cell chosen: %d', idx)
return self.make_move('clickCell', idx)
def gameover(self, _G, ctx):
""" Called when game is over. """
self.log.info('winner is %s', ctx['gameover'])
running = False
log = logging.getLogger('main')
logging.basicConfig(level=logging.INFO)
def main():
""" Start bot and listen continuously for events. """
log.info('starting bot... (Ctrl-C to stop)')
client = TicTacToeBot()
global running
running = True
while running:
client.listen()
log.info('stopped.')
def stop(_signum, _frame):
""" Stop program. """
log.info('stopping...')
global running
running = False
# start process
if __name__ == '__main__':
signal.signal(signal.SIGINT, stop)
signal.signal(signal.SIGTERM, stop)
main()
|
python/cuxfilter/charts/core/core_chart.py | Anhmike/cuxfilter | 201 | 12742852 | import functools
import cudf
import dask_cudf
import logging
import panel as pn
from bokeh.models import ColumnDataSource
from panel.config import panel_extension
from panel.io import state
from panel.util import edit_readonly
from typing import Dict
from ...assets import datetime as dt
class BaseChart:
chart_type: str = None
x: str = None
y: str = None
aggregate_fn: str = "count"
color: str = None
_height: int = 0
_width: int = 0
add_interaction: bool = True
chart = None
source = None
source_backup = None
data_points: int = 0
filter_widget = None
_library_specific_params: Dict[str, str] = {}
stride = None
stride_type = int
min_value: float = 0.0
max_value: float = 0.0
x_label_map = {}
y_label_map = {}
_initialized = False
# widget=False can only be rendered the main layout
is_widget = False
title = ""
@property
def name(self):
chart_type = self.chart_type if self.chart_type else "chart"
return f"{self.x}_{chart_type}_{self.title}"
@property
def width(self):
return self._width
@width.setter
def width(self, value):
self._width = value
if self.chart is not None:
self.update_dimensions(width=value)
if self.filter_widget is not None:
self.filter_widget.width = value
@property
def height(self):
return self._height
@height.setter
def height(self, value):
self._height = value
if self.chart is not None:
self.update_dimensions(height=value)
@property
def library_specific_params(self):
return self._library_specific_params
@property
def x_dtype(self):
if isinstance(self.source, ColumnDataSource):
return self.source.data[self.data_x_axis].dtype
elif isinstance(self.source, (cudf.DataFrame, dask_cudf.DataFrame)):
return self.source[self.x].dtype
return None
@property
def y_dtype(self):
if isinstance(self.source, ColumnDataSource):
            return self.source.data[self.data_y_axis].dtype
elif isinstance(self.source, (cudf.DataFrame, dask_cudf.DataFrame)):
return self.source[self.y].dtype
return None
@library_specific_params.setter
def library_specific_params(self, value):
self._library_specific_params = value
self.extract_mappers()
self.set_color()
def set_color(self):
if "color" in self.library_specific_params:
self.color = self.library_specific_params["color"]
def extract_mappers(self):
if "x_label_map" in self.library_specific_params:
self.x_label_map = self.library_specific_params["x_label_map"]
self.library_specific_params.pop("x_label_map")
if "y_label_map" in self.library_specific_params:
self.y_label_map = self.library_specific_params["y_label_map"]
self.library_specific_params.pop("y_label_map")
def _repr_mimebundle_(self, include=None, exclude=None):
view = self.view()
if self._initialized and panel_extension._loaded:
return view._repr_mimebundle_(include, exclude)
if self._initialized is False:
logging.warning(
"dashboard has not been initialized."
"Please run cuxfilter.dashboard.Dashboard([...charts])"
" to view this object in notebook"
)
if panel_extension._loaded is False:
logging.warning(
"notebooks assets not loaded."
"Please run cuxfilter.load_notebooks_assets()"
" to view this object in notebook"
)
if isinstance(view, pn.Column):
return view.pprint()
return None
def _to_xaxis_type(self, dates):
"""
Description: convert to int64 if self.x_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: cudf.Series | list | tuple
"""
return dt.to_int64_if_datetime(dates, self.x_dtype)
def _to_yaxis_type(self, dates):
"""
Description: convert to int64 if self.y_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: cudf.Series | list | tuple
"""
return dt.to_int64_if_datetime(dates, self.y_dtype)
def _xaxis_dt_transform(self, dates):
"""
Description: convert to datetime64 if self.x_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: list | tuple of integer timestamps objects
"""
return dt.to_dt_if_datetime(dates, self.x_dtype)
def _yaxis_dt_transform(self, dates):
"""
Description: convert to datetime64 if self.y_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: list | tuple of integer timestamps objects
"""
return dt.to_dt_if_datetime(dates, self.y_dtype)
def _xaxis_np_dt64_transform(self, dates):
"""
Description: convert to datetime64 if self.x_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: list | tuple of datetime.datetime objects
"""
return dt.to_np_dt64_if_datetime(dates, self.x_dtype)
def _yaxis_np_dt64_transform(self, dates):
"""
Description: convert to datetime64 if self.y_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: list | tuple of datetime.datetime objects
"""
return dt.to_np_dt64_if_datetime(dates, self.y_dtype)
def _xaxis_stride_type_transform(self, stride_type):
"""
Description: return stride_type=CUDF_TIMEDELTA_TYPE if self.x_dtype is
of type datetime, else return stride_type
"""
return dt.transform_stride_type(stride_type, self.x_dtype)
def _yaxis_stride_type_transform(self, stride_type):
"""
Description: return stride_type=CUDF_TIMEDELTA_TYPE if self.y_dtype is
of type datetime else return stride_type
"""
return dt.transform_stride_type(stride_type, self.y_dtype)
def view(self):
return self.chart
def add_event(self, event, callback):
def release_state():
with edit_readonly(state):
state.busy = False
def callback_busy_state(event):
with edit_readonly(state):
state.busy = True
cb = functools.partial(callback, event)
self.chart.document.add_next_tick_callback(cb)
self.chart.document.add_next_tick_callback(release_state)
self.chart.on_event(event, callback_busy_state)
def update_dimensions(self, width=None, height=None):
print("base calc source function, to over-ridden by delegated classes")
return -1
def calculate_source(self, data):
print("base calc source function, to over-ridden by delegated classes")
return -1
def generate_chart(self):
print("base calc source function, to over-ridden by delegated classes")
return -1
def add_reset_event(self, callback=None):
print("base calc source function, to over-ridden by delegated classes")
return -1
def compute_query_dict(self, query_dict):
print("base calc source function, to over-ridden by delegated classes")
return -1
def reset_chart(self, data: list = []):
print("base calc source function, to over-ridden by delegated classes")
return -1
def reload_chart(self, data, patch_update: bool):
print("base calc source function, to over-ridden by delegated classes")
return -1
def format_source_data(self, source_dict, patch_update=False):
""""""
# print('function to be overridden by library specific extensions')
return -1
def get_source_y_axis(self):
# print('function to be overridden by library specific extensions')
return []
def apply_mappers(self):
""""""
# print('function to be overridden by library specific extensions')
return -1
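# Minimal subclass sketch (not part of the original module): the placeholder
# methods above define the contract a concrete chart fills in. The class below
# is hypothetical and does no real plotting; it only shows which hooks a
# cuxfilter chart typically overrides.
class _SketchChart(BaseChart):
    chart_type = "sketch"

    def calculate_source(self, data):
        # a real chart would aggregate the cudf/dask_cudf frame here and hand
        # the result to format_source_data in whatever shape its backend needs
        self.format_source_data(data)

    def generate_chart(self):
        # a real chart would build its figure object and store it on self.chart
        self.chart = None

    def reload_chart(self, data, patch_update: bool = False):
        self.calculate_source(data)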
|
falsy/utils/meta.py | marco-souza/falsy | 127 | 12742872 | class Meta(dict):
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
self[name] = value
def bind(self, name, func):
setattr(self.__class__, name, func)
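# Illustrative usage (not part of the original module): Meta is a dict whose
# keys double as attributes, and bind() attaches a callable to the class.
if __name__ == "__main__":
    m = Meta(host="localhost")
    m.port = 8080                      # same as m["port"] = 8080
    assert m.port == m["port"] == 8080
    m.bind("describe", lambda self: "%s:%s" % (self.host, self.port))
    assert m.describe() == "localhost:8080"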
|
python/tvm/relay/testing/nat.py | XiaoSong9905/tvm | 4,640 | 12742881 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Defines a unary natural number (Peano natural number) abstract
data type for Relay and provides some utility functions for it.
Nats are useful for testing purposes, as they make it easy to write
test cases for recursion and pattern matching."""
from tvm.relay.backend.interpreter import ConstructorValue
def get_type(prelude, name):
ty_var = prelude.mod.get_global_type_var(name)
ty_data = prelude.mod.type_definitions[ty_var]
return tuple([ty_var] + list(ty_data.constructors))
def count(prelude, n):
"""Takes a ConstructorValue corresponding to a nat ADT
and converts it into a Python integer. This is an example of
using an ADT value in Python.
"""
assert isinstance(n, ConstructorValue)
_, z, s = prelude.mod.get_type("nat")
if n.tag == z.tag:
return 0
assert n.tag == s.tag
return 1 + count(prelude, n.fields[0])
def make_nat_value(prelude, n):
"""The inverse of count(): Given a non-negative Python integer,
constructs a ConstructorValue representing that value as a nat.
"""
_, z, s = prelude.mod.get_type("nat")
if n == 0:
return ConstructorValue(z.tag, [], z)
return ConstructorValue(s.tag, [make_nat_value(prelude, n - 1)], s)
def make_nat_expr(prelude, n):
"""Given a non-negative Python integer, constructs a Python
expression representing that integer's value as a nat.
"""
assert n >= 0
_, z, s = prelude.mod.get_type("nat")
ret = z()
while n > 0:
ret = s(ret)
n = n - 1
return ret
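# Illustrative round-trip sketch (not part of the original module). It assumes
# a `prelude` on which the nat ADT has already been registered (the helper that
# does so differs between TVM releases), and only exercises the functions above.
def _demo_nat_roundtrip(prelude):
    three = make_nat_value(prelude, 3)   # Python int -> nat ConstructorValue
    assert count(prelude, three) == 3    # nat ConstructorValue -> Python int
    return make_nat_expr(prelude, 3)     # Python int -> Relay expr s(s(s(z())))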
|
pxr/base/tf/testenv/testTfPyStaticTokens.py | DougRogers-DigitalFish/USD | 3,680 | 12742887 | #!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from pxr import Tf
import unittest
class TestTfPyStaticTokens(unittest.TestCase):
def test_PyStaticTokens(self):
testTokens = (
('orange', 'orange'),
('pear', "d'Anjou"),
('Fuji', 'Fuji'),
('Pippin', 'Pippin'),
('McIntosh', 'McIntosh'),
)
for scope in (Tf._testStaticTokens, Tf._TestStaticTokens):
for attrName,expectedValue in testTokens:
self.assertTrue(hasattr(scope, attrName))
value = getattr(scope, attrName)
self.assertEqual(value, expectedValue,
"Unexpected value for {0}: got '{1}', expected '{2}'".format(
attrName, value, expectedValue))
# Not wrapping arrays yet, just the array elements.
self.assertFalse(hasattr(scope, 'apple'))
if __name__ == '__main__':
unittest.main()
|
safedelete/tests/test_invisible.py | davidastephens/django-safedelete | 505 | 12742894 | from django.db import models
from ..models import SafeDeleteModel
from .testcase import SafeDeleteTestCase
class InvisibleModel(SafeDeleteModel):
# SafeDeleteModel subclasses automatically have their visibility set to invisible.
name = models.CharField(
max_length=100
)
class VisibilityTestCase(SafeDeleteTestCase):
def setUp(self):
self.instance = InvisibleModel.objects.create(
name='instance'
)
def test_visible_by_pk(self):
"""Test whether the soft deleted model cannot be found by filtering on pk."""
self.assertSoftDelete(self.instance, save=False)
self.assertEqual(
InvisibleModel.objects.filter(
pk=self.instance.pk
).count(),
0
)
self.assertRaises(
InvisibleModel.DoesNotExist,
InvisibleModel.objects.get,
pk=self.instance.pk
)
def test_invisible_by_name(self):
"""Test whether the soft deleted model cannot be found by filtering on name."""
self.assertSoftDelete(self.instance, save=False)
self.assertEqual(
InvisibleModel.objects.filter(
name=self.instance.name
).count(),
0
)
self.assertRaises(
InvisibleModel.DoesNotExist,
InvisibleModel.objects.get,
name=self.instance.name
)
|
clang/test/AST/gen_ast_dump_json_test.py | mkinsner/llvm | 653 | 12742905 | #!/usr/bin/env python3
from __future__ import print_function
from collections import OrderedDict
from shutil import copyfile
import argparse
import json
import os
import re
import subprocess
import sys
import tempfile
def normalize(dict_var):
for k, v in dict_var.items():
if isinstance(v, OrderedDict):
normalize(v)
elif isinstance(v, list):
for e in v:
if isinstance(e, OrderedDict):
normalize(e)
elif type(v) is str:
if v != "0x0" and re.match(r"0x[0-9A-Fa-f]+", v):
dict_var[k] = '0x{{.*}}'
elif os.path.isfile(v):
dict_var[k] = '{{.*}}'
else:
splits = (v.split(' '))
out_splits = []
for split in splits:
inner_splits = split.rsplit(':',2)
if os.path.isfile(inner_splits[0]):
out_splits.append(
'{{.*}}:%s:%s'
%(inner_splits[1],
inner_splits[2]))
continue
out_splits.append(split)
dict_var[k] = ' '.join(out_splits)
def filter_json(dict_var, filters, out):
for k, v in dict_var.items():
if type(v) is str:
if v in filters:
out.append(dict_var)
break
elif isinstance(v, OrderedDict):
filter_json(v, filters, out)
elif isinstance(v, list):
for e in v:
if isinstance(e, OrderedDict):
filter_json(e, filters, out)
def default_clang_path():
guessed_clang = os.path.join(os.path.dirname(__file__), "clang")
if os.path.isfile(guessed_clang):
return guessed_clang
return None
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--clang", help="The clang binary (could be a relative or absolute path)",
action="store", default=default_clang_path())
parser.add_argument("--source", help="the source file(s). Without --update, the command used to generate the JSON "
"will be of the format <clang> -cc1 -ast-dump=json <opts> <source>",
action="store", nargs=argparse.ONE_OR_MORE, required=True)
parser.add_argument("--filters", help="comma separated list of AST filters. Ex: --filters=TypedefDecl,BuiltinType",
action="store", default='')
update_or_generate_group = parser.add_mutually_exclusive_group()
update_or_generate_group.add_argument("--update", help="Update the file in-place", action="store_true")
update_or_generate_group.add_argument("--opts", help="other options",
action="store", default='', type=str)
parser.add_argument("--update-manual", help="When using --update, also update files that do not have the "
"autogenerated disclaimer", action="store_true")
args = parser.parse_args()
if not args.source:
sys.exit("Specify the source file to give to clang.")
clang_binary = os.path.abspath(args.clang)
if not os.path.isfile(clang_binary):
sys.exit("clang binary specified not present.")
for src in args.source:
process_file(src, clang_binary, cmdline_filters=args.filters,
cmdline_opts=args.opts, do_update=args.update,
force_update=args.update_manual)
def process_file(source_file, clang_binary, cmdline_filters, cmdline_opts,
do_update, force_update):
note_firstline = "// NOTE: CHECK lines have been autogenerated by " \
"gen_ast_dump_json_test.py"
filters_line_prefix = "// using --filters="
note = note_firstline
cmd = [clang_binary, "-cc1"]
if do_update:
# When updating the first line of the test must be a RUN: line
with open(source_file, "r") as srcf:
first_line = srcf.readline()
found_autogenerated_line = False
filters_line = None
for i, line in enumerate(srcf.readlines()):
if found_autogenerated_line:
# print("Filters line: '", line.rstrip(), "'", sep="")
if line.startswith(filters_line_prefix):
filters_line = line[len(filters_line_prefix):].rstrip()
break
if line.startswith(note_firstline):
found_autogenerated_line = True
# print("Found autogenerated disclaimer at line", i + 1)
if not found_autogenerated_line and not force_update:
print("Not updating", source_file, "since it is not autogenerated.",
file=sys.stderr)
return
if not cmdline_filters and filters_line:
cmdline_filters = filters_line
print("Inferred filters as '" + cmdline_filters + "'")
if "RUN: %clang_cc1 " not in first_line:
sys.exit("When using --update the first line of the input file must contain RUN: %clang_cc1")
clang_start = first_line.find("%clang_cc1") + len("%clang_cc1")
file_check_idx = first_line.rfind("| FileCheck")
if file_check_idx:
dump_cmd = first_line[clang_start:file_check_idx]
else:
dump_cmd = first_line[clang_start:]
print("Inferred run arguments as '", dump_cmd, "'", sep="")
options = dump_cmd.split()
if "-ast-dump=json" not in options:
sys.exit("ERROR: RUN: line does not contain -ast-dump=json")
if "%s" not in options:
sys.exit("ERROR: RUN: line does not contain %s")
options.remove("%s")
else:
options = cmdline_opts.split()
options.append("-ast-dump=json")
cmd.extend(options)
using_ast_dump_filter = any('ast-dump-filter' in arg for arg in cmd)
cmd.append(source_file)
print("Will run", cmd)
filters = set()
if cmdline_filters:
note += "\n" + filters_line_prefix + cmdline_filters
filters = set(cmdline_filters.split(','))
print("Will use the following filters:", filters)
try:
json_str = subprocess.check_output(cmd).decode()
except Exception as ex:
print("The clang command failed with %s" % ex)
return -1
out_asts = []
if using_ast_dump_filter:
# If we're using a filter, then we might have multiple JSON objects
# in the output. To parse each out, we use a manual JSONDecoder in
# "raw" mode and update our location in the string based on where the
# last document ended.
decoder = json.JSONDecoder(object_hook=OrderedDict)
doc_start = 0
prev_end = 0
while True:
try:
prev_end = doc_start
(j, doc_start) = decoder.raw_decode(json_str[doc_start:])
doc_start += prev_end + 1
normalize(j)
out_asts.append(j)
except:
break
else:
j = json.loads(json_str, object_pairs_hook=OrderedDict)
normalize(j)
if len(filters) == 0:
out_asts.append(j)
else:
filter_json(j, filters, out_asts)
with tempfile.NamedTemporaryFile("w", delete=False) as f:
with open(source_file, "r") as srcf:
for line in srcf.readlines():
# copy up to the note:
if line.rstrip() == note_firstline:
break
f.write(line)
f.write(note + "\n")
for out_ast in out_asts:
append_str = json.dumps(out_ast, indent=1, ensure_ascii=False)
out_str = '\n\n'
out_str += "// CHECK-NOT: {{^}}Dumping\n"
index = 0
for append_line in append_str.splitlines()[2:]:
if index == 0:
out_str += '// CHECK: %s\n' %(append_line.rstrip())
index += 1
else:
out_str += '// CHECK-NEXT: %s\n' %(append_line.rstrip())
f.write(out_str)
f.flush()
f.close()
if do_update:
print("Updating json appended source file to %s." % source_file)
copyfile(f.name, source_file)
else:
partition = source_file.rpartition('.')
dest_path = '%s-json%s%s' % (partition[0], partition[1], partition[2])
print("Writing json appended source file to %s." % dest_path)
copyfile(f.name, dest_path)
os.remove(f.name)
return 0
if __name__ == '__main__':
main()
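# Illustrative invocations (not part of the original script); the clang path,
# source file, filter list and extra options below are placeholders:
#
#   python gen_ast_dump_json_test.py --clang ./bin/clang --source test.cpp \
#       --filters=TypedefDecl,BuiltinType --opts "-std=gnu++17"
#   python gen_ast_dump_json_test.py --clang ./bin/clang --source test.cpp --update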
|
PaddleCV/adversarial/advbox/__init__.py | suytingwan/models | 819 | 12742940 | """
A set of tools for generating adversarial examples on the Paddle platform
"""
|
examples/python/templates/multi-page-apps/responsive-collapsible-sidebar/sidebar.py | glsdown/dash-bootstrap-components | 776 | 12742951 | <reponame>glsdown/dash-bootstrap-components
"""
This app creates a collapsible, responsive sidebar layout with
dash-bootstrap-components and some custom css with media queries.
When the screen is small, the sidebar moved to the top of the page, and the
links get hidden in a collapse element. We use a callback to toggle the
collapse when on a small screen, and the custom CSS to hide the toggle, and
force the collapse to stay open when the screen is large.
dcc.Location is used to track the current location, a callback uses the current
location to render the appropriate page content. The active prop of each
NavLink is set automatically according to the current pathname. To use this
feature you must install dash-bootstrap-components >= 0.11.0.
For more details on building multi-page Dash applications, check out the Dash
documentation: https://dash.plot.ly/urls
"""
import dash
import dash_bootstrap_components as dbc
from dash import Input, Output, State, dcc, html
app = dash.Dash(
external_stylesheets=[dbc.themes.BOOTSTRAP],
# these meta_tags ensure content is scaled correctly on different devices
# see: https://www.w3schools.com/css/css_rwd_viewport.asp for more
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1"}
],
)
# we use the Row and Col components to construct the sidebar header
# it consists of a title, and a toggle, the latter is hidden on large screens
sidebar_header = dbc.Row(
[
dbc.Col(html.H2("Sidebar", className="display-4")),
dbc.Col(
[
html.Button(
# use the Bootstrap navbar-toggler classes to style
html.Span(className="navbar-toggler-icon"),
className="navbar-toggler",
# the navbar-toggler classes don't set color
style={
"color": "rgba(0,0,0,.5)",
"border-color": "rgba(0,0,0,.1)",
},
id="navbar-toggle",
),
html.Button(
# use the Bootstrap navbar-toggler classes to style
html.Span(className="navbar-toggler-icon"),
className="navbar-toggler",
# the navbar-toggler classes don't set color
style={
"color": "rgba(0,0,0,.5)",
"border-color": "rgba(0,0,0,.1)",
},
id="sidebar-toggle",
),
],
# the column containing the toggle will be only as wide as the
# toggle, resulting in the toggle being right aligned
width="auto",
# vertically align the toggle in the center
align="center",
),
]
)
sidebar = html.Div(
[
sidebar_header,
# we wrap the horizontal rule and short blurb in a div that can be
# hidden on a small screen
html.Div(
[
html.Hr(),
html.P(
"A responsive sidebar layout with collapsible navigation "
"links.",
className="lead",
),
],
id="blurb",
),
# use the Collapse component to animate hiding / revealing links
dbc.Collapse(
dbc.Nav(
[
dbc.NavLink("Home", href="/", active="exact"),
dbc.NavLink("Page 1", href="/page-1", active="exact"),
dbc.NavLink("Page 2", href="/page-2", active="exact"),
],
vertical=True,
pills=True,
),
id="collapse",
),
],
id="sidebar",
)
content = html.Div(id="page-content")
app.layout = html.Div([dcc.Location(id="url"), sidebar, content])
@app.callback(Output("page-content", "children"), [Input("url", "pathname")])
def render_page_content(pathname):
if pathname == "/":
return html.P("This is the content of the home page!")
elif pathname == "/page-1":
return html.P("This is the content of page 1. Yay!")
elif pathname == "/page-2":
return html.P("Oh cool, this is page 2!")
# If the user tries to reach a different page, return a 404 message
return dbc.Jumbotron(
[
html.H1("404: Not found", className="text-danger"),
html.Hr(),
html.P(f"The pathname {pathname} was not recognised..."),
]
)
@app.callback(
Output("sidebar", "className"),
[Input("sidebar-toggle", "n_clicks")],
[State("sidebar", "className")],
)
def toggle_classname(n, classname):
if n and classname == "":
return "collapsed"
return ""
@app.callback(
Output("collapse", "is_open"),
[Input("navbar-toggle", "n_clicks")],
[State("collapse", "is_open")],
)
def toggle_collapse(n, is_open):
if n:
return not is_open
return is_open
if __name__ == "__main__":
app.run_server(port=8888, debug=True)
|
pypy2.7/module/_multiprocess/interp_win32.py | UniverseFly/multiprocess | 356 | 12742993 | from rpython.rlib import rwin32
from rpython.rlib.rarithmetic import r_uint
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.tool import rffi_platform
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from pypy.interpreter.error import oefmt, wrap_windowserror
from pypy.interpreter.function import StaticMethod
from pypy.interpreter.gateway import interp2app, unwrap_spec
from _multiprocess.interp_connection import w_handle
CONSTANTS = """
PIPE_ACCESS_INBOUND PIPE_ACCESS_DUPLEX
GENERIC_READ GENERIC_WRITE OPEN_EXISTING
PIPE_TYPE_MESSAGE PIPE_READMODE_MESSAGE PIPE_WAIT
PIPE_UNLIMITED_INSTANCES
NMPWAIT_WAIT_FOREVER
ERROR_PIPE_CONNECTED ERROR_SEM_TIMEOUT ERROR_PIPE_BUSY
ERROR_NO_SYSTEM_RESOURCES ERROR_BROKEN_PIPE ERROR_MORE_DATA
ERROR_ALREADY_EXISTS ERROR_NO_DATA
""".split()
class CConfig:
_compilation_info_ = ExternalCompilationInfo(
includes = ['windows.h'],
libraries = ['kernel32'],
)
for name in CONSTANTS:
locals()[name] = rffi_platform.ConstantInteger(name)
config = rffi_platform.configure(CConfig)
globals().update(config)
def handle_w(space, w_handle):
return rffi.cast(rwin32.HANDLE, space.int_w(w_handle))
_CreateNamedPipe = rwin32.winexternal(
'CreateNamedPipeA', [
rwin32.LPCSTR,
rwin32.DWORD, rwin32.DWORD, rwin32.DWORD,
rwin32.DWORD, rwin32.DWORD, rwin32.DWORD,
rffi.VOIDP],
rwin32.HANDLE,
save_err=rffi.RFFI_SAVE_LASTERROR)
_ConnectNamedPipe = rwin32.winexternal(
'ConnectNamedPipe', [rwin32.HANDLE, rffi.VOIDP], rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
_SetNamedPipeHandleState = rwin32.winexternal(
'SetNamedPipeHandleState', [
rwin32.HANDLE,
rwin32.LPDWORD, rwin32.LPDWORD, rwin32.LPDWORD],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
_WaitNamedPipe = rwin32.winexternal(
'WaitNamedPipeA', [rwin32.LPCSTR, rwin32.DWORD],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
_PeekNamedPipe = rwin32.winexternal(
'PeekNamedPipe', [
rwin32.HANDLE,
rffi.VOIDP,
rwin32.DWORD,
rwin32.LPDWORD, rwin32.LPDWORD, rwin32.LPDWORD],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
_CreateFile = rwin32.winexternal(
'CreateFileA', [
rwin32.LPCSTR,
rwin32.DWORD, rwin32.DWORD, rffi.VOIDP,
rwin32.DWORD, rwin32.DWORD, rwin32.HANDLE],
rwin32.HANDLE,
save_err=rffi.RFFI_SAVE_LASTERROR)
_WriteFile = rwin32.winexternal(
'WriteFile', [
rwin32.HANDLE,
rffi.VOIDP, rwin32.DWORD,
rwin32.LPDWORD, rffi.VOIDP],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
_ReadFile = rwin32.winexternal(
'ReadFile', [
rwin32.HANDLE,
rffi.VOIDP, rwin32.DWORD,
rwin32.LPDWORD, rffi.VOIDP],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
_ExitProcess = rwin32.winexternal(
'ExitProcess', [rffi.UINT], lltype.Void,
save_err=rffi.RFFI_SAVE_LASTERROR)
_GetTickCount = rwin32.winexternal(
'GetTickCount', [], rwin32.DWORD)
_Sleep = rwin32.winexternal(
'Sleep', [rwin32.DWORD], lltype.Void)
def CloseHandle(space, w_handle):
handle = handle_w(space, w_handle)
if not rwin32.CloseHandle(handle):
raise wrap_windowserror(space, rwin32.lastSavedWindowsError())
def GetLastError(space):
"""NOTE: don't use this. See issue #2658"""
return space.newint(rwin32.GetLastError_saved())
# __________________________________________________________
# functions for the "win32" namespace
@unwrap_spec(name='text', openmode=r_uint, pipemode=r_uint, maxinstances=r_uint,
outputsize=r_uint, inputsize=r_uint, timeout=r_uint)
def CreateNamedPipe(space, name, openmode, pipemode, maxinstances,
outputsize, inputsize, timeout, w_security):
security = space.int_w(w_security)
if security:
raise oefmt(space.w_NotImplementedError, "expected a NULL pointer")
handle = _CreateNamedPipe(
name, openmode, pipemode, maxinstances,
outputsize, inputsize, timeout, rffi.NULL)
if handle == rwin32.INVALID_HANDLE_VALUE:
raise wrap_windowserror(space, rwin32.lastSavedWindowsError())
return w_handle(space, handle)
def ConnectNamedPipe(space, w_handle, w_overlapped):
handle = handle_w(space, w_handle)
overlapped = space.int_w(w_overlapped)
if overlapped:
raise oefmt(space.w_NotImplementedError, "expected a NULL pointer")
if not _ConnectNamedPipe(handle, rffi.NULL):
raise wrap_windowserror(space, rwin32.lastSavedWindowsError())
def SetNamedPipeHandleState(space, w_handle, w_pipemode, w_maxinstances,
w_timeout):
handle = handle_w(space, w_handle)
state = lltype.malloc(rffi.CArrayPtr(rffi.UINT).TO, 3, flavor='raw')
statep = lltype.malloc(rffi.CArrayPtr(rffi.UINTP).TO, 3, flavor='raw',
zero=True)
try:
if not space.is_w(w_pipemode, space.w_None):
state[0] = rffi.cast(rffi.UINT, space.uint_w(w_pipemode))
statep[0] = rffi.ptradd(state, 0)
if not space.is_w(w_maxinstances, space.w_None):
state[1] = rffi.cast(rffi.UINT, space.uint_w(w_maxinstances))
statep[1] = rffi.ptradd(state, 1)
if not space.is_w(w_timeout, space.w_None):
state[2] = rffi.cast(rffi.UINT, space.uint_w(w_timeout))
statep[2] = rffi.ptradd(state, 2)
if not _SetNamedPipeHandleState(handle, statep[0], statep[1],
statep[2]):
raise wrap_windowserror(space, rwin32.lastSavedWindowsError())
finally:
lltype.free(state, flavor='raw')
lltype.free(statep, flavor='raw')
@unwrap_spec(name='text', timeout=r_uint)
def WaitNamedPipe(space, name, timeout):
# Careful: zero means "default value specified by CreateNamedPipe()"
if not _WaitNamedPipe(name, timeout):
raise wrap_windowserror(space, rwin32.lastSavedWindowsError())
@unwrap_spec(filename='fsencode', access=r_uint, share=r_uint,
disposition=r_uint, flags=r_uint)
def CreateFile(space, filename, access, share, w_security,
disposition, flags, w_templatefile):
security = space.int_w(w_security)
templatefile = space.int_w(w_templatefile)
if security or templatefile:
raise oefmt(space.w_NotImplementedError, "expected a NULL pointer")
handle = _CreateFile(filename, access, share, rffi.NULL,
disposition, flags, rwin32.NULL_HANDLE)
if handle == rwin32.INVALID_HANDLE_VALUE:
raise wrap_windowserror(space, rwin32.lastSavedWindowsError())
return w_handle(space, handle)
@unwrap_spec(code=r_uint)
def ExitProcess(space, code):
_ExitProcess(code)
def win32_namespace(space):
"NOT_RPYTHON"
w_win32 = space.call_function(space.w_type,
space.wrap("win32"),
space.newtuple([]),
space.newdict())
# constants
for name in CONSTANTS:
space.setattr(w_win32,
space.wrap(name),
space.wrap(config[name]))
space.setattr(w_win32,
space.wrap('NULL'),
space.newint(0))
# functions
for name in ['CloseHandle', 'GetLastError', 'CreateFile',
'CreateNamedPipe', 'ConnectNamedPipe',
'SetNamedPipeHandleState', 'WaitNamedPipe',
'ExitProcess',
]:
function = globals()[name]
w_function = space.wrap(interp2app(function))
w_method = space.wrap(StaticMethod(w_function))
space.setattr(w_win32, space.wrap(name), w_method)
return w_win32
|
calvin/actorstore/systemactors/media/ImageSource.py | gabrielcercel/calvin-base | 334 | 12743011 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-17 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys
from calvin.utilities.calvinlogger import get_actor_logger
_log = get_actor_logger(__name__)
class ImageSource(Actor):
"""
When token on input, get an image.
Inputs:
trigger: anything
Outputs:
b64image: generated image
"""
@manage(exclude=["_cam"])
def init(self):
self.setup()
def setup(self):
self._cam = calvinsys.open(self, "image.source")
def did_migrate(self):
self.setup()
def will_end(self):
calvinsys.close(self._cam)
@stateguard(lambda self: calvinsys.can_read(self._cam))
@condition(action_output=['b64image'])
def send_image(self):
image = calvinsys.read(self._cam)
return (image, )
@stateguard(lambda self: calvinsys.can_write(self._cam))
@condition(action_input=['trigger'])
def fetch_image(self, trigger):
calvinsys.write(self._cam, None)
action_priority = (fetch_image, send_image)
requires = ['image.source']
test_calvinsys = {'image.source': {'read': [1,0,1,0,0,1,0,1],
'write': [None, None, None, None]}}
test_set = [
{
'inports': {'trigger': [True, 1, "a", 0]},
'outports': {'b64image': [1,0,1,0,0,1,0,1]}
}
]
|
python/render_vision_dataset/render_verify_pixel2block.py | boldsort/craftassist | 626 | 12743046 | """
Copyright (c) Facebook, Inc. and its affiliates.
"""
import argparse
import logging
import os
import subprocess
import random
import cv2
import numpy as np
import sys
python_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, python_dir)
from cuberite_process import CuberiteProcess
from repo import repo_home
logging.basicConfig(format="%(asctime)s [%(levelname)s]: %(message)s")
logging.getLogger().setLevel(logging.DEBUG)
def to_unit_vec(yaw, pitch):
pitch *= 3.14159 / 180
yaw *= 3.14159 / 180
return np.array(
[-1 * np.cos(pitch) * np.sin(yaw), -1 * np.sin(pitch), np.cos(pitch) * np.cos(yaw)]
)
def ground_height(blocks):
dirt_pct = np.mean(np.mean(blocks[:, :, :, 0] == 2, axis=1), axis=1)
if (dirt_pct > 0.25).any():
return np.argmax(dirt_pct)
return None
def change_block(schematic, b):
x, y, z = b
## change to red wool
schematic[y][z][x][0] = 35
schematic[y][z][x][1] = 14
def render(npy_p2b, out_dir, port, spp, img_size, mn=None):
npy_file = (
os.path.expanduser("~")
+ "/minecraft_houses/"
+ ".".join(npy_p2b.split(".")[1:-2])
+ "/schematic.npy"
)
schematic = np.load(npy_file)
print(schematic.shape)
house_name = os.path.basename(os.path.dirname(npy_file))
p2b = np.load(npy_p2b)
# remove blocks below ground-level
g = ground_height(schematic)
schematic = schematic[(g or 0) :, :, :, :]
ys, zs, xs = np.nonzero(schematic[:, :, :, 0] > 0)
xmid, ymid, zmid = np.mean(xs), np.mean(ys), np.mean(zs)
    focus = np.array([xmid, ymid + 63, zmid])  # TODO: +63 only works for flat_world seed=0
yaw, distance = list(map(int, npy_p2b.split(".")[-2].split("_")))
look = [yaw, 0]
look_xyz = to_unit_vec(*look)
camera = focus - (look_xyz * distance)
if mn == [0, 0]:
M, N = p2b.shape[:2]
while True:
m = random.randint(0, M - 1)
n = random.randint(0, N - 1)
if p2b[m][n][0] != -1:
break
else:
m, n = mn
print("Select pixel at {}".format((m, n)))
print("Mapped block {}".format(p2b[m][n]))
change_block(schematic, p2b[m][n])
logging.info("Launching cuberite at port {}".format(port))
p = CuberiteProcess(
"flat_world", seed=0, game_mode="creative", place_blocks_yzx=schematic, port=port
)
logging.info("Destroying cuberite at port {}".format(port))
p.destroy()
world_dir = os.path.join(p.workdir, "world")
render_view_bin = os.path.join(repo_home, "bin/render_view")
assert os.path.isfile(
render_view_bin
), "{} not found.\n\nTry running: make render_view".format(render_view_bin)
procs = []
chunky_id = "{}_{}".format(yaw, distance)
out_file = "{}/chunky_verify.{}.{}.png".format(out_dir, house_name, chunky_id)
call = [
str(a)
for a in [
"python3",
"{}/python/minecraft_render/render.py".format(repo_home),
"--world",
world_dir,
"--out",
out_file,
"--camera",
*camera,
"--look",
yaw,
0,
"--size",
*img_size,
"--spp",
spp,
]
]
logging.info("CALL: " + " ".join(call))
procs.append(subprocess.Popen(call))
for p in procs:
p.wait()
## draw the sampled pixel for a better view
img = cv2.imread(out_file)
cv2.circle(img, (n, m), 2, (255, 0, 0))
cv2.imwrite(out_file, img)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("npy_p2b")
parser.add_argument(
"--out-dir", "-o", required=True, help="Directory in which to write vision files"
)
parser.add_argument("--spp", type=int, default=25, help="samples per pixel")
parser.add_argument("--port", type=int, default=25565)
parser.add_argument("--size", type=int, nargs=2, default=[300, 225])
parser.add_argument("--mn", type=int, nargs=2, default=[0, 0])
args = parser.parse_args()
render(args.npy_p2b, args.out_dir, args.port, args.spp, args.size, args.mn)
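# Illustrative invocation (not part of the original script); the .npy path is a
# placeholder following the "<prefix>.<house name>.<yaw>_<distance>.npy" pattern
# that render() parses above:
#
#   python render_verify_pixel2block.py p2b.some_house.90_20.npy \
#       -o /tmp/out --port 25565 --spp 25 --size 300 225 --mn 100 150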
|
tests/test_e01_link_options.py | simonvh/genomepy | 112 | 12743061 | import pytest
skip = False
if not skip:
@pytest.fixture(scope="module", params=["primary_assembly", "toplevel"])
def assembly(request):
return request.param
@pytest.fixture(scope="module", params=["98", None])
def release_version(request):
return request.param
@pytest.fixture(scope="module", params=["hard", "soft", "unmasked"])
def masking(request):
return request.param
def test_ensembl_genome_download_links(assembly, masking, release_version, ensembl):
"""Test Ensembl links with various options
These genomes are hosted on ftp.ensembl.org
Vertebrates are downloaded from HTTP.
"""
mask = masking if masking != "unmasked" else "none"
toplevel = False if assembly == "primary_assembly" else True
version = release_version
assert ensembl.get_genome_download_link(
"GRCh38.p13", mask=mask, toplevel=toplevel, version=version
)
def test_ensemblgenomes_genome_download_links(masking, ensembl):
"""Test Ensembl FTP links for various genomes
These genomes are hosted on ftp.ensemblgenomes.org.
"""
mask = masking if masking != "unmasked" else "none"
for genome in ["Amel_HAv3.1", "ASM23943v1"]:
assert ensembl.get_genome_download_link(genome, mask=mask)
def test_ucsc_genome_download_links(masking, ucsc):
"""Test UCSC HTTP links for various genomes
Also test masking (unmasked should be ignored)."""
for genome in ["sacCer3", "hg38"]:
assert ucsc.get_genome_download_link(genome, mask=masking)
def test_ncbi_genome_download_links(masking, ncbi):
"""Test NCBI HTTPS links for various genomes
Also test masking (should be ignored).
These genomes are hosted on ftp://ftp.ncbi.nlm.nih.gov."""
for genome in ["Charlie1.0", "GRCh38.p13"]:
assert ncbi.get_genome_download_link(genome, mask=masking)
|
anchore_engine/services/apiext/api/controllers/image_imports.py | rbrady/anchore-engine | 1,484 | 12742993 | import datetime
from connexion import request
from anchore_engine.apis import exceptions as api_exceptions
from anchore_engine.apis.authorization import (
ActionBoundPermission,
RequestingAccountValue,
get_authorizer,
)
from anchore_engine.apis.context import ApiRequestContextProxy
from anchore_engine.clients.services import internal_client_for
from anchore_engine.clients.services.catalog import CatalogClient
from anchore_engine.common.helpers import make_response_error
from anchore_engine.subsys import logger
authorizer = get_authorizer()
IMPORT_BUCKET = "image_content_imports"
MAX_UPLOAD_SIZE = 100 * 1024 * 1024 # 100 MB
OPERATION_EXPIRATION_DELTA = datetime.timedelta(hours=24)
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def create_operation():
"""
POST /imports/images
:return:
"""
try:
client = internal_client_for(
CatalogClient, userId=ApiRequestContextProxy.namespace()
)
resp = client.create_image_import()
return resp, 200
except api_exceptions.AnchoreApiError as ex:
return (
make_response_error(ex, in_httpcode=ex.__response_code__),
ex.__response_code__,
)
except Exception as ex:
logger.exception("Unexpected error in api processing")
return make_response_error(ex, in_httpcode=500), 500
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_operations():
"""
GET /imports/images
:return:
"""
try:
client = internal_client_for(
CatalogClient, userId=ApiRequestContextProxy.namespace()
)
resp = client.list_image_import_operations()
return resp, 200
except api_exceptions.AnchoreApiError as ex:
return (
make_response_error(ex, in_httpcode=ex.__response_code__),
ex.__response_code__,
)
except Exception as ex:
logger.exception("Unexpected error in api processing")
return make_response_error(ex, in_httpcode=500), 500
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_operation(operation_id):
"""
GET /imports/images/{operation_id}
:param operation_id:
:return:
"""
try:
client = internal_client_for(
CatalogClient, userId=ApiRequestContextProxy.namespace()
)
resp = client.get_image_import_operation(operation_id)
return resp, 200
except api_exceptions.AnchoreApiError as ex:
return (
make_response_error(ex, in_httpcode=ex.__response_code__),
ex.__response_code__,
)
except Exception as ex:
logger.exception("Unexpected error in api processing")
return make_response_error(ex, in_httpcode=500), 500
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def invalidate_operation(operation_id):
"""
DELETE /imports/images/{operation_id}
:param operation_id:
:return:
"""
try:
client = internal_client_for(
CatalogClient, userId=ApiRequestContextProxy.namespace()
)
resp = client.cancel_image_import(operation_id)
return resp, 200
except api_exceptions.AnchoreApiError as ex:
return (
make_response_error(ex, in_httpcode=ex.__response_code__),
ex.__response_code__,
)
except Exception as ex:
logger.exception("Unexpected error in api processing")
return make_response_error(ex, in_httpcode=500), 500
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_import_packages(operation_id):
"""
GET /imports/images/{operation_id}/packages
:param operation_id:
:return:
"""
try:
client = internal_client_for(
CatalogClient, userId=ApiRequestContextProxy.namespace()
)
resp = client.list_import_content(operation_id, "packages")
return resp, 200
except api_exceptions.AnchoreApiError as ex:
return (
make_response_error(ex, in_httpcode=ex.__response_code__),
ex.__response_code__,
)
except Exception as ex:
logger.exception("Unexpected error in api processing")
return make_response_error(ex, in_httpcode=500), 500
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_import_dockerfiles(operation_id):
"""
GET /imports/images/{operation_id}/dockerfile
:param operation_id:
:return:
"""
try:
client = internal_client_for(
CatalogClient, userId=ApiRequestContextProxy.namespace()
)
resp = client.list_import_content(operation_id, "dockerfile")
return resp, 200
except api_exceptions.AnchoreApiError as ex:
return (
make_response_error(ex, in_httpcode=ex.__response_code__),
ex.__response_code__,
)
except Exception as ex:
logger.exception("Unexpected error in api processing")
return make_response_error(ex, in_httpcode=500), 500
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_import_image_manifests(operation_id):
"""
GET /imports/images/{operation_id}/manifest
:param operation_id:
:return:
"""
try:
client = internal_client_for(
CatalogClient, userId=ApiRequestContextProxy.namespace()
)
resp = client.list_import_content(operation_id, "manifest")
return resp, 200
except api_exceptions.AnchoreApiError as ex:
return (
make_response_error(ex, in_httpcode=ex.__response_code__),
ex.__response_code__,
)
except Exception as ex:
logger.exception("Unexpected error in api processing")
return make_response_error(ex, in_httpcode=500), 500
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_import_parent_manifests(operation_id):
"""
    GET /imports/images/{operation_id}/parent_manifest
:param operation_id:
:return:
"""
try:
client = internal_client_for(
CatalogClient, userId=ApiRequestContextProxy.namespace()
)
resp = client.list_import_content(operation_id, "parent_manifest")
return resp, 200
except api_exceptions.AnchoreApiError as ex:
return (
make_response_error(ex, in_httpcode=ex.__response_code__),
ex.__response_code__,
)
except Exception as ex:
logger.exception("Unexpected error in api processing")
return make_response_error(ex, in_httpcode=500), 500
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_import_image_configs(operation_id):
"""
GET /imports/images/{operation_id}/image_config
:param operation_id:
:return:
"""
try:
client = internal_client_for(
CatalogClient, userId=ApiRequestContextProxy.namespace()
)
resp = client.list_import_content(operation_id, "image_config")
return resp, 200
except api_exceptions.AnchoreApiError as ex:
return (
make_response_error(ex, in_httpcode=ex.__response_code__),
ex.__response_code__,
)
except Exception as ex:
logger.exception("Unexpected error in api processing")
return make_response_error(ex, in_httpcode=500), 500
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def import_image_packages(operation_id):
"""
POST /imports/images/{operation_id}/packages
:param operation_id:
:param sbom:
:return:
"""
return content_upload(operation_id, "packages", request)
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def import_image_dockerfile(operation_id):
"""
POST /imports/images/{operation_id}/dockerfile
:param operation_id:
:param sbom:
:return:
"""
return content_upload(operation_id, "dockerfile", request)
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def import_image_manifest(operation_id):
"""
POST /imports/images/{operation_id}/manifest
:param operation_id:
:return:
"""
return content_upload(operation_id, "manifest", request)
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def import_image_parent_manifest(operation_id):
"""
POST /imports/images/{operation_id}/parent_manifest
:param operation_id:
:return:
"""
return content_upload(operation_id, "parent_manifest", request)
@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def import_image_config(operation_id):
"""
POST /imports/images/{operation_id}/image_config
:param operation_id:
:return:
"""
return content_upload(operation_id, "image_config", request)
def content_upload(operation_id, content_type, request):
"""
Generic handler for multiple types of content uploads. Still operates at the API layer
:param operation_id:
:param content_type:
:param request:
:return:
"""
try:
client = internal_client_for(
CatalogClient, userId=ApiRequestContextProxy.namespace()
)
return (
client.upload_image_import_content(
operation_id, content_type, request.data
),
200,
)
except api_exceptions.AnchoreApiError as ex:
return (
make_response_error(ex, in_httpcode=ex.__response_code__),
ex.__response_code__,
)
except Exception as ex:
logger.exception("Unexpected error in api processing")
return make_response_error(ex, in_httpcode=500), 500
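# ---------------------------------------------------------------------------
# A minimal, hypothetical sketch (not part of this project's API): every handler
# above repeats the same try/except translation of AnchoreApiError (mapped to its
# HTTP code) and of unexpected exceptions (mapped to 500). A decorator like the
# one below captures that shared pattern; it is an illustration only.
import functools
def _catalog_error_handler(handler):
    @functools.wraps(handler)
    def wrapper(*args, **kwargs):
        try:
            return handler(*args, **kwargs), 200
        except api_exceptions.AnchoreApiError as ex:
            return (
                make_response_error(ex, in_httpcode=ex.__response_code__),
                ex.__response_code__,
            )
        except Exception as ex:
            logger.exception("Unexpected error in api processing")
            return make_response_error(ex, in_httpcode=500), 500
    return wrapper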
|
athena/utils/learning_rate.py | godjealous/athena | 119 | 12743090 | # coding=utf-8
# Copyright (C) ATHENA AUTHORS
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Only support eager mode
# pylint: disable=too-few-public-methods, no-member, too-many-arguments, unused-argument
""" learning rate """
import tensorflow as tf
from ..utils.hparam import register_and_parse_hparams
class WarmUpLearningSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
""" WarmUp Learning rate schedule for Adam
Used as :
optimizer = tf.keras.optimizers.Adam(learning_rate = WarmUpLearningSchedule(512),
beta_1=0.9, beta_2=0.98, epsilon=1e-9)
    Args:
        model_dim: the model hidden size, used to scale the learning rate
        warmup_steps: the step at which the learning rate peaks before decaying
    Returns:
        the learning rate for the given step
Idea from the paper: Attention Is All You Need
"""
def __init__(self, model_dim=512, warmup_steps=4000, k=1.0,
decay_steps=99999999, decay_rate=1.0):
super().__init__()
self.model_dim = tf.cast(model_dim, tf.float32)
self.warmup_steps = warmup_steps
self.k = k
self.decay_steps = tf.cast(decay_steps, tf.float32)
self.decay_rate = tf.cast(decay_rate, tf.float32)
def __call__(self, step):
step = tf.cast(step, tf.float32)
arg1 = tf.math.rsqrt(step)
arg2 = step * (self.warmup_steps ** -1.5)
k = self.k * tf.cast(self.decay_rate ** (step // self.decay_steps), tf.float32)
return k * tf.math.rsqrt(self.model_dim) * tf.math.minimum(arg1, arg2)
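# A minimal, hypothetical sketch of the Noam-style schedule defined above: the rate
# grows roughly linearly up to `warmup_steps` and then decays like 1/sqrt(step).
# The helper name is illustrative only and not part of this module's API.
def _warmup_schedule_example():
    schedule = WarmUpLearningSchedule(model_dim=512, warmup_steps=4000, k=1.0)
    # step 100: still warming up; step 4000: around the peak; step 40000: decay regime
    return [float(schedule(step)) for step in (100.0, 4000.0, 40000.0)]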
class WarmUpAdam(tf.keras.optimizers.Adam):
"""WarmUpAdam Implementation """
default_config = {
"d_model": 512,
"warmup_steps": 8000,
"k": 0.5,
"decay_steps": 100000,
"decay_rate": 1.0
}
def __init__(self, config=None, beta_1=0.9, beta_2=0.999, epsilon=1e-7,
amsgrad=False, name="WarmUpAdam", **kwargs):
self.hparams = register_and_parse_hparams(self.default_config, config, cls=self.__class__)
super().__init__(
learning_rate=WarmUpLearningSchedule(
self.hparams.d_model,
self.hparams.warmup_steps,
self.hparams.k,
self.hparams.decay_steps,
self.hparams.decay_rate
),
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
amsgrad=amsgrad,
name=name,
)
class ExponentialDecayLearningRateSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
""" ExponentialDecayLearningRateSchedule
    Used as:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=ExponentialDecayLearningRateSchedule(0.01, 100))
    Args:
        initial_lr, decay_steps, decay_rate
    Returns:
        initial_lr * (decay_rate ** (step // decay_steps))
"""
def __init__(self, initial_lr=0.005, decay_steps=10000, decay_rate=0.5):
super().__init__()
self.initial_lr = initial_lr
self.decay_steps = tf.cast(decay_steps, tf.float32)
self.decay_rate = tf.cast(decay_rate, tf.float32)
def __call__(self, step):
step = tf.cast(step, tf.float32)
factor = tf.cast(self.decay_rate ** (step // self.decay_steps), tf.float32)
return self.initial_lr * factor
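# A minimal, hypothetical sketch of the schedule defined above: with the default
# decay_rate of 0.5 it simply halves the learning rate every `decay_steps` steps.
# The helper name is illustrative only.
def _exponential_decay_example():
    schedule = ExponentialDecayLearningRateSchedule(initial_lr=0.01, decay_steps=100)
    # steps 0-99 -> 0.01, steps 100-199 -> 0.005, steps 200-299 -> 0.0025
    return [float(schedule(step)) for step in (0.0, 100.0, 200.0)]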
class ExponentialDecayAdam(tf.keras.optimizers.Adam):
"""WarmUpAdam Implementation """
default_config = {
"initial_lr": 0.005,
"decay_steps": 10000,
"decay_rate": 0.5
}
def __init__(self, config=None, beta_1=0.9, beta_2=0.999, epsilon=1e-7,
amsgrad=False, name="WarmUpAdam", **kwargs):
self.hparams = register_and_parse_hparams(self.default_config, config, cls=self.__class__)
super().__init__(
learning_rate=ExponentialDecayLearningRateSchedule(
self.hparams.initial_lr,
self.hparams.decay_steps,
self.hparams.decay_rate
),
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
amsgrad=amsgrad,
name=name,
)
|
pythran/tests/cases/goodExpoMeasure.py | davidbrochart/pythran | 1,647 | 12743112 | #runas import numpy as np; n = 20; a = np.arange(n*n*n).reshape((n,n,n)).astype(np.uint8); b = 2. ; goodExpoMeasure(a, b)
#pythran export goodExpoMeasure(uint8[][][], float)
import numpy
def goodExpoMeasure(inRGB, sigma):
'''
    Compute the good-exposure (well-exposedness) image quality measure on one input image.
'''
R = inRGB[0,:,:].astype(numpy.float64)
G = inRGB[1,:,:].astype(numpy.float64)
B = inRGB[2,:,:].astype(numpy.float64)
goodExpoR = numpy.exp(- ((R - 128)**2) / sigma)
goodExpoG = numpy.exp(- ((G - 128)**2) / sigma)
goodExpoB = numpy.exp(- ((B - 128)**2) / sigma)
goodExpo = goodExpoR * goodExpoG * goodExpoB
goodExpo = (numpy.round(goodExpo, 2) * (2**8-1)).astype(numpy.uint8)
return goodExpo
|
wxpy/api/messages/message.py | yylingyun/wxpy | 14,391 | 12743113 | <reponame>yylingyun/wxpy
# coding: utf-8
from __future__ import unicode_literals
import logging
import os
import tempfile
import weakref
from datetime import datetime
from xml.etree import ElementTree as ETree
try:
import html
except ImportError:
# Python 2.6-2.7
# noinspection PyUnresolvedReferences,PyUnresolvedReferences,PyCompatibility
from HTMLParser import HTMLParser
html = HTMLParser()
from wxpy.api.chats import Chat, Group, Member, User
from wxpy.compatible.utils import force_encoded_string_output
from wxpy.utils import wrap_user_name, repr_message
from .article import Article
from ..consts import ATTACHMENT, CARD, FRIENDS, MAP, PICTURE, RECORDING, SHARING, TEXT, VIDEO
from ...compatible import *
logger = logging.getLogger(__name__)
class Message(object):
"""
    A single message object, including:
    * messages received from chats such as friends, groups, and friend requests
    * messages sent from the bot account through the WeChat mobile client
    | but **not including** messages sent from code via the .send/reply() family of methods
    | for those messages, see :class:`SentMessage`
"""
def __init__(self, raw, bot):
self.raw = raw
self.bot = weakref.proxy(bot)
self._receive_time = datetime.now()
        # Bind the msg.chat.send* methods to msg.reply*, e.g. msg.chat.send_img => msg.reply_img
for method in '', '_image', '_file', '_video', '_msg', '_raw_msg':
setattr(self, 'reply' + method, getattr(self.chat, 'send' + method))
def __hash__(self):
return hash((Message, self.id))
@force_encoded_string_output
def __repr__(self):
return repr_message(self)
def __unicode__(self):
return repr_message(self)
# basic
@property
def type(self):
"""
        The type of the message; currently one of the following values::
            # text
            TEXT = 'Text'
            # location
            MAP = 'Map'
            # card
            CARD = 'Card'
            # note / notification
            NOTE = 'Note'
            # sharing
            SHARING = 'Sharing'
            # picture
            PICTURE = 'Picture'
            # voice recording
            RECORDING = 'Recording'
            # file attachment
            ATTACHMENT = 'Attachment'
            # video
            VIDEO = 'Video'
            # friend request
            FRIENDS = 'Friends'
            # system
            SYSTEM = 'System'
:rtype: str
"""
return self.raw.get('Type')
@property
def id(self):
"""
        The unique ID of the message (usually a 64-bit integer greater than 0)
"""
return self.raw.get('NewMsgId')
# content
@property
def text(self):
"""
        The text content of the message
"""
_type = self.type
_card = self.card
if _type == MAP:
location = self.location
if location:
return location.get('label')
elif _card:
if _type == CARD:
return _card.name
elif _type == FRIENDS:
return _card.raw.get('Content')
ret = self.raw.get('Text')
if isinstance(ret, str):
return ret
def get_file(self, save_path=None):
"""
        Download the file content of a picture, video, recording, or attachment message.
        Can be used together with :any:`Message.file_name`.
        :param save_path: path to save the file to; if None, the raw bytes are returned directly
"""
_text = self.raw.get('Text')
if callable(_text) and self.type in (PICTURE, RECORDING, ATTACHMENT, VIDEO):
return _text(save_path)
else:
raise ValueError('download method not found, or invalid message type')
@property
def file_name(self):
"""
        The file name of the file carried by the message
"""
return self.raw.get('FileName')
@property
def file_size(self):
"""
        The size of the file carried by the message
"""
return self.raw.get('FileSize')
@property
def media_id(self):
"""
        The file resource ID in file-type messages (empty for pictures, videos, recordings, and other message types)
"""
return self.raw.get('MediaId')
# group
@property
def is_at(self):
"""
        True when the message comes from a group chat and the bot is @-mentioned
"""
return self.raw.get('IsAt') or self.raw.get('isAt')
# misc
@property
def img_height(self):
"""
        Image height
"""
return self.raw.get('ImgHeight')
@property
def img_width(self):
"""
        Image width
"""
return self.raw.get('ImgWidth')
@property
def play_length(self):
"""
        Video duration
"""
return self.raw.get('PlayLength')
@property
def voice_length(self):
"""
        Voice recording duration
"""
return self.raw.get('VoiceLength')
@property
def url(self):
"""
        The web page URL in sharing-type messages
"""
_url = self.raw.get('Url')
if isinstance(_url, str):
_url = html.unescape(_url)
return _url
@property
def articles(self):
"""
        The list of articles in an official account (MP) push (the first article's title/URL equal the message's text/url)
        Each article has the following attributes:
        * `title`: title
        * `summary`: summary
        * `url`: article URL
        * `cover`: cover or thumbnail URL
"""
from wxpy import MP
if self.type == SHARING and isinstance(self.sender, MP):
tree = ETree.fromstring(self.raw['Content'])
# noinspection SpellCheckingInspection
items = tree.findall('.//mmreader/category/item')
article_list = list()
for item in items:
def find_text(tag):
found = item.find(tag)
if found is not None:
return found.text
article = Article()
article.title = find_text('title')
article.summary = find_text('digest')
article.url = find_text('url')
article.cover = find_text('cover')
article_list.append(article)
return article_list
@property
def card(self):
"""
        * the requesting user in a friend request
        * the recommended user in a card message
"""
if self.type in (CARD, FRIENDS):
return User(self.raw.get('RecommendInfo'), self.bot)
# time
@property
def create_time(self):
"""
        The time the message was sent by the server
"""
# noinspection PyBroadException
try:
return datetime.fromtimestamp(self.raw.get('CreateTime'))
except:
pass
@property
def receive_time(self):
"""
        The time the message was received locally
"""
return self._receive_time
@property
def latency(self):
"""
        Latency of the message in seconds (difference between the send time and the receive time)
"""
create_time = self.create_time
if create_time:
return (self.receive_time - create_time).total_seconds()
@property
def location(self):
"""
        The geographic location information in a location message
"""
try:
ret = ETree.fromstring(self.raw['OriContent']).find('location').attrib
try:
ret['x'] = float(ret['x'])
ret['y'] = float(ret['y'])
ret['scale'] = int(ret['scale'])
ret['maptype'] = int(ret['maptype'])
except (KeyError, ValueError):
pass
return ret
except (TypeError, KeyError, ValueError, ETree.ParseError):
pass
# chats
@property
def chat(self):
"""
        The chat session this message belongs to, i.e.:
        * for messages sent by the bot itself, the receiver of the message
        * for messages sent by others, the sender of the message
:rtype: :class:`wxpy.User`, :class:`wxpy.Group`
"""
if self.raw.get('FromUserName') == self.bot.self.user_name:
return self.receiver
else:
return self.sender
@property
def sender(self):
"""
        The sender of the message
:rtype: :class:`wxpy.User`, :class:`wxpy.Group`
"""
return self._get_chat_by_user_name(self.raw.get('FromUserName'))
@property
def receiver(self):
"""
        The receiver of the message
:rtype: :class:`wxpy.User`, :class:`wxpy.Group`
"""
return self._get_chat_by_user_name(self.raw.get('ToUserName'))
@property
def member(self):
"""
        * if the message comes from a group chat, this is the actual sender (the specific group member)
        * if the message comes from any other chat (not a group), this is None
:rtype: NoneType, :class:`wxpy.Member`
"""
if isinstance(self.chat, Group):
if self.sender == self.bot.self:
return self.chat.self
else:
actual_user_name = self.raw.get('ActualUserName')
for _member in self.chat.members:
if _member.user_name == actual_user_name:
return _member
return Member(dict(
UserName=actual_user_name,
NickName=self.raw.get('ActualNickName')
), self.chat)
def _get_chat_by_user_name(self, user_name):
"""
        Find the corresponding chat object by its user_name
        :param user_name: user_name
        :return: the matching chat object
"""
def match_in_chats(_chats):
for c in _chats:
if c.user_name == user_name:
return c
_chat = None
if user_name.startswith('@@'):
_chat = match_in_chats(self.bot.groups())
elif user_name:
_chat = match_in_chats(self.bot.friends())
if not _chat:
_chat = match_in_chats(self.bot.mps())
if not _chat:
_chat = Chat(wrap_user_name(user_name), self.bot)
return _chat
def forward(self, chat, prefix=None, suffix=None, raise_for_unsupported=False):
"""
        Forward this message to another chat.
        The following message types are supported:
        * text (`TEXT`)
        * video (`VIDEO`)
        * file (`ATTACHMENT`)
        * picture / custom sticker (`PICTURE`)
            * stickers from the sticker store are not supported
        * card (`CARD`)
            * only official account cards, and personal cards sent by the bot itself
        * sharing (`SHARING`)
            * converted into a `title + link` text message
        * recording (`RECORDING`)
            * sent as a file
        * map (`MAP`)
            * converted into a `location name + map link` text message
        :param Chat chat: the chat that will receive the forwarded message
        :param str prefix: **prefix** text added when forwarding; a line break is inserted automatically when the original message is text
        :param str suffix: **suffix** text added when forwarding; a line break is inserted automatically when the original message is text
        :param bool raise_for_unsupported:
            | when True, raise a `NotImplementedError` for unsupported message types
        For example, forwarding the boss's messages out of the company group::
            from wxpy import *
            bot = Bot()
            # locate the company group
            company_group = ensure_one(bot.groups().search('company WeChat group'))
            # locate the boss
            boss = ensure_one(company_group.search('boss name'))
            # forward the boss's messages to the file transfer helper
            @bot.register(company_group)
            def forward_boss_message(msg):
                if msg.member == boss:
                    msg.forward(bot.file_helper, prefix='From the boss')
            # block the thread
            embed()
"""
logger.info('{}: forwarding to {}: {}'.format(self.bot, chat, self))
def wrapped_send(send_type, *args, **kwargs):
if send_type == 'msg':
if args:
text = args[0]
elif kwargs:
text = kwargs['msg']
else:
text = self.text
ret = chat.send_msg('{}{}{}'.format(
str(prefix) + '\n' if prefix else '',
text,
'\n' + str(suffix) if suffix else '',
))
else:
if prefix:
chat.send_msg(prefix)
ret = getattr(chat, 'send_{}'.format(send_type))(*args, **kwargs)
if suffix:
chat.send_msg(suffix)
return ret
def download_and_send():
fd, path = tempfile.mkstemp(
suffix='_{}'.format(self.file_name),
dir=self.bot.temp_dir.name
)
try:
self.get_file(path)
if self.type == PICTURE:
return wrapped_send('image', path)
elif self.type == VIDEO:
return wrapped_send('video', path)
else:
return wrapped_send('file', path)
finally:
os.close(fd)
def raise_properly(text):
logger.warning(text)
if raise_for_unsupported:
raise NotImplementedError(text)
if self.type == TEXT:
return wrapped_send('msg')
elif self.type == SHARING:
return wrapped_send('msg', '{}\n{}'.format(self.text, self.url))
elif self.type == MAP:
return wrapped_send('msg', '{}: {}\n{}'.format(
self.location['poiname'], self.location['label'], self.url
))
elif self.type == ATTACHMENT:
# noinspection SpellCheckingInspection
content = \
"<appmsg appid='wxeb7ec651dd0aefa9' sdkver=''>" \
"<title>{file_name}</title><des></des><action></action>" \
"<type>6</type><content></content><url></url><lowurl></lowurl>" \
"<appattach><totallen>{file_size}</totallen><attachid>{media_id}</attachid>" \
"<fileext>{file_ext}</fileext></appattach><extinfo></extinfo></appmsg>"
content = content.format(
file_name=self.file_name,
file_size=self.file_size,
media_id=self.media_id,
file_ext=os.path.splitext(self.file_name)[1].replace('.', '')
)
return wrapped_send(
send_type='raw_msg',
raw_type=self.raw['MsgType'],
raw_content=content,
uri='/webwxsendappmsg?fun=async&f=json'
)
elif self.type == CARD:
if self.card.raw.get('AttrStatus') and self.sender != self.bot.self:
                # a personal card that was not sent by the bot itself
raise_properly('Personal cards sent from others are unsupported:\n{}'.format(self))
else:
return wrapped_send(
send_type='raw_msg',
raw_type=self.raw['MsgType'],
raw_content=self.raw['Content'],
uri='/webwxsendmsg'
)
elif self.type == PICTURE:
if self.raw.get('HasProductId'):
                # a sticker from the sticker store
raise_properly('Stickers from store are unsupported:\n{}'.format(self))
else:
return download_and_send()
elif self.type == VIDEO:
return download_and_send()
elif self.type == RECORDING:
return download_and_send()
else:
raise_properly('Unsupported message type:\n{}'.format(self))
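# ---------------------------------------------------------------------------
# A minimal, hypothetical sketch of how the properties above are typically consumed,
# assuming `msg` is a received Message instance; it is an illustration only and not
# part of this module's API.
def _sketch_handle_message(msg):
    if msg.type == TEXT:
        return msg.chat, msg.text
    if msg.type in (PICTURE, VIDEO, RECORDING, ATTACHMENT):
        # download the payload next to the original file name
        return msg.get_file(save_path=msg.file_name)
    if msg.type == SHARING:
        return msg.url, msg.articles
    return None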
|
zhaquirks/aduro/__init__.py | WolfRevo/zha-device-handlers | 213 | 12743135 | """ADUROLIGHT module for custom device handlers."""
|
sdk/python/feast/infra/online_stores/datastore.py | kevjumba/feast | 810 | 12743162 | <filename>sdk/python/feast/infra/online_stores/datastore.py
# Copyright 2021 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
from datetime import datetime
from multiprocessing.pool import ThreadPool
from queue import Empty, Queue
from threading import Lock, Thread
from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Tuple
from pydantic import PositiveInt, StrictStr
from pydantic.typing import Literal
from feast import Entity, utils
from feast.errors import FeastProviderLoginError
from feast.feature_view import FeatureView
from feast.infra.infra_object import DATASTORE_INFRA_OBJECT_CLASS_TYPE, InfraObject
from feast.infra.online_stores.helpers import compute_entity_id
from feast.infra.online_stores.online_store import OnlineStore
from feast.protos.feast.core.DatastoreTable_pb2 import (
DatastoreTable as DatastoreTableProto,
)
from feast.protos.feast.core.InfraObject_pb2 import InfraObject as InfraObjectProto
from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
from feast.protos.feast.types.Value_pb2 import Value as ValueProto
from feast.repo_config import FeastConfigBaseModel, RepoConfig
from feast.usage import log_exceptions_and_usage, tracing_span
LOGGER = logging.getLogger(__name__)
try:
from google.auth.exceptions import DefaultCredentialsError
from google.cloud import datastore
from google.cloud.datastore.client import Key
except ImportError as e:
from feast.errors import FeastExtrasDependencyImportError
raise FeastExtrasDependencyImportError("gcp", str(e))
ProtoBatch = Sequence[
Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]
]
class DatastoreOnlineStoreConfig(FeastConfigBaseModel):
"""Online store config for GCP Datastore"""
type: Literal["datastore"] = "datastore"
""" Online store type selector"""
project_id: Optional[StrictStr] = None
""" (optional) GCP Project Id """
namespace: Optional[StrictStr] = None
""" (optional) Datastore namespace """
write_concurrency: Optional[PositiveInt] = 40
""" (optional) Amount of threads to use when writing batches of feature rows into Datastore"""
write_batch_size: Optional[PositiveInt] = 50
""" (optional) Amount of feature rows per batch being written into Datastore"""
class DatastoreOnlineStore(OnlineStore):
"""
    Online store backed by GCP Datastore, used for all interaction between Feast and the
    service used for online storage of features.
"""
_client: Optional[datastore.Client] = None
@log_exceptions_and_usage(online_store="datastore")
def update(
self,
config: RepoConfig,
tables_to_delete: Sequence[FeatureView],
tables_to_keep: Sequence[FeatureView],
entities_to_delete: Sequence[Entity],
entities_to_keep: Sequence[Entity],
partial: bool,
):
online_config = config.online_store
assert isinstance(online_config, DatastoreOnlineStoreConfig)
client = self._get_client(online_config)
feast_project = config.project
for table in tables_to_keep:
key = client.key("Project", feast_project, "Table", table.name)
entity = datastore.Entity(
key=key, exclude_from_indexes=("created_ts", "event_ts", "values")
)
entity.update({"created_ts": datetime.utcnow()})
client.put(entity)
for table in tables_to_delete:
_delete_all_values(
client, client.key("Project", feast_project, "Table", table.name)
)
# Delete the table metadata datastore entity
key = client.key("Project", feast_project, "Table", table.name)
client.delete(key)
def teardown(
self,
config: RepoConfig,
tables: Sequence[FeatureView],
entities: Sequence[Entity],
):
online_config = config.online_store
assert isinstance(online_config, DatastoreOnlineStoreConfig)
client = self._get_client(online_config)
feast_project = config.project
for table in tables:
_delete_all_values(
client, client.key("Project", feast_project, "Table", table.name)
)
# Delete the table metadata datastore entity
key = client.key("Project", feast_project, "Table", table.name)
client.delete(key)
def _get_client(self, online_config: DatastoreOnlineStoreConfig):
if not self._client:
self._client = _initialize_client(
online_config.project_id, online_config.namespace
)
return self._client
@log_exceptions_and_usage(online_store="datastore")
def online_write_batch(
self,
config: RepoConfig,
table: FeatureView,
data: List[
Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]
],
progress: Optional[Callable[[int], Any]],
) -> None:
online_config = config.online_store
assert isinstance(online_config, DatastoreOnlineStoreConfig)
client = self._get_client(online_config)
write_concurrency = online_config.write_concurrency
write_batch_size = online_config.write_batch_size
feast_project = config.project
with ThreadPool(processes=write_concurrency) as pool:
pool.map(
lambda b: self._write_minibatch(
client, feast_project, table, b, progress
),
self._to_minibatches(data, batch_size=write_batch_size),
)
@staticmethod
def _to_minibatches(data: ProtoBatch, batch_size) -> Iterator[ProtoBatch]:
"""
Split data into minibatches, making sure we stay under GCP datastore transaction size
limits.
"""
iterable = iter(data)
while True:
batch = list(itertools.islice(iterable, batch_size))
if len(batch) > 0:
yield batch
else:
break
@staticmethod
def _write_minibatch(
client,
project: str,
table: FeatureView,
data: Sequence[
Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]
],
progress: Optional[Callable[[int], Any]],
):
entities = []
for entity_key, features, timestamp, created_ts in data:
document_id = compute_entity_id(entity_key)
key = client.key(
"Project", project, "Table", table.name, "Row", document_id,
)
entity = datastore.Entity(
key=key, exclude_from_indexes=("created_ts", "event_ts", "values")
)
content_entity = datastore.Entity(
exclude_from_indexes=tuple(features.keys())
)
for k, v in features.items():
content_entity[k] = v.SerializeToString()
entity["key"] = entity_key.SerializeToString()
entity["values"] = content_entity
entity["event_ts"] = utils.make_tzaware(timestamp)
entity["created_ts"] = (
utils.make_tzaware(created_ts) if created_ts is not None else None
)
entities.append(entity)
with client.transaction():
client.put_multi(entities)
if progress:
progress(len(entities))
@log_exceptions_and_usage(online_store="datastore")
def online_read(
self,
config: RepoConfig,
table: FeatureView,
entity_keys: List[EntityKeyProto],
requested_features: Optional[List[str]] = None,
) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:
online_config = config.online_store
assert isinstance(online_config, DatastoreOnlineStoreConfig)
client = self._get_client(online_config)
feast_project = config.project
keys: List[Key] = []
result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []
for entity_key in entity_keys:
document_id = compute_entity_id(entity_key)
key = client.key(
"Project", feast_project, "Table", table.name, "Row", document_id
)
keys.append(key)
# NOTE: get_multi doesn't return values in the same order as the keys in the request.
# Also, len(values) can be less than len(keys) in the case of missing values.
with tracing_span(name="remote_call"):
values = client.get_multi(keys)
values_dict = {v.key: v for v in values} if values is not None else {}
for key in keys:
if key in values_dict:
value = values_dict[key]
res = {}
for feature_name, value_bin in value["values"].items():
val = ValueProto()
val.ParseFromString(value_bin)
res[feature_name] = val
result.append((value["event_ts"], res))
else:
result.append((None, None))
return result
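# ---------------------------------------------------------------------------
# A minimal, hypothetical sketch of the batching performed by
# DatastoreOnlineStore._to_minibatches above: itertools.islice pulls fixed-size
# chunks so each Datastore transaction stays below the entity limit. The helper
# name is illustrative only.
def _sketch_minibatching():
    rows = list(range(7))
    batches = list(DatastoreOnlineStore._to_minibatches(rows, batch_size=3))
    # batches == [[0, 1, 2], [3, 4, 5], [6]]
    return batches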
def _delete_all_values(client, key):
"""
Delete all data under the key path in datastore.
Creates and uses a queue of lists of entity keys, which are batch deleted
by multiple threads.
"""
class AtomicCounter(object):
# for tracking how many deletions have already occurred; not used outside this method
def __init__(self):
self.value = 0
self.lock = Lock()
def increment(self):
with self.lock:
self.value += 1
BATCH_SIZE = 500 # Dec 2021: delete_multi has a max size of 500: https://cloud.google.com/datastore/docs/concepts/limits
NUM_THREADS = 3
deletion_queue = Queue()
status_info_counter = AtomicCounter()
def worker(shared_counter):
while True:
try:
job = deletion_queue.get(block=False)
except Empty:
return
client.delete_multi(job)
shared_counter.increment()
LOGGER.debug(
f"batch deletions completed: {shared_counter.value} ({shared_counter.value * BATCH_SIZE} total entries) & outstanding queue size: {deletion_queue.qsize()}"
)
deletion_queue.task_done()
query = client.query(kind="Row", ancestor=key)
for page in query.fetch().pages:
deletion_queue.put([entity.key for entity in page])
for _ in range(NUM_THREADS):
Thread(target=worker, args=(status_info_counter,)).start()
deletion_queue.join()
def _initialize_client(
project_id: Optional[str], namespace: Optional[str]
) -> datastore.Client:
try:
client = datastore.Client(project=project_id, namespace=namespace,)
return client
except DefaultCredentialsError as e:
raise FeastProviderLoginError(
str(e)
+ '\nIt may be necessary to run "gcloud auth application-default login" if you would like to use your '
"local Google Cloud account "
)
class DatastoreTable(InfraObject):
"""
A Datastore table managed by Feast.
Attributes:
project: The Feast project of the table.
name: The name of the table.
project_id (optional): The GCP project id.
namespace (optional): Datastore namespace.
"""
project: str
project_id: Optional[str]
namespace: Optional[str]
def __init__(
self,
project: str,
name: str,
project_id: Optional[str] = None,
namespace: Optional[str] = None,
):
super().__init__(name)
self.project = project
self.project_id = project_id
self.namespace = namespace
def to_infra_object_proto(self) -> InfraObjectProto:
datastore_table_proto = self.to_proto()
return InfraObjectProto(
infra_object_class_type=DATASTORE_INFRA_OBJECT_CLASS_TYPE,
datastore_table=datastore_table_proto,
)
def to_proto(self) -> Any:
datastore_table_proto = DatastoreTableProto()
datastore_table_proto.project = self.project
datastore_table_proto.name = self.name
if self.project_id:
datastore_table_proto.project_id.value = self.project_id
if self.namespace:
datastore_table_proto.namespace.value = self.namespace
return datastore_table_proto
@staticmethod
def from_infra_object_proto(infra_object_proto: InfraObjectProto) -> Any:
datastore_table = DatastoreTable(
project=infra_object_proto.datastore_table.project,
name=infra_object_proto.datastore_table.name,
)
# Distinguish between null and empty string, since project_id and namespace are StringValues.
if infra_object_proto.datastore_table.HasField("project_id"):
datastore_table.project_id = (
infra_object_proto.datastore_table.project_id.value
)
if infra_object_proto.datastore_table.HasField("namespace"):
datastore_table.namespace = (
infra_object_proto.datastore_table.namespace.value
)
return datastore_table
@staticmethod
def from_proto(datastore_table_proto: DatastoreTableProto) -> Any:
datastore_table = DatastoreTable(
project=datastore_table_proto.project, name=datastore_table_proto.name,
)
# Distinguish between null and empty string, since project_id and namespace are StringValues.
if datastore_table_proto.HasField("project_id"):
datastore_table.project_id = datastore_table_proto.project_id.value
if datastore_table_proto.HasField("namespace"):
datastore_table.namespace = datastore_table_proto.namespace.value
return datastore_table
def update(self):
client = _initialize_client(self.project_id, self.namespace)
key = client.key("Project", self.project, "Table", self.name)
entity = datastore.Entity(
key=key, exclude_from_indexes=("created_ts", "event_ts", "values")
)
entity.update({"created_ts": datetime.utcnow()})
client.put(entity)
def teardown(self):
client = _initialize_client(self.project_id, self.namespace)
key = client.key("Project", self.project, "Table", self.name)
_delete_all_values(client, key)
# Delete the table metadata datastore entity
client.delete(key)
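# ---------------------------------------------------------------------------
# A minimal, hypothetical sketch showing that DatastoreTable round-trips through its
# protobuf representation, which is how Feast persists infra objects. The project and
# table names below are placeholder assumptions, not real resources.
def _sketch_datastore_table_roundtrip():
    table = DatastoreTable(project="demo_project", name="driver_hourly_stats")
    restored = DatastoreTable.from_proto(table.to_proto())
    return restored.project, restored.name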
|
elliot/hyperoptimization/model_coordinator.py | pkaram/elliot | 175 | 12743169 | """
Module description:
"""
__version__ = '0.3.1'
__author__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>, <EMAIL>'
from types import SimpleNamespace
import typing as t
import numpy as np
import logging as pylog
from elliot.utils import logging
from hyperopt import STATUS_OK
class ModelCoordinator(object):
"""
This class handles the selection of hyperparameters for the hyperparameter tuning realized with HyperOpt.
"""
def __init__(self, data_objs, base: SimpleNamespace, params, model_class: t.ClassVar, test_fold_index: int):
"""
The constructor creates a Placeholder of the recommender model.
:param base: a SimpleNamespace that contains the configuration (main level) options
:param params: a SimpleNamespace that contains the hyper-parameters of the model
:param model_class: the class of the recommendation model
"""
self.logger = logging.get_logger(self.__class__.__name__, pylog.CRITICAL if base.config_test else pylog.DEBUG)
self.data_objs = data_objs
self.base = base
self.params = params
self.model_class = model_class
self.test_fold_index = test_fold_index
self.model_config_index = 0
def objective(self, args):
"""
        This function respects the signature and return format required by HyperOpt for its objective functions.
        :param args: a dictionary that contains the new hyper-parameter values to be used in the current run
        :return: a dictionary with loss and status (required by HyperOpt), plus params and results (required by the framework)
"""
sampled_namespace = SimpleNamespace(**args)
model_params = SimpleNamespace(**self.params[0].__dict__)
self.logger.info("Hyperparameter tuning exploration:")
for (k, v) in sampled_namespace.__dict__.items():
model_params.__setattr__(k, v)
self.logger.info(f"{k} set to {model_params.__getattribute__(k)}")
losses = []
results = []
for trainval_index, data_obj in enumerate(self.data_objs):
self.logger.info(f"Exploration: Hyperparameter exploration number {self.model_config_index+1}")
self.logger.info(f"Exploration: Test Fold exploration number {self.test_fold_index+1}")
self.logger.info(f"Exploration: Train-Validation Fold exploration number {trainval_index+1}")
model = self.model_class(data=data_obj, config=self.base, params=model_params)
model.train()
losses.append(model.get_loss())
results.append(model.get_results())
self.model_config_index += 1
loss = np.average(losses)
results = self._average_results(results)
return {
'loss': loss,
'status': STATUS_OK,
'params': model.get_params(),
'val_results': {k: result_dict["val_results"] for k, result_dict in results.items()},
'val_statistical_results': {k: result_dict["val_statistical_results"] for k, result_dict in model.get_results().items()},
'test_results': {k: result_dict["test_results"] for k, result_dict in results.items()},
'test_statistical_results': {k: result_dict["test_statistical_results"] for k, result_dict in model.get_results().items()},
'name': model.name
}
def single(self):
"""
        Train and evaluate the model with the single hyper-parameter configuration given in `self.params`
        (no HyperOpt exploration), returning results in the same format as `objective`.
        :return: a dictionary with loss and status (required by HyperOpt), plus params and results (required by the framework)
"""
self.logger.info("Hyperparameters:")
for k, v in self.params.__dict__.items():
self.logger.info(f"{k} set to {v}")
losses = []
results = []
for trainval_index, data_obj in enumerate(self.data_objs):
self.logger.info(f"Exploration: Test Fold exploration number {self.test_fold_index+1}")
self.logger.info(f"Exploration: Train-Validation Fold exploration number {trainval_index+1}")
model = self.model_class(data=data_obj, config=self.base, params=self.params)
model.train()
losses.append(model.get_loss())
results.append(model.get_results())
loss = np.average(losses)
results = self._average_results(results)
return {
'loss': loss,
'status': STATUS_OK,
'params': model.get_params(),
'val_results': {k: result_dict["val_results"] for k, result_dict in results.items()},
'val_statistical_results': {k: result_dict["val_statistical_results"] for k, result_dict in model.get_results().items()},
'test_results': {k: result_dict["test_results"] for k, result_dict in results.items()},
'test_statistical_results': {k: result_dict["test_statistical_results"] for k, result_dict in model.get_results().items()},
'name': model.name
}
@staticmethod
def _average_results(results_list):
ks = list(results_list[0].keys())
eval_result_types = ["val_results", "test_results"]
metrics = list(results_list[0][ks[0]]["val_results"].keys())
return {k: {type_: {metric: np.average([fold_result[k][type_][metric]
for fold_result in results_list])
for metric in metrics}
for type_ in eval_result_types}
for k in ks}
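# ---------------------------------------------------------------------------
# A minimal, hypothetical sketch of _average_results above: it averages each metric
# across the train/validation folds while keeping the cutoff / result-type structure.
# The fold dictionaries below are illustrative assumptions only.
def _sketch_average_results():
    fold_a = {10: {"val_results": {"nDCG": 0.30}, "test_results": {"nDCG": 0.28}}}
    fold_b = {10: {"val_results": {"nDCG": 0.40}, "test_results": {"nDCG": 0.32}}}
    # averaged nDCG at cutoff 10: val -> 0.35, test -> 0.30
    return ModelCoordinator._average_results([fold_a, fold_b])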
|
src/RelativeDepthDataset.py | JasonQSY/YouTube3D | 102 | 12743178 | import numpy as np
import random
from utils import save_obj, load_obj
import torch
from torch.utils import data
import cv2
import os
import h5py
from ReDWebNet import resNet_data_preprocess
#######################################################################
##### ATTENTION:
##### When using this dataset, set the number of loading workers to 0.
##### There is a problem when using hdf5 and multi-threading together.
class RelativeDepthDataset(data.Dataset):
def __init__(self, csv_filename, height=240, width=320,
b_data_aug = False,
b_resnet_prep = False):
"""
b_data_aug is a dummy var, not used.
"""
super(RelativeDepthDataset, self).__init__()
print("=====================================================")
print "Using RelativeDepthDataset..."
self.parse_relative_depth_csv(csv_filename)
self.height = height
self.width = width
self.n_sample = len(self.img_names)
self.b_resnet_prep = b_resnet_prep
print "\t-(width, height): (%d, %d)" % (self.width, self.height)
print "\t-%s: %d samples" % (csv_filename, self.n_sample)
print "\t-Resnet data preprocessing:", self.b_resnet_prep
print("=====================================================")
def parse_csv_meta_data(self, csv_filename):
img_names = []
n_pairs = []
with open(csv_filename, 'r') as f:
f.readline()
while True:
dummy_info = f.readline()
if not dummy_info:
break
infos = dummy_info.split(',')
img_name, n_point = infos[0], int(infos[2])
img_names.append(img_name)
n_pairs.append(n_point)
for i in range(n_point):
f.readline()
n_pairs = np.array(n_pairs)
return img_names, n_pairs
def parse_relative_depth_csv(self, csv_filename):
hdf5_filename = csv_filename.replace('.csv', '.h5')
if not os.path.exists(hdf5_filename):
print "\tError: You need to have a hdf5 version of the csv file!"
else:
self.hdf5_handle = h5py.File(hdf5_filename, 'r')
name_filename = csv_filename.replace('.csv', '.meta')
if not os.path.exists(name_filename):
self.img_names, self.n_pairs = self.parse_csv_meta_data(csv_filename)
save_obj({"img_names":self.img_names, "n_pairs": self.n_pairs}, name_filename)
else:
temp = load_obj(name_filename)
self.img_names = temp["img_names"]
self.n_pairs = temp["n_pairs"]
def __getitem__(self, index):
# This data reader assumes that the target coordinates are in the
# same resolution as the input image, instead of the network input
# resolution!
# However, even though it resizes the input image, it does NOT
# resize the target accordingly!
# Therefore, there can only be ONE kind of input when training:
# 1. target = test = (240,320).
# When validating / testing:
# 1. target = test = (240,320)
# 2. target = test = (480,640)
color = cv2.imread(self.img_names[index])
orig_img_res = color.shape[:2]
color = cv2.resize(color, (self.width, self.height))
color = color.transpose(2, 0, 1).astype(np.float32) / 255.0
if self.b_resnet_prep:
color = resNet_data_preprocess(color)
n_pairs = self.n_pairs[index]
_hdf5_offset = int(5*index) #zero-indexed
target = self.hdf5_handle['/data'][_hdf5_offset:_hdf5_offset+5,0:n_pairs]
target[:4,:] = target[:4,:] - 1 # the coordinate in python starts from 0!!!!
return color, target.astype(np.int64), orig_img_res
def __len__(self):
return self.n_sample
def relative_depth_collate_fn(batch):
return (torch.stack([torch.from_numpy(b[0]) for b in batch], 0), [torch.from_numpy(b[1]) for b in batch], [b[2] for b in batch] )
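# ---------------------------------------------------------------------------
# A minimal, hypothetical sketch of typical usage, honoring the note above that hdf5
# and multiprocessing do not mix (num_workers must stay 0). The csv path is a
# placeholder assumption; a matching .h5 and .meta file are expected alongside it.
def _sketch_dataloader():
    dataset = RelativeDepthDataset('train_pairs.csv', height=240, width=320)
    return data.DataLoader(dataset, batch_size=4, shuffle=True, num_workers=0,
                           collate_fn=relative_depth_collate_fn)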
|
rbtools/clients/errors.py | ngie-eign/rbtools | 113 | 12743190 | <reponame>ngie-eign/rbtools<filename>rbtools/clients/errors.py
"""Error definitions for SCMClient implementations."""
from __future__ import unicode_literals
class SCMError(Exception):
"""A generic error from an SCM."""
class AuthenticationError(Exception):
"""An error for when authentication fails."""
class CreateCommitError(Exception):
"""The creation of a commit has failed or was aborted."""
class MergeError(Exception):
"""An error for when merging two branches fails."""
class PushError(Exception):
"""An error for when pushing a branch to upstream fails."""
class AmendError(Exception):
"""An error for when amending a commit fails."""
class OptionsCheckError(Exception):
"""An error for when command line options are used incorrectly."""
class InvalidRevisionSpecError(Exception):
"""An error for when the specified revisions are invalid."""
class MinimumVersionError(Exception):
"""An error for when software doesn't meet version requirements."""
class TooManyRevisionsError(InvalidRevisionSpecError):
"""An error for when too many revisions were specified."""
def __init__(self):
"""Initialize the error."""
super(TooManyRevisionsError, self).__init__(
'Too many revisions specified')
class EmptyChangeError(Exception):
"""An error for when there are no changed files."""
def __init__(self):
"""Initialize the error."""
super(EmptyChangeError, self).__init__(
"Couldn't find any affected files for this change.")
|
kitsune/motidings/tests/__init__.py | AndrewDVXI/kitsune | 929 | 12743195 | from factory import fuzzy
from factory.django import DjangoModelFactory
from tidings.models import Watch
class WatchFactory(DjangoModelFactory):
class Meta:
model = Watch
event_type = "fooevent"
is_active = True
secret = fuzzy.FuzzyText(length=10)
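# ---------------------------------------------------------------------------
# A minimal, hypothetical sketch of how the factory is typically used inside a Django
# test (a configured test database is assumed for the persisted instance).
def _sketch_watch_factory_usage():
    saved = WatchFactory(event_type="reply")  # persisted Watch with a fuzzed secret
    unsaved = WatchFactory.build()            # in-memory instance, not saved
    return saved, unsaved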
|