ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py
|
1a57e95861ca02e363da89e107d1d77c897829be
|
"""Test sobel vs gradient."""
import os
from typing import Tuple
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import src.constants as cst
import src.plot_utils.latex_style as lsty
import src.plot_utils.xarray_panels as xp
import src.time_wrapper as twr
from scipy import signal
def sobel_np(values: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Sobel operator on np array.
https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.convolve2d.html
Args:
values (np.ndarray): values to differentiate.
Returns:
Tuple[np.ndarray, np.ndarray]: gx, gy
"""
sobel = np.array(
[
[1 + 1j, 0 + 2j, -1 + 1j],
[2 + 0j, 0 + 0j, -2 + 0j],
[1 - 1j, 0 - 2j, -1 - 1j],
]
) # Gx + j*Gy
grad = signal.convolve2d(values, sobel, boundary="symm", mode="same")
return np.real(grad), np.imag(grad)
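# Illustration added for clarity (not part of the original module): the complex
# kernel packs Gx into the real part and Gy into the imaginary part, so a
# single convolve2d call yields both gradient components.
def _sobel_np_demo() -> None:
    """Apply sobel_np to a horizontal ramp and print both components."""
    ramp = np.tile(np.arange(5, dtype=float), (5, 1))  # values increase along x
    gx, gy = sobel_np(ramp)
    print("gx (roughly constant away from the boundary):\n", gx)
    print("gy (zero in the interior, the ramp is flat in y):\n", gy)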
@twr.timeit
def sobel_vs_grad() -> None:
"""
Sobel versus gradient.
"""
lsty.mpl_params()
ds = xr.open_dataset(cst.DEFAULT_NC)
da_temp = ds.PCA_VALUES.isel(time=cst.EXAMPLE_TIME_INDEX)
pc1_y: xr.DataArray = da_temp.isel(pca=0)
pc1_y.values = sobel_np(pc1_y.values)[1]
pc2_y: xr.DataArray = da_temp.isel(pca=1)
pc2_y.values = sobel_np(pc2_y.values)[1]
pc3_y: xr.DataArray = da_temp.isel(pca=2)
pc3_y.values = sobel_np(pc3_y.values)[1]
xp.sep_plots(
[pc1_y, pc2_y, pc3_y],
["$G_y$ * PC1", "$G_y$ * PC2", "$G_y$ * PC3"],
[[-40, 40], [-40, 40], [-40, 40]],
)
plt.savefig(
os.path.join(cst.FIGURE_PATH, "RUN_" + cst.RUN_NAME + "_example_pcy.png")
)
plt.clf()
pc1_x: xr.DataArray = da_temp.isel(pca=0)
pc1_x.values = sobel_np(pc1_x.values)[0]
pc2_x: xr.DataArray = da_temp.isel(pca=1)
pc2_x.values = sobel_np(pc2_x.values)[0]
pc3_x: xr.DataArray = da_temp.isel(pca=2)
pc3_x.values = sobel_np(pc3_x.values)[0]
xp.sep_plots(
[pc1_x, pc2_x, pc3_x],
["$G_x$ * PC1", "$G_x$ * PC2", "$G_x$ * PC3"],
[[-40, 40], [-40, 40], [-40, 40]],
)
plt.savefig(
os.path.join(cst.FIGURE_PATH, "RUN_" + cst.RUN_NAME + "_example_pcx.png")
)
plt.clf()
da_y = ds.PCA_VALUES.isel(time=cst.EXAMPLE_TIME_INDEX).differentiate(cst.Y_COORD)
xp.sep_plots(
[da_y.isel(pca=0), da_y.isel(pca=1), da_y.isel(pca=2)],
["PC1 y-grad", "PC2 y-grad", "PC3 y-grad"],
[[-20, 20], [-20, 20], [-20, 20]],
)
plt.savefig(
os.path.join(cst.FIGURE_PATH, "RUN_" + cst.RUN_NAME + "_example_pc_y.png")
)
plt.clf()
da_x = ds.PCA_VALUES.isel(time=cst.EXAMPLE_TIME_INDEX).differentiate(cst.X_COORD)
xp.sep_plots(
[da_x.isel(pca=0), da_x.isel(pca=1), da_x.isel(pca=2)],
["PC1 x-grad", "PC2 x-grad", "PC3 x-grad"],
[[-20, 20], [-20, 20], [-20, 20]],
)
plt.savefig(
os.path.join(cst.FIGURE_PATH, "RUN_" + cst.RUN_NAME + "_example_pc_x.png")
)
plt.clf()
def sobel_scharr_test() -> None:
"""Test scharr / sobel."""
da = xr.DataArray(np.random.randn(15, 30), dims=[cst.X_COORD, cst.Y_COORD])
# kernel = xr.DataArray(filter, dims=["kx", "ky"])
# da_new = da.rolling(XC=3, YC=3).construct(XC="kx", YC="ky").dot(kernel)
val = da.values
print("val", val)
# print(da_new)
scharr = np.array(
[
[-3 - 3j, 0 - 10j, +3 - 3j],
[-10 + 0j, 0 + 0j, +10 + 0j],
[-3 + 3j, 0 + 10j, +3 + 3j],
]
) # Gx + j*Gy
sobel = np.array(
[
[1 + 1j, 0 + 2j, -1 + 1j],
[2 + 0j, 0 + 0j, -2 + 0j],
[1 - 1j, 0 - 2j, -1 - 1j],
]
) # Gx + j*Gy
for filt in [sobel, scharr]:
grad = signal.convolve2d(val, filt, boundary="symm", mode="same")
gx = np.real(grad)
gy = np.imag(grad)
print(gx)
print(gy)
# print(grad)
_, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))
ax_orig.imshow(val, cmap="gray")
ax_orig.set_title("Original")
ax_orig.set_axis_off()
ax_mag.imshow(np.absolute(grad), cmap="gray")
ax_mag.set_title("Gradient magnitude")
ax_mag.set_axis_off()
ax_ang.imshow(np.angle(grad), cmap="hsv") # hsv is cyclic, like angles
ax_ang.set_title("Gradient orientation")
ax_ang.set_axis_off()
# fig.show()
plt.savefig("example.png")
def grad_v() -> None:
"""Gradient in v direction."""
ds = xr.open_dataset(cst.DEFAULT_NC)
da_y = ds.PCA_VALUES.isel(time=cst.EXAMPLE_TIME_INDEX).differentiate(cst.Y_COORD)
xp.sep_plots(
[da_y.isel(pca=0), da_y.isel(pca=1), da_y.isel(pca=2)],
["PC1 y-grad", "PC2 y-grad", "PC3 y-grad"],
)
pc_y_grad_name = os.path.join(
cst.FIGURE_PATH, "RUN_" + cst.RUN_NAME + "_y_grad.png"
)
plt.savefig(pc_y_grad_name)
plt.clf()
if __name__ == "__main__":
sobel_vs_grad()
# python3 src/sobel.py
|
py
|
1a57e9fb7a7c33a1b960b77b07442ae74b353e4b
|
default_excluded_paths = ['/health/', '/api/notify/']
def excluded_path(path, excluded_paths):
    # drop empty entries so that a blank setting does not match every path
    excl_paths = [p for p in excluded_paths.split(',') if p] + default_excluded_paths
    return (
        path and
        any(path.startswith(p) for p in excl_paths))
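# Illustrative behaviour (added for clarity): excluded_paths is a
# comma-separated string of prefixes, and the defaults above always apply.
if __name__ == '__main__':
    assert excluded_path('/health/ping', '')              # default prefix matches
    assert excluded_path('/metrics/cpu', '/metrics/')     # custom prefix matches
    assert not excluded_path('/api/users', '/metrics/')   # no prefix matches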
|
py
|
1a57ed7abe9de9b81d31da48ae30b675edb6fead
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from oslo.config import cfg
from oslo import messaging
from sahara import conductor as c
from sahara import context
from sahara.openstack.common import log as logging
from sahara.plugins import base as plugin_base
from sahara.service.edp import job_manager
from sahara.service import trusts
from sahara.utils import general as g
from sahara.utils import rpc as rpc_utils
conductor = c.API
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
INFRA = None
def setup_ops(engine):
global INFRA
INFRA = engine
class LocalOps(object):
def provision_cluster(self, cluster_id):
context.spawn("cluster-creating-%s" % cluster_id,
_provision_cluster, cluster_id)
def provision_scaled_cluster(self, cluster_id, node_group_id_map):
context.spawn("cluster-scaling-%s" % cluster_id,
_provision_scaled_cluster, cluster_id, node_group_id_map)
def terminate_cluster(self, cluster_id):
context.spawn("cluster-terminating-%s" % cluster_id,
_terminate_cluster, cluster_id)
def run_edp_job(self, job_execution_id):
context.spawn("Starting Job Execution %s" % job_execution_id,
_run_edp_job, job_execution_id)
class RemoteOps(rpc_utils.RPCClient):
def __init__(self):
target = messaging.Target(topic='sahara-ops', version='1.0')
super(RemoteOps, self).__init__(target)
def provision_cluster(self, cluster_id):
self.cast('provision_cluster', cluster_id=cluster_id)
def provision_scaled_cluster(self, cluster_id, node_group_id_map):
self.cast('provision_scaled_cluster', cluster_id=cluster_id,
node_group_id_map=node_group_id_map)
def terminate_cluster(self, cluster_id):
self.cast('terminate_cluster', cluster_id=cluster_id)
def run_edp_job(self, job_execution_id):
self.cast('run_edp_job', job_execution_id=job_execution_id)
class OpsServer(rpc_utils.RPCServer):
def __init__(self):
target = messaging.Target(topic='sahara-ops', server=uuid.uuid4(),
version='1.0')
super(OpsServer, self).__init__(target)
def provision_cluster(self, cluster_id):
_provision_cluster(cluster_id)
def provision_scaled_cluster(self, cluster_id, node_group_id_map):
_provision_scaled_cluster(cluster_id, node_group_id_map)
def terminate_cluster(self, cluster_id):
_terminate_cluster(cluster_id)
def run_edp_job(self, job_execution_id):
_run_edp_job(job_execution_id)
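# Usage sketch (comments only, added for illustration; the actual service
# wiring lives outside this module): an engine is registered first, and cluster
# operations are then invoked either in-process via LocalOps or over RPC via
# RemoteOps, with an OpsServer listening on the 'sahara-ops' topic:
#
#     setup_ops(engine)                  # engine provides create/scale/shutdown
#     ops = LocalOps()                   # or RemoteOps() for the RPC path
#     ops.provision_cluster(cluster_id)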
def _prepare_provisioning(cluster_id):
ctx = context.ctx()
cluster = conductor.cluster_get(ctx, cluster_id)
plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
for nodegroup in cluster.node_groups:
conductor.node_group_update(
ctx, nodegroup,
{"image_username": INFRA.get_node_group_image_username(nodegroup)})
cluster = conductor.cluster_get(ctx, cluster_id)
return ctx, cluster, plugin
def _provision_cluster(cluster_id):
ctx, cluster, plugin = _prepare_provisioning(cluster_id)
if CONF.use_identity_api_v3 and cluster.is_transient:
trusts.create_trust(cluster)
# updating cluster infra
cluster = conductor.cluster_update(ctx, cluster,
{"status": "InfraUpdating"})
LOG.info(g.format_cluster_status(cluster))
plugin.update_infra(cluster)
# creating instances and configuring them
cluster = conductor.cluster_get(ctx, cluster_id)
INFRA.create_cluster(cluster)
if not g.check_cluster_exists(cluster):
LOG.info(g.format_cluster_deleted_message(cluster))
return
# configure cluster
cluster = conductor.cluster_update(ctx, cluster, {"status": "Configuring"})
LOG.info(g.format_cluster_status(cluster))
try:
plugin.configure_cluster(cluster)
except Exception as ex:
if not g.check_cluster_exists(cluster):
LOG.info(g.format_cluster_deleted_message(cluster))
return
LOG.exception("Can't configure cluster '%s' (reason: %s)",
cluster.name, ex)
cluster = conductor.cluster_update(ctx, cluster, {"status": "Error"})
LOG.info(g.format_cluster_status(cluster))
return
if not g.check_cluster_exists(cluster):
LOG.info(g.format_cluster_deleted_message(cluster))
return
# starting prepared and configured cluster
cluster = conductor.cluster_update(ctx, cluster, {"status": "Starting"})
LOG.info(g.format_cluster_status(cluster))
try:
plugin.start_cluster(cluster)
except Exception as ex:
if not g.check_cluster_exists(cluster):
LOG.info(g.format_cluster_deleted_message(cluster))
return
LOG.exception("Can't start services for cluster '%s' (reason: %s)",
cluster.name, ex)
cluster = conductor.cluster_update(ctx, cluster, {"status": "Error"})
LOG.info(g.format_cluster_status(cluster))
return
if not g.check_cluster_exists(cluster):
LOG.info(g.format_cluster_deleted_message(cluster))
return
# cluster is now up and ready
cluster = conductor.cluster_update(ctx, cluster, {"status": "Active"})
LOG.info(g.format_cluster_status(cluster))
# schedule execution pending job for cluster
for je in conductor.job_execution_get_all(ctx, cluster_id=cluster.id):
job_manager.run_job(je.id)
def _provision_scaled_cluster(cluster_id, node_group_id_map):
ctx, cluster, plugin = _prepare_provisioning(cluster_id)
# Decommissioning surplus nodes with the plugin
cluster = conductor.cluster_update(ctx, cluster,
{"status": "Decommissioning"})
LOG.info(g.format_cluster_status(cluster))
instances_to_delete = []
for node_group in cluster.node_groups:
new_count = node_group_id_map[node_group.id]
if new_count < node_group.count:
instances_to_delete += node_group.instances[new_count:
node_group.count]
if instances_to_delete:
plugin.decommission_nodes(cluster, instances_to_delete)
# Scaling infrastructure
cluster = conductor.cluster_update(ctx, cluster, {"status": "Scaling"})
LOG.info(g.format_cluster_status(cluster))
instances = INFRA.scale_cluster(cluster, node_group_id_map)
# Setting up new nodes with the plugin
if instances:
cluster = conductor.cluster_update(ctx, cluster,
{"status": "Configuring"})
LOG.info(g.format_cluster_status(cluster))
try:
instances = g.get_instances(cluster, instances)
plugin.scale_cluster(cluster, instances)
except Exception as ex:
if not g.check_cluster_exists(cluster):
LOG.info(g.format_cluster_deleted_message(cluster))
return
LOG.exception("Can't scale cluster '%s' (reason: %s)",
cluster.name, ex)
cluster = conductor.cluster_update(ctx, cluster,
{"status": "Error"})
LOG.info(g.format_cluster_status(cluster))
return
if not g.check_cluster_exists(cluster):
LOG.info(g.format_cluster_deleted_message(cluster))
return
cluster = conductor.cluster_update(ctx, cluster, {"status": "Active"})
LOG.info(g.format_cluster_status(cluster))
def _terminate_cluster(cluster_id):
ctx = context.ctx()
cluster = conductor.cluster_get(ctx, cluster_id)
plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
plugin.on_terminate_cluster(cluster)
INFRA.shutdown_cluster(cluster)
if CONF.use_identity_api_v3:
trusts.delete_trust(cluster)
conductor.cluster_destroy(ctx, cluster)
def _run_edp_job(job_execution_id):
job_manager.run_job(job_execution_id)
|
py
|
1a57ee3eb0d9a1a98589fb95487cf6759f0c11d9
|
from django.contrib import admin
from .models import Bot, TelegramUser
# Register your models here.
@admin.register(Bot)
class BotAdmin(admin.ModelAdmin):
fields = ['api_key', 'owner']
admin.site.register(TelegramUser)
|
py
|
1a57ef15e4b699557550df6c7e7829cca3a3552b
|
"""empty message
Revision ID: 4ea0f1d728b7
Revises:
Create Date: 2020-01-07 21:09:04.533411
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4ea0f1d728b7'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('lotteries',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('date', sa.Date(), nullable=True),
sa.Column('red1', sa.Integer(), nullable=True),
sa.Column('red2', sa.Integer(), nullable=True),
sa.Column('red3', sa.Integer(), nullable=True),
sa.Column('red4', sa.Integer(), nullable=True),
sa.Column('red5', sa.Integer(), nullable=True),
sa.Column('red6', sa.Integer(), nullable=True),
sa.Column('blue', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('lotteries')
# ### end Alembic commands ###
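# Typical usage (not part of the generated revision): apply this migration with
# `alembic upgrade head` and revert it with `alembic downgrade -1`.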
|
py
|
1a57ef87a8b480020bbc4852b6c09a7a42e64325
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AccountFiltersOperations(object):
"""AccountFiltersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.media.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
account_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AccountFilterCollection"]
"""List Account Filters.
List Account Filters in the Media Services account.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AccountFilterCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.media.models.AccountFilterCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccountFilterCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AccountFilterCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ApiError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/accountFilters'} # type: ignore
def get(
self,
resource_group_name, # type: str
account_name, # type: str
filter_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.AccountFilter"]
"""Get an Account Filter.
Get the details of an Account Filter in the Media Services account.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param filter_name: The Account Filter name.
:type filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AccountFilter, or the result of cls(response)
:rtype: ~azure.mgmt.media.models.AccountFilter or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AccountFilter"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'filterName': self._serialize.url("filter_name", filter_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AccountFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/accountFilters/{filterName}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
account_name, # type: str
filter_name, # type: str
parameters, # type: "_models.AccountFilter"
**kwargs # type: Any
):
# type: (...) -> "_models.AccountFilter"
"""Create or update an Account Filter.
Creates or updates an Account Filter in the Media Services account.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param filter_name: The Account Filter name.
:type filter_name: str
:param parameters: The request parameters.
:type parameters: ~azure.mgmt.media.models.AccountFilter
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AccountFilter, or the result of cls(response)
:rtype: ~azure.mgmt.media.models.AccountFilter
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccountFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'filterName': self._serialize.url("filter_name", filter_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AccountFilter')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AccountFilter', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AccountFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/accountFilters/{filterName}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
account_name, # type: str
filter_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete an Account Filter.
Deletes an Account Filter in the Media Services account.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param filter_name: The Account Filter name.
:type filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'filterName': self._serialize.url("filter_name", filter_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/accountFilters/{filterName}'} # type: ignore
def update(
self,
resource_group_name, # type: str
account_name, # type: str
filter_name, # type: str
parameters, # type: "_models.AccountFilter"
**kwargs # type: Any
):
# type: (...) -> "_models.AccountFilter"
"""Update an Account Filter.
Updates an existing Account Filter in the Media Services account.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param filter_name: The Account Filter name.
:type filter_name: str
:param parameters: The request parameters.
:type parameters: ~azure.mgmt.media.models.AccountFilter
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AccountFilter, or the result of cls(response)
:rtype: ~azure.mgmt.media.models.AccountFilter
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccountFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'filterName': self._serialize.url("filter_name", filter_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AccountFilter')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AccountFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/accountFilters/{filterName}'} # type: ignore
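# Hypothetical usage sketch (names outside this module are assumptions): these
# operations are normally reached through the generated Media Services client
# rather than by instantiating this class directly, e.g.:
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.media import AzureMediaServices
#
#     client = AzureMediaServices(DefaultAzureCredential(), "<subscription-id>")
#     for account_filter in client.account_filters.list("<resource-group>", "<account-name>"):
#         print(account_filter.name)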
|
py
|
1a57efa620c1718240114e4aebf7cb42be9c31c1
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['NamespaceNetworkRuleSetArgs', 'NamespaceNetworkRuleSet']
@pulumi.input_type
class NamespaceNetworkRuleSetArgs:
def __init__(__self__, *,
namespace_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
default_action: Optional[pulumi.Input[Union[str, 'DefaultAction']]] = None,
ip_rules: Optional[pulumi.Input[Sequence[pulumi.Input['NWRuleSetIpRulesArgs']]]] = None,
trusted_service_access_enabled: Optional[pulumi.Input[bool]] = None,
virtual_network_rules: Optional[pulumi.Input[Sequence[pulumi.Input['NWRuleSetVirtualNetworkRulesArgs']]]] = None):
"""
The set of arguments for constructing a NamespaceNetworkRuleSet resource.
:param pulumi.Input[str] namespace_name: The Namespace name
:param pulumi.Input[str] resource_group_name: Name of the resource group within the azure subscription.
:param pulumi.Input[Union[str, 'DefaultAction']] default_action: Default Action for Network Rule Set
:param pulumi.Input[Sequence[pulumi.Input['NWRuleSetIpRulesArgs']]] ip_rules: List of IpRules
:param pulumi.Input[bool] trusted_service_access_enabled: Value that indicates whether Trusted Service Access is Enabled or not.
:param pulumi.Input[Sequence[pulumi.Input['NWRuleSetVirtualNetworkRulesArgs']]] virtual_network_rules: List VirtualNetwork Rules
"""
pulumi.set(__self__, "namespace_name", namespace_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if default_action is not None:
pulumi.set(__self__, "default_action", default_action)
if ip_rules is not None:
pulumi.set(__self__, "ip_rules", ip_rules)
if trusted_service_access_enabled is not None:
pulumi.set(__self__, "trusted_service_access_enabled", trusted_service_access_enabled)
if virtual_network_rules is not None:
pulumi.set(__self__, "virtual_network_rules", virtual_network_rules)
@property
@pulumi.getter(name="namespaceName")
def namespace_name(self) -> pulumi.Input[str]:
"""
The Namespace name
"""
return pulumi.get(self, "namespace_name")
@namespace_name.setter
def namespace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "namespace_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the resource group within the azure subscription.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="defaultAction")
def default_action(self) -> Optional[pulumi.Input[Union[str, 'DefaultAction']]]:
"""
Default Action for Network Rule Set
"""
return pulumi.get(self, "default_action")
@default_action.setter
def default_action(self, value: Optional[pulumi.Input[Union[str, 'DefaultAction']]]):
pulumi.set(self, "default_action", value)
@property
@pulumi.getter(name="ipRules")
def ip_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NWRuleSetIpRulesArgs']]]]:
"""
List of IpRules
"""
return pulumi.get(self, "ip_rules")
@ip_rules.setter
def ip_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NWRuleSetIpRulesArgs']]]]):
pulumi.set(self, "ip_rules", value)
@property
@pulumi.getter(name="trustedServiceAccessEnabled")
def trusted_service_access_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Value that indicates whether Trusted Service Access is Enabled or not.
"""
return pulumi.get(self, "trusted_service_access_enabled")
@trusted_service_access_enabled.setter
def trusted_service_access_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "trusted_service_access_enabled", value)
@property
@pulumi.getter(name="virtualNetworkRules")
def virtual_network_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NWRuleSetVirtualNetworkRulesArgs']]]]:
"""
List VirtualNetwork Rules
"""
return pulumi.get(self, "virtual_network_rules")
@virtual_network_rules.setter
def virtual_network_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NWRuleSetVirtualNetworkRulesArgs']]]]):
pulumi.set(self, "virtual_network_rules", value)
class NamespaceNetworkRuleSet(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
default_action: Optional[pulumi.Input[Union[str, 'DefaultAction']]] = None,
ip_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NWRuleSetIpRulesArgs']]]]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
trusted_service_access_enabled: Optional[pulumi.Input[bool]] = None,
virtual_network_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NWRuleSetVirtualNetworkRulesArgs']]]]] = None,
__props__=None):
"""
Description of topic resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'DefaultAction']] default_action: Default Action for Network Rule Set
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NWRuleSetIpRulesArgs']]]] ip_rules: List of IpRules
:param pulumi.Input[str] namespace_name: The Namespace name
:param pulumi.Input[str] resource_group_name: Name of the resource group within the azure subscription.
:param pulumi.Input[bool] trusted_service_access_enabled: Value that indicates whether Trusted Service Access is Enabled or not.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NWRuleSetVirtualNetworkRulesArgs']]]] virtual_network_rules: List VirtualNetwork Rules
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: NamespaceNetworkRuleSetArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Description of topic resource.
:param str resource_name: The name of the resource.
:param NamespaceNetworkRuleSetArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NamespaceNetworkRuleSetArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
default_action: Optional[pulumi.Input[Union[str, 'DefaultAction']]] = None,
ip_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NWRuleSetIpRulesArgs']]]]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
trusted_service_access_enabled: Optional[pulumi.Input[bool]] = None,
virtual_network_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NWRuleSetVirtualNetworkRulesArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NamespaceNetworkRuleSetArgs.__new__(NamespaceNetworkRuleSetArgs)
__props__.__dict__["default_action"] = default_action
__props__.__dict__["ip_rules"] = ip_rules
if namespace_name is None and not opts.urn:
raise TypeError("Missing required property 'namespace_name'")
__props__.__dict__["namespace_name"] = namespace_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["trusted_service_access_enabled"] = trusted_service_access_enabled
__props__.__dict__["virtual_network_rules"] = virtual_network_rules
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:eventhub/v20180101preview:NamespaceNetworkRuleSet"), pulumi.Alias(type_="azure-native:eventhub:NamespaceNetworkRuleSet"), pulumi.Alias(type_="azure-nextgen:eventhub:NamespaceNetworkRuleSet"), pulumi.Alias(type_="azure-native:eventhub/v20170401:NamespaceNetworkRuleSet"), pulumi.Alias(type_="azure-nextgen:eventhub/v20170401:NamespaceNetworkRuleSet"), pulumi.Alias(type_="azure-native:eventhub/v20210101preview:NamespaceNetworkRuleSet"), pulumi.Alias(type_="azure-nextgen:eventhub/v20210101preview:NamespaceNetworkRuleSet"), pulumi.Alias(type_="azure-native:eventhub/v20210601preview:NamespaceNetworkRuleSet"), pulumi.Alias(type_="azure-nextgen:eventhub/v20210601preview:NamespaceNetworkRuleSet")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(NamespaceNetworkRuleSet, __self__).__init__(
'azure-native:eventhub/v20180101preview:NamespaceNetworkRuleSet',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'NamespaceNetworkRuleSet':
"""
Get an existing NamespaceNetworkRuleSet resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = NamespaceNetworkRuleSetArgs.__new__(NamespaceNetworkRuleSetArgs)
__props__.__dict__["default_action"] = None
__props__.__dict__["ip_rules"] = None
__props__.__dict__["name"] = None
__props__.__dict__["trusted_service_access_enabled"] = None
__props__.__dict__["type"] = None
__props__.__dict__["virtual_network_rules"] = None
return NamespaceNetworkRuleSet(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="defaultAction")
def default_action(self) -> pulumi.Output[Optional[str]]:
"""
Default Action for Network Rule Set
"""
return pulumi.get(self, "default_action")
@property
@pulumi.getter(name="ipRules")
def ip_rules(self) -> pulumi.Output[Optional[Sequence['outputs.NWRuleSetIpRulesResponse']]]:
"""
List of IpRules
"""
return pulumi.get(self, "ip_rules")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="trustedServiceAccessEnabled")
def trusted_service_access_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Value that indicates whether Trusted Service Access is Enabled or not.
"""
return pulumi.get(self, "trusted_service_access_enabled")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualNetworkRules")
def virtual_network_rules(self) -> pulumi.Output[Optional[Sequence['outputs.NWRuleSetVirtualNetworkRulesResponse']]]:
"""
List VirtualNetwork Rules
"""
return pulumi.get(self, "virtual_network_rules")
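# Hypothetical usage sketch (the import path and argument values are
# assumptions based on the resource type token above):
#
#     import pulumi_azure_native as azure_native
#
#     rule_set = azure_native.eventhub.v20180101preview.NamespaceNetworkRuleSet(
#         "ruleSet",
#         namespace_name="my-namespace",
#         resource_group_name="my-resource-group",
#         default_action="Deny",
#         ip_rules=[azure_native.eventhub.v20180101preview.NWRuleSetIpRulesArgs(
#             ip_mask="10.0.0.0/24",
#             action="Allow")])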
|
py
|
1a57efb187529318c31e764dd1a026b10209d028
|
def bsearch_rightmost(array, x):
left = 0
right = len(array)
while left < right:
m = left + (right - left) // 2
if array[m] <= x:
left = m + 1
else:
right = m
return left - 1
lst = [11, 11, 22, 33, 44]
print(bsearch_rightmost(lst, 11))
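# A few more illustrative cases (added for clarity): the function returns the
# index of the rightmost element <= x, or -1 when every element is greater.
print(bsearch_rightmost(lst, 10))   # -1 (10 is smaller than every element)
print(bsearch_rightmost(lst, 25))   # 2  (rightmost element <= 25 is lst[2] == 22)
print(bsearch_rightmost(lst, 99))   # 4  (every element is <= 99)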
|
py
|
1a57f1248dbdd5d9ce7743a0963bb57b7d157413
|
"""converted from ..\fonts\CGA_PRAVETZ__8x8.bin """
WIDTH = 8
HEIGHT = 8
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x30\x78\x78\x30\x30\x00\x30\x00'\
b'\x6c\x6c\x6c\x00\x00\x00\x00\x00'\
b'\x6c\x6c\xfe\x6c\xfe\x6c\x6c\x00'\
b'\x30\x7c\xc0\x78\x0c\xf8\x30\x00'\
b'\x00\xc6\xcc\x18\x30\x66\xc6\x00'\
b'\x38\x6c\x38\x76\xdc\xcc\x76\x00'\
b'\x60\x60\xc0\x00\x00\x00\x00\x00'\
b'\x18\x30\x60\x60\x60\x30\x18\x00'\
b'\x60\x30\x18\x18\x18\x30\x60\x00'\
b'\x00\x66\x3c\xff\x3c\x66\x00\x00'\
b'\x00\x30\x30\xfc\x30\x30\x00\x00'\
b'\x00\x00\x00\x00\x00\x30\x30\x60'\
b'\x00\x00\x00\xfc\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x30\x30\x00'\
b'\x06\x0c\x18\x30\x60\xc0\x80\x00'\
b'\x7c\xc6\xce\xde\xf6\xe6\x7c\x00'\
b'\x30\x70\x30\x30\x30\x30\xfc\x00'\
b'\x78\xcc\x0c\x38\x60\xcc\xfc\x00'\
b'\x78\xcc\x0c\x38\x0c\xcc\x78\x00'\
b'\x1c\x3c\x6c\xcc\xfe\x0c\x1e\x00'\
b'\xfc\xc0\xf8\x0c\x0c\xcc\x78\x00'\
b'\x38\x60\xc0\xf8\xcc\xcc\x78\x00'\
b'\xfc\xcc\x0c\x18\x30\x30\x30\x00'\
b'\x78\xcc\xcc\x78\xcc\xcc\x78\x00'\
b'\x78\xcc\xcc\x7c\x0c\x18\x70\x00'\
b'\x00\x30\x30\x00\x00\x30\x30\x00'\
b'\x00\x30\x30\x00\x00\x30\x30\x60'\
b'\x18\x30\x60\xc0\x60\x30\x18\x00'\
b'\x00\x00\xfc\x00\x00\xfc\x00\x00'\
b'\x60\x30\x18\x0c\x18\x30\x60\x00'\
b'\x78\xcc\x0c\x18\x30\x00\x30\x00'\
b'\x7c\xc6\xde\xde\xde\xc0\x78\x00'\
b'\x30\x78\xcc\xcc\xfc\xcc\xcc\x00'\
b'\xfc\x66\x66\x7c\x66\x66\xfc\x00'\
b'\x3c\x66\xc0\xc0\xc0\x66\x3c\x00'\
b'\xf8\x6c\x66\x66\x66\x6c\xf8\x00'\
b'\xfe\x62\x68\x78\x68\x62\xfe\x00'\
b'\xfe\x62\x68\x78\x68\x60\xf0\x00'\
b'\x3c\x66\xc0\xc0\xce\x66\x3e\x00'\
b'\xcc\xcc\xcc\xfc\xcc\xcc\xcc\x00'\
b'\x78\x30\x30\x30\x30\x30\x78\x00'\
b'\x1e\x0c\x0c\x0c\xcc\xcc\x78\x00'\
b'\xe6\x66\x6c\x78\x6c\x66\xe6\x00'\
b'\xf0\x60\x60\x60\x62\x66\xfe\x00'\
b'\xc6\xee\xfe\xfe\xd6\xc6\xc6\x00'\
b'\xc6\xe6\xf6\xde\xce\xc6\xc6\x00'\
b'\x38\x6c\xc6\xc6\xc6\x6c\x38\x00'\
b'\xfc\x66\x66\x7c\x60\x60\xf0\x00'\
b'\x78\xcc\xcc\xcc\xdc\x78\x1c\x00'\
b'\xfc\x66\x66\x7c\x6c\x66\xe6\x00'\
b'\x78\xcc\x60\x30\x18\xcc\x78\x00'\
b'\xfc\xb4\x30\x30\x30\x30\x78\x00'\
b'\xcc\xcc\xcc\xcc\xcc\xcc\xfc\x00'\
b'\xcc\xcc\xcc\xcc\xcc\x78\x30\x00'\
b'\xc6\xc6\xc6\xd6\xfe\xee\xc6\x00'\
b'\xc6\xc6\x6c\x38\x38\x6c\xc6\x00'\
b'\xcc\xcc\xcc\x78\x30\x30\x78\x00'\
b'\xfe\xc6\x8c\x18\x32\x66\xfe\x00'\
b'\x78\x60\x60\x60\x60\x60\x78\x00'\
b'\xc0\x60\x30\x18\x0c\x06\x02\x00'\
b'\x78\x18\x18\x18\x18\x18\x78\x00'\
b'\x10\x38\x6c\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xff'\
b'\x30\x30\x18\x00\x00\x00\x00\x00'\
b'\x00\x00\x78\x0c\x7c\xcc\x76\x00'\
b'\xe0\x60\x60\x7c\x66\x66\xdc\x00'\
b'\x00\x00\x78\xcc\xc0\xcc\x78\x00'\
b'\x1c\x0c\x0c\x7c\xcc\xcc\x76\x00'\
b'\x00\x00\x78\xcc\xfc\xc0\x78\x00'\
b'\x38\x6c\x60\xf0\x60\x60\xf0\x00'\
b'\x00\x00\x76\xcc\xcc\x7c\x0c\xf8'\
b'\xe0\x60\x6c\x76\x66\x66\xe6\x00'\
b'\x30\x00\x70\x30\x30\x30\x78\x00'\
b'\x0c\x00\x0c\x0c\x0c\xcc\xcc\x78'\
b'\xe0\x60\x66\x6c\x78\x6c\xe6\x00'\
b'\x70\x30\x30\x30\x30\x30\x78\x00'\
b'\x00\x00\xcc\xfe\xfe\xd6\xc6\x00'\
b'\x00\x00\xf8\xcc\xcc\xcc\xcc\x00'\
b'\x00\x00\x78\xcc\xcc\xcc\x78\x00'\
b'\x00\x00\xdc\x66\x66\x7c\x60\xf0'\
b'\x00\x00\x76\xcc\xcc\x7c\x0c\x1e'\
b'\x00\x00\xdc\x76\x66\x60\xf0\x00'\
b'\x00\x00\x7c\xc0\x78\x0c\xf8\x00'\
b'\x10\x30\x7c\x30\x30\x34\x18\x00'\
b'\x00\x00\xcc\xcc\xcc\xcc\x76\x00'\
b'\x00\x00\xcc\xcc\xcc\x78\x30\x00'\
b'\x00\x00\xc6\xd6\xfe\xfe\x6c\x00'\
b'\x00\x00\xc6\x6c\x38\x6c\xc6\x00'\
b'\x00\x00\xcc\xcc\xcc\x7c\x0c\xf8'\
b'\x00\x00\xfc\x98\x30\x64\xfc\x00'\
b'\x1c\x30\x30\xe0\x30\x30\x1c\x00'\
b'\x18\x18\x18\x00\x18\x18\x18\x00'\
b'\xe0\x30\x30\x1c\x30\x30\xe0\x00'\
b'\x76\xdc\x00\x00\x00\x00\x00\x00'\
b'\x00\x10\x38\x6c\xc6\xc6\xfe\x00'\
FONT = memoryview(_FONT)
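# Access sketch (added for illustration, assuming the usual packed layout:
# HEIGHT row-bytes per glyph, stored in code-point order starting at FIRST):
def glyph(char):
    """Return the HEIGHT row-bytes for a character in [FIRST, LAST], else None."""
    code = ord(char)
    if not FIRST <= code <= LAST:
        return None
    offset = (code - FIRST) * HEIGHT
    return FONT[offset:offset + HEIGHT]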
|
py
|
1a57f145aa5971dee759e7038b823f0d923f16e1
|
# -*- coding: utf-8 -*-
###
# (C) Copyright [2019] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import unittest
import mock
from hpOneView.connection import connection
from hpOneView.resources.networking.logical_switch_groups import LogicalSwitchGroups
from hpOneView.resources.resource import Resource, ResourceHelper, ResourcePatchMixin
class LogicalSwitchGroupsTest(unittest.TestCase):
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host)
self._lsg = LogicalSwitchGroups(self.connection)
self.uri = "/rest/logical-switch-groups/dce3fc90-873e-48f7-8340-cc927d625b16"
self._lsg.data = {"uri": self.uri}
@mock.patch.object(ResourceHelper, 'get_all')
def test_get_all_called_once(self, mock_get_all):
filter = 'name=TestName'
sort = 'name:ascending'
self._lsg.get_all(2, 500, filter, sort)
mock_get_all.assert_called_once_with(count=500, filter='name=TestName',
sort='name:ascending', start=2)
@mock.patch.object(ResourceHelper, 'get_all')
def test_get_all_called_once_with_default(self, mock_get_all):
self._lsg.get_all()
mock_get_all.assert_called_once_with(count=-1, filter=u'', sort=u'', start=0)
@mock.patch.object(ResourceHelper, 'create')
def test_create_called_once(self, mock_create):
lsg = {
"name": "OneView Test Logical Switch Group",
"switchMapTemplate": {
"switchMapEntryTemplates": [{
"logicalLocation": {
"locationEntries": [{
"relativeValue": 1,
"type": "StackingMemberId"
}]
},
"permittedSwitchTypeUri": "/rest/switch-types/46d7ffad-4424-4e36-acf3-b379c3116206"
}]
}
}
self._lsg.create(lsg, timeout=70)
mock_create.assert_called_once_with(lsg, None, 70, None, False)
@mock.patch.object(Resource, 'ensure_resource_data')
@mock.patch.object(ResourceHelper, 'update')
def test_update_called_once(self, mock_update, mock_ensure_client):
lsg = {
"name": "Updated name",
"switchMapTemplate": {
"switchMapEntryTemplates": [{
"logicalLocation": {
"locationEntries": [{
"relativeValue": 1,
"type": "StackingMemberId"
}]
},
"permittedSwitchTypeUri": "/rest/switch-types/46d7ffad-4424-4e36-acf3-b379c3116206"
}]
},
"uri": self.uri
}
self._lsg.update(lsg, timeout=70)
mock_update.assert_called_once_with(lsg, self.uri, False, 70, None)
@mock.patch.object(ResourceHelper, 'delete')
def test_delete_called_once(self, mock_delete):
self._lsg.delete(force=True, timeout=50)
mock_delete.assert_called_once_with(self.uri, custom_headers=None, force=True, timeout=50)
@mock.patch.object(ResourceHelper, 'delete')
def test_delete_called_once_with_defaults(self, mock_delete):
self._lsg.delete()
mock_delete.assert_called_once_with(self.uri, custom_headers=None, force=False, timeout=-1)
@mock.patch.object(ResourcePatchMixin, 'patch_request')
def test_patch_should_use_user_defined_values(self, mock_patch):
mock_patch.return_value = {}
self._lsg.patch('replace',
'/scopeUris', ['rest/fake/scope123'], 1)
mock_patch.assert_called_once_with('/rest/logical-switch-groups/dce3fc90-873e-48f7-8340-cc927d625b16',
body=[{'path': '/scopeUris',
'value': ['rest/fake/scope123'],
'op': 'replace'}],
custom_headers=1, timeout=-1)
|
py
|
1a57f1f69a5ac2719abe10405e5ad2671fe804b9
|
"""Pipeline implementation.
This module provides methods to run pipelines of functions with dependencies
and handle their results.
"""
from copy import deepcopy
from importlib import import_module
import builtins
import networkx
__all__ = [
'Pipeline',
]
def _yaml_tag(loader, tag, node):
'''handler for generic YAML tags
tags are stored as a tuple `(tag, value)`
'''
import yaml
if isinstance(node, yaml.ScalarNode):
value = loader.construct_scalar(node)
elif isinstance(node, yaml.SequenceNode):
value = loader.construct_sequence(node)
elif isinstance(node, yaml.MappingNode):
value = loader.construct_mapping(node)
# tags without arguments have empty string value
if value == '':
return tag,
return tag, value
class Pipeline:
r'''Class for running pipelines.
This is the main class for running pipelines of functions with dependencies
and using their results to generate variables and tables.
'''
@classmethod
def read(cls, filename):
'''Read a pipeline from a configuration file.
Parameters
----------
filename : str
The name of the configuration file.
'''
import yaml
# register custom tags
yaml.SafeLoader.add_multi_constructor('!', _yaml_tag)
# read the file
with open(filename, 'r') as stream:
config = yaml.safe_load(stream) or {}
# construct the pipeline
return cls(config)
def __init__(self, configuration):
'''Construct the pipeline.
Parameters
----------
configuration : dict-like
Configuration for the pipeline.
Notes
-----
Each step in the pipeline is configured by a dictionary specifying
a variable name and the associated value.
A value that is a tuple `(function_name, function_args)` specifies that
the value will be the result of a function call. The first item is the
fully qualified function name, and the second value specifies the
function arguments.
If a function argument is a tuple `(variable_name,)`, it refers to the
value of a previous step in the pipeline. The tuple item must be the
name of the reference variable.
'configuration' should contain the name and configuration of each
variable and/or an entry named 'tables'. 'tables' should contain a set
of nested dictionaries, first containing the name of each table, then
the name and configuration of each column and optionally an entry named
'init' with a configuration that initialises the table. If 'init' is
not specified the table will be initialised as an empty astropy Table
by default.
See [1]_ for examples of pipeline configurations in YAML format.
References
----------
.. [1] https://github.com/skypyproject/skypy/tree/master/examples
'''
# config contains settings for all variables and table initialisation
# table_config contains settings for all table columns
self.config = deepcopy(configuration)
self.table_config = self.config.pop('tables', {})
default_table = ('astropy.table.Table',)
self.config.update({k: v.pop('.init', default_table)
for k, v in self.table_config.items()})
# Create a Directed Acyclic Graph of all jobs and dependencies
self.dag = networkx.DiGraph()
# - add nodes for each variable, table and column
# - add edges for the table dependencies
# - keep track where functions need to be called
# functions are tuples (function name, [function args])
functions = {}
for job, settings in self.config.items():
self.dag.add_node(job)
if isinstance(settings, tuple):
functions[job] = settings
for table, columns in self.table_config.items():
table_complete = '.'.join((table, 'complete'))
self.dag.add_node(table_complete)
self.dag.add_edge(table, table_complete)
for column, settings in columns.items():
job = '.'.join((table, column))
self.dag.add_node(job)
self.dag.add_edge(table, job)
self.dag.add_edge(job, table_complete)
if isinstance(settings, tuple):
functions[job] = settings
# go through functions and add edges for all references
for job, settings in functions.items():
# settings are tuple (function, [args])
args = settings[1] if len(settings) > 1 else None
# get dependencies from arguments
deps = self.get_deps(args)
# add edges for dependencies
for d in deps:
if self.dag.has_node(d):
self.dag.add_edge(d, job)
else:
raise KeyError(d)
def execute(self):
r'''Run a pipeline.
This function runs a pipeline of functions to generate variables and
the columns of a set of tables. It uses a Directed Acyclic Graph to
determine a non-blocking order of execution that resolves any
dependencies, see [1]_.
References
----------
.. [1] https://networkx.github.io/documentation/stable/
'''
for job in networkx.topological_sort(self.dag):
if job.endswith('.complete'):
continue
elif job in self.config:
settings = self.config.get(job)
setattr(self, job, self.get_value(settings))
else:
table, column = job.split('.')
settings = self.table_config[table][column]
getattr(self, table)[column] = self.get_value(settings)
def write(self, file_format=None, overwrite=False):
r'''Write pipeline results to disk.
Parameters
----------
file_format : str
File format used to write tables. Files are written using the
Astropy unified file read/write interface; see [1]_ for supported
file formats. If None (default) tables are not written to file.
overwrite : bool
Whether to overwrite any existing files without warning.
References
----------
.. [1] https://docs.astropy.org/en/stable/io/unified.html
'''
if file_format:
for table in self.table_config.keys():
filename = '.'.join((table, file_format))
getattr(self, table).write(filename, overwrite=overwrite)
def get_value(self, value):
'''return the value of a field
tuples specify function calls `(function name, function args)`
'''
# check if not function
if not isinstance(value, tuple):
# check for reference
if isinstance(value, str) and value[0] == '$':
return self[value[1:]]
else:
# plain value
return value
# value is tuple (function name, [function args])
name = value[0]
args = value[1] if len(value) > 1 else []
# Import function
function_path = name.split('.')
module = builtins
for i, key in enumerate(function_path[:-1]):
if not hasattr(module, key):
module_name = '.'.join(function_path[:i+1])
try:
module = import_module(module_name)
except ModuleNotFoundError:
raise ModuleNotFoundError(module_name)
else:
module = getattr(module, key)
function = getattr(module, function_path[-1])
# Parse arguments
parsed_args = self.get_args(args)
# Call function
if isinstance(args, dict):
result = function(**parsed_args)
elif isinstance(args, list):
result = function(*parsed_args)
else:
result = function(parsed_args)
return result
def get_args(self, args):
'''parse function arguments
strings beginning with `$` are references to other fields
'''
if isinstance(args, dict):
# recurse kwargs
return {k: self.get_args(v) for k, v in args.items()}
elif isinstance(args, list):
# recurse args
return [self.get_args(a) for a in args]
else:
# return value
return self.get_value(args)
def get_deps(self, args):
'''get dependencies from function args
returns a list of all references found
'''
if isinstance(args, str) and args[0] == '$':
# reference
return [args[1:]]
elif isinstance(args, tuple):
# recurse on function arguments
return self.get_deps(args[1]) if len(args) > 1 else []
elif isinstance(args, dict):
# get explicit dependencies
deps = args.pop('.depends', [])
# turn a single value into a list
if isinstance(deps, str) or not isinstance(deps, list):
deps = [deps]
# recurse remaining kwargs
return deps + sum([self.get_deps(a) for a in args.values()], [])
elif isinstance(args, list):
# recurse args
return sum([self.get_deps(a) for a in args], [])
else:
# no reference
return []
def __getitem__(self, label):
name, _, key = label.partition('.')
item = getattr(self, name)
return item[key] if key else item
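# A minimal, self-contained sketch of the configuration format described in the
# docstrings above (an illustrative example, not taken from the skypy docs;
# requires numpy and astropy): tuples denote function calls and '$name' refers
# to an earlier variable.
if __name__ == '__main__':
    config = {
        'size': 5,
        'tables': {
            'sample': {
                'x': ('numpy.random.uniform', {'low': 0, 'high': 1, 'size': '$size'}),
            },
        },
    }
    pipeline = Pipeline(config)
    pipeline.execute()
    print(pipeline['sample.x'])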
|
py
|
1a57f32585146624a63c458569de9b61ebd4b099
|
import tensorflow as tf
from tensorflow.contrib.layers import xavier_initializer as xav
import numpy as np
class LSTM_net():
def __init__(self, obs_size, nb_hidden=128, action_size=16):
self.obs_size = obs_size
self.nb_hidden = nb_hidden
self.action_size = action_size
def __graph__():
tf.reset_default_graph()
# entry points
features_ = tf.placeholder(tf.float32, [1, obs_size], name='input_features')
init_state_c_, init_state_h_ = ( tf.placeholder(tf.float32, [1, nb_hidden]) for _ in range(2) )
action_ = tf.placeholder(tf.int32, name='ground_truth_action')
action_mask_ = tf.placeholder(tf.float32, [action_size], name='action_mask')
# input projection
Wi = tf.get_variable('Wi', [obs_size, nb_hidden],
initializer=xav())
bi = tf.get_variable('bi', [nb_hidden],
initializer=tf.constant_initializer(0.))
# add relu/tanh here if necessary
projected_features = tf.matmul(features_, Wi) + bi
lstm_f = tf.contrib.rnn.LSTMCell(nb_hidden, state_is_tuple=True)
lstm_op, state = lstm_f(inputs=projected_features, state=(init_state_c_, init_state_h_))
# reshape LSTM's state tuple (2,128) -> (1,256)
state_reshaped = tf.concat(axis=1, values=(state.c, state.h))
# output projection
Wo = tf.get_variable('Wo', [2*nb_hidden, action_size],
initializer=xav())
bo = tf.get_variable('bo', [action_size],
initializer=tf.constant_initializer(0.))
# get logits
logits = tf.matmul(state_reshaped, Wo) + bo
# probabilities
# normalization : elemwise multiply with action mask
probs = tf.multiply(tf.squeeze(tf.nn.softmax(logits)), action_mask_)
# prediction
prediction = tf.arg_max(probs, dimension=0)
# loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=action_)
# train op
train_op = tf.train.AdadeltaOptimizer(0.1).minimize(loss)
# attach symbols to self
self.loss = loss
self.prediction = prediction
self.probs = probs
self.logits = logits
self.state = state
self.train_op = train_op
# attach placeholders
self.features_ = features_
self.init_state_c_ = init_state_c_
self.init_state_h_ = init_state_h_
self.action_ = action_
self.action_mask_ = action_mask_
# build graph
__graph__()
# start a session; attach to self
sess = tf.Session()
sess.run(tf.global_variables_initializer())
self.sess = sess
# set init state to zeros
self.init_state_c = np.zeros([1,self.nb_hidden], dtype=np.float32)
self.init_state_h = np.zeros([1,self.nb_hidden], dtype=np.float32)
# forward propagation
def forward(self, features, action_mask):
# forward
probs, prediction, state_c, state_h = self.sess.run( [self.probs, self.prediction, self.state.c, self.state.h],
feed_dict = {
self.features_ : features.reshape([1,self.obs_size]),
self.init_state_c_ : self.init_state_c,
self.init_state_h_ : self.init_state_h,
self.action_mask_ : action_mask
})
# maintain state
self.init_state_c = state_c
self.init_state_h = state_h
# return argmax
return prediction
# training
def train_step(self, features, action, action_mask):
_, loss_value, state_c, state_h = self.sess.run( [self.train_op, self.loss, self.state.c, self.state.h],
feed_dict = {
self.features_ : features.reshape([1, self.obs_size]),
self.action_ : [action],
self.init_state_c_ : self.init_state_c,
self.init_state_h_ : self.init_state_h,
self.action_mask_ : action_mask
})
# maintain state
self.init_state_c = state_c
self.init_state_h = state_h
return loss_value
def reset_state(self):
# set init state to zeros
self.init_state_c = np.zeros([1,self.nb_hidden], dtype=np.float32)
self.init_state_h = np.zeros([1,self.nb_hidden], dtype=np.float32)
# save session to checkpoint
def save(self):
saver = tf.train.Saver()
saver.save(self.sess, 'ckpt/hcn.ckpt', global_step=0)
print('\n:: saved to ckpt/hcn.ckpt \n')
# restore session from checkpoint
def restore(self):
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state('ckpt/')
if ckpt and ckpt.model_checkpoint_path:
print('\n:: restoring checkpoint from', ckpt.model_checkpoint_path, '\n')
saver.restore(self.sess, ckpt.model_checkpoint_path)
else:
print('\n:: <ERR> checkpoint not found! \n')
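# Minimal usage sketch (illustration only, not part of the original file).
# The sizes and inputs below are assumptions: `features` is a dense feature
# vector of length obs_size and `action_mask` zeroes out invalid actions.
#
#   net = LSTM_net(obs_size=300, nb_hidden=128, action_size=16)
#   features = np.random.rand(300).astype(np.float32)
#   action_mask = np.ones(16, dtype=np.float32)
#   predicted_action = net.forward(features, action_mask)
#   loss = net.train_step(features, action=3, action_mask=action_mask)
#   net.reset_state()  # clear the LSTM state between dialogues
#   net.save()         # checkpoint to ckpt/hcn.ckpt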
|
py
|
1a57f33090650e43c714539d41a3e255f132bac7
|
# -*- coding: utf-8 -*-
"""
Django settings for psppi project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (psppi/config/settings/common.py - 3 = psppi/)
APPS_DIR = ROOT_DIR.path('psppi')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.postgres',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'corsheaders',
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'rest_framework',
)
# Apps specific for this project go here.
LOCAL_APPS = (
# custom users app
'psppi.users.apps.UsersConfig',
'psppi.responses',
'psppi.questions'
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""T""", '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db('DATABASE_URL', default='postgres:///psppi'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
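# Example environment overrides (illustrative placeholders only, not real
# credentials or endpoints):
#
#   DJANGO_DEBUG=True
#   DATABASE_URL=postgres://user:password@localhost:5432/psppi
#   DJANGO_EMAIL_BACKEND=django.core.mail.backends.console.EmailBackend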
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(ROOT_DIR.path('dist')),
str(APPS_DIR.path('templates'))
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('dist'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config-django.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config-django.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'psppi.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'psppi.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# django-compressor
# ------------------------------------------------------------------------------
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
CORS_ORIGIN_ALLOW_ALL = False
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = (
'localhost:3000',
'127.0.0.1:3000',
'attacusatlas.io'
)
|
py
|
1a57f550dfbaeb1643d9854871ec2398b9aec89e
|
#
# Copyright (c) 2020 Juniper Networks, Inc. All rights reserved.
#
"""DC Gateway Feature Implementation."""
from builtins import str
from collections import OrderedDict
import copy
from abstract_device_api.abstract_device_xsd import Feature, Firewall, \
FirewallFilter, From, NatRule, NatRules, RoutingInstance, Term, Then
import gevent
from .db import GlobalVRouterConfigDM, LogicalRouterDM, PhysicalInterfaceDM, \
RoutingInstanceDM, VirtualMachineInterfaceDM, VirtualNetworkDM, \
VirtualPortGroupDM
from .dm_utils import DMUtils
from .feature_base import FeatureBase
class JunosInterface(object):
def __init__(self, if_name, if_type, if_vlan_tag=0, if_ip=None,
li_uuid=None, port_vlan_tag=4094, vpg_obj=None):
"""Initialize JunosInterface init params."""
self.li_uuid = li_uuid
self.name = if_name
self.if_type = if_type
self.vlan_tag = if_vlan_tag
ifparts = if_name.split('.')
self.ifd_name = ifparts[0]
self.unit = ifparts[1]
self.ip = if_ip
# end __init__
def is_untagged(self):
return not self.vlan_tag
# end is_untagged
# end class JunosInterface
class DcGatewayFeature(FeatureBase):
@classmethod
def feature_name(cls):
return 'dc-gateway'
# end feature_name
def __init__(self, logger, physical_router, configs):
"""Create dc-gateway feature abstract config for public VN and LR.
It prepares dc-gateway feature abstract config
- retirve list of vn (internal vn and tenant vn) which is marked as
public vn or all vn of public lr.
- walk through this vn and create respective ri marked as
public_network True. for mx PR, it also creates firewall and
physical_interface abstract config for mx fip and snat
: Args:
: self: current instance of class
: logger: logger to be use to log messages
: physical_router: current PR of feature config
: configs: feature configs
: return: None
:
"""
self.ri_map = {}
self.firewall_config = None
self.pi_map = OrderedDict()
self.inet4_forwarding_filter = None
self.inet6_forwarding_filter = None
super(DcGatewayFeature, self).__init__(
logger, physical_router, configs)
# end __init__
def _get_export_import_set(self, vn_obj, ri_obj):
export_set = None
import_set = None
if vn_obj.route_targets:
export_set = vn_obj.route_targets & ri_obj.export_targets
import_set = vn_obj.route_targets & ri_obj.import_targets
else:
export_set = copy.copy(ri_obj.export_targets)
import_set = copy.copy(ri_obj.import_targets)
for ri2_id in ri_obj.routing_instances:
ri2 = RoutingInstanceDM.get(ri2_id)
if ri2 is None:
continue
import_set |= ri2.export_targets
return export_set, import_set
# end _get_export_import_set
def _add_ri_prefixes(self, vn, router_external, interfaces, prefixes, ri):
for interface in interfaces:
self._add_ref_to_list(
ri.get_interfaces(), interface.name)
if len(prefixes) < 1:
return
# for DC-gateway, skip routed vn prefix for public LR
routed_vn_prefix = set()
if vn:
routed_vn_prefix = vn.get_prefixes(
pr_uuid=self._physical_router.uuid,
only_routedvn_prefix=True)
for prefix in prefixes:
ri.add_static_routes(
self._get_route_for_cidr(prefix))
if router_external and prefix in routed_vn_prefix:
continue
ri.add_prefixes(self._get_subnet_for_cidr(prefix))
# if vn internal then also add rib interfaces since in
# overlay_networking we use this to filter out irb interfaces to set.
if router_external and '_contrail_lr_internal_vn_' in vn.name:
lr_uuid = DMUtils.extract_lr_uuid_from_internal_vn_name(vn.name)
lr = LogicalRouterDM.get(lr_uuid)
if lr:
vn_list = lr.get_connected_networks(
include_internal=False,
pr_uuid=self._physical_router.uuid)
for vn in vn_list:
vn_obj = VirtualNetworkDM.get(vn)
if vn_obj and vn_obj.vn_network_id is not None:
irb_name = "irb." + str(vn_obj.vn_network_id)
self._add_ref_to_list(
ri.get_routing_interfaces(), irb_name)
# end _add_ri_prefixes
def _add_inet_public_vrf_filter(cls, firewall_config, inet_type):
firewall_config.set_family(inet_type)
f = FirewallFilter(name=DMUtils.make_public_vrf_filter_name(inet_type))
f.set_comment(DMUtils.public_vrf_filter_comment())
firewall_config.add_firewall_filters(f)
term = Term(name="default-term", then=Then(accept_or_reject=True))
f.add_terms(term)
return f
# end _add_inet_public_vrf_filter
def _add_inet_filter_term(self, ri_name, prefixes, inet_type):
if inet_type == 'inet6':
prefixes = DMUtils.get_ipv6_prefixes(prefixes)
else:
prefixes = DMUtils.get_ipv4_prefixes(prefixes)
from_ = From()
for prefix in prefixes:
from_.add_destination_address(self._get_subnet_for_cidr(prefix))
then_ = Then()
then_.add_routing_instance(ri_name)
return Term(name=DMUtils.make_vrf_term_name(ri_name),
fromxx=from_, then=then_)
# end _add_inet_filter_term
def _check_term_exist(self, new_term_name):
for t in self.inet4_forwarding_filter.get_terms() or []:
if t.name == new_term_name:
return True
return False
# end _check_term_exist
def _add_ri_vrf_firewall_config(self, prefixes, ri):
has_ipv6_prefixes = DMUtils.has_ipv6_prefixes(prefixes)
has_ipv4_prefixes = DMUtils.has_ipv4_prefixes(prefixes)
term_ri_name = ri.get_name()
if ri.get_virtual_network_is_internal():
term_ri_name = ri.get_description()
self.firewall_config = self.firewall_config or Firewall(
comment=DMUtils.firewall_comment())
if has_ipv4_prefixes and not self.inet4_forwarding_filter:
# create single instance inet4 filter
self.inet4_forwarding_filter = self. \
_add_inet_public_vrf_filter(self.firewall_config, "inet")
if has_ipv6_prefixes and not self.inet6_forwarding_filter:
# create single instance inet6 filter
self.inet6_forwarding_filter = self. \
_add_inet_public_vrf_filter(self.firewall_config, "inet6")
if self._check_term_exist(DMUtils.make_vrf_term_name(term_ri_name)):
return
if has_ipv4_prefixes:
# add terms to inet4 filter
term = self._add_inet_filter_term(
term_ri_name, prefixes, "inet4")
# insert before the last term
terms = self.inet4_forwarding_filter.get_terms()
terms = [term] + (terms or [])
self.inet4_forwarding_filter.set_terms(terms)
if has_ipv6_prefixes:
# add terms to inet6 filter
term = self._add_inet_filter_term(
term_ri_name, prefixes, "inet6")
# insert before the last term
terms = self.inet6_forwarding_filter.get_terms()
terms = [term] + (terms or [])
self.inet6_forwarding_filter.set_terms(terms)
    # end _add_ri_vrf_firewall_config
def _add_routing_instance(self, ri_conf):
gevent.idle()
ri_name = ri_conf.get("ri_name")
vn = ri_conf.get("vn")
is_l2 = ri_conf.get("is_l2", False)
is_l2_l3 = ri_conf.get("is_l2_l3", False)
import_targets = ri_conf.get("import_targets", set())
export_targets = ri_conf.get("export_targets", set())
prefixes = ri_conf.get("prefixes") or []
gateways = ri_conf.get("gateways") or []
router_external = ri_conf.get("router_external", False)
interfaces = ri_conf.get("interfaces", [])
vni = ri_conf.get("vni", None)
fip_map = ri_conf.get("fip_map", None)
network_id = ri_conf.get("network_id", None)
is_internal_vn = True if '_contrail_lr_internal_vn_' in vn.name \
else False
encapsulation_priorities = ri_conf.get(
"encapsulation_priorities") or ["MPLSoGRE"]
highest_encapsulation = encapsulation_priorities[0]
ri = RoutingInstance(name=ri_name)
is_master_int_vn = False
if vn:
is_nat = True if fip_map else False
ri.set_comment(DMUtils.vn_ri_comment(vn, is_l2, is_l2_l3, is_nat,
router_external))
if is_internal_vn:
lr_uuid = DMUtils.extract_lr_uuid_from_internal_vn_name(
ri_name)
else:
if vn.logical_router is None:
# try updating logical router to handle DM restart
# vn.logical_router could be none as sequencing of
# locate object calls in device_manager.py
vn.set_logical_router(vn.fq_name[-1])
lr_uuid = vn.logical_router
if lr_uuid:
lr = LogicalRouterDM.get(lr_uuid)
if lr:
is_master_int_vn = lr.is_master
if is_internal_vn:
                        # set description only for internal VN/VRF
ri.set_description("__contrail_%s_%s" %
(lr.name, lr_uuid))
ri.set_is_master(is_master_int_vn)
ri.set_virtual_network_id(str(network_id))
ri.set_vxlan_id(str(vni))
ri.set_virtual_network_is_internal(is_internal_vn)
ri.set_is_public_network(router_external)
if is_l2_l3:
ri.set_virtual_network_mode('l2-l3')
elif is_l2:
ri.set_virtual_network_mode('l2')
if highest_encapsulation == "VXLAN":
ri.set_routing_instance_type("virtual-switch")
elif highest_encapsulation in ["MPLSoGRE", "MPLSoUDP"]:
ri.set_routing_instance_type("evpn")
else:
ri.set_virtual_network_mode('l3')
if not is_l2:
ri.set_routing_instance_type("vrf")
if fip_map is None and (router_external or not is_internal_vn):
# add RI prefixes for dc-gateway
self._add_ri_prefixes(vn, router_external, interfaces,
prefixes, ri)
if ri.get_routing_instance_type() != 'virtual-switch' and \
ri.get_virtual_network_mode() != 'l2':
self.ri_map[ri_name] = ri
# add irb physical interface and irb vni gateway settings for l2_l3
if self._is_gateway() and is_l2_l3 and not is_internal_vn:
__, li_map = self._add_or_lookup_pi(self.pi_map, 'irb', 'irb')
intf_unit = self._add_or_lookup_li(
li_map, 'irb.' + str(network_id), network_id)
if len(gateways) > 0:
if vn.has_ipv6_subnet is True:
intf_unit.set_is_virtual_router(True)
intf_unit.set_comment(
DMUtils.vn_irb_comment(vn, False, is_l2_l3,
router_external))
for (irb_ip, gateway) in gateways:
if len(gateway) and gateway != '0.0.0.0':
intf_unit.set_gateway(gateway)
self._add_ip_address(intf_unit, irb_ip,
gateway=gateway)
else:
self._add_ip_address(intf_unit, irb_ip)
if (is_l2 and vni is not None and
self._is_evpn(self._physical_router)):
irb_name = 'irb.' + str(network_id)
self._add_ref_to_list(ri.get_routing_interfaces(), irb_name)
# add firewall config for public VRF
if router_external and is_l2 is False:
self._add_ri_vrf_firewall_config(prefixes, ri)
# add firewall config for DCI Network
if fip_map is not None:
self._add_ref_to_list(ri.get_interfaces(), interfaces[0].name)
self.firewall_config = self.firewall_config or Firewall(
comment=DMUtils.firewall_comment())
f = FirewallFilter(
name=DMUtils.make_private_vrf_filter_name(ri_name))
f.set_comment(DMUtils.vn_firewall_comment(vn, "private"))
self.firewall_config.add_firewall_filters(f)
term = Term(name=DMUtils.make_vrf_term_name(ri_name))
from_ = From()
for fip_user_ip in list(fip_map.keys()):
from_.add_source_address(self._get_subnet_for_cidr(
fip_user_ip))
term.set_from(from_)
term.set_then(Then(routing_instance=[ri_name]))
f.add_terms(term)
__, li_map = self._add_or_lookup_pi(self.pi_map, 'irb', 'irb')
intf_name = 'irb.' + str(network_id)
intf_unit = self._add_or_lookup_li(li_map, intf_name, network_id)
intf_unit.set_comment(DMUtils.vn_irb_fip_inet_comment(vn))
intf_unit.set_family("inet")
intf_unit.add_firewall_filters(
DMUtils.make_private_vrf_filter_name(ri_name))
self._add_ref_to_list(ri.get_routing_interfaces(), intf_name)
# fip services config
nat_rules = NatRules(allow_overlapping_nat_pools=True,
name=DMUtils.make_services_set_name(ri_name),
comment=DMUtils.service_set_comment(vn))
ri.set_nat_rules(nat_rules)
snat_rule = NatRule(
name=DMUtils.make_snat_rule_name(ri_name),
comment=DMUtils.service_set_nat_rule_comment(vn, "SNAT"),
direction="input", translation_type="basic-nat44")
snat_rule.set_comment(DMUtils.snat_rule_comment())
nat_rules.add_rules(snat_rule)
dnat_rule = NatRule(
name=DMUtils.make_dnat_rule_name(ri_name),
comment=DMUtils.service_set_nat_rule_comment(vn, "DNAT"),
direction="output", translation_type="dnat-44")
dnat_rule.set_comment(DMUtils.dnat_rule_comment())
nat_rules.add_rules(dnat_rule)
nat_rules.set_inside_interface(interfaces[0].name)
nat_rules.set_outside_interface(interfaces[1].name)
for pip, fip_vn in list(fip_map.items()):
fip = fip_vn["floating_ip"]
snat_rule.add_source_addresses(self._get_subnet_for_cidr(pip))
snat_rule.add_source_prefixes(self._get_subnet_for_cidr(fip))
dnat_rule.add_destination_addresses(
self._get_subnet_for_cidr(fip))
dnat_rule.add_destination_prefixes(
self._get_subnet_for_cidr(pip))
self._add_ref_to_list(ri.get_ingress_interfaces(),
interfaces[0].name)
self._add_ref_to_list(ri.get_egress_interfaces(),
interfaces[1].name)
for target in import_targets:
self._add_to_list(ri.get_import_targets(), target)
for target in export_targets:
self._add_to_list(ri.get_export_targets(), target)
# end _add_routing_instance
def _update_vn_dict_for_external_vn(self, vn_dict, pr):
        # get all extended VNs and private VNs which are used in the BMS fip pool
for vn_id in pr.virtual_networks:
vn_dict[vn_id] = []
vn = VirtualNetworkDM.get(vn_id)
if vn and vn.router_external:
# retrieve and add all tenant private vn which has used in BMS
# with fip pool of external vn
vn_list = vn.get_connected_private_networks()
for pvn in vn_list or []:
vn_dict[pvn] = []
        # MX snat requires physical interface and firewall config for the
        # current PR. Get the PR's PIs used in the VPG's VNs and their LI
        # interfaces. Each interface has an l2 name (ae or PI name), vlan
        # tag, port_vlan_tag and vpg obj.
for vpg_uuid in pr.virtual_port_groups or []:
vpg_obj = VirtualPortGroupDM.get(vpg_uuid)
if not vpg_obj:
continue
vpg_interfaces = vpg_obj.physical_interfaces
for vmi_uuid in vpg_obj.virtual_machine_interfaces:
vmi_obj = VirtualMachineInterfaceDM.get(vmi_uuid)
vn = VirtualNetworkDM.get(vmi_obj.virtual_network) if \
vmi_obj and vmi_obj.virtual_network is not None else None
if not vn:
continue
vlan_tag = vmi_obj.vlan_tag
port_vlan_tag = vmi_obj.port_vlan_tag
for pi_uuid in vpg_interfaces:
if pi_uuid not in pr.physical_interfaces:
continue
ae_id = vpg_obj.pi_ae_map.get(pi_uuid)
if ae_id is not None and vlan_tag is not None:
ae_name = "ae" + str(ae_id) + "." + str(vlan_tag)
vn_dict.setdefault(vn.uuid, []).append(
JunosInterface(ae_name, 'l2', vlan_tag,
port_vlan_tag=port_vlan_tag,
vpg_obj=vpg_obj))
break
else:
pi_obj = PhysicalInterfaceDM.get(pi_uuid)
if pi_obj:
li_name = pi_obj.name + "." + str(vlan_tag)
vn_dict.setdefault(vn.uuid, []).append(
JunosInterface(li_name, 'l2', vlan_tag,
port_vlan_tag=port_vlan_tag,
vpg_obj=vpg_obj))
break
# end _update_vn_dict_for_external_vn
def _build_ri_config_for_dc(self):
pr = self._physical_router
vn_dict = {}
        # For a public LR, add all tenant VNs and the contrail internal vn to the dict
vn_list = []
for lr_id in pr.logical_routers or []:
lr = LogicalRouterDM.get(lr_id)
if not lr or (lr.logical_router_gateway_external is False) or \
not lr.virtual_network or \
not self._is_valid_vn(lr.virtual_network, 'l3'):
continue
if lr.logical_router_gateway_external is True:
# Here means the vn_obj is internal network and its a public LR
# So for junos family, we need to check for the CGNAT VN.
if pr.device_family == 'junos':
if lr.cgnat_vn:
ms_enabled, ms_ifc = self.is_service_interface_enabled(
ifc_prefix="ms")
cgnat_vn_obj = VirtualNetworkDM.get(lr.cgnat_vn)
if ms_enabled:
self.construct_cgnat_config(lr, cgnat_vn_obj,
ms_ifc)
vn_obj = VirtualNetworkDM.get(lr.virtual_network)
if '_contrail_lr_internal_vn_' not in vn_obj.name:
continue
ri_obj = self._get_primary_ri(vn_obj)
if ri_obj is None:
continue
lr_obj = LogicalRouterDM.get(vn_obj.logical_router)
if lr_obj is None or lr_obj.is_master is True:
continue
# vn_dict[lr.virtual_network] = []
vn_list += lr.get_connected_networks(include_internal=True,
pr_uuid=pr.uuid)
for vn_id in vn_list:
vn_dict[vn_id] = []
if pr.device_family == 'junos':
            # fip and snat through an external vn are only supported
            # on the Junos MX platform
self._update_vn_dict_for_external_vn(vn_dict, pr)
if len(vn_dict) > 0:
# refresh prepared vn's pr.vn_ip_map dictionary for irb and lo0
pr.evaluate_vn_irb_ip_map(set(vn_dict.keys()), 'l2_l3', 'irb',
False)
pr.evaluate_vn_irb_ip_map(set(vn_dict.keys()), 'l3', 'lo0', True)
vn_irb_ip_map = pr.get_vn_irb_ip_map()
for vn_id, interfaces in self._get_sorted_key_value_pairs(vn_dict):
vn_obj = VirtualNetworkDM.get(vn_id)
if (vn_obj is None or vn_obj.get_vxlan_vni() is None or
vn_obj.vn_network_id is None):
continue
export_set = None
import_set = None
for ri_id in vn_obj.routing_instances:
# Find the primary RI by matching the fabric name
ri_obj = RoutingInstanceDM.get(ri_id)
if ri_obj is None or ri_obj.fq_name[-1] != vn_obj.fq_name[-1]:
continue
export_set, import_set = self._get_export_import_set(vn_obj,
ri_obj)
if vn_obj.get_forwarding_mode() in ['l2', 'l2_l3']:
# create ri config for is_l2 True
irb_ips = []
if vn_obj.get_forwarding_mode() == 'l2_l3' and \
self._is_gateway():
irb_ips = vn_irb_ip_map['irb'].get(vn_id, [])
vrf_name_l2 = DMUtils.make_vrf_name(vn_obj.fq_name[-1],
vn_obj.vn_network_id,
'l2')
ri_conf = {'ri_name': vrf_name_l2, 'vn': vn_obj,
'is_l2': True, 'is_l2_l3':
(vn_obj.get_forwarding_mode() == 'l2_l3'),
'import_targets': import_set,
'export_targets': export_set,
'prefixes': vn_obj.get_prefixes(pr.uuid),
'gateways': irb_ips,
'router_external': vn_obj.router_external,
'interfaces': interfaces,
'vni': vn_obj.get_vxlan_vni(),
'network_id': vn_obj.vn_network_id,
'encapsulation_priorities':
GlobalVRouterConfigDM.
global_encapsulation_priorities}
self._add_routing_instance(ri_conf)
if vn_obj.get_forwarding_mode() in ['l3', 'l2_l3'] and \
self._is_gateway():
interfaces = []
lo0_ips = []
if vn_obj.get_forwarding_mode() == 'l2_l3':
interfaces = [
JunosInterface(
'irb.' + str(vn_obj.vn_network_id),
'l3', 0)]
else:
lo0_ips = vn_irb_ip_map['lo0'].get(vn_id, [])
is_internal_vn = True if '_contrail_lr_internal_vn_' in \
vn_obj.name else False
vrf_name_l3 = DMUtils.make_vrf_name(vn_obj.fq_name[-1],
vn_obj.vn_network_id,
'l3')
ri_conf = {'ri_name': vrf_name_l3, 'vn': vn_obj,
'is_l2': False,
'is_l2_l3':
vn_obj.get_forwarding_mode() == 'l2_l3',
'import_targets': import_set,
'export_targets': export_set,
'prefixes': vn_obj.get_prefixes(pr.uuid),
'router_external': vn_obj.router_external,
'interfaces': interfaces,
'gateways': lo0_ips,
'network_id': vn_obj.vn_network_id}
if is_internal_vn:
lr_uuid = DMUtils.\
extract_lr_uuid_from_internal_vn_name(vrf_name_l3)
lr = LogicalRouterDM.get(lr_uuid)
if lr and not lr.is_master:
ri_conf['vni'] = vn_obj.get_vxlan_vni(
is_internal_vn=is_internal_vn)
ri_conf['router_external'] = lr.\
logical_router_gateway_external
dci = lr.get_interfabric_dci()
if dci:
ri_conf['connected_dci_network'] = dci.uuid
lr_vn_list = dci.\
get_connected_lr_internal_vns(
exclude_lr=lr.uuid, pr_uuid=pr.uuid)
for lr_vn in lr_vn_list:
exports, imports = lr_vn.\
get_route_targets()
if imports:
ri_conf['import_targets'] |= imports
if exports:
ri_conf['export_targets'] |= exports
self._add_routing_instance(ri_conf)
break
# end for ri_id in vn_obj.routing_instances:
si_enabled, si_ifc = self.is_service_interface_enabled(
ifc_prefix="si")
if export_set and \
pr.is_junos_service_ports_enabled() and \
len(vn_obj.instance_ip_map) > 0 and si_enabled:
service_port_ids = DMUtils.get_service_ports(
vn_obj.vn_network_id)
if not pr \
.is_service_port_id_valid(service_port_ids[0]):
self._logger.error("DM can't allocate service interfaces"
" for (vn, vn-id)=(%s,%s)" %
(vn_obj.fq_name,
vn_obj.vn_network_id))
else:
vrf_name = DMUtils.make_vrf_name(vn_obj.fq_name[-1],
vn_obj.vn_network_id,
'l3', True)
interfaces = []
interfaces.append(
JunosInterface(
si_ifc + "." + str(service_port_ids[0]),
'l3', 0))
interfaces.append(
JunosInterface(
si_ifc + "." + str(service_port_ids[1]),
'l3', 0))
ri_conf = {'ri_name': vrf_name, 'vn': vn_obj,
'import_targets': import_set,
'interfaces': interfaces,
'fip_map': vn_obj.instance_ip_map,
'network_id': vn_obj.vn_network_id,
'restrict_proxy_arp': vn_obj.router_external}
self._add_routing_instance(ri_conf)
# end _build_ri_config_for_dc
def is_service_interface_enabled(self, ifc_prefix="si"):
pr = self._physical_router
if pr.is_junos_service_ports_enabled():
sps = pr.junos_service_ports.get('service_port')
if sps and type(sps) is list:
for sp in sps:
if sp and str(sp).strip().startswith("{}-".format(
ifc_prefix)):
return True, str(sp).strip()
return False, None
def construct_cgnat_config(self, lr, cgnat_vn, ms_ifc):
vn_obj = cgnat_vn
pr = self._physical_router
private_vns = lr.get_connected_networks(include_internal=False,
pr_uuid=pr.uuid)
if ms_ifc:
internal_vn = lr.virtual_network
internal_vn_obj = VirtualNetworkDM.get(internal_vn)
service_port_ids = DMUtils.get_service_ports(
internal_vn_obj.vn_network_id)
if not pr \
.is_service_port_id_valid(service_port_ids[0]):
self._logger.error("DM can't allocate service interfaces"
" for (vn, vn-id)=(%s,%s)" %
(internal_vn_obj.fq_name,
internal_vn_obj.vn_network_id))
else:
vrf_name = DMUtils.make_vrf_name(internal_vn_obj.fq_name[-1],
internal_vn_obj.vn_network_id,
'l3', True)
interfaces = []
interfaces.append(
JunosInterface(
ms_ifc + "." + str(service_port_ids[0]),
'l3', 0))
interfaces.append(
JunosInterface(
ms_ifc + "." + str(service_port_ids[1]),
'l3', 0))
ex_rt, im_rt = vn_obj.get_route_targets()
ri_conf = {'ri_name': vrf_name, 'vn': vn_obj,
'import_targets': im_rt,
'interfaces': interfaces,
'fip_map': vn_obj.instance_ip_map,
'network_id': vn_obj.vn_network_id,
'restrict_proxy_arp': vn_obj.router_external,
'is_cgnat_vrf': True,
'private_vns': private_vns}
                self._add_routing_instance(ri_conf)
def feature_config(self, **kwargs):
self.ri_map = {}
self.firewall_config = None
self.pi_map = OrderedDict()
self.inet4_forwarding_filter = None
self.inet6_forwarding_filter = None
feature_config = Feature(name=self.feature_name())
self._build_ri_config_for_dc()
feature_config.set_routing_instances(
self._get_values_sorted_by_key(
self.ri_map))
if self.firewall_config is not None:
feature_config.set_firewall(self.firewall_config)
for pi, li_map in list(self.pi_map.values()):
pi.set_logical_interfaces(list(li_map.values()))
feature_config.add_physical_interfaces(pi)
return feature_config
# end DcGatewayFeature
|
py
|
1a57f58992b34ecc4de1edec9aa7ecb41fe1bd9c
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Any, Callable, Dict, List, Optional, Type
from magma.common.service import MagmaService
from magma.enodebd.data_models import transform_for_enb, transform_for_magma
from magma.enodebd.data_models.data_model import DataModel, TrParam
from magma.enodebd.data_models.data_model_parameters import (
ParameterName,
TrParameterType,
)
from magma.enodebd.device_config.enodeb_config_postprocessor import (
EnodebConfigurationPostProcessor,
)
from magma.enodebd.device_config.enodeb_configuration import EnodebConfiguration
from magma.enodebd.devices.device_utils import EnodebDeviceName
from magma.enodebd.exceptions import Tr069Error
from magma.enodebd.logger import EnodebdLogger as logger
from magma.enodebd.state_machines.acs_state_utils import (
get_all_objects_to_add,
get_all_objects_to_delete,
)
from magma.enodebd.state_machines.enb_acs import EnodebAcsStateMachine
from magma.enodebd.state_machines.enb_acs_impl import BasicEnodebAcsStateMachine
from magma.enodebd.state_machines.enb_acs_states import (
AcsMsgAndTransition,
AcsReadMsgResult,
AddObjectsState,
DeleteObjectsState,
EndSessionState,
EnodebAcsState,
ErrorState,
GetParametersState,
GetRPCMethodsState,
SendGetTransientParametersState,
SendRebootState,
SetParameterValuesNotAdminState,
WaitEmptyMessageState,
WaitGetObjectParametersState,
WaitGetParametersState,
WaitGetTransientParametersState,
WaitInformMRebootState,
WaitInformState,
WaitRebootResponseState,
WaitSetParameterValuesState,
)
from magma.enodebd.tr069 import models
class CaviumHandler(BasicEnodebAcsStateMachine):
def __init__(
self,
service: MagmaService,
) -> None:
self._state_map = {}
super().__init__(service=service, use_param_key=False)
def reboot_asap(self) -> None:
self.transition('reboot')
def is_enodeb_connected(self) -> bool:
return not isinstance(self.state, WaitInformState)
def _init_state_map(self) -> None:
self._state_map = {
'wait_inform': WaitInformState(self, when_done='get_rpc_methods'),
'get_rpc_methods': GetRPCMethodsState(self, when_done='wait_empty', when_skip='get_transient_params'),
'wait_empty': WaitEmptyMessageState(self, when_done='get_transient_params'),
'get_transient_params': SendGetTransientParametersState(self, when_done='wait_get_transient_params'),
'wait_get_transient_params': WaitGetTransientParametersState(self, when_get='get_params', when_get_obj_params='get_obj_params', when_delete='delete_objs', when_add='add_objs', when_set='set_params', when_skip='end_session'),
'get_params': GetParametersState(self, when_done='wait_get_params'),
'wait_get_params': WaitGetParametersState(self, when_done='get_obj_params'),
'get_obj_params': CaviumGetObjectParametersState(self, when_done='wait_get_obj_params'),
'wait_get_obj_params': CaviumWaitGetObjectParametersState(self, when_edit='disable_admin', when_skip='get_transient_params'),
'disable_admin': CaviumDisableAdminEnableState(self, admin_value=False, when_done='wait_disable_admin'),
'wait_disable_admin': CaviumWaitDisableAdminEnableState(self, admin_value=False, when_add='add_objs', when_delete='delete_objs', when_done='set_params'),
'delete_objs': DeleteObjectsState(self, when_add='add_objs', when_skip='set_params'),
'add_objs': AddObjectsState(self, when_done='set_params'),
'set_params': SetParameterValuesNotAdminState(self, when_done='wait_set_params'),
'wait_set_params': WaitSetParameterValuesState(self, when_done='enable_admin', when_apply_invasive='enable_admin'),
'enable_admin': CaviumDisableAdminEnableState(self, admin_value=True, when_done='wait_enable_admin'),
'wait_enable_admin': CaviumWaitDisableAdminEnableState(self, admin_value=True, when_done='check_get_params', when_add='check_get_params', when_delete='check_get_params'),
'check_get_params': GetParametersState(self, when_done='check_wait_get_params', request_all_params=True),
'check_wait_get_params': WaitGetParametersState(self, when_done='end_session'),
'end_session': EndSessionState(self),
# Below states only entered through manual user intervention
'reboot': SendRebootState(self, when_done='wait_reboot'),
'wait_reboot': WaitRebootResponseState(self, when_done='wait_post_reboot_inform'),
'wait_post_reboot_inform': WaitInformMRebootState(self, when_done='wait_reboot_delay', when_timeout='wait_inform'),
# The states below are entered when an unexpected message type is
# received
'unexpected_fault': ErrorState(self, inform_transition_target='wait_inform'),
}
@property
def device_name(self) -> str:
return EnodebDeviceName.CAVIUM
@property
def data_model_class(self) -> Type[DataModel]:
return CaviumTrDataModel
@property
def config_postprocessor(self) -> EnodebConfigurationPostProcessor:
return CaviumTrConfigurationInitializer()
@property
def state_map(self) -> Dict[str, EnodebAcsState]:
return self._state_map
@property
def disconnected_state_name(self) -> str:
return 'wait_inform'
@property
def unexpected_fault_state_name(self) -> str:
return 'unexpected_fault'
class CaviumGetObjectParametersState(EnodebAcsState):
"""
When booted, the PLMN list is empty so we cannot get individual
object parameters. Instead, get the parent object PLMN_LIST
which will include any children if they exist.
"""
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def get_msg(self, message: Any) -> AcsMsgAndTransition:
""" Respond with GetParameterValuesRequest """
names = [ParameterName.PLMN_LIST]
# Generate the request
request = models.GetParameterValues()
request.ParameterNames = models.ParameterNames()
request.ParameterNames.arrayType = 'xsd:string[%d]' \
% len(names)
request.ParameterNames.string = []
for name in names:
path = self.acs.data_model.get_parameter(name).path
request.ParameterNames.string.append(path)
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Getting object parameters'
class CaviumWaitGetObjectParametersState(WaitGetObjectParametersState):
def __init__(
self,
acs: EnodebAcsStateMachine,
when_edit: str,
when_skip: str,
):
super().__init__(
acs=acs,
when_add=when_edit,
when_delete=when_edit,
when_set=when_edit,
when_skip=when_skip,
)
class CaviumDisableAdminEnableState(EnodebAcsState):
"""
Cavium requires that we disable 'Admin Enable' before configuring
most parameters
"""
def __init__(self, acs: EnodebAcsStateMachine, admin_value: bool, when_done: str):
super().__init__()
self.acs = acs
self.admin_value = admin_value
self.done_transition = when_done
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.DummyInput):
return AcsReadMsgResult(False, None)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
"""
Returns:
A SetParameterValueRequest for setting 'Admin Enable' to False
"""
param_name = ParameterName.ADMIN_STATE
# if we want the cell to be down don't force it up
desired_admin_value = \
self.acs.desired_cfg.get_parameter(param_name) \
and self.admin_value
admin_value = \
self.acs.data_model.transform_for_enb(
param_name,
desired_admin_value,
)
admin_path = self.acs.data_model.get_parameter(param_name).path
param_values = {admin_path: admin_value}
request = models.SetParameterValues()
request.ParameterList = models.ParameterValueList()
request.ParameterList.arrayType = 'cwmp:ParameterValueStruct[%d]' \
% len(param_values)
name_value = models.ParameterValueStruct()
name_value.Name = admin_path
name_value.Value = models.anySimpleType()
name_value.Value.type = 'xsd:string'
name_value.Value.Data = str(admin_value)
request.ParameterList.ParameterValueStruct = [name_value]
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Disabling admin_enable (Cavium only)'
class CaviumWaitDisableAdminEnableState(EnodebAcsState):
def __init__(
self,
acs: EnodebAcsStateMachine,
admin_value: bool,
when_done: str,
when_add: str,
when_delete: str,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.add_obj_transition = when_add
self.del_obj_transition = when_delete
self.admin_value = admin_value
    def read_msg(self, message: Any) -> AcsReadMsgResult:
if type(message) == models.Fault:
logger.error('Received Fault in response to SetParameterValues')
if message.SetParameterValuesFault is not None:
for fault in message.SetParameterValuesFault:
logger.error(
'SetParameterValuesFault Param: %s, Code: %s, String: %s',
fault.ParameterName, fault.FaultCode, fault.FaultString,
)
raise Tr069Error(
'Received Fault in response to SetParameterValues '
'(faultstring = %s)' % message.FaultString,
)
elif not isinstance(message, models.SetParameterValuesResponse):
return AcsReadMsgResult(False, None)
if message.Status != 0:
raise Tr069Error(
'Received SetParameterValuesResponse with '
'Status=%d' % message.Status,
)
param_name = ParameterName.ADMIN_STATE
desired_admin_value = \
self.acs.desired_cfg.get_parameter(param_name) \
and self.admin_value
magma_value = \
self.acs.data_model.transform_for_magma(
param_name,
desired_admin_value,
)
self.acs.device_cfg.set_parameter(param_name, magma_value)
if len(
get_all_objects_to_delete(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) > 0:
return AcsReadMsgResult(True, self.del_obj_transition)
elif len(
get_all_objects_to_add(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) > 0:
return AcsReadMsgResult(True, self.add_obj_transition)
else:
return AcsReadMsgResult(True, self.done_transition)
def state_description(self) -> str:
return 'Disabling admin_enable (Cavium only)'
class CaviumTrDataModel(DataModel):
"""
Class to represent relevant data model parameters from TR-196/TR-098/TR-181.
This class is effectively read-only
"""
# Mapping of TR parameter paths to aliases
DEVICE_PATH = 'Device.'
FAPSERVICE_PATH = DEVICE_PATH + 'Services.FAPService.1.'
PARAMETERS = {
# Top-level objects
ParameterName.DEVICE: TrParam(DEVICE_PATH, True, TrParameterType.OBJECT, False),
ParameterName.FAP_SERVICE: TrParam(FAPSERVICE_PATH, True, TrParameterType.OBJECT, False),
# Device info parameters
ParameterName.GPS_STATUS: TrParam(DEVICE_PATH + 'FAP.GPS.ContinuousGPSStatus.GotFix', True, TrParameterType.BOOLEAN, False),
ParameterName.GPS_LAT: TrParam(DEVICE_PATH + 'FAP.GPS.LockedLatitude', True, TrParameterType.INT, False),
ParameterName.GPS_LONG: TrParam(DEVICE_PATH + 'FAP.GPS.LockedLongitude', True, TrParameterType.INT, False),
ParameterName.SW_VERSION: TrParam(DEVICE_PATH + 'DeviceInfo.SoftwareVersion', True, TrParameterType.STRING, False),
ParameterName.SERIAL_NUMBER: TrParam(DEVICE_PATH + 'DeviceInfo.SerialNumber', True, TrParameterType.STRING, False),
# Capabilities
ParameterName.DUPLEX_MODE_CAPABILITY: TrParam(
FAPSERVICE_PATH + 'Capabilities.LTE.DuplexMode', True, TrParameterType.STRING, False,
),
ParameterName.BAND_CAPABILITY: TrParam(FAPSERVICE_PATH + 'Capabilities.LTE.BandsSupported', True, TrParameterType.UNSIGNED_INT, False),
# RF-related parameters
ParameterName.EARFCNDL: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.RF.EARFCNDL', True, TrParameterType.UNSIGNED_INT, False),
ParameterName.EARFCNUL: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.RF.EARFCNUL', True, TrParameterType.UNSIGNED_INT, False),
ParameterName.BAND: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.RF.FreqBandIndicator', True, TrParameterType.UNSIGNED_INT, False),
ParameterName.PCI: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.RF.PhyCellID', True, TrParameterType.STRING, False),
ParameterName.DL_BANDWIDTH: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.RF.DLBandwidth', True, TrParameterType.STRING, False),
ParameterName.UL_BANDWIDTH: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.RF.ULBandwidth', True, TrParameterType.STRING, False),
ParameterName.CELL_ID: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.RAN.Common.CellIdentity', True, TrParameterType.UNSIGNED_INT, False),
# Other LTE parameters
ParameterName.ADMIN_STATE: TrParam(FAPSERVICE_PATH + 'FAPControl.LTE.AdminState', False, TrParameterType.BOOLEAN, False),
ParameterName.OP_STATE: TrParam(FAPSERVICE_PATH + 'FAPControl.LTE.OpState', True, TrParameterType.BOOLEAN, False),
ParameterName.RF_TX_STATUS: TrParam(FAPSERVICE_PATH + 'FAPControl.LTE.RFTxStatus', True, TrParameterType.BOOLEAN, False),
# RAN parameters
ParameterName.CELL_RESERVED: TrParam(
FAPSERVICE_PATH
+ 'CellConfig.LTE.RAN.CellRestriction.CellReservedForOperatorUse', True, TrParameterType.BOOLEAN, False,
),
ParameterName.CELL_BARRED: TrParam(
FAPSERVICE_PATH
+ 'CellConfig.LTE.RAN.CellRestriction.CellBarred', True, TrParameterType.BOOLEAN, False,
),
# Core network parameters
ParameterName.MME_IP: TrParam(
FAPSERVICE_PATH + 'FAPControl.LTE.Gateway.S1SigLinkServerList', True, TrParameterType.STRING, False,
),
ParameterName.MME_PORT: TrParam(FAPSERVICE_PATH + 'FAPControl.LTE.Gateway.S1SigLinkPort', True, TrParameterType.UNSIGNED_INT, False),
ParameterName.NUM_PLMNS: TrParam(
FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNListNumberOfEntries', True, TrParameterType.UNSIGNED_INT, False,
),
ParameterName.PLMN: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNList.', True, TrParameterType.OBJECT, False),
# PLMN arrays are added below
ParameterName.TAC: TrParam(FAPSERVICE_PATH + 'CellConfig.LTE.EPC.TAC', True, TrParameterType.UNSIGNED_INT, False),
ParameterName.IP_SEC_ENABLE: TrParam(
DEVICE_PATH + 'IPsec.Enable', False, TrParameterType.BOOLEAN, False,
),
# Management server parameters
ParameterName.PERIODIC_INFORM_ENABLE: TrParam(
DEVICE_PATH + 'ManagementServer.PeriodicInformEnable',
False, TrParameterType.BOOLEAN, False,
),
ParameterName.PERIODIC_INFORM_INTERVAL: TrParam(
DEVICE_PATH + 'ManagementServer.PeriodicInformInterval',
False, TrParameterType.UNSIGNED_INT, False,
),
# Performance management parameters
ParameterName.PERF_MGMT_ENABLE: TrParam(
FAPSERVICE_PATH + 'PerfMgmt.Config.1.Enable', False, TrParameterType.BOOLEAN, False,
),
ParameterName.PERF_MGMT_UPLOAD_INTERVAL: TrParam(
FAPSERVICE_PATH + 'PerfMgmt.Config.1.PeriodicUploadInterval', False, TrParameterType.UNSIGNED_INT, False,
),
ParameterName.PERF_MGMT_UPLOAD_URL: TrParam(
FAPSERVICE_PATH + 'PerfMgmt.Config.1.URL', False, TrParameterType.STRING, False,
),
ParameterName.PERF_MGMT_USER: TrParam(
FAPSERVICE_PATH + 'PerfMgmt.Config.1.Username',
False, TrParameterType.STRING, False,
),
ParameterName.PERF_MGMT_PASSWORD: TrParam(
FAPSERVICE_PATH + 'PerfMgmt.Config.1.Password',
False, TrParameterType.STRING, False,
),
# PLMN Info
ParameterName.PLMN_LIST: TrParam(
FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNList.', False, TrParameterType.OBJECT, False,
),
}
NUM_PLMNS_IN_CONFIG = 6
for i in range(1, NUM_PLMNS_IN_CONFIG + 1):
PARAMETERS[ParameterName.PLMN_N % i] = TrParam(
FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNList.%d.' % i, True, TrParameterType.OBJECT, False,
)
PARAMETERS[ParameterName.PLMN_N_CELL_RESERVED % i] = TrParam(
FAPSERVICE_PATH
+ 'CellConfig.LTE.EPC.PLMNList.%d.CellReservedForOperatorUse' % i, True, TrParameterType.BOOLEAN, False,
)
PARAMETERS[ParameterName.PLMN_N_ENABLE % i] = TrParam(
FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNList.%d.Enable' % i, True, TrParameterType.BOOLEAN, False,
)
PARAMETERS[ParameterName.PLMN_N_PRIMARY % i] = TrParam(
FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNList.%d.IsPrimary' % i, True, TrParameterType.BOOLEAN, False,
)
PARAMETERS[ParameterName.PLMN_N_PLMNID % i] = TrParam(
FAPSERVICE_PATH + 'CellConfig.LTE.EPC.PLMNList.%d.PLMNID' % i, True, TrParameterType.STRING, False,
)
TRANSFORMS_FOR_ENB = {
ParameterName.DL_BANDWIDTH: transform_for_enb.bandwidth,
ParameterName.UL_BANDWIDTH: transform_for_enb.bandwidth,
}
TRANSFORMS_FOR_MAGMA = {
ParameterName.DL_BANDWIDTH: transform_for_magma.bandwidth,
ParameterName.UL_BANDWIDTH: transform_for_magma.bandwidth,
# We don't set GPS, so we don't need transform for enb
ParameterName.GPS_LAT: transform_for_magma.gps_tr181,
ParameterName.GPS_LONG: transform_for_magma.gps_tr181,
}
@classmethod
def get_parameter(cls, param_name: ParameterName) -> Optional[TrParam]:
return cls.PARAMETERS.get(param_name)
@classmethod
def _get_magma_transforms(
cls,
) -> Dict[ParameterName, Callable[[Any], Any]]:
return cls.TRANSFORMS_FOR_MAGMA
@classmethod
def _get_enb_transforms(cls) -> Dict[ParameterName, Callable[[Any], Any]]:
return cls.TRANSFORMS_FOR_ENB
@classmethod
def get_load_parameters(cls) -> List[ParameterName]:
"""
Load all the parameters instead of a subset.
"""
return [ParameterName.DEVICE]
@classmethod
def get_num_plmns(cls) -> int:
return cls.NUM_PLMNS_IN_CONFIG
@classmethod
def get_parameter_names(cls) -> List[ParameterName]:
excluded_params = [
str(ParameterName.DEVICE),
str(ParameterName.FAP_SERVICE),
]
names = list(
filter(
lambda x: (not str(x).startswith('PLMN'))
and (str(x) not in excluded_params),
cls.PARAMETERS.keys(),
),
)
return names
@classmethod
def get_numbered_param_names(
cls,
) -> Dict[ParameterName, List[ParameterName]]:
names = {}
for i in range(1, cls.NUM_PLMNS_IN_CONFIG + 1):
params = []
params.append(ParameterName.PLMN_N_CELL_RESERVED % i)
params.append(ParameterName.PLMN_N_ENABLE % i)
params.append(ParameterName.PLMN_N_PRIMARY % i)
params.append(ParameterName.PLMN_N_PLMNID % i)
names[ParameterName.PLMN_N % i] = params
return names
class CaviumTrConfigurationInitializer(EnodebConfigurationPostProcessor):
def postprocess(self, mconfig: Any, service_cfg: Any, desired_cfg: EnodebConfiguration) -> None:
desired_cfg.set_parameter(ParameterName.CELL_BARRED, True)
desired_cfg.set_parameter(ParameterName.ADMIN_STATE, True)
|
py
|
1a57f5b744acc2cf4fa8654ea23263c683247379
|
# Create the MovieReview class with the following methods:
#
# - a constructor (__init__()) that receives two input parameters that are used to initialise
# attributes *rating* and *comment*, respectively. Default value for the 2nd input parameter
# is an empty string. The constructor also sets the value of the *timestamp* attribute to the
# current date and time.
#
# - get and set methods for the *rating* and *comment* attributes (using appropriate decorators);
# designate both attributes as private; valid values for these two attributes are as follows:
# - for *rating*: int values between 1 and 5, including 1 and 5
# - for *comment*: any string value
#
# - a method that returns a string representation of a MovieReview object (__str__())
from datetime import datetime
class MovieReview:
def __init__(self, rating, comment=""):
self.rating = rating
self.comment = comment
self.timestamp = datetime.today()
@property
def rating(self):
return self.__rating
@rating.setter
def rating(self, value):
if isinstance(value, int) and 1 <= value <= 5:
self.__rating = value
elif isinstance(value, str) and (len(value) == 1) and (value in '12345'):
self.__rating = int(value)
else:
print(f"Invalid value ({value}) passed for movie rating")
self.__rating = None
@property
def comment(self):
return self.__comment if self.__comment else ""
@comment.setter
def comment(self, value):
if isinstance(value, str):
self.__comment = value
else:
print(f"Error! String value expected, received {type(value)} instead. Coercing the input to a string")
self.__comment = str(value)
def __str__(self):
s = f"{self.rating} stars; " if self.rating else "Rating not available; "
s += f"comment: '{self.comment}'" if self.comment else "no comment left"
s += f"(received {datetime.strftime(self.timestamp, '%b %d, %Y %H:%M')})"
return s
# Create the Movie class with the following methods:
#
# - a constructor (__init__()) that receives three input parameters to be used to initialise
# attributes *title*, *year*, and *director*, respectively. Default value for the 3rd input
# parameter is None. The constructor also initializes the *reviews* attribute
# (a list of MovieReview objects) to an empty list.
#
# - a method that returns a string representation of the given Movie object (__str__())
#
# - a method for adding a new review to the Movie objects, that is, to the *reviews* list.
# The review to be added is passed as the input argument; it is added to the list, only
# if it is an object of the MovieReview class and the review is not older than 1 year.
# (a useful StackOverflow entry:
# https://stackoverflow.com/questions/1345827/how-do-i-find-the-time-difference-between-two-datetime-objects-in-python)
#
# - a method (__eq__()) for checking for equality of the given Movie object and another
# object that is passed to the method as its input parameter. Two Movie objects are
# considered the same if they have the same title and director, or, if the director is
# unknown, then the same title and year.
#
# - methods for turning the given Movie object into an iterator (__iter__(), __next__())
# over the movie reviews (that is, elements of the *reviews* list)
#
class Movie:
def __init__(self, title, year, director=None):
self.title = title
self.year = year
self.director = director
self.reviews = list()
def __str__(self):
movie_str = f"Movie '{self.title}' from {self.year}"
movie_str += f" directed by {self.director}" if self.director else " (director unknown)"
if len(self.reviews) > 0:
movie_str += "\nReviews:\n" + "\n".join([str(mr) for mr in self.reviews])
else:
movie_str += ", no reviews yet"
return movie_str
def add_review(self, review):
if isinstance(review, MovieReview):
time_diff = datetime.today() - review.timestamp
time_diff_sec = time_diff.total_seconds()
secs_in_year = 365*24*60*60
time_diff_year = time_diff_sec // secs_in_year # integer division
if time_diff_year < 1:
self.reviews.append(review)
else:
print("An outdated review")
else:
print("Not an object of MovieReview class; cannot be added")
def __eq__(self, other):
if isinstance(other, Movie):
if self.director and other.director:
return (self.title == other.title) and (self.director == other.director)
else:
print("Director(s) unknown; checking for equality based on the title-year pair")
return (self.title == other.title) and (self.year == other.year)
else:
print("The other object is not a Movie")
return False
def __iter__(self):
self.__review_counter = 0
return self
def __next__(self):
if self.__review_counter == len(self.reviews):
raise StopIteration
current_review = self.reviews[self.__review_counter]
self.__review_counter += 1
return current_review
if __name__ == '__main__':
mr_1 = MovieReview(5, "Superb!")
mr_2 = MovieReview(5, "The best ever!")
mr_3 = MovieReview(3, "Expected more...")
# print(mr_1)
# print(mr_2)
# print(mr_3)
godfather = Movie("The Godfather", year=1972, director="Francis Ford Coppola")
print(godfather)
print()
godfather_2 = Movie("The Godfather: part II", 1974, "Francis Ford Coppola")
print(godfather_2)
print()
if godfather == godfather_2:
print("No difference observed!")
else:
print("Different movies!")
print()
for mr in (mr_1, mr_2, mr_3):
godfather_2.add_review(mr)
print("Printing movie data after adding reviews")
print(godfather_2)
print("\nReviews for the Godfather 2 movie:")
for review in godfather_2:
print(review)
|
py
|
1a57f5d30295816f58072f3f15118ff9104af77e
|
"""
It is a simple sorting algorithm that builds the final sorted list one item at a time
** it's like sorting playing cards in your hand
Algorithm:
1. Consider the first element to be sorted and the rest to be unsorted.
2. Take the first element in the unsorted part (u1) and compare it with the sorted part elements (s1).
3. If u1 < s1 then insert u1 at the correct index, else leave it as is.
4. Take the next element in the unsorted part and compare it with the sorted elements.
5. Repeat 3 and 4 until all the elements are sorted.
"""
def insertion_sort(list1):
for index in range(1, len(list1)):
current_element = list1[index]
pos = index
        while pos > 0 and current_element < list1[pos - 1]:
list1[pos] = list1[pos - 1]
pos -= 1
list1[pos] = current_element
list1 = [9, 35, 0, 15, 11]
insertion_sort(list1)
print(list1)
|
py
|
1a57f5d9b9f50a2030a099b0c20a7095206b06a4
|
# pylint: disable=invalid-name
import pickle
from math import inf
import pandas as pd
import numpy as np
#from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
#import matplotlib.pyplot as plt
first_time = False
#parameters = [(50, 300) - 0.54, 0.6, (40, 600) - 0.438, 0.3, (75, 300), (100, 200), (200, 200), (200, 100), (400, 100)]
#parameters = [(10, 100), (20, 100), (30, 100), (40, 100), (50, 100)] - 0.38, 0.1
#parameters = [(10, 100) - 0.388, 0.094, (20, 200), (50, 200) - - 0.414, 0.19, (50, 400), (100, 400) - 0.456, 0.3857]
parameters = [(60, 700), (40, 600), (50, 500), (100, 400), (50, 300)]
filename = 'german.data-numeric.txt'
def readData():
'''
Reads data from text file and stores as data frame
'''
    df = pd.read_table(filename, header=None, delim_whitespace=True)
    # labels live in the last column; extract them before dropping and normalising
    Y = df.iloc[:, -1]
    df = df.iloc[:, :-1]
    df = (df - df.min()) / (df.max() - df.min())
    return (df, Y)
def mahanalobisdist(a, b):
'''
Calculates the mahalanobis distance
between 2 points of the data
'''
temp = np.array([a, b]).T
cov = np.cov(temp)
delta = a - b
inv = np.linalg.pinv(cov)
mdist = np.dot(np.dot(np.transpose(delta), inv), delta)
mdist = np.sqrt(mdist)
return mdist
def createDistanceMatrix(data, first_timeval, N):
'''
Computes the distance matrix and
writes to to a pickle file to save time
on future runs
'''
distancematrix = np.zeros((N, N))
if first_timeval:
i = 0
for value1 in data:
j = 0
for value2 in data:
distancematrix[i][j] = mahanalobisdist(value1, value2)
#print(distancematrix[i][j])
j += 1
i += 1
f = open('distancematrix', 'wb')
pickle.dump(distancematrix, f)
f.close()
else:
f2 = open('distancematrix', 'rb')
distancematrix = pickle.load(f2)
f2.close()
return distancematrix
def getLRD(N, distancematrix, k, data):
'''
Finds
1. The KNN and hence the kdistance for each point
i.e the distance to its kthNN,
2. The number of points that fall within the k-distance neighbourhood
3. Reachability distances
4. lrd (local reachability density)
for each point
'''
kdist = np.zeros(N)
kneighbours = {}
Numneighbours = 0
lrd = np.zeros(N)
for i in range(N):
distancefrompoint = distancematrix[i]
knn = np.partition(distancefrompoint, k-1)
kdist[i] = knn[k-1]
        j = 0
        temp = []
        for dist in distancefrompoint:
            if dist <= kdist[i]:
                # j is the index of a point inside the k-distance neighbourhood of i
                temp.append(j)
                Numneighbours += 1
            j += 1
        kneighbours[i] = temp
reachabilitydistance = getReachabilityDistances(N, data, kdist, distancematrix)
for i in range(N):
sumOfReachabilityDistances = 0
for value in kneighbours[i]:
sumOfReachabilityDistances += reachabilitydistance[int(value)][i]
        if sumOfReachabilityDistances == 0:
            # no finite reachability distances: treat the local density as infinite
            lrd[i] = inf
        else:
            lrd[i] = len(kneighbours[i])/sumOfReachabilityDistances
return lrd
def getReachabilityDistances(N, data, kdist, distancematrix):
'''
Calculates the reachability distance
between all pairs of points
'''
reachabilitydistance = np.zeros((N, N))
i = 0
for _ in data:
j = 0
for _ in data:
reachabilitydistance[i][j] = max(kdist[i], distancematrix[i][j])
j += 1
i += 1
return reachabilitydistance
def getAccuracy(outliers, Y, N, PrecisionList, RecallList):
'''
    Gets the performance measures of the outlier detection,
    in terms of Accuracy, Precision, Recall and F1-Score,
    using true and false +ves and -ves
'''
tp = 0
fp = 0
tn = 0
fn = 0
#testY = []
for i, row in Y.iteritems():
if i in outliers:
#testY.append(1)
if row == 1:
tp += 1
else:
fp += 1
else:
#testY.append(0)
if row == 1:
fn += 1
else:
tn += 1
print("True +ve:" + str(tp) + " True -ve:" + str(tn))
print(" False +ve:" + str(fp) + " False -ve:" + str(fn))
A = (tp + tn)/(tp + tn + fp + fn)
P = (float(tp)/(tp + fp))
R = (float(tp)/(tp + fn))
f1 = 2*P*R/float(P+R)
print("Accuracy : " + str(A) + " Precision : " + str(P) + " Recall : " + str(R) + " F1-Score : " + str(f1))
print()
PrecisionList.append(P)
RecallList.append(R)
#return testY
# def dimRedPlot(df, testY):
# '''
# Reduce dimensions to 2, then plot the points
# of the obtained results, with outliers (i.e testY = 1)
# highlighted in red and normal pts in blue
# '''
# lda = LDA(n_components=2)
# lda_transformed = pd.DataFrame(lda.fit_transform(df, testY))
# Plot normal points in blue and outliers in red
# plt.scatter(lda_transformed[:][testY == 1], lda_transformed[:][testY == 1], label='Outliers', c='red')
# plt.scatter(lda_transformed[testY == 0][0], lda_transformed[testY == 0][1], label='Normal points', c='blue')
# #plt.legend(loc=3)
# plt.show()
def main():
'''
Calls the functions to get distance matrix,
the LRD, and the 1st O points after sorting of LRD
and gets the Precision and Recall values
'''
df, Y = readData()
i = 1
PrecisionList = []
RecallList = []
data = df.values
N = len(data)
distancematrix = createDistanceMatrix(data, first_time, N)
#O is the #of outliers
for (k, O) in parameters:
print("Experiment:", i, ", k =", k, ", num_outliers =", O)
lrd = getLRD(N, distancematrix, k, data)
sorted_outlier_factor_indexes = np.argsort(lrd)
outliers = sorted_outlier_factor_indexes[-O:]
getAccuracy(outliers, Y, N, PrecisionList, RecallList)
i += 1
#dimRedPlot(df, testY)
# plt.plot(RecallList, PrecisionList, 'ro')
# plt.axis([0, 1, 0, 1])
# plt.show()
if __name__ == '__main__':
main()
|
py
|
1a57f62b0c865d6cae232ad9b57835a46282b021
|
def handle_data(data):
    return data
|
py
|
1a57f6e0d55769ef3cf2a44bb8ed1ee8a4f8697f
|
import boringmindmachine as bmm
import logging
import os, time, datetime, urllib
import twitter
import traceback
import base64
import oauth2 as oauth
import simplejson as json
class TwitterSheep(bmm.BoringSheep):
"""
Twitter Sheep class.
Sheep are created by the Shepherd.
Sheep are initialized with a JSON key file plus parameters from the Shepherd.
Sheep are expected to take care of their own API instance.
Input bot key (JSON file) is stored as self.params and contains everything the sheep needs
"""
def __init__(self, bot_key, **kwargs):
"""
bot_key - parameters that come from the keys (and the Keymaker, and the key-making process)
- consumer_token
- consumer_token_secret
- oauth_token
- oauth_token_secret
- user_id
- screen_name
kwargs - extra parameter args passed
into the Sheep (from the Shepherd)
A Sheep object manages information for a single Twitter bot account.
The information (oauth keys, bot name, bot account, etc) are contained
in the JSON file passed in by the Shepherd.
The JSON file contains information compiled by the Keymaker.
If there is other information the Shepherd needs to pass to the
Sheep that is not in the JSON file, it can use keyword args.
"""
# This is where we should initialize the Twitter API instance
# using params found in the json file.
self.params = bot_key
# combine the user-provided parameters
# (in kwargs) with the json-provided parameters
        for key in kwargs:
            self.params[key] = kwargs[key]
# Initialize your API instance
self.api = twitter.Api( consumer_key = self.params['consumer_token'],
consumer_secret = self.params['consumer_token_secret'],
access_token_key = self.params['oauth_token'],
access_token_secret = self.params['oauth_token_secret'])
# Get an OAuth token to do bot stuff
self.token = oauth.Token(
key = self.params['oauth_token'],
secret = self.params['oauth_token_secret']
)
# Add an OAuth application to consume the API
self.consumer = oauth.Consumer(
key = self.params['consumer_token'],
secret = self.params['consumer_token_secret']
)
# Create an OAuth client
self.client = oauth.Client(
self.consumer,
self.token
)
# Set names
self.name = bot_key['screen_name']
msg = "TwitterSheep: constructor: Finished setting up Twitter API for bot {screen_name}"
msg = msg.format(screen_name=self.name)
logging.info(self.sign_message(msg))
#####################################
# rainbow mind machine Sheep:
# non-Twitter actions
def dummy(self, **kwargs):
"""Debug: do nothing."""
msg = "TwitterSheep: dummy(): dummy action"
logging.debug(self.sign_message(msg))
def echo(self, **kwargs):
"""Just say hi"""
msg = "Hello world! This is {name}".format(name=self.name)
logging.info(self.sign_message(msg))
#################################
# rainbow mind machine Sheep:
# Twitter actions
#
# change_url
# change_bio
    # change_colors
# change_image
# tweet
# follow_user
# unfollow_user
def change_url(self, **kwargs):
"""Update twiter profile URL.
kwargs:
url: The new url (string) to set as the profile URL
Does not return anything.
"""
if( 'url' not in kwargs.keys()):
err = "TwitterSheep Error: change_url() action called without 'url' kwarg specified."
logging.error(self.sign_message(err), exc_info=True)
raise Exception(err)
# Set the API endpoint
api_url = "https://api.twitter.com/1.1/account/update_profile.json"
bot_url = kwargs['url']
resp, content = self.client.request(
api_url,
method = "POST",
body = urllib.parse.urlencode({'url':bot_url}),
headers = None
)
msg = "TwitterSheep: change_url(): Done. Set url to: %s"%(bot_url)
logging.info(self.sign_message(msg))
def change_bio(self,**kwargs):
"""Update twitter profile bio.
kwargs:
bio: The bio string
Does not return anything.
"""
if( 'bio' not in kwargs.keys()):
err = "TwitterSheep Error: change_bio() action called without 'bio' key specified in the parameters dict."
logging.error(self.sign_message(err), exc_info=True)
raise Exception(err)
# Set the API endpoint
url = "https://api.twitter.com/1.1/account/update_profile.json"
bot_bio = kwargs['bio']
resp, content = self.client.request(
url,
method = "POST",
            body=urllib.parse.urlencode({'description': bot_bio}),
headers=None
)
msg = "TwitterSheep: change_bio(): Done."
logging.info(self.sign_message(msg))
logging.info(content)
def change_colors(self,**kwargs):
"""
Update twitter profile colors.
kwargs:
background: RGB code for background color (no #)
links: RGB code for links color (no #)
Example:
kwargs = {
'background':'3D3D3D',
                'links':'AAF'
}
Does not return anything.
"""
if( 'background' not in kwargs.keys()
and 'links' not in kwargs.keys()):
err = "TwitterSheep Error: change_colors() action called "
err += "with neither 'background' nor 'links' kwargs specified."
logging.error(self.sign_message(err), exc_info=True)
raise Exception(err)
# json sent to the Twitter API
payload = {}
if 'background' in kwargs.keys():
background_rgbcode = kwargs['background']
payload['profile_background_color'] = background_rgbcode
if 'links' in kwargs.keys():
links_rgbcode = kwargs['links']
payload['profile_link_color'] = links_rgbcode
# Set the API endpoint
url = "https://api.twitter.com/1.1/account/update_profile_colors.json"
resp, content = self.client.request(
url,
method = "POST",
            body=urllib.parse.urlencode(payload),
headers=None
)
msg = "TwitterSheep: change_colors(): Done."
logging.info(self.sign_message(msg))
logging.info(content)
def change_image(self,**kwargs):
"""Update twitter profile bio.
Setting 'image' keyword argument takes the highest
priority and is the image used if present.
If that is not available, change_image() will look
for an 'image' keyword argument in the bot key.
kwargs:
image: The path to the image to use as the Twitter avatar
This method does not return anything.
"""
if( 'image' not in kwargs.keys() and 'image' not in self.params):
err = "TwitterSheep Error: change_image() action called without 'image' key specified in the bot key or the parameters dict."
logging.error(self.sign_message(err), exc_info=True)
raise Exception(err)
img_file = ''
if( 'image' in kwargs.keys() ):
img_file = kwargs['image']
if os.path.isfile(img_file) is False:
err = "TwitterSheep Error: change_image() action called with an 'image' key that is not a file!"
logging.error(self.sign_message(err), exc_info=True)
raise Exception(err)
elif( 'image' in self.params ):
img_file = self.params['image']
if os.path.isfile(img_file) is False:
err = "TwitterSheep Error: change_image() action called with an 'image' key that is not a file!"
logging.error(self.sign_message(err), exc_info=True)
raise Exception(err)
# json sent to the Twitter API
payload = {}
        b64 = base64.encodebytes(open(img_file,"rb").read())
# Set the API endpoint
api_url = "https://api.twitter.com/1.1/account/update_profile_image.json"
resp, content = self.client.request(
api_url,
method = "POST",
body=urllib.parse.urlencode({'image': b64}),
headers=None
)
logging.info("TwitterSheep: change_image(): Done.")
logging.info(content)
def follow_user(self, **kwargs):
"""
Follow a twitter user.
kwargs:
username: The username of the user to follow
notify: Whether to notify the followed user (boolean)
This method does not return anything.
"""
if( 'username' not in kwargs.keys()):
err = "TwitterSheep Error: change_image() action called without 'image' key specified in the parameters dict."
logging.error(self.sign_message(err), exc_info=True)
raise Exception(err)
if( 'notify' not in kwargs.keys()):
kwargs['notify'] = False
        # json sent to the Twitter API
        payload = {}
        payload['user_id'] = kwargs['username']
        # Set the API endpoint
        url = "https://api.twitter.com/1.1/friendships/create.json"
        resp, content = self.client.request(
            url,
            method = "POST",
            body=urllib.parse.urlencode(payload),
            headers=None
        )
logging.info("TwitterSheep: follow_user(): Done.")
logging.info(content)
def unfollow_user(self, notify=True, **kwargs):
"""
Unfollow a twitter user.
kwargs:
            username: The username of the user to unfollow
            notify: Whether to notify the unfollowed user (boolean)
This method does not return anything.
"""
if 'username' not in kwargs.keys():
err = "TwitterSheep Error: unfollow_user() action called without a 'username' key specified in the params dict."
logging.error(self.sign_message(err), exc_info=True)
raise Exception(err)
if( 'notify' not in kwargs.keys()):
kwargs['notify'] = False
# json sent to the Twitter API
payload = {}
# Set the API endpoint
url = "https://api.twitter.com/1.1/friendships/destroy.json"
payload['user_id'] = kwargs['username']
resp, content = self.client.request(
url,
method = "POST",
            body=urllib.parse.urlencode(payload),
headers=None
)
logging.info("TwitterSheep: unfollow_user(): Done.")
logging.info(content)
def tweet(self, **kwargs):
"""
Send out a tweet. This uses the function:
populate_tweet_queue()
Run an infinity loop in which the bot decides when to tweet.
Default Sheep have the following scheduling kwargs:
kwargs:
inner_sleep: Inner loop sleep time (1 s)
outer_sleep: Outer loop sleep time (10 s)
publish: Actually publish (boolean, False by default)
Additional kwargs:
media: A URL, a local file, or a file-like object (something with a read() method)
or a list of any of the above
This function never ends, so it never returns.
"""
# Process kwargs
defaults = {}
defaults['inner_sleep'] = 1.0
defaults['outer_sleep'] = 10.0
defaults['publish'] = False
# populate missing params with default values
for dk in defaults.keys():
if dk not in kwargs.keys():
kwargs[dk] = defaults[dk]
# --------------------------
# The Real McCoy
#
# call populate_tweet_queue() to populate the list of tweets to send out
#
# apply some rube goldberg logic to figure out when to tweet each item
while True:
try:
# Outer loop
tweet_queue = self.populate_tweet_queue()
nelements = len(tweet_queue)
msg = "TwitterSheep: tweet(): Populated tweet queue with %d tweets"%(nelements)
logging.debug(self.sign_message(msg))
assert nelements>0
for ii in range(nelements):
twit = tweet_queue.pop(0)
msg = "TwitterSheep: tweet(): Preparing twit"
logging.debug(self.sign_message(msg))
# Fire off the tweet
if kwargs['publish']:
if('media' in kwargs.keys()):
self._tweet(
twit,
                                media = kwargs['media']
)
else:
self._tweet( twit )
msg = "TwitterSheep: tweet(): Published tweet \"%s\""%(twit)
logging.info(self.sign_message(msg))
else:
msg = "TwitterSheep: tweet(): Not publishing tweet \"%s\""%(twit)
logging.info(self.sign_message(msg))
msg = "TwitterSheep: tweet(): Finished with twit"
logging.debug(self.sign_message(msg))
time.sleep( kwargs['inner_sleep'] )
time.sleep( kwargs['outer_sleep'] )
msg = "TwitterSheep: tweet(): Completed a cycle."
logging.debug(self.sign_message(msg))
except Exception:
# oops!
msg1 = self.sign_message("TwitterSheep: tweet(): Sheep encountered an exception. More info:")
msg2 = self.sign_message(traceback.format_exc())
msg3 = self.sign_message("Sheep is continuing...")
# Add this line in to debug sheep
#raise Exception(err)
logging.error(msg1)
logging.error(msg2)
logging.error(msg3)
time.sleep( kwargs['outer_sleep'] )
except AssertionError:
err = "TwitterSheep Error: tweet(): tweet queue was empty. Check your populate_tweet_queue() method definition."
logging.error(self.sign_message(err))
raise Exception(err)
def _tweet(self,twit,media=None):
"""
Private method.
Publish a twit.
"""
# call twitter api to tweet the twit
try:
# tweet:
if(media is not None):
stats = self.api.PostUpdates(twit,media=media)
else:
stats = self.api.PostUpdates(twit)
# everything else:
msg = "TwitterSheep: _tweet(): @%s tweeted: \"%s\""%(self.name, twit)
logging.info(self.sign_message(msg))
except twitter.TwitterError as e:
if e.message[0]['code'] == 185:
msg = "TwitterSheep Error: _tweet(): Twitter error: Daily message limit reached"
logging.info(self.sign_message(msg))
elif e.message[0]['code'] == 187:
msg = "TwitterSheep Error: _tweet(): Twitter error: Duplicate error"
logging.info(self.sign_message(msg))
else:
msg = "TwitterSheep Error: _tweet(): Twitter error: %s"%(e.message)
logging.info(self.sign_message(msg))
def populate_tweet_queue(self):
"""
Populate a tweet queue.
This method should be extended by new Sheep classes that have their own
creative means of generating tweets.
The default Sheep object will generate a tweet queue filled with
5 "Hello World" messages.
Returns a list of tweets.
"""
maxlen = 5
tweet_queue = []
# (technically, a list is a queue)
for j in range(maxlen):
tweet = "Hello world! That's number %d of 5."%(j+1)
tweet_queue.append(tweet)
msg = "TwitterSheep: populate_tweet_queue(): Finished populating a new tweet queue with %d Hello World tweets."%(len(tweet_queue))
logging.debug(self.sign_message(msg))
return tweet_queue
def sign_message(self,msg):
"""
Given a message, prepend it with [@botname]
"""
result = "[@%s] %s"%(self.name, msg)
return result
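# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example of constructing a TwitterSheep directly from a
# bot-key dict. The values below are placeholders, not real credentials; a real
# key is the JSON file produced by the Keymaker, as described in __init__ above.
# Only the non-network actions (dummy/echo) are exercised here.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    placeholder_key = {
        "consumer_token": "FAKE",
        "consumer_token_secret": "FAKE",
        "oauth_token": "FAKE",
        "oauth_token_secret": "FAKE",
        "user_id": "0",
        "screen_name": "example_bot",
    }
    sheep = TwitterSheep(placeholder_key)
    sheep.dummy()
    sheep.echo()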
|
py
|
1a57f8361e8cd6fc5ea00ec2f0a5d75eecc951e1
|
from .model_templates import SkylightData, ModelParent
import torch
import torch.nn as nn
import torch.nn.functional as F
class FifthGen(nn.Module):
def __init__(self):
super().__init__()
self.pool2x2 = nn.MaxPool2d(2, 2)
self.conv1 = nn.Conv2d(12, 96, 11, stride=(3, 3), padding=(5, 5))
self.conv2 = nn.Conv2d(96, 192, 5, padding=(2, 2))
self.conv3 = nn.Conv2d(192, 256, 3, padding=(1, 1))
self.conv4 = nn.Conv2d(256, 512, 3, padding=(1, 1))
self.conv5 = nn.Conv2d(512, 1024, 3, padding=(1, 1))
self.conv6 = nn.Conv2d(1024, 1024, 3, padding=(1, 1))
self.conv7 = nn.Conv2d(1024, 1024, 2, stride=(2, 2), padding=(0, 0))
# self.dropout = nn.Dropout(p=0.6)
# self.features1 = nn.Linear(5, 16)
# self.features2 = nn.Linear(16, 16)
# self.features3 = nn.Linear(16, 16)
# self.features4 = nn.Linear(16, 16)
# self.features5 = nn.Linear(16, 16)
self.dropout = nn.Dropout()
self.fc1 = nn.Linear(1024 * 4 * 4, 1024 * 4 * 2)
self.fc2 = nn.Linear(1024 * 4 * 2, 1024 * 4)
self.fc3 = nn.Linear(1024 * 4, len(SkylightData.wavelengths))
def forward(self, x_image_branch):
x_image_branch = F.relu(self.conv1(x_image_branch), inplace=True) # 64x64
x_image_branch = F.relu(self.conv2(x_image_branch), inplace=True) # 64x64
x_image_branch = F.relu(
self.pool2x2(self.conv3(x_image_branch)), inplace=True
) # 32x32
x_image_branch = F.relu(self.conv4(x_image_branch), inplace=True) # 16x16
x_image_branch = F.relu(self.conv5(x_image_branch), inplace=True) # 16x16
x_image_branch = F.relu(self.conv6(x_image_branch), inplace=True) # 16x16
x_image_branch = F.relu(
self.pool2x2(self.conv7(x_image_branch)), inplace=True
) # 4x4
x_image_branch = torch.flatten(x_image_branch, 1)
x_image_branch = F.relu(self.fc1(self.dropout(x_image_branch)), inplace=True)
x_image_branch = F.relu(self.fc2(self.dropout(x_image_branch)), inplace=True)
x_image_branch = self.fc3(x_image_branch)
# x_feature_branch = F.relu(self.features1(x_feature_branch), inplace=True)
# x_feature_branch = F.relu(self.features2(x_feature_branch), inplace=True)
# x_feature_branch = F.relu(self.features3(x_feature_branch), inplace=True)
# x_feature_branch = F.relu(self.features4(x_feature_branch), inplace=True)
# x_feature_branch = F.relu(self.features5(x_feature_branch), inplace=True)
return x_image_branch
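# --- Shape sketch (added for illustration; not part of the original module) ---
# The relative import at the top means this module is meant to be imported from
# its package, so the smoke test below is left commented out. With 12-channel
# 96x96 inputs the spatial sizes reduce to the 1024 * 4 * 4 features expected by
# fc1 (96 -> 32 after conv1, 16 after the first pool, 8 after conv7, 4 after the
# final pool).
#
# model = FifthGen()
# dummy = torch.randn(2, 12, 96, 96)
# out = model(dummy)   # expected shape: (2, len(SkylightData.wavelengths))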
|
py
|
1a57f8c1c56a55e2dcc444b6a3776de154defcdf
|
from openpyxl import load_workbook
import re
filename = 'aalh_iit_charlesmensingcollection.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
minimumcol = 15
maximumcol = 15
minimumrow = 7
maximumrow = 703
iterationrow = 7
targetcol = 15
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
for cell in row:
print(iterationrow)
testvar = ws.cell(row=iterationrow, column=targetcol).value
print(testvar)
cleandate = None
approx = 'approximately '
try:
            if testvar is None:
                ws.cell(row=iterationrow, column=targetcol).value = ''
            elif testvar.endswith('?') or testvar.startswith(('c', 'C', 'a')):
                cleandate = re.findall(r'\d{4}', testvar)
                ws.cell(row=iterationrow, column=targetcol).value = approx + cleandate[0]
            elif testvar.find('-') != -1 or testvar.find(',') != -1:
                cleandate = testvar
                ws.cell(row=iterationrow, column=targetcol).value = cleandate
            else:
                cleandate = re.findall(r'\d{4}', testvar)
                ws.cell(row=iterationrow, column=targetcol).value = cleandate[0]
print(ws.cell(row=iterationrow, column=targetcol).value)
except:
print('STATUS = PROBLEM')
iterationrow = iterationrow + 1
wb.save("aalh_iit_charlesmensingcollection.xlsx")
|
py
|
1a57fa94cbe2726fb096b0ace56fc932bbf7e2ae
|
from simpletcp.clientsocket import ClientSocket
s1 = ClientSocket("localhost", 5000)
response = s1.send("Hello, World!")
s2 = ClientSocket("localhost", 5000, single_use=False)
r1 = s2.send("Hello for the first time...")
r2 = s2.send("...and hello for the last!")
s2.close()
# Display the correspondence
print("s1 sent\t\tHello, World!")
print("s1 received\t\t{}".format(response.decode("UTF-8")))
print("-------------------------------------------------")
print("s2 sent\t\tHello for the first time....")
print("s2 received\t\t{}".format(r1.decode("UTF-8")))
print("s2 sent\t\t...and hello for the last!.")
print("s2 received\t\t{}".format(r2.decode("UTF-8")))
|
py
|
1a57fab1fc1929ad2b5571096acc92a64b52d6ca
|
import voluptuous as vol
from esphome import pins
from esphome.components import sensor, spi
from esphome.components.spi import SPIComponent
import esphome.config_validation as cv
from esphome.const import CONF_CS_PIN, CONF_ID, CONF_NAME, CONF_SPI_ID, CONF_UPDATE_INTERVAL
from esphome.cpp_generator import Pvariable, get_variable
from esphome.cpp_helpers import gpio_output_pin_expression, setup_component
from esphome.cpp_types import App
MAX31855Sensor = sensor.sensor_ns.class_('MAX31855Sensor', sensor.PollingSensorComponent,
spi.SPIDevice)
PLATFORM_SCHEMA = cv.nameable(sensor.SENSOR_PLATFORM_SCHEMA.extend({
cv.GenerateID(): cv.declare_variable_id(MAX31855Sensor),
cv.GenerateID(CONF_SPI_ID): cv.use_variable_id(SPIComponent),
vol.Required(CONF_CS_PIN): pins.gpio_output_pin_schema,
vol.Optional(CONF_UPDATE_INTERVAL): cv.update_interval,
}).extend(cv.COMPONENT_SCHEMA.schema))
def to_code(config):
for spi_ in get_variable(config[CONF_SPI_ID]):
yield
for cs in gpio_output_pin_expression(config[CONF_CS_PIN]):
yield
rhs = App.make_max31855_sensor(config[CONF_NAME], spi_, cs,
config.get(CONF_UPDATE_INTERVAL))
max31855 = Pvariable(config[CONF_ID], rhs)
sensor.setup_sensor(max31855, config)
setup_component(max31855, config)
BUILD_FLAGS = '-DUSE_MAX31855_SENSOR'
def to_hass_config(data, config):
return sensor.core_to_hass_config(data, config)
|
py
|
1a57fab53e21e27afe21c5b63029ca89c349b05f
|
# Copyright (c) 2021 Qianyun, Inc. All rights reserved.
cloudentry_id = "yacmp:cloudentry:type:azure"
|
py
|
1a57fac8a3f32368744b3f4bf6935b86d684b038
|
import setuptools
import pentagraph
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pentagraph",
version=pentagraph.__version__,
author=pentagraph.__author__,
author_email="[email protected]",
description="Graph representation and tools for programming with pentagame",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Penta-Game/pentagraph",
packages=setuptools.find_packages("."),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.8",
)
|
py
|
1a57fb07c3064f80c719df1c58d95355b2f10a35
|
from neo4j import GraphDatabase
from common.database import *
from common.utils import get_data_dir
import logging
def add_annotation_entity_labels(database:Database):
"""
Add additional entity labels (e.g. Disease, Anatomy, Phenomonea, Food) to mesh topical description.
Disease: MeSH terms under tree number 'C' as Disease, excluding the top two levels (C and Cxx)
Anatomy: under tree number 'A', and remove the following terms:[Anatomy, Body Region, Animal Structures, Bacterial Structures, Plant Structures, Fungal Structures and Viral Structures]
Phenomena:
1. Phenomena and Process: G
Exclude Physical Phenomena [G01]
Exclude Genetic Phenomena (which includes Gene Expression branch) [G05]
Exclude Food (G07.203.300), Beverage(G07.203.100) and Fermented foods and beverages(G07.203.200)
Exclude Reproductive Physiological Phenomena [G08.686]
Exclude Respiratory Physiological Phenomena [G09.772]
Exclude Environment [G16.500.275]
Exclude mathematical concepts (G17)
Exclude all terms contains 'phenomena'
Psychiatry and Psychology Category: F
Eliminate all BUT Mental Disorders [F03]
Exclude the first two levels in hierarchy tree except 'mental disorders'
"""
query_disease = """
match (t:TreeNumber) where t.obsolete=0 and t.eid starts with 'C' and t.eid contains '.'
with t match (t)-[:HAS_TREENUMBER]-(td:TopicalDescriptor) where td.obsolete = 0
with distinct td as t set t:Disease
"""
database.run_query(query_disease)
query_anatomy = """
match (n:TreeNumber)
where n.obsolete = 0 and n.eid starts with 'A' and not n.eid in ['A', 'A01', 'A13', 'A18', 'A19', 'A20', 'A21']
with n match (n)-[]-(td:TopicalDescriptor) where td.obsolete = 0
with distinct td as t set t:Anatomy
"""
database.run_query(query_anatomy)
query_phenomena = """
match (t:TreeNumber)
where t.eid starts with 'G' or t.eid starts with 'F03'
with t match (t)-[:HAS_TREENUMBER]-(mesh:TopicalDescriptor) where mesh.obsolete = 0 and t.obsolete = 0
with mesh match (mesh)-[:HAS_TREENUMBER]-(t) where t.obsolete = 0
with mesh, collect(t.eid) as treenumbers
where not mesh.name contains 'Phenomena'
and none(t in treenumbers where t starts with 'G07.203.300'
or t starts with 'G07.203.100'
or t starts with 'G07.203.200'
or t starts with 'G01'
or t starts with 'G05'
or t starts with 'G08.686'
or t starts with 'G09.772'
or t starts with 'G16.500.275'
or t starts with 'G17')
with distinct mesh as m set m:Phenomena
"""
database.run_query(query_phenomena)
query_food = """
match (n:db_MESH {name:'Food'})-[:HAS_TREENUMBER]-(t:TreeNumber)
with t match (tr:TreeNumber) where tr.eid starts with t.eid and tr.eid <> t.eid
match (tr)-[:HAS_TREENUMBER]-(m:db_MESH) where m.obsolete = 0
with distinct m as term set term:Food
"""
database.run_query(query_food)
def main():
database = get_database()
add_annotation_entity_labels(database)
database.close()
if __name__ == "__main__":
main()
|
bzl
|
1a57fb54a2accaa01bfd6a62a25f8960a91e0906
|
"""Declare runtime dependencies
These are needed for local dev, and users must install them as well.
See https://docs.bazel.build/versions/main/skylark/deploying.html#dependencies
"""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
versions = struct(
aspect_bazel_lib = "0.11.1",
rules_nodejs = "5.4.2",
)
# WARNING: any changes in this function may be BREAKING CHANGES for users
# because we'll fetch a dependency which may be different from one that
# they were previously fetching later in their WORKSPACE setup, and now
# ours took precedence. Such breakages are challenging for users, so any
# changes in this function should be marked as BREAKING in the commit message
# and released only in semver majors.
def rules_js_dependencies():
"Dependencies for users of aspect_rules_js"
# The minimal version of bazel_skylib we require
maybe(
http_archive,
name = "bazel_skylib",
sha256 = "c6966ec828da198c5d9adbaa94c05e3a1c7f21bd012a0b29ba8ddbccb2c93b0d",
urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/1.1.1/bazel-skylib-1.1.1.tar.gz"],
)
maybe(
http_archive,
name = "rules_nodejs",
sha256 = "26766278d815a6e2c43d2f6c9c72fde3fec8729e84138ffa4dabee47edc7702a",
urls = ["https://github.com/bazelbuild/rules_nodejs/releases/download/{0}/rules_nodejs-core-{0}.tar.gz".format(versions.rules_nodejs)],
)
maybe(
http_archive,
name = "aspect_bazel_lib",
sha256 = "a8b47eeaf3c1bd41c4f4b633ef4c959daf83fdee343379495098b50571d4b3b8",
strip_prefix = "bazel-lib-{}".format(versions.aspect_bazel_lib),
url = "https://github.com/aspect-build/bazel-lib/archive/refs/tags/v{}.tar.gz".format(versions.aspect_bazel_lib),
)
|
py
|
1a57fb7a3fdf365f1decc23da47172f945030f2e
|
import matplotlib.pyplot as pyplot
import numpy
from pprint import pprint
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('filename')
parser.add_argument('skiplines', type=int)
parser.add_argument('width', type=int)
parser.add_argument('height', type=int)
args = parser.parse_args()
u = numpy.loadtxt(args.filename,
skiprows=args.skiplines).reshape(args.width, args.height)
u = numpy.transpose(u)
pprint(u)
pyplot.contour(u)
pyplot.show()
|
py
|
1a57fcd503d9064f4ebcb06e05a15ef235f19411
|
import numpy as np
from matplotlib import pyplot as plt
import cv2
import argparse
import os
from slam import SLAM
import tqdm
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='TODO')
parser.add_argument('path', metavar='path', type=str, help='data')
args = parser.parse_args()
path = args.path
image_names = sorted(os.listdir(path))
w = 1280
h = 1024
calibration_matrix = np.array([
[0.535719308086809*w, 0, 0.493248545285398*w],
[0, 0.669566858850269*h, 0.500408664348414*h],
[0, 0, 1]
])
sigma = 0.897966326944875
slam = SLAM(width=w, height=h, calibration_matrix=calibration_matrix)
t = tqdm.tqdm(image_names, total=len(image_names))
for name in t:
#print(name)
#fig = plt.figure()
img = cv2.imread(path + '/' + name, cv2.IMREAD_GRAYSCALE)
#plt.imshow(img)
#plt.show()
img2 = cv2.undistort(img, calibration_matrix, sigma)
# plt.imshow(img2)
# plt.show()
slam.run(img)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(slam.position[:,0], slam.position[:,1], slam.position[:,2], '-')
#ax.scatter(slam.map[:,0], slam.map[:,1], slam.map[:,2])
#ax.set_xlim3d(-20,20)
#ax.set_ylim3d(-20,20)
#ax.set_zlim3d(-20,20)
plt.show()
#print(dir(key_point[0]))
# print('angle: ', key_point[1].angle)
# print('class_id: ', key_point[1].class_id)
# print('octave: ', key_point[1].octave)
# print('pt: ', key_point[1].pt)
# print('response: ', key_point[1].response)
# print('size: ', key_point[1].size)
|
py
|
1a57fd1e0760b63ebf7cdc6b84bd8aaee76ca723
|
import json
from packlib.base import ProxmoxAction
class ClusterCephFlagsFlagUpdateFlagAction(ProxmoxAction):
"""
Set or clear (unset) a specific ceph flag
"""
def run(self, flag, value, profile_name=None):
super().run(profile_name)
# Only include non None arguments to pass through to proxmox api.
proxmox_kwargs = {}
for api_arg in [
["flag", flag, "string"],
["value", value, "boolean"],
]:
if api_arg[1] is None:
continue
if "[n]" in api_arg[0]:
unit_list = json.loads(api_arg[1])
for i, v in enumerate(unit_list):
proxmox_kwargs[api_arg[0].replace("[n]", str(i))] = v
else:
if api_arg[2] == "boolean":
api_arg[1] = int(api_arg[1])
proxmox_kwargs[api_arg[0]] = api_arg[1]
return self.proxmox.put(f"cluster/ceph/flags/{flag}", **proxmox_kwargs)
|
py
|
1a57fdab5953dbe75c11aff05c01daf74004da6b
|
from pypy.interpreter.error import OperationError
from pypy.interpreter import module
from pypy.interpreter.mixedmodule import MixedModule
import pypy.module.imp.importing
# put builtins here that should be optimized somehow
class Module(MixedModule):
"""Built-in functions, exceptions, and other objects."""
appleveldefs = {
'execfile' : 'app_io.execfile',
'raw_input' : 'app_io.raw_input',
'input' : 'app_io.input',
'print' : 'app_io.print_',
'apply' : 'app_functional.apply',
'sorted' : 'app_functional.sorted',
'any' : 'app_functional.any',
'all' : 'app_functional.all',
'sum' : 'app_functional.sum',
'map' : 'app_functional.map',
'reduce' : 'app_functional.reduce',
'filter' : 'app_functional.filter',
'zip' : 'app_functional.zip',
'vars' : 'app_inspect.vars',
'dir' : 'app_inspect.dir',
'bin' : 'app_operation.bin',
}
interpleveldefs = {
# constants
'__debug__' : '(space.w_True)',
'None' : '(space.w_None)',
'False' : '(space.w_False)',
'True' : '(space.w_True)',
'bytes' : '(space.w_bytes)',
'file' : 'state.get(space).w_file',
'open' : 'state.get(space).w_file',
# default __metaclass__: old-style class
'__metaclass__' : 'interp_classobj.W_ClassObject',
# interp-level function definitions
'abs' : 'operation.abs',
'chr' : 'operation.chr',
'unichr' : 'operation.unichr',
'len' : 'operation.len',
'ord' : 'operation.ord',
'pow' : 'operation.pow',
'repr' : 'operation.repr',
'hash' : 'operation.hash',
'oct' : 'operation.oct',
'hex' : 'operation.hex',
'round' : 'operation.round',
'cmp' : 'operation.cmp',
'coerce' : 'operation.coerce',
'divmod' : 'operation.divmod',
'format' : 'operation.format',
'_issubtype' : 'operation._issubtype',
'issubclass' : 'abstractinst.app_issubclass',
'isinstance' : 'abstractinst.app_isinstance',
'getattr' : 'operation.getattr',
'setattr' : 'operation.setattr',
'delattr' : 'operation.delattr',
'hasattr' : 'operation.hasattr',
'iter' : 'operation.iter',
'next' : 'operation.next',
'id' : 'operation.id',
'intern' : 'operation.intern',
'callable' : 'operation.callable',
'compile' : 'compiling.compile',
'eval' : 'compiling.eval',
'__import__' : 'pypy.module.imp.importing.importhook',
'reload' : 'pypy.module.imp.importing.reload',
'range' : 'functional.range_int',
'xrange' : 'functional.W_XRange',
'enumerate' : 'functional.W_Enumerate',
'min' : 'functional.min',
'max' : 'functional.max',
'reversed' : 'functional.reversed',
'super' : 'descriptor.W_Super',
'staticmethod' : 'pypy.interpreter.function.StaticMethod',
'classmethod' : 'pypy.interpreter.function.ClassMethod',
'property' : 'descriptor.W_Property',
'globals' : 'interp_inspect.globals',
'locals' : 'interp_inspect.locals',
}
def pick_builtin(self, w_globals):
"Look up the builtin module to use from the __builtins__ global"
# pick the __builtins__ roughly in the same way CPython does it
# this is obscure and slow
space = self.space
try:
w_builtin = space.getitem(w_globals, space.newtext('__builtins__'))
except OperationError as e:
if not e.match(space, space.w_KeyError):
raise
else:
if w_builtin is space.builtin: # common case
return space.builtin
if space.isinstance_w(w_builtin, space.w_dict):
return module.Module(space, None, w_builtin)
if isinstance(w_builtin, module.Module):
return w_builtin
# no builtin! make a default one. Give them None, at least.
builtin = module.Module(space, None)
space.setitem(builtin.w_dict, space.newtext('None'), space.w_None)
return builtin
def setup_after_space_initialization(self):
"""NOT_RPYTHON"""
space = self.space
# install the more general version of isinstance() & co. in the space
from pypy.module.__builtin__ import abstractinst as ab
space.abstract_isinstance_w = ab.abstract_isinstance_w.__get__(space)
space.abstract_issubclass_w = ab.abstract_issubclass_w.__get__(space)
space.abstract_isclass_w = ab.abstract_isclass_w.__get__(space)
space.abstract_getclass = ab.abstract_getclass.__get__(space)
space.exception_is_valid_class_w = ab.exception_is_valid_class_w.__get__(space)
space.exception_is_valid_obj_as_class_w = ab.exception_is_valid_obj_as_class_w.__get__(space)
space.exception_getclass = ab.exception_getclass.__get__(space)
space.exception_issubclass_w = ab.exception_issubclass_w.__get__(space)
|
py
|
1a57ff2ab97fb14495e8b34972b20131ae84e47d
|
from mesh import QuadMesh, Mesh1D
from plot import Plot
from fem import QuadFE, DofHandler
from function import Explicit
import numpy as np
plot = Plot()
mesh = Mesh1D()
Q0 = QuadFE(1,'DQ0')
dh0 = DofHandler(mesh,Q0)
n_levels = 10
for l in range(n_levels):
mesh.cells.refine(new_label=l)
dh0.distribute_dofs(subforest_flag=l)
f = Explicit(lambda x: np.abs(x-0.5), dim=1)
fQ = f.interpolant(dh0, subforest_flag=3)
plot.line(fQ, mesh)
plot.mesh(mesh, dofhandler=dh0, subforest_flag=0)
mesh = QuadMesh(resolution=(10,10))
plot.mesh(mesh)
|
py
|
1a5800749beb48212e8760526bf50318a0ec4ea0
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
from requests import __version__ as requests_version
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.compute_v1.types import compute
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
grpc_version=None,
rest_version=requests_version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class LicenseCodesTransport(abc.ABC):
"""Abstract transport class for LicenseCodes."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/compute.readonly",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
)
DEFAULT_HOST: str = "compute.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials is service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.get: gapic_v1.method.wrap_method(
self.get, default_timeout=None, client_info=client_info,
),
self.test_iam_permissions: gapic_v1.method.wrap_method(
self.test_iam_permissions,
default_timeout=None,
client_info=client_info,
),
}
@property
def get(
self,
) -> Callable[
[compute.GetLicenseCodeRequest],
Union[compute.LicenseCode, Awaitable[compute.LicenseCode]],
]:
raise NotImplementedError()
@property
def test_iam_permissions(
self,
) -> Callable[
[compute.TestIamPermissionsLicenseCodeRequest],
Union[
compute.TestPermissionsResponse, Awaitable[compute.TestPermissionsResponse]
],
]:
raise NotImplementedError()
__all__ = ("LicenseCodesTransport",)
|
py
|
1a580097646aa9be6b07d657b60f7d767f7b380f
|
from secrets import SITE_DOMAIN
SERVER_MODE = True
TMP_FOLDER = "/tmp"
TMP_FOLDER_PREFIX = "autopublisher_"
SITE_LOGIN_URL = f"http://{SITE_DOMAIN}/user"
SITE_FILEBROWSER_URL = f"http://{SITE_DOMAIN}/imce"
SITE_RASP_URL = f"http://{SITE_DOMAIN}/node/18/edit"
SITE_NEWS_URL = f"http://{SITE_DOMAIN}/node/add/news"
|
py
|
1a5801884c5782f8def86d854ebce85baefc4b8d
|
# Takes in the size of the board as parameters
class Board:
# Initialised the board object and calls setBoard
def __init__(self, x, y):
self.board = []
self.x = x
self.y = y
self.setBoard()
# Sets the size of the board based on the x and y
def setBoard(self):
for y in range(self.y):
self.board.append([])
for x in range(self.x):
self.board[y].append(None)
# Returns the size of the board
def getBoardSize(self):
return self.x, self.y
def getBoard(self):
return self.board
# Changes the size of the board
    def changeDimensions(self, new_x, new_y):
        self.x = new_x
        self.y = new_y
        self.board = []
        self.setBoard()
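# --- Usage sketch (added for illustration; not part of the original file) ---
# A small example of how the Board class above might be used.
if __name__ == "__main__":
    board = Board(3, 2)                  # 3 columns (x) by 2 rows (y)
    print(board.getBoardSize())          # (3, 2)
    print(board.getBoard())              # [[None, None, None], [None, None, None]]
    board.changeDimensions(2, 2)
    print(board.getBoardSize())          # (2, 2)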
|
py
|
1a5801c718236d84035c0ddf26eeb9e5f8f5dd59
|
from snakeeyes.blueprints.bet.views import bet
|
py
|
1a580568aa4b4863b76bf7b5322b0277d181e435
|
# -*- coding: utf-8 -*-
# Copyright 2021 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
Unit test file for netaddr test plugin: reserved
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import unittest
from ansible_collections.ansible.utils.plugins.test.reserved import _reserved
class TestReserved(unittest.TestCase):
def setUp(self):
pass
def test_invalid_data(self):
"""Check passing invalid argspec"""
# missing argument
with self.assertRaises(TypeError) as error:
_reserved()
self.assertIn("argument", str(error.exception))
def test_valid_data(self):
"""Check passing valid data as per criteria"""
result = _reserved(ip="253.0.0.1")
self.assertEqual(result, True)
result = _reserved(ip="128.146.1.7")
self.assertEqual(result, False)
result = _reserved(ip="string")
self.assertEqual(result, False)
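# Added convenience so the file can be run directly where the collection is
# importable; not part of the original test module.
if __name__ == "__main__":
    unittest.main()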
|
py
|
1a58057a2c91c62cc82bf851c1e8100ab8fdfd89
|
#!/usr/bin/env python3
# get account authorization details using boto3 and aws-shell (aws-cli)
import boto3
import json
import sys
import os
import pprint
from color import color
# declare env var for aws-shell
AWS_ACCESS_KEY_ID = os.environ["AWS_ACCESS_KEY_ID"]
AWS_SECRET_ACCESS_KEY = os.environ["AWS_SECRET_ACCESS_KEY"]
client = boto3.client('iam')
response = client.get_policy(
PolicyArn='arn:aws:iam::632660066814:role/admin-acces-anvarov'
)
print(response['Policy'])
|
py
|
1a5806129f5ebd2d78b5debbed287f5c5a819cb9
|
import json
class StandardVocabulary:
"""Class for the standard vocabulary"""
def __init__(self, json_content: list):
"""Initiliaze the class with the json tree content (from JSTree)
Args:
json_content (list): JSON from JSTree
"""
self.jstree_as_list = json_content
self.jstree_as_dict = {
i["id"]: {
"id": i["id"],
"text": i["text"],
"icon": i["icon"],
"data": i["data"],
"parent": i["parent"],
}
for i in self.jstree_as_list
}
def update_ontology(self, dest_onto: object) -> list:
"""Update the current standard vocabulary tree with the latest modification
(destination) of the tree (delete, add, update, check parents).
Args:
dest_onto (object): Another instance of the class StandardVocabulary
Returns:
list: return the updated tree as list of dict (json)
"""
updated_jstree_as_list = []
for i in self.jstree_as_list:
if i["id"] not in dest_onto.jstree_as_dict.keys():
# If destination is missing a node: mark the node as outdated
i["data"]["outdated"] = True
if "OUTDATED" not in i["text"]:
i["text"] = "OUTDATED : " + i["text"]
updated_jstree_as_list.append(i)
elif i["id"] in dest_onto.jstree_as_dict.keys():
if (
i["text"] != dest_onto.jstree_as_dict[i["id"]]["text"]
or i["data"] != dest_onto.jstree_as_dict[i["id"]]["data"]
):
# If destination has modified data or name: update
i["text"] = dest_onto.jstree_as_dict[i["id"]]["text"]
i["data"]["description"] = dest_onto.jstree_as_dict[i["id"]][
"data"
].get("description", "")
i["data"]["hpo_datamined"] = dest_onto.jstree_as_dict[i["id"]][
"data"
].get("hpo_datamined", "")
i["data"]["phenotype_datamined"] = dest_onto.jstree_as_dict[
i["id"]
]["data"].get("phenotype_datamined", "")
i["data"]["gene_datamined"] = dest_onto.jstree_as_dict[i["id"]][
"data"
].get("gene_datamined", "")
i["data"]["alternative_language"] = dest_onto.jstree_as_dict[
i["id"]
]["data"].get("alternative_language", "")
i["data"]["correlates_with"] = dest_onto.jstree_as_dict[i["id"]][
"data"
].get("correlates_with", "")
i["data"]["synonymes"] = dest_onto.jstree_as_dict[i["id"]][
"data"
].get("synonymes", "")
i["data"]["hex_color"] = dest_onto.jstree_as_dict[i["id"]][
"data"
].get("hex_color", "")
                    updated_jstree_as_list.append(i)
                else:
                    # Node unchanged in the destination: keep it as-is
                    updated_jstree_as_list.append(i)
# If destination has new entry: add them
for i in dest_onto.jstree_as_dict.keys():
if i not in self.jstree_as_dict.keys():
updated_jstree_as_list.append(dest_onto.jstree_as_dict[i])
self.jstree_as_dict = {
j["id"]: {
"id": j["id"],
"text": j["text"],
"icon": j["icon"],
"data": j["data"],
"parent": j["parent"],
}
for j in updated_jstree_as_list
}
# If destination has different parent ID: change it.
for i in dest_onto.jstree_as_dict.keys():
if (
dest_onto.jstree_as_dict[i]["parent"]
!= self.jstree_as_dict[i]["parent"]
):
self.jstree_as_dict[i]["parent"] = dest_onto.jstree_as_dict[i]["parent"]
self.jstree_as_list = list(self.jstree_as_dict.values())
self.clean_tree()
return self.jstree_as_list
def dump_updated_to_file(self, file_path: str):
"""Dump the updated tree to a json file
Args:
file_path (str): path to save the json file
"""
with open(file_path, "w") as fp:
json.dump(self.jstree_as_dict, fp, indent=4)
def clean_tree(self) -> list:
"""Clean the tree of non informative fields.
Returns:
list: return the updated tree as list of dict (json)
"""
clean_tree_list = []
for i in self.jstree_as_dict:
clean_tree_list.append(self.jstree_as_dict[i])
self.jstree_as_list = clean_tree_list
return self.jstree_as_list
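# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example with two hand-written JSTree-style node lists.
# Node ids, names and "data" fields below are made up for demonstration only.
if __name__ == "__main__":
    current = StandardVocabulary([
        {"id": "n1", "text": "Root", "icon": "", "data": {}, "parent": "#"},
        {"id": "n2", "text": "Old name", "icon": "", "data": {}, "parent": "n1"},
    ])
    destination = StandardVocabulary([
        {"id": "n1", "text": "Root", "icon": "", "data": {"description": "root node"}, "parent": "#"},
        {"id": "n2", "text": "New name", "icon": "", "data": {}, "parent": "n1"},
        {"id": "n3", "text": "Added node", "icon": "", "data": {}, "parent": "n1"},
    ])
    updated = current.update_ontology(destination)
    for node in updated:
        print(node["id"], node["text"])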
|
py
|
1a5806d02b9d9e4e64db87cb3c910632b4e11d35
|
from typing import Dict, Any
import os
import sys
import glob
import json
import yaml
import time
import gzip
import random
import logging
import multiprocessing as mp
import queue
import threading
import ai2thor.controller
import ai2thor.util.metrics
from robothor_challenge.startx import startx
logger = logging.getLogger(__name__)
ch = logging.StreamHandler(sys.stdout)
ch.flush = sys.stdout.flush
ch.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(name)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
ALLOWED_ACTIONS = ["MoveAhead", "RotateRight", "RotateLeft", "LookUp", "LookDown", "Stop"]
def get_object_by_type(event_objects, object_type):
for obj in event_objects:
if obj['objectId'].split("|")[0] == object_type:
return obj
return None
class RobothorChallenge:
def __init__(self, cfg_file, agent_class, agent_kwargs, render_depth=False):
self.agent_class = agent_class
self.agent_kwargs = agent_kwargs
self.config = self.load_config(cfg_file, render_depth)
self.setup_env()
self.controller_kwargs = {
"commit_id": self.config["thor_build_id"],
"width": self.config["width"],
"height": self.config["height"],
**self.config["initialize"]
}
self.current_scene = None
self.reachable_positions_per_scene = {}
@staticmethod
def load_config(cfg_file, render_depth):
logger.info("Loading configuration from: %s" % cfg_file)
with open(cfg_file, "r") as f:
config = yaml.safe_load(f.read())
if render_depth:
config["initialize"]["renderDepthImage"] = True
return config
@staticmethod
def setup_env():
if "DISPLAY" not in os.environ:
xthread = threading.Thread(target=startx)
xthread.daemon = True
xthread.start()
import time
# XXX change this to use xdpyinfo
time.sleep(4)
@staticmethod
def load_split(dataset_dir, split):
split_paths = os.path.join(dataset_dir, split, "episodes", "*.json.gz")
split_paths = sorted(glob.glob(split_paths))
episode_list = []
dataset = {}
for split_path in split_paths:
logger.info("Loading: {path}".format(path=split_path))
with gzip.GzipFile(split_path, "r") as f:
episodes = json.loads(f.read().decode("utf-8"))
# Build a dictionary of the dataset indexed by scene, object_type
curr_scene = None
curr_object = None
points = []
scene_points = {}
for data_point in episodes:
if curr_object != data_point["object_type"]:
scene_points[curr_object] = points
curr_object = data_point["object_type"]
points = []
if curr_scene != data_point["scene"]:
dataset[curr_scene] = scene_points
curr_scene = data_point["scene"]
scene_points = {}
points.append(data_point)
episode_list += episodes
return episode_list, dataset
@staticmethod
def inference_worker(
worker_ind: int,
in_queue: mp.Queue,
out_queue: mp.Queue,
agent_class: Any,
agent_kwargs: Dict[str, Any],
controller_kwargs: Dict[str, Any],
max_steps: int,
test: bool
):
agent = agent_class(**agent_kwargs)
controller = ai2thor.controller.Controller(**controller_kwargs)
while True:
try:
e = in_queue.get(timeout=1)
except queue.Empty:
break
logger.info("Task Start id:{id} scene:{scene} target_object:{object_type} initial_position:{initial_position} rotation:{initial_orientation}".format(**e))
controller.initialization_parameters["robothorChallengeEpisodeId"] = e["id"]
print(e["scene"])
controller.reset(e["scene"])
teleport_action = {
"action": "TeleportFull",
**e["initial_position"],
"rotation": {"x": 0, "y": e["initial_orientation"], "z": 0},
"horizon": e["initial_horizon"],
"standing": True
}
controller.step(action=teleport_action)
total_steps = 0
agent.reset()
episode_metrics = {
"trajectory" : [{
**e["initial_position"],
"rotation" : float(e["initial_orientation"]),
"horizon" : e["initial_horizon"]
}],
"actions_taken" : []
}
stopped = False
while total_steps < max_steps and stopped is False:
total_steps += 1
event = controller.last_event
event.metadata.clear()
action = agent.act({
"object_goal" : e["object_type"],
"depth" : event.depth_frame,
"rgb" : event.frame
})
if action not in ALLOWED_ACTIONS:
raise ValueError("Invalid action: {action}".format(action=action))
logger.info("Agent action: {action}".format(action=action))
event = controller.step(action=action)
episode_metrics["trajectory"].append({
**event.metadata["agent"]["position"],
"rotation": event.metadata["agent"]["rotation"]["y"],
"horizon": event.metadata["agent"]["cameraHorizon"]
})
episode_metrics["actions_taken"].append({
"action": action,
"success": event.metadata["lastActionSuccess"]
})
stopped = action == "Stop"
if not test:
target_obj = get_object_by_type(event.metadata["objects"], e["object_type"])
assert target_obj is not None
target_visible = target_obj["visible"]
episode_metrics["success"] = stopped and target_visible
if not test:
episode_result = {
"path": episode_metrics["trajectory"],
"shortest_path": e["shortest_path"],
"success": episode_metrics["success"]
}
else:
episode_result = None
out_queue.put((e["id"], episode_metrics, episode_result))
controller.stop()
print(f"Worker {worker_ind} Finished.")
def inference(self, episodes, nprocesses=1, test=False):
send_queue = mp.Queue()
receive_queue = mp.Queue()
expected_count = len(episodes)
for e in episodes:
send_queue.put(e)
processes = []
for worker_ind in range(nprocesses):
p = mp.Process(
target=self.inference_worker,
kwargs=dict(
worker_ind=worker_ind,
in_queue=send_queue,
out_queue=receive_queue,
agent_class=self.agent_class,
agent_kwargs=self.agent_kwargs,
controller_kwargs=self.controller_kwargs,
max_steps=self.config["max_steps"],
test=test
),
)
p.start()
processes.append(p)
time.sleep(0.2)
metrics = {"episodes" : {}}
episode_results = []
while len(metrics["episodes"]) < expected_count:
try:
ep_id, episode_metrics, episode_result = receive_queue.get(timeout=10)
metrics["episodes"][ep_id] = episode_metrics
if not test:
episode_results.append(episode_result)
            except queue.Empty:
print("Went 10 seconds without a new episode result.")
if all(not p.is_alive() for p in processes):
try:
ep_id, episode_metrics, episode_result = receive_queue.get(timeout=1)
metrics["episodes"][ep_id] = episode_metrics
if not test:
episode_results.append(episode_result)
                    except queue.Empty:
raise RuntimeError("All processes dead but nothing in queue!")
for p in processes:
p.join(timeout=2)
metrics["ep_len"] = sum([len(em["trajectory"]) for em in metrics["episodes"].values()]) / len(metrics["episodes"])
if not test:
metrics["success"] = sum([r["success"] for r in episode_results]) / len(episode_results)
metrics["spl"] = ai2thor.util.metrics.compute_spl(episode_results)
if not test:
logger.info("Total Episodes: {episode_count} Success:{success} SPL:{spl} Episode Length:{ep_len}".format(episode_count=len(episodes), success=metrics["success"], spl=metrics["spl"], ep_len=metrics["ep_len"]))
else:
logger.info("Total Episodes: {episode_count} Episode Length:{ep_len}".format(episode_count=len(episodes), ep_len=metrics["ep_len"]))
return metrics
def _change_scene(self, scene):
if self.current_scene != scene:
self.current_scene = scene
self.controller.reset(scene)
logger.info("Changed to scene: '{scene}'".format(scene=scene))
def move_to_point(self, datapoint):
self._change_scene(datapoint["scene"])
logger.info("Moving to position: {p}, y-rotation: {rot}, horizon: {hor}".format(
p=datapoint["initial_position"],
rot=datapoint["initial_orientation"],
hor=datapoint["initial_horizon"]
))
return self.controller.step(
action="TeleportFull",
x=datapoint["initial_position"]["x"],
y=datapoint["initial_position"]["y"],
z=datapoint["initial_position"]["z"],
rotation={"x" : 0, "y" : datapoint["initial_orientation"], "z" : 0},
horizon=datapoint["initial_horizon"],
standing=True
)
def move_to_random_dataset_point(self, dataset, scene, object_type):
if scene in dataset:
if object_type in dataset[scene]:
datapoint = random.choice(dataset[scene][object_type])
return self.move_to_point(datapoint)
else:
logger.warning(
"No object of type: '{object_type}' for scene: '{scene}', in dataset".format(
object_type=object_type,
scene=scene
)
)
return None
else:
logger.warning("No scene: '{scene}' in dataset".format(scene=scene))
return None
def move_to_random_point(self, scene, y_rotation=0, horizon=0):
if "test" in scene:
raise RuntimeError(
"Moving to random points is not posible in test scenes"
)
reachable_positions = self._get_reachable_positions_in_scene(scene)
p = random.choice(reachable_positions)
return self.move_to_point({
"initial_position": p,
"initial_orientation": y_rotation,
"initial_horizon": horizon,
"scene" : scene
})
def _get_reachable_positions_in_scene(self, scene):
self._change_scene(scene)
if scene not in self.reachable_positions_per_scene:
event_reachable = self.controller.step({
"action" : "GetReachablePositions",
"gridSize" : self.config["initialize"]["gridSize"]
})
self.reachable_positions_per_scene[scene] = event_reachable.metadata["actionReturn"]
return self.reachable_positions_per_scene[scene]
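# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the queue fan-out /
# fan-in pattern that inference_worker()/inference() above rely on, reduced
# to a standalone example. The helper names (_square_worker,
# _demo_queue_pattern) are hypothetical; `mp` and `queue` are the modules
# already imported at the top of this file.
# ---------------------------------------------------------------------------
def _square_worker(in_queue, out_queue):
    # Drain the shared input queue until it stays empty for one second.
    while True:
        try:
            item = in_queue.get(timeout=1)
        except queue.Empty:
            break
        out_queue.put((item, item * item))


def _demo_queue_pattern(nworkers=2):
    send_q, recv_q = mp.Queue(), mp.Queue()
    demo_inputs = list(range(8))
    for item in demo_inputs:
        send_q.put(item)
    workers = [mp.Process(target=_square_worker, args=(send_q, recv_q))
               for _ in range(nworkers)]
    for w in workers:
        w.start()
    results = {}
    while len(results) < len(demo_inputs):
        try:
            key, value = recv_q.get(timeout=10)
            results[key] = value
        except queue.Empty:
            # Mirror inference(): only give up once every worker has exited.
            if all(not w.is_alive() for w in workers):
                raise RuntimeError("All workers exited but results are missing")
    for w in workers:
        w.join(timeout=2)
    return results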
|
py
|
1a5807890d320f7f6a32d2c058ee632d65dd105f
|
# -*- coding: utf-8 -*-
from rest_framework import status as http_status
import mock
from nose.tools import * # noqa
from framework.auth import Auth
from tests.base import OsfTestCase, get_default_metaschema
from osf_tests.factories import ProjectFactory
from .. import SHORT_NAME
from .. import settings
from .factories import make_binderhub
from .utils import BaseAddonTestCase
from website.util import api_url_for
from future.moves.urllib.parse import urlparse, parse_qs
class TestViews(BaseAddonTestCase, OsfTestCase):
def test_user_binderhubs(self):
new_binderhub_a = make_binderhub(
binderhub_url='https://testa.my.site',
binderhub_oauth_client_secret='MY_CUSTOM_SECRET_A',
)
url = self.project.api_url_for('{}_set_user_config'.format(SHORT_NAME))
res = self.app.put_json(url, {
'binderhubs': [new_binderhub_a],
}, auth=self.user.auth)
url = self.project.api_url_for('{}_get_user_config'.format(SHORT_NAME))
res = self.app.get(url, auth=self.user.auth)
binderhubs = res.json['binderhubs']
assert_equals(len(binderhubs), 1)
assert_equals(binderhubs[0]['binderhub_url'], 'https://testa.my.site')
assert_in('binderhub_oauth_client_secret', binderhubs[0])
new_binderhub_b = make_binderhub(
binderhub_url='https://testb.my.site',
binderhub_oauth_client_secret='MY_CUSTOM_SECRET_B',
)
url = self.project.api_url_for('{}_add_user_config'.format(SHORT_NAME))
res = self.app.post_json(url, {
'binderhub': new_binderhub_b,
}, auth=self.user.auth)
url = self.project.api_url_for('{}_get_user_config'.format(SHORT_NAME))
res = self.app.get(url, auth=self.user.auth)
binderhubs = res.json['binderhubs']
assert_equals(len(binderhubs), 2)
assert_equals(binderhubs[0]['binderhub_url'], 'https://testa.my.site')
assert_in('binderhub_oauth_client_secret', binderhubs[0])
assert_equals(binderhubs[1]['binderhub_url'], 'https://testb.my.site')
assert_in('binderhub_oauth_client_secret', binderhubs[1])
def test_binderhub_authorize(self):
url = self.project.api_url_for('{}_oauth_authorize'.format(SHORT_NAME),
serviceid='binderhub')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, http_status.HTTP_302_FOUND)
url = res.headers['Location']
parsed = urlparse(url)
params = parse_qs(parsed.query)
assert_equal(params['response_type'][0], 'code')
assert_equal(params['scope'][0], 'identity')
assert_equal(urlparse(params['redirect_uri'][0]).path, '/project/binderhub/callback')
def test_empty_binder_url(self):
self.node_settings.set_binder_url('')
self.node_settings.save()
url = self.project.api_url_for('{}_get_config'.format(SHORT_NAME))
res = self.app.get(url, auth=self.user.auth)
assert_equals(res.json['binder_url'], settings.DEFAULT_BINDER_URL)
def test_binder_url(self):
self.node_settings.set_binder_url('URL_1')
self.node_settings.save()
url = self.project.api_url_for('{}_get_config'.format(SHORT_NAME))
res = self.app.get(url, auth=self.user.auth)
assert_equals(res.json['binder_url'], 'URL_1')
def test_ember_empty_binder_url(self):
url = self.project.api_url_for('{}_set_config'.format(SHORT_NAME))
res = self.app.put_json(url, {
'binder_url': '',
'available_binderhubs': [],
}, auth=self.user.auth)
url = self.project.api_url_for('{}_get_config_ember'.format(SHORT_NAME))
res = self.app.get(url, auth=self.user.auth)
assert_equals(res.json['data']['id'], self.project._id)
assert_equals(res.json['data']['type'], 'binderhub-config')
binderhubs = res.json['data']['attributes']['binderhubs']
default_binderhub = [b for b in binderhubs if b['default']][0]
assert_equals(default_binderhub['url'], settings.DEFAULT_BINDER_URL)
assert_not_in('binderhub_oauth_client_secret', default_binderhub)
def test_ember_custom_binder_url(self):
new_binderhub = make_binderhub(
binderhub_url='https://testa.my.site',
binderhub_oauth_client_secret='MY_CUSTOM_SECRET_A',
)
url = self.project.api_url_for('{}_set_config'.format(SHORT_NAME))
res = self.app.put_json(url, {
'binder_url': 'https://testa.my.site',
'available_binderhubs': [new_binderhub],
}, auth=self.user.auth)
url = self.project.api_url_for('{}_get_config_ember'.format(SHORT_NAME))
res = self.app.get(url, auth=self.user.auth)
assert_equals(res.json['data']['id'], self.project._id)
assert_equals(res.json['data']['type'], 'binderhub-config')
binderhubs = res.json['data']['attributes']['binderhubs']
default_binderhub = [b for b in binderhubs if b['default']][0]
assert_equals(default_binderhub['url'], 'https://testa.my.site')
assert_not_in('binderhub_oauth_client_secret', default_binderhub)
|
py
|
1a5807abf8d39eb81aad0931d425f35a26031663
|
import math
import numpy as np
import torch
from envs.LQR import LQR
from utils import get_AB
torch.manual_seed(2021)
np.random.seed(2021)
learning_rate = 0.0003
gamma = 0.9
lmbda = 0.9
eps_clip = 0.2
K_epoch = 10
rollout_len = 3
buffer_size = 30
minibatch_size = 32
def PDcontrol(x, K):
u = K @ x
return u
def main(state_dim):
print_interval = 20
# create environment
# state_dim = 8
action_dim = 1
# A = np.array([[1.0]])
dt = 0.1
A, B = get_AB(state_dim, action_dim, dt)
sigma = 0.1
W = sigma * np.eye(state_dim)
# B = np.eye(2)
Q = np.eye(state_dim) * 10.0
R = np.eye(action_dim)
env = LQR(A, B, Q, R, W, state_dim)
P, K, op_cost, La = env.optimum()
# print('Optimal cost:{}; La: {}'.format(op_cost, La))
# print(f'P: {P};\n K : {K}')
return op_cost, La
# NOTE: everything below this early return is unreachable as written.
# print(A + B @ K)
# print(np.linalg.eigvals(A + B @ K))
sample_num = 1000
avg_score = 0.0
for n_epi in range(sample_num):
s = env.reset(factor=2.0)
score = 0.0
for i in range(1000):
for t in range(rollout_len):
a = PDcontrol(s, K)
s_prime, r, done, info = env.step(a)
s = s_prime
score += r
score /= 1000 * rollout_len
avg_score += score
if n_epi % print_interval == 0 and n_epi != 0:
print("# of episode :{}, avg score : {:.1f}".
format(n_epi, avg_score / print_interval))
avg_score = 0.0
if __name__ == '__main__':
for x in range(1, 7):
op_cost, La = main(2 * x)
print(f'State dim: {2 * x}; optimal cost: {op_cost}; La: {La}')
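# ---------------------------------------------------------------------------
# Hypothetical cross-check (not part of the original script): recomputing the
# quantities env.optimum() presumably returns. Assumptions: SciPy is
# available, env.optimum() solves the discrete-time algebraic Riccati
# equation, and W is the process-noise covariance, in which case the optimal
# average stage cost is trace(P @ W). This is a sketch, not the LQR class's
# actual implementation.
# ---------------------------------------------------------------------------
def lqr_reference(A, B, Q, R, W):
    from scipy.linalg import solve_discrete_are  # assumed dependency
    P = solve_discrete_are(A, B, Q, R)                    # Riccati solution
    K = -np.linalg.solve(R + B.T @ P @ B, B.T @ P @ A)    # feedback gain, u = K @ x
    avg_cost = float(np.trace(P @ W))                     # steady-state average cost
    return P, K, avg_cost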
|
py
|
1a580956b68b93ca1f22bb5d67d0c36ee363e5bd
|
from com.huawei.iotplatform.client.dto.CustomField import CustomField
from com.huawei.iotplatform.client.dto.DeviceInfoDTO import DeviceInfoDTO
from com.huawei.iotplatform.client.dto.DeviceInfoDTO2 import DeviceInfoDTO2
from com.huawei.iotplatform.client.dto.Location import Location
from com.huawei.iotplatform.client.dto.TagDTO2 import TagDTO2
class RegDirectDeviceInDTO(object):
customFields = CustomField
deviceInfo = DeviceInfoDTO
deviceInfo2 = DeviceInfoDTO2
location = Location
tags = TagDTO2
def __init__(self):
self.verifyCode = None
self.nodeId = None
self.endUserId = None
self.psk = None
self.timeout = None
self.deviceName = None
self.groupId = None
self.imsi = None
self.isSecure = None
self.productId = None
self.account = None
def getVerifyCode(self):
return self.verifyCode
def setVerifyCode(self, verifyCode):
self.verifyCode = verifyCode
def getNodeId(self):
return self.nodeId
def setNodeId(self, nodeId):
self.nodeId = nodeId
def getEndUserId(self):
return self.endUserId
def setEndUserId(self, endUserId):
self.endUserId = endUserId
def getPsk(self):
return self.psk
def setPsk(self, psk):
self.psk = psk
def getTimeout(self):
return self.timeout
def setTimeout(self, timeout):
self.timeout = timeout
def getDeviceName(self):
return self.deviceName
def setDeviceName(self, deviceName):
self.deviceName = deviceName
def getGroupId(self):
return self.groupId
def setGroupId(self, groupId):
self.groupId = groupId
def getImsi(self):
return self.imsi
def setImsi(self, imsi):
self.imsi = imsi
def getIsSecure(self):
return self.isSecure
def setIsSecure(self, isSecure):
self.isSecure = isSecure
def getProductId(self):
return self.productId
def setProductId(self, productId):
self.productId = productId
def getAccount(self):
return self.account
def setAccount(self, account):
self.account = account
def getCustomFields(self):
return self.customFields
def setCustomFields(self, customFields):
self.customFields = customFields
def getDeviceInfo(self):
return self.deviceInfo
def setDeviceInfo(self, deviceInfo):
self.deviceInfo = deviceInfo
def getDeviceInfo2(self):
return self.deviceInfo2
def setDeviceInfo2(self, deviceInfo2):
self.deviceInfo2 = deviceInfo2
def getLocation(self):
return self.location
def setLocation(self, location):
self.location = location
def getTags(self):
return self.tags
def setTags(self, tags):
self.tags = tags
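# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the generated SDK file): populating
# the DTO before handing it to the device-registration API call, which is
# intentionally omitted here. All field values are placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    dto = RegDirectDeviceInDTO()
    dto.setNodeId("node-0001")       # placeholder device node id
    dto.setVerifyCode("node-0001")   # placeholder verify code
    dto.setTimeout(300)              # placeholder timeout value
    print(dto.getNodeId(), dto.getVerifyCode(), dto.getTimeout())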
|
py
|
1a580a09cb421ba248ac1f1ac2ef73f8e8450b52
|
from distutils.core import setup
from os import path
import site
site_dir = site.getsitepackages()[0]
with open('requirements.txt', 'r') as f:
requirements = list(map(str.strip, f))
if path.exists('README.md'):
with open('README.md', encoding='utf-8') as f:
long_description = f.read()
else:
long_description = None
setup_kwargs = dict(
name='sk-torch',
version='0.1dev',
packages=['sktorch'],
provides=['sktorch'],
url='[email protected]:mattHawthorn/sk-torch.git',
license='MIT license',
author='Matt Hawthorn',
maintainer='Matt Hawthorn',
author_email='[email protected]',
description='A wrapper around pytorch module objects with a sklearn-like interface, allowing boilerplate-free '
'training of complex neural nets.',
long_description=long_description,
requires=requirements
)
if __name__ == "__main__":
try:
setup(**setup_kwargs)
except Exception as e:
print(e)
print("Failed to execute setup()")
exit(1)
exit(0)
|
py
|
1a580a113ee33592782577fbfe22d3d26e73c393
|
from collections import namedtuple
import contextlib
import itertools
import os
import pickle
import sys
from textwrap import dedent
import threading
import time
import unittest
from test import support
from test.support import script_helper
interpreters = support.import_module('_xxsubinterpreters')
##################################
# helpers
def powerset(*sets):
return itertools.chain.from_iterable(
itertools.combinations(sets, r)
for r in range(len(sets)+1))
def _captured_script(script):
r, w = os.pipe()
indented = script.replace('\n', '\n ')
wrapped = dedent(f"""
import contextlib
with open({w}, 'w') as spipe:
with contextlib.redirect_stdout(spipe):
{indented}
""")
return wrapped, open(r)
def _run_output(interp, request, shared=None):
script, rpipe = _captured_script(request)
with rpipe:
interpreters.run_string(interp, script, shared)
return rpipe.read()
@contextlib.contextmanager
def _running(interp):
r, w = os.pipe()
def run():
interpreters.run_string(interp, dedent(f"""
# wait for "signal"
with open({r}) as rpipe:
rpipe.read()
"""))
t = threading.Thread(target=run)
t.start()
yield
with open(w, 'w') as spipe:
spipe.write('done')
t.join()
#@contextmanager
#def run_threaded(id, source, **shared):
# def run():
# run_interp(id, source, **shared)
# t = threading.Thread(target=run)
# t.start()
# yield
# t.join()
def run_interp(id, source, **shared):
_run_interp(id, source, shared)
def _run_interp(id, source, shared, _mainns={}):
source = dedent(source)
main = interpreters.get_main()
if main == id:
if interpreters.get_current() != main:
raise RuntimeError
# XXX Run a func?
exec(source, _mainns)
else:
interpreters.run_string(id, source, shared)
def run_interp_threaded(id, source, **shared):
def run():
_run_interp(id, source, shared)
t = threading.Thread(target=run)
t.start()
t.join()
class Interpreter(namedtuple('Interpreter', 'name id')):
@classmethod
def from_raw(cls, raw):
if isinstance(raw, cls):
return raw
elif isinstance(raw, str):
return cls(raw)
else:
raise NotImplementedError
def __new__(cls, name=None, id=None):
main = interpreters.get_main()
if id == main:
if not name:
name = 'main'
elif name != 'main':
raise ValueError(
'name mismatch (expected "main", got "{}")'.format(name))
id = main
elif id is not None:
if not name:
name = 'interp'
elif name == 'main':
raise ValueError('name mismatch (unexpected "main")')
if not isinstance(id, interpreters.InterpreterID):
id = interpreters.InterpreterID(id)
elif not name or name == 'main':
name = 'main'
id = main
else:
id = interpreters.create()
self = super().__new__(cls, name, id)
return self
# XXX expect_channel_closed() is unnecessary once we improve exc propagation.
@contextlib.contextmanager
def expect_channel_closed():
try:
yield
except interpreters.ChannelClosedError:
pass
else:
assert False, 'channel not closed'
class ChannelAction(namedtuple('ChannelAction', 'action end interp')):
def __new__(cls, action, end=None, interp=None):
if not end:
end = 'both'
if not interp:
interp = 'main'
self = super().__new__(cls, action, end, interp)
return self
def __init__(self, *args, **kwargs):
if self.action == 'use':
if self.end not in ('same', 'opposite', 'send', 'recv'):
raise ValueError(self.end)
elif self.action in ('close', 'force-close'):
if self.end not in ('both', 'same', 'opposite', 'send', 'recv'):
raise ValueError(self.end)
else:
raise ValueError(self.action)
if self.interp not in ('main', 'same', 'other', 'extra'):
raise ValueError(self.interp)
def resolve_end(self, end):
if self.end == 'same':
return end
elif self.end == 'opposite':
return 'recv' if end == 'send' else 'send'
else:
return self.end
def resolve_interp(self, interp, other, extra):
if self.interp == 'same':
return interp
elif self.interp == 'other':
if other is None:
raise RuntimeError
return other
elif self.interp == 'extra':
if extra is None:
raise RuntimeError
return extra
elif self.interp == 'main':
if interp.name == 'main':
return interp
elif other and other.name == 'main':
return other
else:
raise RuntimeError
# Per __init__(), there aren't any others.
class ChannelState(namedtuple('ChannelState', 'pending closed')):
def __new__(cls, pending=0, *, closed=False):
self = super().__new__(cls, pending, closed)
return self
def incr(self):
return type(self)(self.pending + 1, closed=self.closed)
def decr(self):
return type(self)(self.pending - 1, closed=self.closed)
def close(self, *, force=True):
if self.closed:
if not force or self.pending == 0:
return self
return type(self)(0 if force else self.pending, closed=True)
def run_action(cid, action, end, state, *, hideclosed=True):
if state.closed:
if action == 'use' and end == 'recv' and state.pending:
expectfail = False
else:
expectfail = True
else:
expectfail = False
try:
result = _run_action(cid, action, end, state)
except interpreters.ChannelClosedError:
if not hideclosed and not expectfail:
raise
result = state.close()
else:
if expectfail:
raise ... # XXX
return result
def _run_action(cid, action, end, state):
if action == 'use':
if end == 'send':
interpreters.channel_send(cid, b'spam')
return state.incr()
elif end == 'recv':
if not state.pending:
try:
interpreters.channel_recv(cid)
except interpreters.ChannelEmptyError:
return state
else:
raise Exception('expected ChannelEmptyError')
else:
interpreters.channel_recv(cid)
return state.decr()
else:
raise ValueError(end)
elif action == 'close':
kwargs = {}
if end in ('recv', 'send'):
kwargs[end] = True
interpreters.channel_close(cid, **kwargs)
return state.close()
elif action == 'force-close':
kwargs = {
'force': True,
}
if end in ('recv', 'send'):
kwargs[end] = True
interpreters.channel_close(cid, **kwargs)
return state.close(force=True)
else:
raise ValueError(action)
def clean_up_interpreters():
for id in interpreters.list_all():
if id == 0: # main
continue
try:
interpreters.destroy(id)
except RuntimeError:
pass # already destroyed
def clean_up_channels():
for cid in interpreters.channel_list_all():
try:
interpreters.channel_destroy(cid)
except interpreters.ChannelNotFoundError:
pass # already destroyed
class TestBase(unittest.TestCase):
def tearDown(self):
clean_up_interpreters()
clean_up_channels()
##################################
# misc. tests
class IsShareableTests(unittest.TestCase):
def test_default_shareables(self):
shareables = [
# singletons
None,
# builtin objects
b'spam',
'spam',
10,
-10,
]
for obj in shareables:
with self.subTest(obj):
self.assertTrue(
interpreters.is_shareable(obj))
def test_not_shareable(self):
class Cheese:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class SubBytes(bytes):
"""A subclass of a shareable type."""
not_shareables = [
# singletons
True,
False,
NotImplemented,
...,
# builtin types and objects
type,
object,
object(),
Exception(),
100.0,
# user-defined types and objects
Cheese,
Cheese('Wensleydale'),
SubBytes(b'spam'),
]
for obj in not_shareables:
with self.subTest(repr(obj)):
self.assertFalse(
interpreters.is_shareable(obj))
class ShareableTypeTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.cid = interpreters.channel_create()
def tearDown(self):
interpreters.channel_destroy(self.cid)
super().tearDown()
def _assert_values(self, values):
for obj in values:
with self.subTest(obj):
interpreters.channel_send(self.cid, obj)
got = interpreters.channel_recv(self.cid)
self.assertEqual(got, obj)
self.assertIs(type(got), type(obj))
# XXX Check the following in the channel tests?
#self.assertIsNot(got, obj)
def test_singletons(self):
for obj in [None]:
with self.subTest(obj):
interpreters.channel_send(self.cid, obj)
got = interpreters.channel_recv(self.cid)
# XXX What about between interpreters?
self.assertIs(got, obj)
def test_types(self):
self._assert_values([
b'spam',
9999,
self.cid,
])
def test_bytes(self):
self._assert_values(i.to_bytes(2, 'little', signed=True)
for i in range(-1, 258))
def test_int(self):
self._assert_values(itertools.chain(range(-1, 258),
[sys.maxsize, -sys.maxsize - 1]))
def test_non_shareable_int(self):
ints = [
sys.maxsize + 1,
-sys.maxsize - 2,
2**1000,
]
for i in ints:
with self.subTest(i):
with self.assertRaises(OverflowError):
interpreters.channel_send(self.cid, i)
##################################
# interpreter tests
class ListAllTests(TestBase):
def test_initial(self):
main = interpreters.get_main()
ids = interpreters.list_all()
self.assertEqual(ids, [main])
def test_after_creating(self):
main = interpreters.get_main()
first = interpreters.create()
second = interpreters.create()
ids = interpreters.list_all()
self.assertEqual(ids, [main, first, second])
def test_after_destroying(self):
main = interpreters.get_main()
first = interpreters.create()
second = interpreters.create()
interpreters.destroy(first)
ids = interpreters.list_all()
self.assertEqual(ids, [main, second])
class GetCurrentTests(TestBase):
def test_main(self):
main = interpreters.get_main()
cur = interpreters.get_current()
self.assertEqual(cur, main)
self.assertIsInstance(cur, interpreters.InterpreterID)
def test_subinterpreter(self):
main = interpreters.get_main()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
cur = _interpreters.get_current()
print(cur)
assert isinstance(cur, _interpreters.InterpreterID)
"""))
cur = int(out.strip())
_, expected = interpreters.list_all()
self.assertEqual(cur, expected)
self.assertNotEqual(cur, main)
class GetMainTests(TestBase):
def test_from_main(self):
[expected] = interpreters.list_all()
main = interpreters.get_main()
self.assertEqual(main, expected)
self.assertIsInstance(main, interpreters.InterpreterID)
def test_from_subinterpreter(self):
[expected] = interpreters.list_all()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
main = _interpreters.get_main()
print(main)
assert isinstance(main, _interpreters.InterpreterID)
"""))
main = int(out.strip())
self.assertEqual(main, expected)
class IsRunningTests(TestBase):
def test_main(self):
main = interpreters.get_main()
self.assertTrue(interpreters.is_running(main))
def test_subinterpreter(self):
interp = interpreters.create()
self.assertFalse(interpreters.is_running(interp))
with _running(interp):
self.assertTrue(interpreters.is_running(interp))
self.assertFalse(interpreters.is_running(interp))
def test_from_subinterpreter(self):
interp = interpreters.create()
out = _run_output(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
if _interpreters.is_running({interp}):
print(True)
else:
print(False)
"""))
self.assertEqual(out.strip(), 'True')
def test_already_destroyed(self):
interp = interpreters.create()
interpreters.destroy(interp)
with self.assertRaises(RuntimeError):
interpreters.is_running(interp)
def test_does_not_exist(self):
with self.assertRaises(RuntimeError):
interpreters.is_running(1_000_000)
def test_bad_id(self):
with self.assertRaises(ValueError):
interpreters.is_running(-1)
class InterpreterIDTests(TestBase):
def test_with_int(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(int(id), 10)
def test_coerce_id(self):
class Int(str):
def __index__(self):
return 10
id = interpreters.InterpreterID(Int(), force=True)
self.assertEqual(int(id), 10)
def test_bad_id(self):
self.assertRaises(TypeError, interpreters.InterpreterID, object())
self.assertRaises(TypeError, interpreters.InterpreterID, 10.0)
self.assertRaises(TypeError, interpreters.InterpreterID, '10')
self.assertRaises(TypeError, interpreters.InterpreterID, b'10')
self.assertRaises(ValueError, interpreters.InterpreterID, -1)
self.assertRaises(OverflowError, interpreters.InterpreterID, 2**64)
def test_does_not_exist(self):
id = interpreters.channel_create()
with self.assertRaises(RuntimeError):
interpreters.InterpreterID(int(id) + 1) # unforced
def test_str(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(str(id), '10')
def test_repr(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(repr(id), 'InterpreterID(10)')
def test_equality(self):
id1 = interpreters.create()
id2 = interpreters.InterpreterID(int(id1))
id3 = interpreters.create()
self.assertTrue(id1 == id1)
self.assertTrue(id1 == id2)
self.assertTrue(id1 == int(id1))
self.assertTrue(int(id1) == id1)
self.assertTrue(id1 == float(int(id1)))
self.assertTrue(float(int(id1)) == id1)
self.assertFalse(id1 == float(int(id1)) + 0.1)
self.assertFalse(id1 == str(int(id1)))
self.assertFalse(id1 == 2**1000)
self.assertFalse(id1 == float('inf'))
self.assertFalse(id1 == 'spam')
self.assertFalse(id1 == id3)
self.assertFalse(id1 != id1)
self.assertFalse(id1 != id2)
self.assertTrue(id1 != id3)
class CreateTests(TestBase):
def test_in_main(self):
id = interpreters.create()
self.assertIsInstance(id, interpreters.InterpreterID)
self.assertIn(id, interpreters.list_all())
@unittest.skip('enable this test when working on pystate.c')
def test_unique_id(self):
seen = set()
for _ in range(100):
id = interpreters.create()
interpreters.destroy(id)
seen.add(id)
self.assertEqual(len(seen), 100)
def test_in_thread(self):
lock = threading.Lock()
id = None
def f():
nonlocal id
id = interpreters.create()
lock.acquire()
lock.release()
t = threading.Thread(target=f)
with lock:
t.start()
t.join()
self.assertIn(id, interpreters.list_all())
def test_in_subinterpreter(self):
main, = interpreters.list_all()
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
print(id)
assert isinstance(id, _interpreters.InterpreterID)
"""))
id2 = int(out.strip())
self.assertEqual(set(interpreters.list_all()), {main, id1, id2})
def test_in_threaded_subinterpreter(self):
main, = interpreters.list_all()
id1 = interpreters.create()
id2 = None
def f():
nonlocal id2
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
print(id)
"""))
id2 = int(out.strip())
t = threading.Thread(target=f)
t.start()
t.join()
self.assertEqual(set(interpreters.list_all()), {main, id1, id2})
def test_after_destroy_all(self):
before = set(interpreters.list_all())
# Create 3 subinterpreters.
ids = []
for _ in range(3):
id = interpreters.create()
ids.append(id)
# Now destroy them.
for id in ids:
interpreters.destroy(id)
# Finally, create another.
id = interpreters.create()
self.assertEqual(set(interpreters.list_all()), before | {id})
def test_after_destroy_some(self):
before = set(interpreters.list_all())
# Create 3 subinterpreters.
id1 = interpreters.create()
id2 = interpreters.create()
id3 = interpreters.create()
# Now destroy 2 of them.
interpreters.destroy(id1)
interpreters.destroy(id3)
# Finally, create another.
id = interpreters.create()
self.assertEqual(set(interpreters.list_all()), before | {id, id2})
class DestroyTests(TestBase):
def test_one(self):
id1 = interpreters.create()
id2 = interpreters.create()
id3 = interpreters.create()
self.assertIn(id2, interpreters.list_all())
interpreters.destroy(id2)
self.assertNotIn(id2, interpreters.list_all())
self.assertIn(id1, interpreters.list_all())
self.assertIn(id3, interpreters.list_all())
def test_all(self):
before = set(interpreters.list_all())
ids = set()
for _ in range(3):
id = interpreters.create()
ids.add(id)
self.assertEqual(set(interpreters.list_all()), before | ids)
for id in ids:
interpreters.destroy(id)
self.assertEqual(set(interpreters.list_all()), before)
def test_main(self):
main, = interpreters.list_all()
with self.assertRaises(RuntimeError):
interpreters.destroy(main)
def f():
with self.assertRaises(RuntimeError):
interpreters.destroy(main)
t = threading.Thread(target=f)
t.start()
t.join()
def test_already_destroyed(self):
id = interpreters.create()
interpreters.destroy(id)
with self.assertRaises(RuntimeError):
interpreters.destroy(id)
def test_does_not_exist(self):
with self.assertRaises(RuntimeError):
interpreters.destroy(1_000_000)
def test_bad_id(self):
with self.assertRaises(ValueError):
interpreters.destroy(-1)
def test_from_current(self):
main, = interpreters.list_all()
id = interpreters.create()
script = dedent(f"""
import _xxsubinterpreters as _interpreters
try:
_interpreters.destroy({id})
except RuntimeError:
pass
""")
interpreters.run_string(id, script)
self.assertEqual(set(interpreters.list_all()), {main, id})
def test_from_sibling(self):
main, = interpreters.list_all()
id1 = interpreters.create()
id2 = interpreters.create()
script = dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.destroy({id2})
""")
interpreters.run_string(id1, script)
self.assertEqual(set(interpreters.list_all()), {main, id1})
def test_from_other_thread(self):
id = interpreters.create()
def f():
interpreters.destroy(id)
t = threading.Thread(target=f)
t.start()
t.join()
def test_still_running(self):
main, = interpreters.list_all()
interp = interpreters.create()
with _running(interp):
self.assertTrue(interpreters.is_running(interp),
msg=f"Interp {interp} should be running before destruction.")
with self.assertRaises(RuntimeError,
msg=f"Should not be able to destroy interp {interp} while it's still running."):
interpreters.destroy(interp)
self.assertTrue(interpreters.is_running(interp))
class RunStringTests(TestBase):
SCRIPT = dedent("""
with open('{}', 'w') as out:
out.write('{}')
""")
FILENAME = 'spam'
def setUp(self):
super().setUp()
self.id = interpreters.create()
self._fs = None
def tearDown(self):
if self._fs is not None:
self._fs.close()
super().tearDown()
@property
def fs(self):
if self._fs is None:
self._fs = FSFixture(self)
return self._fs
def test_success(self):
script, file = _captured_script('print("it worked!", end="")')
with file:
interpreters.run_string(self.id, script)
out = file.read()
self.assertEqual(out, 'it worked!')
def test_in_thread(self):
script, file = _captured_script('print("it worked!", end="")')
with file:
def f():
interpreters.run_string(self.id, script)
t = threading.Thread(target=f)
t.start()
t.join()
out = file.read()
self.assertEqual(out, 'it worked!')
def test_create_thread(self):
script, file = _captured_script("""
import threading
def f():
print('it worked!', end='')
t = threading.Thread(target=f)
t.start()
t.join()
""")
with file:
interpreters.run_string(self.id, script)
out = file.read()
self.assertEqual(out, 'it worked!')
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
def test_fork(self):
import tempfile
with tempfile.NamedTemporaryFile('w+') as file:
file.write('')
file.flush()
expected = 'spam spam spam spam spam'
script = dedent(f"""
import os
try:
os.fork()
except RuntimeError:
with open('{file.name}', 'w') as out:
out.write('{expected}')
""")
interpreters.run_string(self.id, script)
file.seek(0)
content = file.read()
self.assertEqual(content, expected)
def test_already_running(self):
with _running(self.id):
with self.assertRaises(RuntimeError):
interpreters.run_string(self.id, 'print("spam")')
def test_does_not_exist(self):
id = 0
while id in interpreters.list_all():
id += 1
with self.assertRaises(RuntimeError):
interpreters.run_string(id, 'print("spam")')
def test_error_id(self):
with self.assertRaises(ValueError):
interpreters.run_string(-1, 'print("spam")')
def test_bad_id(self):
with self.assertRaises(TypeError):
interpreters.run_string('spam', 'print("spam")')
def test_bad_script(self):
with self.assertRaises(TypeError):
interpreters.run_string(self.id, 10)
def test_bytes_for_script(self):
with self.assertRaises(TypeError):
interpreters.run_string(self.id, b'print("spam")')
@contextlib.contextmanager
def assert_run_failed(self, exctype, msg=None):
with self.assertRaises(interpreters.RunFailedError) as caught:
yield
if msg is None:
self.assertEqual(str(caught.exception).split(':')[0],
str(exctype))
else:
self.assertEqual(str(caught.exception),
"{}: {}".format(exctype, msg))
def test_invalid_syntax(self):
with self.assert_run_failed(SyntaxError):
# missing close paren
interpreters.run_string(self.id, 'print("spam"')
def test_failure(self):
with self.assert_run_failed(Exception, 'spam'):
interpreters.run_string(self.id, 'raise Exception("spam")')
def test_SystemExit(self):
with self.assert_run_failed(SystemExit, '42'):
interpreters.run_string(self.id, 'raise SystemExit(42)')
def test_sys_exit(self):
with self.assert_run_failed(SystemExit):
interpreters.run_string(self.id, dedent("""
import sys
sys.exit()
"""))
with self.assert_run_failed(SystemExit, '42'):
interpreters.run_string(self.id, dedent("""
import sys
sys.exit(42)
"""))
def test_with_shared(self):
r, w = os.pipe()
shared = {
'spam': b'ham',
'eggs': b'-1',
'cheddar': None,
}
script = dedent(f"""
eggs = int(eggs)
spam = 42
result = spam + eggs
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script, shared)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['spam'], 42)
self.assertEqual(ns['eggs'], -1)
self.assertEqual(ns['result'], 41)
self.assertIsNone(ns['cheddar'])
def test_shared_overwrites(self):
interpreters.run_string(self.id, dedent("""
spam = 'eggs'
ns1 = dict(vars())
del ns1['__builtins__']
"""))
shared = {'spam': b'ham'}
script = dedent(f"""
ns2 = dict(vars())
del ns2['__builtins__']
""")
interpreters.run_string(self.id, script, shared)
r, w = os.pipe()
script = dedent(f"""
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['ns1']['spam'], 'eggs')
self.assertEqual(ns['ns2']['spam'], b'ham')
self.assertEqual(ns['spam'], b'ham')
def test_shared_overwrites_default_vars(self):
r, w = os.pipe()
shared = {'__name__': b'not __main__'}
script = dedent(f"""
spam = 42
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script, shared)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['__name__'], b'not __main__')
def test_main_reused(self):
r, w = os.pipe()
interpreters.run_string(self.id, dedent(f"""
spam = True
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
del ns, pickle, chan
"""))
with open(r, 'rb') as chan:
ns1 = pickle.load(chan)
r, w = os.pipe()
interpreters.run_string(self.id, dedent(f"""
eggs = False
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
"""))
with open(r, 'rb') as chan:
ns2 = pickle.load(chan)
self.assertIn('spam', ns1)
self.assertNotIn('eggs', ns1)
self.assertIn('eggs', ns2)
self.assertIn('spam', ns2)
def test_execution_namespace_is_main(self):
r, w = os.pipe()
script = dedent(f"""
spam = 42
ns = dict(vars())
ns['__builtins__'] = str(ns['__builtins__'])
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
ns.pop('__builtins__')
ns.pop('__loader__')
self.assertEqual(ns, {
'__name__': '__main__',
'__annotations__': {},
'__doc__': None,
'__package__': None,
'__spec__': None,
'spam': 42,
})
# XXX Fix this test!
@unittest.skip('blocking forever')
def test_still_running_at_exit(self):
script = dedent(f"""
from textwrap import dedent
import threading
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
def f():
_interpreters.run_string(id, dedent('''
import time
# Give plenty of time for the main interpreter to finish.
time.sleep(1_000_000)
'''))
t = threading.Thread(target=f)
t.start()
""")
with support.temp_dir() as dirname:
filename = script_helper.make_script(dirname, 'interp', script)
with script_helper.spawn_python(filename) as proc:
retcode = proc.wait()
self.assertEqual(retcode, 0)
##################################
# channel tests
class ChannelIDTests(TestBase):
def test_default_kwargs(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(int(cid), 10)
self.assertEqual(cid.end, 'both')
def test_with_kwargs(self):
cid = interpreters._channel_id(10, send=True, force=True)
self.assertEqual(cid.end, 'send')
cid = interpreters._channel_id(10, send=True, recv=False, force=True)
self.assertEqual(cid.end, 'send')
cid = interpreters._channel_id(10, recv=True, force=True)
self.assertEqual(cid.end, 'recv')
cid = interpreters._channel_id(10, recv=True, send=False, force=True)
self.assertEqual(cid.end, 'recv')
cid = interpreters._channel_id(10, send=True, recv=True, force=True)
self.assertEqual(cid.end, 'both')
def test_coerce_id(self):
class Int(str):
def __index__(self):
return 10
cid = interpreters._channel_id(Int(), force=True)
self.assertEqual(int(cid), 10)
def test_bad_id(self):
self.assertRaises(TypeError, interpreters._channel_id, object())
self.assertRaises(TypeError, interpreters._channel_id, 10.0)
self.assertRaises(TypeError, interpreters._channel_id, '10')
self.assertRaises(TypeError, interpreters._channel_id, b'10')
self.assertRaises(ValueError, interpreters._channel_id, -1)
self.assertRaises(OverflowError, interpreters._channel_id, 2**64)
def test_bad_kwargs(self):
with self.assertRaises(ValueError):
interpreters._channel_id(10, send=False, recv=False)
def test_does_not_exist(self):
cid = interpreters.channel_create()
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters._channel_id(int(cid) + 1) # unforced
def test_str(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(str(cid), '10')
def test_repr(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(repr(cid), 'ChannelID(10)')
cid = interpreters._channel_id(10, send=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10, send=True)')
cid = interpreters._channel_id(10, recv=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10, recv=True)')
cid = interpreters._channel_id(10, send=True, recv=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10)')
def test_equality(self):
cid1 = interpreters.channel_create()
cid2 = interpreters._channel_id(int(cid1))
cid3 = interpreters.channel_create()
self.assertTrue(cid1 == cid1)
self.assertTrue(cid1 == cid2)
self.assertTrue(cid1 == int(cid1))
self.assertTrue(int(cid1) == cid1)
self.assertTrue(cid1 == float(int(cid1)))
self.assertTrue(float(int(cid1)) == cid1)
self.assertFalse(cid1 == float(int(cid1)) + 0.1)
self.assertFalse(cid1 == str(int(cid1)))
self.assertFalse(cid1 == 2**1000)
self.assertFalse(cid1 == float('inf'))
self.assertFalse(cid1 == 'spam')
self.assertFalse(cid1 == cid3)
self.assertFalse(cid1 != cid1)
self.assertFalse(cid1 != cid2)
self.assertTrue(cid1 != cid3)
class ChannelTests(TestBase):
def test_create_cid(self):
cid = interpreters.channel_create()
self.assertIsInstance(cid, interpreters.ChannelID)
def test_sequential_ids(self):
before = interpreters.channel_list_all()
id1 = interpreters.channel_create()
id2 = interpreters.channel_create()
id3 = interpreters.channel_create()
after = interpreters.channel_list_all()
self.assertEqual(id2, int(id1) + 1)
self.assertEqual(id3, int(id2) + 1)
self.assertEqual(set(after) - set(before), {id1, id2, id3})
def test_ids_global(self):
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
print(cid)
"""))
cid1 = int(out.strip())
id2 = interpreters.create()
out = _run_output(id2, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
print(cid)
"""))
cid2 = int(out.strip())
self.assertEqual(cid2, int(cid1) + 1)
####################
def test_send_recv_main(self):
cid = interpreters.channel_create()
orig = b'spam'
interpreters.channel_send(cid, orig)
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, orig)
self.assertIsNot(obj, orig)
def test_send_recv_same_interpreter(self):
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
orig = b'spam'
_interpreters.channel_send(cid, orig)
obj = _interpreters.channel_recv(cid)
assert obj is not orig
assert obj == orig
"""))
def test_send_recv_different_interpreters(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
out = _run_output(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_send_recv_different_threads(self):
cid = interpreters.channel_create()
def f():
while True:
try:
obj = interpreters.channel_recv(cid)
break
except interpreters.ChannelEmptyError:
time.sleep(0.1)
interpreters.channel_send(cid, obj)
t = threading.Thread(target=f)
t.start()
interpreters.channel_send(cid, b'spam')
t.join()
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_send_recv_different_interpreters_and_threads(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
out = None
def f():
nonlocal out
out = _run_output(id1, dedent(f"""
import time
import _xxsubinterpreters as _interpreters
while True:
try:
obj = _interpreters.channel_recv({cid})
break
except _interpreters.ChannelEmptyError:
time.sleep(0.1)
assert(obj == b'spam')
_interpreters.channel_send({cid}, b'eggs')
"""))
t = threading.Thread(target=f)
t.start()
interpreters.channel_send(cid, b'spam')
t.join()
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'eggs')
def test_send_not_found(self):
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters.channel_send(10, b'spam')
def test_recv_not_found(self):
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters.channel_recv(10)
def test_recv_empty(self):
cid = interpreters.channel_create()
with self.assertRaises(interpreters.ChannelEmptyError):
interpreters.channel_recv(cid)
def test_run_string_arg_unresolved(self):
cid = interpreters.channel_create()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
print(cid.end)
_interpreters.channel_send(cid, b'spam')
"""),
dict(cid=cid.send))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
self.assertEqual(out.strip(), 'send')
# XXX For now there is no high-level channel into which the
# sent channel ID can be converted...
# Note: this test caused crashes on some buildbots (bpo-33615).
@unittest.skip('disabled until high-level channels exist')
def test_run_string_arg_resolved(self):
cid = interpreters.channel_create()
cid = interpreters._channel_id(cid, _resolve=True)
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
print(chan.id.end)
_interpreters.channel_send(chan.id, b'spam')
"""),
dict(chan=cid.send))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
self.assertEqual(out.strip(), 'send')
# close
def test_close_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_multiple_users(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
id2 = interpreters.create()
interpreters.run_string(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
interpreters.run_string(id2, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_recv({cid})
"""))
interpreters.channel_close(cid)
with self.assertRaises(interpreters.RunFailedError) as cm:
interpreters.run_string(id1, dedent(f"""
_interpreters.channel_send({cid}, b'spam')
"""))
self.assertIn('ChannelClosedError', str(cm.exception))
with self.assertRaises(interpreters.RunFailedError) as cm:
interpreters.run_string(id2, dedent(f"""
_interpreters.channel_send({cid}, b'spam')
"""))
self.assertIn('ChannelClosedError', str(cm.exception))
def test_close_multiple_times(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(cid)
def test_close_empty(self):
tests = [
(False, False),
(True, False),
(False, True),
(True, True),
]
for send, recv in tests:
with self.subTest((send, recv)):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid, send=send, recv=recv)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_defaults_with_unused_items(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
def test_close_recv_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid, recv=True)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
interpreters.channel_close(cid, recv=True)
def test_close_send_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_both_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid, recv=True, send=True)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
interpreters.channel_close(cid, recv=True)
def test_close_recv_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, recv=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_send_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_both_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True, recv=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_never_used(self):
cid = interpreters.channel_create()
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_by_unassociated_interp(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_close({cid}, force=True)
"""))
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(cid)
def test_close_used_multiple_times_by_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
class ChannelReleaseTests(TestBase):
# XXX Add more test coverage a la the tests for close().
"""
- main / interp / other
- run in: current thread / new thread / other thread / different threads
- end / opposite
- force / no force
- used / not used (associated / not associated)
- empty / emptied / never emptied / partly emptied
- closed / not closed
- released / not released
- creator (interp) / other
- associated interpreter not running
- associated interpreter destroyed
"""
"""
use
pre-release
release
after
check
"""
"""
release in: main, interp1
creator: same, other (incl. interp2)
use: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
pre-release: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all
pre-release forced: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all
release: same
release forced: same
use after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
release after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
check released: send/recv for same/other(incl. interp2)
check closed: send/recv for same/other(incl. interp2)
"""
def test_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_multiple_users(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
id2 = interpreters.create()
interpreters.run_string(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
out = _run_output(id2, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
_interpreters.channel_release({cid})
print(repr(obj))
"""))
interpreters.run_string(id1, dedent(f"""
_interpreters.channel_release({cid})
"""))
self.assertEqual(out.strip(), "b'spam'")
def test_no_kwargs(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_multiple_times(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_release(cid, send=True, recv=True)
def test_with_unused_items(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_never_used(self):
cid = interpreters.channel_create()
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_by_unassociated_interp(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_release({cid})
"""))
obj = interpreters.channel_recv(cid)
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
self.assertEqual(obj, b'spam')
def test_close_if_unassociated(self):
# XXX Something's not right with this test...
cid = interpreters.channel_create()
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_send({cid}, b'spam')
_interpreters.channel_release({cid})
"""))
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_partially(self):
# XXX Is partial close too weird/confusing?
cid = interpreters.channel_create()
interpreters.channel_send(cid, None)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'spam')
interpreters.channel_release(cid, send=True)
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_used_multiple_times_by_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
class ChannelCloseFixture(namedtuple('ChannelCloseFixture',
'end interp other extra creator')):
# Set this to True to avoid creating interpreters, e.g. when
# scanning through test permutations without running them.
QUICK = False
def __new__(cls, end, interp, other, extra, creator):
assert end in ('send', 'recv')
if cls.QUICK:
known = {}
else:
interp = Interpreter.from_raw(interp)
other = Interpreter.from_raw(other)
extra = Interpreter.from_raw(extra)
known = {
interp.name: interp,
other.name: other,
extra.name: extra,
}
if not creator:
creator = 'same'
self = super().__new__(cls, end, interp, other, extra, creator)
self._prepped = set()
self._state = ChannelState()
self._known = known
return self
@property
def state(self):
return self._state
@property
def cid(self):
try:
return self._cid
except AttributeError:
creator = self._get_interpreter(self.creator)
self._cid = self._new_channel(creator)
return self._cid
def get_interpreter(self, interp):
interp = self._get_interpreter(interp)
self._prep_interpreter(interp)
return interp
def expect_closed_error(self, end=None):
if end is None:
end = self.end
if end == 'recv' and self.state.closed == 'send':
return False
return bool(self.state.closed)
def prep_interpreter(self, interp):
self._prep_interpreter(interp)
def record_action(self, action, result):
self._state = result
def clean_up(self):
clean_up_interpreters()
clean_up_channels()
# internal methods
def _new_channel(self, creator):
if creator.name == 'main':
return interpreters.channel_create()
else:
ch = interpreters.channel_create()
run_interp(creator.id, f"""
import _xxsubinterpreters
cid = _xxsubinterpreters.channel_create()
# We purposefully send back an int to avoid tying the
# channel to the other interpreter.
_xxsubinterpreters.channel_send({ch}, int(cid))
del _xxsubinterpreters
""")
self._cid = interpreters.channel_recv(ch)
return self._cid
def _get_interpreter(self, interp):
if interp in ('same', 'interp'):
return self.interp
elif interp == 'other':
return self.other
elif interp == 'extra':
return self.extra
else:
name = interp
try:
interp = self._known[name]
except KeyError:
interp = self._known[name] = Interpreter(name)
return interp
def _prep_interpreter(self, interp):
if interp.id in self._prepped:
return
self._prepped.add(interp.id)
if interp.name == 'main':
return
run_interp(interp.id, f"""
import _xxsubinterpreters as interpreters
import test.test__xxsubinterpreters as helpers
ChannelState = helpers.ChannelState
try:
cid
except NameError:
cid = interpreters._channel_id({self.cid})
""")
@unittest.skip('these tests take several hours to run')
class ExhaustiveChannelTests(TestBase):
"""
- main / interp / other
- run in: current thread / new thread / other thread / different threads
- end / opposite
- force / no force
- used / not used (associated / not associated)
- empty / emptied / never emptied / partly emptied
- closed / not closed
- released / not released
- creator (interp) / other
- associated interpreter not running
- associated interpreter destroyed
- close after unbound
"""
"""
use
pre-close
close
after
check
"""
"""
close in: main, interp1
creator: same, other, extra
use: None,send,recv,send/recv in None,same,other,same+other,all
pre-close: None,send,recv in None,same,other,same+other,all
pre-close forced: None,send,recv in None,same,other,same+other,all
close: same
close forced: same
use after: None,send,recv,send/recv in None,same,other,extra,same+other,all
close after: None,send,recv,send/recv in None,same,other,extra,same+other,all
check closed: send/recv for same/other(incl. interp2)
"""
def iter_action_sets(self):
# - used / not used (associated / not associated)
# - empty / emptied / never emptied / partly emptied
# - closed / not closed
# - released / not released
# never used
yield []
# only pre-closed (and possible used after)
for closeactions in self._iter_close_action_sets('same', 'other'):
yield closeactions
for postactions in self._iter_post_close_action_sets():
yield closeactions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
yield closeactions
for postactions in self._iter_post_close_action_sets():
yield closeactions + postactions
# used
for useactions in self._iter_use_action_sets('same', 'other'):
yield useactions
for closeactions in self._iter_close_action_sets('same', 'other'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for useactions in self._iter_use_action_sets('other', 'extra'):
yield useactions
for closeactions in self._iter_close_action_sets('same', 'other'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
def _iter_use_action_sets(self, interp1, interp2):
interps = (interp1, interp2)
# only recv end used
yield [
ChannelAction('use', 'recv', interp1),
]
yield [
ChannelAction('use', 'recv', interp2),
]
yield [
ChannelAction('use', 'recv', interp1),
ChannelAction('use', 'recv', interp2),
]
# never emptied
yield [
ChannelAction('use', 'send', interp1),
]
yield [
ChannelAction('use', 'send', interp2),
]
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
]
# partially emptied
for interp1 in interps:
for interp2 in interps:
for interp3 in interps:
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
ChannelAction('use', 'recv', interp3),
]
# fully emptied
for interp1 in interps:
for interp2 in interps:
for interp3 in interps:
for interp4 in interps:
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
ChannelAction('use', 'recv', interp3),
ChannelAction('use', 'recv', interp4),
]
def _iter_close_action_sets(self, interp1, interp2):
ends = ('recv', 'send')
interps = (interp1, interp2)
for force in (True, False):
op = 'force-close' if force else 'close'
for interp in interps:
for end in ends:
yield [
ChannelAction(op, end, interp),
]
for recvop in ('close', 'force-close'):
for sendop in ('close', 'force-close'):
for recv in interps:
for send in interps:
yield [
ChannelAction(recvop, 'recv', recv),
ChannelAction(sendop, 'send', send),
]
def _iter_post_close_action_sets(self):
for interp in ('same', 'extra', 'other'):
yield [
ChannelAction('use', 'recv', interp),
]
yield [
ChannelAction('use', 'send', interp),
]
def run_actions(self, fix, actions):
for action in actions:
self.run_action(fix, action)
def run_action(self, fix, action, *, hideclosed=True):
end = action.resolve_end(fix.end)
interp = action.resolve_interp(fix.interp, fix.other, fix.extra)
fix.prep_interpreter(interp)
if interp.name == 'main':
result = run_action(
fix.cid,
action.action,
end,
fix.state,
hideclosed=hideclosed,
)
fix.record_action(action, result)
else:
_cid = interpreters.channel_create()
run_interp(interp.id, f"""
result = helpers.run_action(
{fix.cid},
{repr(action.action)},
{repr(end)},
{repr(fix.state)},
hideclosed={hideclosed},
)
interpreters.channel_send({_cid}, result.pending.to_bytes(1, 'little'))
interpreters.channel_send({_cid}, b'X' if result.closed else b'')
""")
result = ChannelState(
pending=int.from_bytes(interpreters.channel_recv(_cid), 'little'),
closed=bool(interpreters.channel_recv(_cid)),
)
fix.record_action(action, result)
def iter_fixtures(self):
# XXX threads?
interpreters = [
('main', 'interp', 'extra'),
('interp', 'main', 'extra'),
('interp1', 'interp2', 'extra'),
('interp1', 'interp2', 'main'),
]
for interp, other, extra in interpreters:
for creator in ('same', 'other', 'creator'):
for end in ('send', 'recv'):
yield ChannelCloseFixture(end, interp, other, extra, creator)
def _close(self, fix, *, force):
op = 'force-close' if force else 'close'
close = ChannelAction(op, fix.end, 'same')
if not fix.expect_closed_error():
self.run_action(fix, close, hideclosed=False)
else:
with self.assertRaises(interpreters.ChannelClosedError):
self.run_action(fix, close, hideclosed=False)
def _assert_closed_in_interp(self, fix, interp=None):
if interp is None or interp.name == 'main':
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(fix.cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(fix.cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(fix.cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(fix.cid, force=True)
else:
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_recv(cid)
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_send(cid, b'spam')
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_close(cid)
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_close(cid, force=True)
""")
def _assert_closed(self, fix):
self.assertTrue(fix.state.closed)
for _ in range(fix.state.pending):
interpreters.channel_recv(fix.cid)
self._assert_closed_in_interp(fix)
for interp in ('same', 'other'):
interp = fix.get_interpreter(interp)
if interp.name == 'main':
continue
self._assert_closed_in_interp(fix, interp)
interp = fix.get_interpreter('fresh')
self._assert_closed_in_interp(fix, interp)
def _iter_close_tests(self, verbose=False):
i = 0
for actions in self.iter_action_sets():
print()
for fix in self.iter_fixtures():
i += 1
if i > 1000:
return
if verbose:
if (i - 1) % 6 == 0:
print()
print(i, fix, '({} actions)'.format(len(actions)))
else:
if (i - 1) % 6 == 0:
print(' ', end='')
print('.', end=''); sys.stdout.flush()
yield i, fix, actions
if verbose:
print('---')
print()
# This is useful for scanning through the possible tests.
def _skim_close_tests(self):
ChannelCloseFixture.QUICK = True
for i, fix, actions in self._iter_close_tests():
pass
def test_close(self):
for i, fix, actions in self._iter_close_tests():
with self.subTest('{} {} {}'.format(i, fix, actions)):
fix.prep_interpreter(fix.interp)
self.run_actions(fix, actions)
self._close(fix, force=False)
self._assert_closed(fix)
# XXX Things slow down if we have too many interpreters.
fix.clean_up()
def test_force_close(self):
for i, fix, actions in self._iter_close_tests():
with self.subTest('{} {} {}'.format(i, fix, actions)):
fix.prep_interpreter(fix.interp)
self.run_actions(fix, actions)
self._close(fix, force=True)
self._assert_closed(fix)
# XXX Things slow down if we have too many interpreters.
fix.clean_up()
if __name__ == '__main__':
unittest.main()
|
py
|
1a580a67cac9d9b946c8d83ffdd9f176f782d3dd
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Copyright (c) DeFi Blockchain Developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""Test ICX Orderbook."""
from test_framework.test_framework import DefiTestFramework
from test_framework.authproxy import JSONRPCException
from test_framework.util import assert_equal
from decimal import Decimal
class ICXOrderbookErrorTest (DefiTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [
['-txnotokens=0', '-amkheight=50', '-bayfrontheight=50', '-eunosheight=50', '-eunospayaheight=50', '-txindex=1'],
['-txnotokens=0', '-amkheight=50', '-bayfrontheight=50', '-eunosheight=50', '-eunospayaheight=50', '-txindex=1']]
def run_test(self):
assert_equal(len(self.nodes[0].listtokens()), 1) # only one token == DFI
print("Generating initial chain...")
self.nodes[0].generate(25)
self.sync_blocks()
self.nodes[1].generate(101)
self.sync_blocks()
self.nodes[1].createtoken({
"symbol": "BTC",
"name": "BTC token",
"isDAT": True,
"collateralAddress": self.nodes[1].get_genesis_keys().ownerAuthAddress
})
self.nodes[1].generate(1)
self.sync_blocks()
symbolDFI = "DFI"
symbolBTC = "BTC"
self.nodes[1].minttokens("2@" + symbolBTC)
self.nodes[1].generate(1)
self.sync_blocks()
idDFI = list(self.nodes[0].gettoken(symbolDFI).keys())[0]
idBTC = list(self.nodes[0].gettoken(symbolBTC).keys())[0]
accountDFI = self.nodes[0].get_genesis_keys().ownerAuthAddress
accountBTC = self.nodes[1].get_genesis_keys().ownerAuthAddress
self.nodes[0].utxostoaccount({accountDFI: "101@" + symbolDFI})
self.nodes[0].generate(1)
self.sync_blocks()
# initialDFI = self.nodes[0].getaccount(accountDFI, {}, True)[idDFI]
# initialBTC = self.nodes[1].getaccount(accountBTC, {}, True)[idBTC]
# print("Initial DFI:", initialDFI, ", id", idDFI)
# print("Initial BTC:", initialBTC, ", id", idBTC)
poolOwner = self.nodes[0].getnewaddress("", "legacy")
# transfer 1 BTC to accountDFI
self.nodes[1].accounttoaccount(accountBTC, {accountDFI: "1@" + symbolBTC})
self.nodes[1].generate(1)
self.sync_blocks()
# create pool
self.nodes[0].createpoolpair({
"tokenA": symbolDFI,
"tokenB": symbolBTC,
"commission": 1,
"status": True,
"ownerAddress": poolOwner,
"pairSymbol": "DFIBTC",
}, [])
self.nodes[0].generate(1)
# check token ids
pool = self.nodes[0].getpoolpair("DFIBTC")
idDFIBTC = list(self.nodes[0].gettoken("DFIBTC").keys())[0]
assert(pool[idDFIBTC]['idTokenA'] == idDFI)
assert(pool[idDFIBTC]['idTokenB'] == idBTC)
# add pool liquidity
self.nodes[0].addpoolliquidity({
accountDFI: ["100@" + symbolDFI, "1@" + symbolBTC]
}, accountDFI, [])
self.nodes[0].generate(1)
self.nodes[0].setgov({"ICX_TAKERFEE_PER_BTC":Decimal('0.001')})
self.nodes[0].generate(1)
result = self.nodes[0].getgov("ICX_TAKERFEE_PER_BTC")
assert_equal(result["ICX_TAKERFEE_PER_BTC"], Decimal('0.001'))
# DFI/BTC scenario
# nonexistent token for create order
try:
self.nodes[0].icx_createorder({
'tokenFrom': "DOGE",
'chainTo': "BTC",
'ownerAddress': accountDFI,
'receivePubkey': '037f9563f30c609b19fd435a19b8bde7d6db703012ba1aba72e9f42a87366d1941',
'amountFrom': 1,
'orderPrice':0.01})
except JSONRPCException as e:
errorString = e.error['message']
assert("Token DOGE does not exist!" in errorString)
# wrong chain for create order
try:
self.nodes[0].icx_createorder({
'tokenFrom': idDFI,
'chainTo': "LTC",
'ownerAddress': accountDFI,
'receivePubkey': '037f9563f30c609b19fd435a19b8bde7d6db703012ba1aba72e9f42a87366d1941',
'amountFrom': 1,
'orderPrice':0.01})
except JSONRPCException as e:
errorString = e.error['message']
assert("Invalid parameters, argument \"chainTo\" must be \"BTC\" if \"tokenFrom\" specified" in errorString)
# wrong address for DFI for create order
try:
self.nodes[0].icx_createorder({
'tokenFrom': idDFI,
'chainTo': "BTC",
'ownerAddress': accountBTC,
'receivePubkey': '037f9563f30c609b19fd435a19b8bde7d6db703012ba1aba72e9f42a87366d1941',
'amountFrom': 1,
'orderPrice':0.01})
except JSONRPCException as e:
errorString = e.error['message']
assert("Address ("+accountBTC+") is not owned by the wallet" in errorString)
# invalid receivePubkey for create order
try:
self.nodes[0].icx_createorder({
'tokenFrom': idDFI,
'chainTo': "BTC",
'ownerAddress': accountDFI,
'receivePubkey': '000000000000000000000000000000000000000000000000000000000000000000',
'amountFrom': 1,
'orderPrice':0.01})
except JSONRPCException as e:
errorString = e.error['message']
assert("Invalid public key:" in errorString)
# not enough balance of DFI for create order
try:
self.nodes[0].icx_createorder({
'tokenFrom': idDFI,
'chainTo': "BTC",
'ownerAddress': accountDFI,
'receivePubkey': '037f9563f30c609b19fd435a19b8bde7d6db703012ba1aba72e9f42a87366d1941',
'amountFrom': 1500,
'orderPrice':0.01})
except JSONRPCException as e:
errorString = e.error['message']
assert("Not enough balance for Token DFI on address "+accountDFI in errorString)
orderTx = self.nodes[0].icx_createorder({
'tokenFrom': idDFI,
'chainTo': "BTC",
'ownerAddress': accountDFI,
'receivePubkey': '037f9563f30c609b19fd435a19b8bde7d6db703012ba1aba72e9f42a87366d1941',
'amountFrom': 1,
'orderPrice':0.01})["txid"]
self.nodes[0].generate(1)
self.sync_blocks()
# nonexistent orderTx
try:
self.nodes[1].icx_makeoffer({
'orderTx': "76432beb2a667efe4858b4e1ec93979b621c51c76abaab2434892655dd152e3d",
'amount': 0.10,
'ownerAddress': accountBTC})
except JSONRPCException as e:
errorString = e.error['message']
assert("orderTx (76432beb2a667efe4858b4e1ec93979b621c51c76abaab2434892655dd152e3d) does not exist" in errorString)
# invalid ownerAddress
try:
self.nodes[1].icx_makeoffer({
'orderTx': orderTx,
'amount': 0.01,
'ownerAddress': accountDFI})
except JSONRPCException as e:
errorString = e.error['message']
assert("Address ("+accountDFI+") is not owned by the wallet" in errorString)
# not enough DFI for takerFee
try:
self.nodes[1].icx_makeoffer({
'orderTx': orderTx,
'amount': 0.01,
'ownerAddress': accountBTC})
except JSONRPCException as e:
errorString = e.error['message']
assert("amount 0.00000000 is less than 0.00100000" in errorString)
self.nodes[1].utxostoaccount({accountBTC: "0.001@" + symbolDFI})
self.nodes[1].generate(1)
offerTx = self.nodes[1].icx_makeoffer({
'orderTx': orderTx,
'amount': 0.01,
'ownerAddress': accountBTC})["txid"]
self.nodes[1].generate(1)
self.sync_blocks()
# ext htlc cannot be first htlc
try:
self.nodes[1].icx_submitexthtlc({
'offerTx': offerTx,
'amount': 0.01,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220',
'htlcScriptAddress': '13sJQ9wBWh8ssihHUgAaCmNWJbBAG5Hr9N',
'ownerPubkey': '036494e7c9467c8c7ff3bf29e841907fb0fa24241866569944ea422479ec0e6252',
'timeout': 24})
except JSONRPCException as e:
errorString = e.error['message']
assert("offer ("+ offerTx + ") needs to have dfc htlc submitted first, but no dfc htlc found!" in errorString)
# not enough DFI for takerFee
try:
self.nodes[0].icx_submitdfchtlc({
'offerTx': offerTx,
'amount': 1,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220'})
except JSONRPCException as e:
errorString = e.error['message']
assert("amount 0.00000000 is less than 0.00100000" in errorString)
self.nodes[0].utxostoaccount({accountDFI: "0.001@" + symbolDFI})
self.nodes[0].generate(1)
# wrong amount
try:
self.nodes[0].icx_submitdfchtlc({
'offerTx': offerTx,
'amount': 2,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220'})
except JSONRPCException as e:
errorString = e.error['message']
assert("amount must be lower or equal the offer one" in errorString)
# timeout less than minimum
try:
self.nodes[0].icx_submitdfchtlc({
'offerTx': offerTx,
'amount': 1,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220',
'timeout': 1439})
except JSONRPCException as e:
errorString = e.error['message']
assert("timeout must be greater than 1439" in errorString)
dfchtlcTx = self.nodes[0].icx_submitdfchtlc({
'offerTx': offerTx,
'amount': 1,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220'})["txid"]
self.nodes[0].generate(1)
self.sync_blocks()
# dfc htlc already submitted
try:
self.nodes[0].icx_submitdfchtlc({
'offerTx': offerTx,
'amount': 1,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220'})
except JSONRPCException as e:
errorString = e.error['message']
assert("dfc htlc already submitted" in errorString)
# less amount
try:
self.nodes[1].icx_submitexthtlc({
'offerTx': offerTx,
'amount': 0.001,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220',
'htlcScriptAddress': '13sJQ9wBWh8ssihHUgAaCmNWJbBAG5Hr9N',
'ownerPubkey': '036494e7c9467c8c7ff3bf29e841907fb0fa24241866569944ea422479ec0e6252',
'timeout': 24})
except JSONRPCException as e:
errorString = e.error['message']
assert("amount must be equal to calculated dfchtlc amount" in errorString)
# more amount
try:
self.nodes[1].icx_submitexthtlc({
'offerTx': offerTx,
'amount': 0.1,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220',
'htlcScriptAddress': '13sJQ9wBWh8ssihHUgAaCmNWJbBAG5Hr9N',
'ownerPubkey': '036494e7c9467c8c7ff3bf29e841907fb0fa24241866569944ea422479ec0e6252',
'timeout': 24})
except JSONRPCException as e:
errorString = e.error['message']
assert("amount must be equal to calculated dfchtlc amount" in errorString)
# different hash
try:
self.nodes[1].icx_submitexthtlc({
'offerTx': offerTx,
'amount': 0.01,
'hash': '957fc0fd643f605b2938e0000000029fd70bd35b2162a21d978c41e5241a5220',
'htlcScriptAddress': '13sJQ9wBWh8ssihHUgAaCmNWJbBAG5Hr9N',
'ownerPubkey': '036494e7c9467c8c7ff3bf29e841907fb0fa24241866569944ea422479ec0e6252',
'timeout': 24})
except JSONRPCException as e:
errorString = e.error['message']
assert("Invalid hash, external htlc hash is different than dfc htlc hash" in errorString)
# timeout less than minimum
try:
self.nodes[1].icx_submitexthtlc({
'offerTx': offerTx,
'amount': 0.01,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220',
'htlcScriptAddress': '13sJQ9wBWh8ssihHUgAaCmNWJbBAG5Hr9N',
'ownerPubkey': '036494e7c9467c8c7ff3bf29e841907fb0fa24241866569944ea422479ec0e6252',
'timeout': 23})
except JSONRPCException as e:
errorString = e.error['message']
assert("timeout must be greater than 23" in errorString)
# timeout more than expiration of dfc htlc
try:
self.nodes[1].icx_submitexthtlc({
'offerTx': offerTx,
'amount': 0.01,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220',
'htlcScriptAddress': '13sJQ9wBWh8ssihHUgAaCmNWJbBAG5Hr9N',
'ownerPubkey': '036494e7c9467c8c7ff3bf29e841907fb0fa24241866569944ea422479ec0e6252',
'timeout': 73})
except JSONRPCException as e:
errorString = e.error['message']
assert("timeout must be less than expiration period of 1st htlc in DFC blocks" in errorString)
self.nodes[1].icx_submitexthtlc({
'offerTx': offerTx,
'amount': 0.01,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220',
'htlcScriptAddress': '13sJQ9wBWh8ssihHUgAaCmNWJbBAG5Hr9N',
'ownerPubkey': '036494e7c9467c8c7ff3bf29e841907fb0fa24241866569944ea422479ec0e6252',
'timeout': 24})["txid"]
self.nodes[1].generate(1)
self.sync_blocks()
# wrong dfchtlcTx
try:
self.nodes[1].icx_claimdfchtlc({
'dfchtlcTx': "76432beb2a667efe4858b4e1ec93979b621c51c76abaab2434892655dd152e3d",
'seed': 'f75a61ad8f7a6e0ab701d5be1f5d4523a9b534571e4e92e0c4610c6a6784ccef'})
except JSONRPCException as e:
errorString = e.error['message']
assert("dfc htlc with creation tx 76432beb2a667efe4858b4e1ec93979b621c51c76abaab2434892655dd152e3d does not exists" in errorString)
# wrong seed
try:
self.nodes[1].icx_claimdfchtlc({
'dfchtlcTx': dfchtlcTx,
'seed': 'f75a61ad8f7a6e0ab7000000000d4523a9b534571e4e92e0c4610c6a6784ccef'})
except JSONRPCException as e:
errorString = e.error['message']
assert("hash generated from given seed is different than in dfc htlc" in errorString)
self.nodes[1].icx_claimdfchtlc({
'dfchtlcTx': dfchtlcTx,
'seed': 'f75a61ad8f7a6e0ab701d5be1f5d4523a9b534571e4e92e0c4610c6a6784ccef'})["txid"]
self.nodes[1].generate(1)
self.sync_blocks()
# Make sure offer and order are now closed
offer = self.nodes[0].icx_listorders({"orderTx": orderTx})
assert_equal(len(offer), 1)
order = self.nodes[0].icx_listorders()
assert_equal(len(order), 1)
order = self.nodes[0].icx_listorders({"closed": True})
assert_equal(len(order), 2)
assert_equal(order[orderTx]["status"], 'FILLED')
# BTC/DFI scenario
# nonexistent token for create order
try:
self.nodes[0].icx_createorder({
'chainFrom': "BTC",
'tokenTo': "DOGE",
'ownerAddress': accountDFI,
'amountFrom': 0.01,
'orderPrice': 100})
except JSONRPCException as e:
errorString = e.error['message']
assert("Token DOGE does not exist!" in errorString)
# wrong chain for create order
try:
self.nodes[0].icx_createorder({
'chainFrom': "LTC",
'tokenTo': idDFI,
'ownerAddress': accountDFI,
'amountFrom': 0.01,
'orderPrice': 100})
except JSONRPCException as e:
errorString = e.error['message']
assert("Invalid parameters, argument \"chainFrom\" must be \"BTC\" if \"tokenTo\" specified" in errorString)
# wrong address for DFI for create order
try:
self.nodes[0].icx_createorder({
'chainFrom': "BTC",
'tokenTo': idDFI,
'ownerAddress': accountBTC,
'amountFrom': 0.01,
'orderPrice': 100})
except JSONRPCException as e:
errorString = e.error['message']
assert("Address ("+accountBTC+") is not owned by the wallet" in errorString)
orderTx = self.nodes[0].icx_createorder({
'chainFrom': "BTC",
'tokenTo': idDFI,
'ownerAddress': accountDFI,
'amountFrom': 0.01,
'orderPrice': 100})["txid"]
self.nodes[0].generate(1)
self.sync_blocks()
# nonexistent orderTx
try:
self.nodes[1].icx_makeoffer({
'orderTx': "76432beb2a667efe4858b4e1ec93979b621c51c76abaab2434892655dd152e3d",
'amount': 1,
'ownerAddress': accountBTC,
'receivePubkey': '037f9563f30c609b19fd435a19b8bde7d6db703012ba1aba72e9f42a87366d1941'})
except JSONRPCException as e:
errorString = e.error['message']
assert("orderTx (76432beb2a667efe4858b4e1ec93979b621c51c76abaab2434892655dd152e3d) does not exist" in errorString)
# invalid ownerAddress
try:
self.nodes[1].icx_makeoffer({
'orderTx': orderTx,
'amount': 1,
'ownerAddress': accountDFI,
'receivePubkey': '037f9563f30c609b19fd435a19b8bde7d6db703012ba1aba72e9f42a87366d1941'})
except JSONRPCException as e:
errorString = e.error['message']
assert("Address ("+accountDFI+") is not owned by the wallet" in errorString)
# invalid receivePubkey
try:
self.nodes[1].icx_makeoffer({
'orderTx': orderTx,
'amount': 1,
'ownerAddress': accountBTC,
'receivePubkey': '000000000000000000000000000000000000000000000000000000000000000000'})
except JSONRPCException as e:
errorString = e.error['message']
assert("Invalid public key" in errorString)
offerTx = self.nodes[1].icx_makeoffer({
'orderTx': orderTx,
'amount': 1,
'ownerAddress': accountBTC,
'receivePubkey': '037f9563f30c609b19fd435a19b8bde7d6db703012ba1aba72e9f42a87366d1941'})["txid"]
self.nodes[1].utxostoaccount({accountBTC: "1@" + symbolDFI})
self.nodes[1].generate(1)
self.sync_blocks()
# dfc htlc cannot be first htlc
try:
self.nodes[1].icx_submitdfchtlc({
'offerTx': offerTx,
'amount': 1,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220'})
except JSONRPCException as e:
errorString = e.error['message']
assert("offer ("+ offerTx + ") needs to have ext htlc submitted first, but no external htlc found" in errorString)
# more amount
try:
self.nodes[0].icx_submitexthtlc({
'offerTx': offerTx,
'amount': 0.1,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220',
'htlcScriptAddress': '13sJQ9wBWh8ssihHUgAaCmNWJbBAG5Hr9N',
'ownerPubkey': '036494e7c9467c8c7ff3bf29e841907fb0fa24241866569944ea422479ec0e6252',
'timeout': 72})
except JSONRPCException as e:
errorString = e.error['message']
assert("amount must be lower or equal the offer one" in errorString)
# timeout less than minimum
try:
self.nodes[0].icx_submitexthtlc({
'offerTx': offerTx,
'amount': 0.01,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220',
'htlcScriptAddress': '13sJQ9wBWh8ssihHUgAaCmNWJbBAG5Hr9N',
'ownerPubkey': '036494e7c9467c8c7ff3bf29e841907fb0fa24241866569944ea422479ec0e6252',
'timeout': 71})
except JSONRPCException as e:
errorString = e.error['message']
assert("timeout must be greater than 71" in errorString)
self.nodes[0].icx_submitexthtlc({
'offerTx': offerTx,
'amount': 0.01,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220',
'htlcScriptAddress': '13sJQ9wBWh8ssihHUgAaCmNWJbBAG5Hr9N',
'ownerPubkey': '036494e7c9467c8c7ff3bf29e841907fb0fa24241866569944ea422479ec0e6252',
'timeout': 72})["txid"]
self.nodes[0].generate(1)
self.sync_blocks()
# ext htlc already submitted
try:
self.nodes[0].icx_submitexthtlc({
'offerTx': offerTx,
'amount': 0.01,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220',
'htlcScriptAddress': '13sJQ9wBWh8ssihHUgAaCmNWJbBAG5Hr9N',
'ownerPubkey': '036494e7c9467c8c7ff3bf29e841907fb0fa24241866569944ea422479ec0e6252',
'timeout': 72})
except JSONRPCException as e:
errorString = e.error['message']
assert("ext htlc already submitted" in errorString)
# less amount
try:
self.nodes[1].icx_submitdfchtlc({
'offerTx': offerTx,
'amount': 0.5,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220'})
except JSONRPCException as e:
errorString = e.error['message']
assert("amount must be equal to calculated exthtlc amount" in errorString)
# more amount
try:
self.nodes[1].icx_submitdfchtlc({
'offerTx': offerTx,
'amount': 2,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220'})
except JSONRPCException as e:
errorString = e.error['message']
assert("amount must be equal to calculated exthtlc amount" in errorString)
# timeout less than minimum
try:
self.nodes[1].icx_submitdfchtlc({
'offerTx': offerTx,
'amount': 1,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220',
'timeout': 479})
except JSONRPCException as e:
errorString = e.error['message']
assert("timeout must be greater than 479" in errorString)
# timeout more than expiration of ext htlc
try:
self.nodes[1].icx_submitdfchtlc({
'offerTx': offerTx,
'amount': 1,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220',
'timeout': 1441})
except JSONRPCException as e:
errorString = e.error['message']
assert("timeout must be less than expiration period of 1st htlc in DFI blocks" in errorString)
dfchtlcTx = self.nodes[1].icx_submitdfchtlc({
'offerTx': offerTx,
'amount': 1,
'hash': '957fc0fd643f605b2938e0631a61529fd70bd35b2162a21d978c41e5241a5220'})["txid"]
self.nodes[1].generate(1)
self.sync_blocks()
# wrong dfchtlcTx
try:
self.nodes[0].icx_claimdfchtlc({
'dfchtlcTx': "76432beb2a667efe4858b4e1ec93979b621c51c76abaab2434892655dd152e3d",
'seed': 'f75a61ad8f7a6e0ab701d5be1f5d4523a9b534571e4e92e0c4610c6a6784ccef'})
except JSONRPCException as e:
errorString = e.error['message']
assert("dfc htlc with creation tx 76432beb2a667efe4858b4e1ec93979b621c51c76abaab2434892655dd152e3d does not exists" in errorString)
# wrong seed
try:
self.nodes[0].icx_claimdfchtlc({
'dfchtlcTx': dfchtlcTx,
'seed': 'f75a61ad8f7a6e0ab7000000000d4523a9b534571e4e92e0c4610c6a6784ccef'})
except JSONRPCException as e:
errorString = e.error['message']
assert("hash generated from given seed is different than in dfc htlc" in errorString)
self.nodes[0].icx_claimdfchtlc({
'dfchtlcTx': dfchtlcTx,
'seed': 'f75a61ad8f7a6e0ab701d5be1f5d4523a9b534571e4e92e0c4610c6a6784ccef'})["txid"]
self.nodes[0].generate(1)
self.sync_blocks()
# Make sure offer and order are now closed
offer = self.nodes[0].icx_listorders({"orderTx": orderTx})
assert_equal(len(offer), 1)
order = self.nodes[0].icx_listorders()
assert_equal(len(order), 1)
order = self.nodes[0].icx_listorders({"closed": True})
assert_equal(len(order), 3)
assert_equal(order[orderTx]["status"], 'FILLED')
if __name__ == '__main__':
ICXOrderbookErrorTest().main()
|
py
|
1a580a747143289145f1b3b8497fcf05a67d079a
|
# Generated by Django 3.1.7 on 2021-03-21 15:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('shop', '0003_auto_20210321_1830'),
]
operations = [
migrations.AlterField(
model_name='category',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='shop.category'),
),
migrations.AlterField(
model_name='product',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='shop.category'),
),
]
|
py
|
1a580bc1b9685b1367d55bdf266fb2d35a7f79b3
|
import sys
from . import data_prep_utils
if sys.version < '3' :
from backports import csv
else:
import csv
def autoLabel(raw_strings, module, type):
return set([tuple(module.parse(raw_sequence.strip(), type=type)) for i, raw_sequence in enumerate(set(raw_strings), 1)])
def label(module, infile, outfile, xml, type=None):
training_data = data_prep_utils.TrainingData(xml, module)
reader = csv.reader(infile)
strings = set(row[0] for row in reader if len(row) > 0)
if type is None:
tagger = module.TAGGER
else:
tagger = module.TAGGERS[type] or module.TAGGER
if tagger:
labeled_list = autoLabel(strings, module, type)
else:
raise Exception("Tagger is not defined in %s" % module.__name__)
training_data.extend(labeled_list)
with open(outfile, 'wb'):
training_data.write(outfile)
print("Training data successfully created and stored in stored in %s" % outfile)
|
py
|
1a580c74c40f2f4efd6dbc8e3cb44f8e210a7bd8
|
import clodius.tiles.format as hgfo
import pandas as pd
import numpy as np
import h5py
def csv_to_points(csv_file, output_file):
'''
Convert a csv file containing points into an HDF5 file of
[[x, y]] values and return the points as a DataFrame.
Parameters:
-----------
csv_file: string
The filename of the input csv data file
output_file: string
The filename of the HDF5 file to write the indexed points to
'''
df = pd.read_table(csv_file, delimiter=',')
min_x = df['x'].min()
max_x = df['x'].max()
min_y = df['y'].min()
max_y = df['y'].max()
width = max_x - min_x
height = max_y - min_y
max_width = max(width, height)
# print("max_width:", max_width, min_x, min_y, max_x, max_y)
max_zoom = 30
with h5py.File(output_file, 'w') as f_out:
dataset = f_out.create_dataset('values', (len(df), 2), compression='gzip', dtype=np.float32)
dataset[:] = df.reindex(columns=['x', 'y']).values
dataset.attrs['min_x'] = min_x
dataset.attrs['max_x'] = max_x
dataset.attrs['min_y'] = min_y
dataset.attrs['max_y'] = max_y
dataset.attrs['max_zoom'] = max_zoom
dataset.attrs['max_width'] = max_width
info = {
'min_pos': [min_x, min_y],
'max_pos': [max_x, max_y],
'max_zoom': max_zoom,
'max_width': max_width
}
return df.reindex(columns=['x', 'y'])
def tileset_info(points_file):
'''
Calculate the extent, etc...
'''
with h5py.File(points_file, 'r') as f_in:
dset = f_in['values']
return {
'min_pos': [float(dset.attrs['min_x']), float(dset.attrs['min_y'])],
'max_pos': [float(dset.attrs['max_x']), float(dset.attrs['max_y'])],
'max_width': float(dset.attrs['max_width']),
'max_zoom': int(dset.attrs['max_zoom']),
'mirror_tiles': 'false'
}
def tile_bounds(points_file, z, x, y, width=1, height=1):
'''
Get the boundaries of a tile
Parameters:
-----------
points_file: string
The points file whose tileset info defines the bounds
z, x, y: int
The zoom level and tile coordinates
width, height: int
The number of tiles to span in x and y
'''
tsinfo = tileset_info(points_file)
tile_width = tsinfo['max_width'] / 2 ** z
x_start = tsinfo['min_pos'][0] + tile_width * x
x_end = tsinfo['min_pos'][0] + tile_width * (x+width)
y_start = tsinfo['min_pos'][1] + tile_width * y
y_end = tsinfo['min_pos'][1] + tile_width * (y+height)
return (x_start, x_end, y_start, y_end)
def filter_points(data, extent):
'''
Filter points that are within the extent
Parameters:
-----------
data: [[]]
A 2D numpy array containing x,y values
extent: [x_start, x_end, y_start, y_end]
The region we want to return points within
Returns
-------
data: [[]]
A 2D numpy array containing x,y values
'''
# print("extent:", extent)
# print("data.shape", data.shape, data[:,0])
data = data[data[:,0] > extent[0]]
data = data[data[:,0] < extent[1]]
data = data[data[:,1] > extent[2]]
data = data[data[:,1] < extent[3]]
return data
def density_tiles(points_file, z, x, y, width=1, height=1):
'''
Get a 2D histogram of the given region. If the height and
width are specified, then we need to partition this into
multiple returned tiles.
'''
returns = []
with h5py.File(points_file, 'r') as f:
# get all the points in the region
all_points = filter_points(f['values'][:],
tile_bounds(points_file, z, x, y,
width, height))
for i in range(width):
for j in range(height):
# filter from the larger subregion
filtered_points = filter_points(all_points,
tile_bounds(points_file, z, x+i, y+j))
dt = np.histogram2d(filtered_points[:,0],
filtered_points[:,1], bins=256)[0].T
dt[dt == 0.] = np.nan
returns += [((z, x+i, y+j), dt)]
return returns
def tiles(points_file, z, x, y, width=1, height=1):
return [(tile_position, hgfo.format_dense_tile(data.flatten())) for
(tile_position, data) in density_tiles(points_file, z, x, y, width, height)]
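

# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only, not part of the original module).
# It exercises the functions defined above end to end; the temporary file
# names and the synthetic point cloud are made up for the example.
# ---------------------------------------------------------------------------
def _example_usage():
    import os
    import tempfile

    # Build a tiny synthetic point cloud and write it to a CSV file.
    rng = np.random.RandomState(0)
    df = pd.DataFrame({'x': rng.uniform(0, 100, 1000),
                       'y': rng.uniform(0, 100, 1000)})
    tmpdir = tempfile.mkdtemp()
    csv_path = os.path.join(tmpdir, 'points.csv')
    h5_path = os.path.join(tmpdir, 'points.h5')
    df.to_csv(csv_path, index=False)

    # Index the points and inspect the tileset metadata.
    csv_to_points(csv_path, h5_path)
    print(tileset_info(h5_path))

    # Fetch the single top-level density tile (zoom 0, tile x=0, y=0).
    for tile_position, encoded in tiles(h5_path, 0, 0, 0):
        print(tile_position, type(encoded))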
|
py
|
1a580cf84493b3efbbd9edd3afa3217d80118aa2
|
# Lint as: python3
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstractions for models used in federated learning."""
import abc
import collections
class BatchOutput(
collections.namedtuple('BatchOutput',
['loss', 'predictions', 'num_examples'])):
"""A structure that holds the output of a `tff.learning.Model`.
Note: All fields are optional (may be None).
- `loss`: The scalar mean loss on the examples in the batch. If the model
has multiple losses, it is the sum of all the individual losses.
- `predictions`: Tensor of predictions on the examples. The first dimension
must be the same size (the size of the batch).
- `num_examples`: Number of examples seen in the batch.
"""
__slots__ = ()
class Model(object, metaclass=abc.ABCMeta):
"""Represents a model for use in TensorFlow Federated.
Each `Model` will work on a set of `tf.Variables`, and each method should be
a computation that can be implemented as a `tf.function`; this implies the
class should essentially be stateless from a Python perspective, as each
method will generally only be traced once (per set of arguments) to create the
corresponding TensorFlow graph functions. Thus, `Model` instances should
behave as expected in both eager and graph (TF 1.0) usage.
In general, `tf.Variables` may be either:
* Weights, the variables needed to make predictions with the model.
* Local variables, e.g. to accumulate aggregated metrics across
calls to forward_pass.
The weights can be broken down into trainable variables (variables
that can and should be trained using gradient-based methods), and
non-trainable variables (which could include fixed pre-trained layers,
or static model data). These variables are provided via the
`trainable_variables`, `non_trainable_variables`, and `local_variables`
properties, and must be initialized by the user of the `Model`.
In federated learning, model weights will generally be provided by the
server, and updates to trainable model variables will be sent back to the
server. Local variables are not transmitted, and are instead initialized
locally on the device, and then used to produce `aggregated_outputs` which
are sent to the server.
All `tf.Variables` should be introduced in `__init__`; this could move to a
`build` method more inline with Keras (see
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) in
the future.
"""
@abc.abstractproperty
def trainable_variables(self):
"""An iterable of `tf.Variable` objects, see class comment for details."""
pass
@abc.abstractproperty
def non_trainable_variables(self):
"""An iterable of `tf.Variable` objects, see class comment for details."""
pass
@abc.abstractproperty
def local_variables(self):
"""An iterable of `tf.Variable` objects, see class comment for details."""
pass
@abc.abstractproperty
def input_spec(self):
"""The type specification of the `batch_input` parameter for `forward_pass`.
A nested structure of `tf.TensorSpec` objects, that matches the structure of
arguments that will be passed as the `batch_input` argument of
`forward_pass`. The tensors must include a batch dimension as the first
dimension, but the batch dimension may be undefined.
Similar in spirit to `tf.keras.models.Model.input_spec`.
"""
pass
@abc.abstractmethod
def forward_pass(self, batch_input, training=True):
"""Runs the forward pass and returns results.
This method should not modify any variables that are part of the model, that
is, variables that influence the predictions; for that, see
`TrainableModel.train_on_batch`.
However, this method may update aggregated metrics computed across calls to
forward_pass; the final values of such metrics can be accessed via
`aggregated_outputs`.
Uses in TFF:
* To implement model evaluation.
* To implement federated gradient descent and other
non-Federated-Averaging algorithms, where we want the model to run the
forward pass and update metrics, but there is no optimizer
(we might only compute gradients on the returned loss).
* To implement Federated Averaging, when augmented as a `TrainableModel`.
Args:
batch_input: a nested structure that matches the structure of
`Model.input_spec` and each tensor in `batch_input` satisfies
`tf.TensorSpec.is_compatible_with()` for the corresponding
`tf.TensorSpec` in `Model.input_spec`.
training: If `True`, run the training forward pass, otherwise, run in
evaluation mode. The semantics are generally the same as the `training`
argument to `keras.Model.__call__`; this might e.g. influence how
dropout or batch normalization is handled.
Returns:
A `BatchOutput` object. The object must include the `loss` tensor if the
model will be trained via a gradient-based algorithm.
"""
pass
@abc.abstractmethod
def report_local_outputs(self):
"""Returns tensors representing values aggregated over `forward_pass` calls.
In federated learning, the values returned by this method will typically
be further aggregated across clients and made available on the server.
This method returns results from aggregating across *all* previous calls
to `forward_pass`, most typically metrics like accuracy and loss. If needed,
we may add a `clear_aggregated_outputs` method, which would likely just
run the initializers on the `local_variables`.
In general, the tensors returned can be an arbitrary function of all
the `tf.Variables` of this model, not just the `local_variables`; for
example, this could return tensors measuring the total L2 norm of the model
(which might have been updated by training).
This method may return arbitrarily shaped tensors, not just scalar metrics.
For example, it could return the average feature vector or a count of
how many times each feature exceed a certain magnitude.
Returns:
A structure of tensors (as supported by `tf.nest`)
to be aggregated across clients.
"""
pass
@abc.abstractproperty
def federated_output_computation(self):
"""Performs federated aggregation of the `Model's` `local_outputs`.
This is typically used to aggregate metrics across many clients, e.g. the
body of the computation might be:
```python
return {
'num_examples': tff.federated_sum(local_outputs.num_examples),
'loss': tff.federated_mean(local_outputs.loss)
}
```
N.B. It is assumed all TensorFlow computation happens in the
`report_local_outputs` method, and this method only uses TFF constructs to
specify aggregations across clients.
Returns:
Either a `tff.Computation`, or None if no federated aggregation is needed.
The `tff.Computation` should take as its single input a
`tff.CLIENTS`-placed `tff.Value` corresponding to the return value of
`Model.report_local_outputs`, and return an `OrderedDict` (possibly
nested) of `tff.SERVER`-placed values. The consumer of this
method should generally provide these server-placed values as outputs of
the overall computation consuming the model. Using an `OrderedDict`
allows the value returned by TFF executor to be converted back to an
`OrderedDict` via the `._asdict(recursive=True)` member function.
"""
pass
class TrainableModel(Model, metaclass=abc.ABCMeta):
"""A Model with an additional method for (local) training.
This class is primarily intended to be used in the implementation of
Federated Averaging.
"""
@abc.abstractmethod
def train_on_batch(self, batch_input):
"""Like `forward_pass`, but updates the model variables.
Typically this will invoke `forward_pass`, with any corresponding
side-effects such as updating metrics.
Args:
batch_input: The current batch, as for `forward_pass`.
Returns:
The same `BatchOutput` as `forward_pass`.
"""
pass
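

# -----------------------------------------------------------------------------
# Hedged illustration (not part of the TFF API): a minimal, TF-free stand-in
# showing the shape of the `Model` contract defined above. A real
# implementation would back these properties with `tf.Variable`s and express
# `forward_pass` as a `tf.function`; everything here is plain Python so the
# sketch stays self-contained and runnable.
# -----------------------------------------------------------------------------
class _ConstantModelSketch(Model):
  """Predicts 0.0 for every example; for illustration only."""

  @property
  def trainable_variables(self):
    return []

  @property
  def non_trainable_variables(self):
    return []

  @property
  def local_variables(self):
    return []

  @property
  def input_spec(self):
    # A real model would return a structure of `tf.TensorSpec`s here.
    return None

  def forward_pass(self, batch_input, training=True):
    # `batch_input` is assumed to be an iterable of numeric labels.
    examples = list(batch_input)
    predictions = [0.0] * len(examples)
    loss = sum((y - p) ** 2 for y, p in zip(examples, predictions))
    loss = loss / max(len(examples), 1)
    return BatchOutput(
        loss=loss, predictions=predictions, num_examples=len(examples))

  def report_local_outputs(self):
    return collections.OrderedDict(num_examples=0)

  @property
  def federated_output_computation(self):
    # No federated aggregation in this sketch.
    return None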
|
py
|
1a580e0bc6b6d7c908f2c9a0029b3715b8b7d6f9
|
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
collections compatibility module for older (pre-2.4) Python versions
This does not not NOT (repeat, *NOT*) provide complete collections
functionality. It only wraps the portions of collections functionality
used by SCons, in an interface that looks enough like collections for
our purposes.
"""
__revision__ = "src/engine/SCons/compat/_scons_collections.py 2014/07/05 09:42:21 garyo"
# Use exec to hide old names from fixers.
exec("""if True:
from UserDict import UserDict
from UserList import UserList
from UserString import UserString""")
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
py
|
1a580e27ea527b1626c3d2c76b37a02e22b908b1
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime, timedelta
import hashlib
import os
import random
import sys
import tempfile
import time
from glob import glob
from py4j.protocol import Py4JJavaError
from pyspark import shuffle, RDD
from pyspark.resource import ExecutorResourceRequests, ResourceProfile, ResourceProfileBuilder,\
TaskResourceRequests
from pyspark.serializers import CloudPickleSerializer, BatchedSerializer, PickleSerializer,\
MarshalSerializer, UTF8Deserializer, NoOpSerializer
from pyspark.testing.utils import ReusedPySparkTestCase, SPARK_HOME, QuietTest
global_func = lambda: "Hi"
class RDDTests(ReusedPySparkTestCase):
def test_range(self):
self.assertEqual(self.sc.range(1, 1).count(), 0)
self.assertEqual(self.sc.range(1, 0, -1).count(), 1)
self.assertEqual(self.sc.range(0, 1 << 40, 1 << 39).count(), 2)
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_empty_rdd(self):
rdd = self.sc.emptyRDD()
self.assertTrue(rdd.isEmpty())
def test_sum(self):
self.assertEqual(0, self.sc.emptyRDD().sum())
self.assertEqual(6, self.sc.parallelize([1, 2, 3]).sum())
def test_to_localiterator(self):
rdd = self.sc.parallelize([1, 2, 3])
it = rdd.toLocalIterator()
self.assertEqual([1, 2, 3], sorted(it))
rdd2 = rdd.repartition(1000)
it2 = rdd2.toLocalIterator()
self.assertEqual([1, 2, 3], sorted(it2))
def test_to_localiterator_prefetch(self):
# Test that we fetch the next partition in parallel
# We do this by returning the current time and:
# reading the first elem, waiting, and reading the second elem
# If not in parallel then these would be at different times
# But since they are being computed in parallel we see the time
# is "close enough" to the same.
rdd = self.sc.parallelize(range(2), 2)
times1 = rdd.map(lambda x: datetime.now())
times2 = rdd.map(lambda x: datetime.now())
times_iter_prefetch = times1.toLocalIterator(prefetchPartitions=True)
times_iter = times2.toLocalIterator(prefetchPartitions=False)
times_prefetch_head = next(times_iter_prefetch)
times_head = next(times_iter)
time.sleep(2)
times_next = next(times_iter)
times_prefetch_next = next(times_iter_prefetch)
self.assertTrue(times_next - times_head >= timedelta(seconds=2))
self.assertTrue(times_prefetch_next - times_prefetch_head < timedelta(seconds=1))
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode("utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode('utf8'))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda x_y3: x_y3[0] + x_y3[1]).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize([u"Hello", u"World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual(u"Hello World!", x.strip())
self.assertEqual(u"Hello World!", y.strip())
def test_cartesian_chaining(self):
# Tests for SPARK-16589
rdd = self.sc.parallelize(range(10), 2)
self.assertSetEqual(
set(rdd.cartesian(rdd).cartesian(rdd).collect()),
set([((x, y), z) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.cartesian(rdd)).collect()),
set([(x, (y, z)) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.zip(rdd)).collect()),
set([(x, (y, y)) for x in range(10) for y in range(10)])
)
def test_zip_chaining(self):
# Tests for SPARK-21985
rdd = self.sc.parallelize('abc', 2)
self.assertSetEqual(
set(rdd.zip(rdd).zip(rdd).collect()),
set([((x, x), x) for x in 'abc'])
)
self.assertSetEqual(
set(rdd.zip(rdd.zip(rdd)).collect()),
set([(x, (x, x)) for x in 'abc'])
)
def test_union_pair_rdd(self):
# SPARK-31788: test if pair RDDs can be combined by union.
rdd = self.sc.parallelize([1, 2])
pair_rdd = rdd.zip(rdd)
unionRDD = self.sc.union([pair_rdd, pair_rdd])
self.assertEqual(
set(unionRDD.collect()),
set([(1, 1), (2, 2), (1, 1), (2, 2)])
)
self.assertEqual(unionRDD.count(), 4)
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
def test_sampling_default_seed(self):
# Test for SPARK-3995 (default seed setting)
data = self.sc.parallelize(range(1000), 1)
subset = data.takeSample(False, 10)
self.assertEqual(len(subset), 10)
def test_aggregate_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregate and treeAggregate to build dict
# representing a counter of ints
from collections import defaultdict
# Show that single or multiple partitions work
data1 = self.sc.range(10, numSlices=1)
data2 = self.sc.range(10, numSlices=2)
def seqOp(x, y):
x[y] += 1
return x
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
counts1 = data1.aggregate(defaultdict(int), seqOp, comboOp)
counts2 = data2.aggregate(defaultdict(int), seqOp, comboOp)
counts3 = data1.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
counts4 = data2.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
ground_truth = defaultdict(int, dict((i, 1) for i in range(10)))
self.assertEqual(counts1, ground_truth)
self.assertEqual(counts2, ground_truth)
self.assertEqual(counts3, ground_truth)
self.assertEqual(counts4, ground_truth)
def test_aggregate_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregateByKey to make a pair RDD that
# contains lists of all values for each key in the original RDD
# list(range(...)) for Python 3.x compatibility (can't use * operator
# on a range object)
# list(zip(...)) for Python 3.x compatibility (want to parallelize a
# collection, not a zip object)
tuples = list(zip(list(range(10))*2, [1]*20))
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def seqOp(x, y):
x.append(y)
return x
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.aggregateByKey([], seqOp, comboOp).collect()
values2 = data2.aggregateByKey([], seqOp, comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
ground_truth = [(i, [1]*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_fold_mutable_zero_value(self):
# Test for SPARK-9021; uses fold to merge an RDD of dict counters into
# a single dict
from collections import defaultdict
counts1 = defaultdict(int, dict((i, 1) for i in range(10)))
counts2 = defaultdict(int, dict((i, 1) for i in range(3, 8)))
counts3 = defaultdict(int, dict((i, 1) for i in range(4, 7)))
counts4 = defaultdict(int, dict((i, 1) for i in range(5, 6)))
all_counts = [counts1, counts2, counts3, counts4]
# Show that single or multiple partitions work
data1 = self.sc.parallelize(all_counts, 1)
data2 = self.sc.parallelize(all_counts, 2)
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
fold1 = data1.fold(defaultdict(int), comboOp)
fold2 = data2.fold(defaultdict(int), comboOp)
ground_truth = defaultdict(int)
for counts in all_counts:
for key, val in counts.items():
ground_truth[key] += val
self.assertEqual(fold1, ground_truth)
self.assertEqual(fold2, ground_truth)
def test_fold_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses foldByKey to make a pair RDD that contains
# lists of all values for each key in the original RDD
tuples = [(i, range(i)) for i in range(10)]*2
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.foldByKey([], comboOp).collect()
values2 = data2.foldByKey([], comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
# list(range(...)) for Python 3.x compatibility
ground_truth = [(i, list(range(i))*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_aggregate_by_key(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEqual([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 10000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 27MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
def test_unpersist(self):
N = 1000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 3MB
bdata.unpersist()
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
bdata.destroy(blocking=True)
try:
self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
except Exception as e:
pass
else:
raise Exception("job should fail after destroy the broadcast")
def test_multiple_broadcasts(self):
N = 1 << 21
b1 = self.sc.broadcast(set(range(N))) # multiple blocks in JVM
r = list(range(1 << 15))
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
def test_multithread_broadcast_pickle(self):
import threading
b1 = self.sc.broadcast(list(range(3)))
b2 = self.sc.broadcast(list(range(3)))
def f1():
return b1.value
def f2():
return b2.value
funcs_num_pickled = {f1: None, f2: None}
def do_pickle(f, sc):
command = (f, None, sc.serializer, sc.serializer)
ser = CloudPickleSerializer()
ser.dumps(command)
def process_vars(sc):
broadcast_vars = list(sc._pickled_broadcast_vars)
num_pickled = len(broadcast_vars)
sc._pickled_broadcast_vars.clear()
return num_pickled
def run(f, sc):
do_pickle(f, sc)
funcs_num_pickled[f] = process_vars(sc)
# pickle f1, adds b1 to sc._pickled_broadcast_vars in main thread local storage
do_pickle(f1, self.sc)
# run all for f2, should only add/count/clear b2 from worker thread local storage
t = threading.Thread(target=run, args=(f2, self.sc))
t.start()
t.join()
# count number of vars pickled in main thread, only b1 should be counted and cleared
funcs_num_pickled[f1] = process_vars(self.sc)
self.assertEqual(funcs_num_pickled[f1], 1)
self.assertEqual(funcs_num_pickled[f2], 1)
self.assertEqual(len(list(self.sc._pickled_broadcast_vars)), 0)
def test_large_closure(self):
N = 200000
data = [float(i) for i in range(N)]
rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
self.assertEqual(N, rdd.first())
# regression test for SPARK-6886
self.assertEqual(1, rdd.map(lambda x: (x, 1)).groupByKey().count())
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
# regression test for SPARK-4841
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
t = self.sc.textFile(path)
cnt = t.count()
self.assertEqual(cnt, t.zip(t).count())
rdd = t.map(str)
self.assertEqual(cnt, t.zip(rdd).count())
# regression test for bug in _reserializer()
self.assertEqual(cnt, t.zip(rdd).count())
def test_zip_with_different_object_sizes(self):
        # regression test for SPARK-5973
a = self.sc.parallelize(range(10000)).map(lambda i: '*' * i)
b = self.sc.parallelize(range(10000, 20000)).map(lambda i: '*' * i)
self.assertEqual(10000, a.zip(b).count())
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
with QuietTest(self.sc):
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEqual(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(range(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.03) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual([4], rdd.histogram([0, 10])[1])
self.assertEqual([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEqual([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
        # in range with two buckets and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEqual([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEqual([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEqual(([1, 1], [1]), rdd.histogram(1))
# without bucket no range
rdd = self.sc.parallelize([1] * 4)
self.assertEqual(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEqual((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEqual([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEqual((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions_asc(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2, True)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEqual(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_repartitionAndSortWithinPartitions_desc(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2, False)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(2, 6), (0, 5), (0, 8)])
self.assertEqual(partitions[1], [(3, 8), (3, 8), (1, 3)])
def test_repartition_no_skewed(self):
num_partitions = 20
a = self.sc.parallelize(range(int(1000)), 2)
l = a.repartition(num_partitions).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
l = a.coalesce(num_partitions, True).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
def test_repartition_on_textfile(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
rdd = self.sc.textFile(path)
result = rdd.repartition(1).collect()
self.assertEqual(u"Hello World!", result[0])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEqual(rdd.getNumPartitions(), 10)
self.assertEqual(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEqual(result.getNumPartitions(), 5)
self.assertEqual(result.count(), 3)
def test_external_group_by_key(self):
self.sc._conf.set("spark.python.worker.memory", "1m")
N = 2000001
kv = self.sc.parallelize(range(N)).map(lambda x: (x % 3, x))
gkv = kv.groupByKey().cache()
self.assertEqual(3, gkv.count())
filtered = gkv.filter(lambda kv: kv[0] == 1)
self.assertEqual(1, filtered.count())
self.assertEqual([(1, N // 3)], filtered.mapValues(len).collect())
self.assertEqual([(N // 3, N // 3)],
filtered.values().map(lambda x: (len(x), len(list(x)))).collect())
result = filtered.collect()[0][1]
self.assertEqual(N // 3, len(result))
self.assertTrue(isinstance(result.data, shuffle.ExternalListOfList))
def test_sort_on_empty_rdd(self):
self.assertEqual([], self.sc.parallelize(zip([], [])).sortByKey().collect())
def test_sample(self):
rdd = self.sc.parallelize(range(0, 100), 4)
wo = rdd.sample(False, 0.1, 2).collect()
wo_dup = rdd.sample(False, 0.1, 2).collect()
self.assertSetEqual(set(wo), set(wo_dup))
wr = rdd.sample(True, 0.2, 5).collect()
wr_dup = rdd.sample(True, 0.2, 5).collect()
self.assertSetEqual(set(wr), set(wr_dup))
wo_s10 = rdd.sample(False, 0.3, 10).collect()
wo_s20 = rdd.sample(False, 0.3, 20).collect()
self.assertNotEqual(set(wo_s10), set(wo_s20))
wr_s11 = rdd.sample(True, 0.4, 11).collect()
wr_s21 = rdd.sample(True, 0.4, 21).collect()
self.assertNotEqual(set(wr_s11), set(wr_s21))
def test_null_in_rdd(self):
jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc)
rdd = RDD(jrdd, self.sc, UTF8Deserializer())
self.assertEqual([u"a", None, u"b"], rdd.collect())
rdd = RDD(jrdd, self.sc, NoOpSerializer())
self.assertEqual([b"a", None, b"b"], rdd.collect())
def test_multiple_python_java_RDD_conversions(self):
# Regression test for SPARK-5361
data = [
(u'1', {u'director': u'David Lean'}),
(u'2', {u'director': u'Andrew Dominik'})
]
data_rdd = self.sc.parallelize(data)
data_java_rdd = data_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
# conversion between python and java RDD threw exceptions
data_java_rdd = converted_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
# Regression test for SPARK-6294
def test_take_on_jrdd(self):
rdd = self.sc.parallelize(range(1 << 20)).map(lambda x: str(x))
rdd._jrdd.first()
def test_sortByKey_uses_all_partitions_not_only_first_and_last(self):
# Regression test for SPARK-5969
seq = [(i * 59 % 101, i) for i in range(101)] # unsorted sequence
rdd = self.sc.parallelize(seq)
for ascending in [True, False]:
sort = rdd.sortByKey(ascending=ascending, numPartitions=5)
self.assertEqual(sort.collect(), sorted(seq, reverse=not ascending))
sizes = sort.glom().map(len).collect()
for size in sizes:
self.assertGreater(size, 0)
def test_pipe_functions(self):
data = ['1', '2', '3']
rdd = self.sc.parallelize(data)
with QuietTest(self.sc):
self.assertEqual([], rdd.pipe('java').collect())
self.assertRaises(Py4JJavaError, rdd.pipe('java', checkCode=True).collect)
result = rdd.pipe('cat').collect()
result.sort()
for x, y in zip(data, result):
self.assertEqual(x, y)
self.assertRaises(Py4JJavaError, rdd.pipe('grep 4', checkCode=True).collect)
self.assertEqual([], rdd.pipe('grep 4').collect())
def test_pipe_unicode(self):
# Regression test for SPARK-20947
data = [u'\u6d4b\u8bd5', '1']
rdd = self.sc.parallelize(data)
result = rdd.pipe('cat').collect()
self.assertEqual(data, result)
def test_stopiteration_in_user_code(self):
def stopit(*x):
raise StopIteration()
seq_rdd = self.sc.parallelize(range(10))
keyed_rdd = self.sc.parallelize((x % 2, x) for x in range(10))
msg = "Caught StopIteration thrown from user's code; failing the task"
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.map(stopit).collect)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.filter(stopit).collect)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.foreach, stopit)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.reduce, stopit)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.fold, 0, stopit)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.foreach, stopit)
self.assertRaisesRegexp(Py4JJavaError, msg,
seq_rdd.cartesian(seq_rdd).flatMap(stopit).collect)
# these methods call the user function both in the driver and in the executor
# the exception raised is different according to where the StopIteration happens
# RuntimeError is raised if in the driver
# Py4JJavaError is raised if in the executor (wraps the RuntimeError raised in the worker)
self.assertRaisesRegexp((Py4JJavaError, RuntimeError), msg,
keyed_rdd.reduceByKeyLocally, stopit)
self.assertRaisesRegexp((Py4JJavaError, RuntimeError), msg,
seq_rdd.aggregate, 0, stopit, lambda *x: 1)
self.assertRaisesRegexp((Py4JJavaError, RuntimeError), msg,
seq_rdd.aggregate, 0, lambda *x: 1, stopit)
def test_overwritten_global_func(self):
# Regression test for SPARK-27000
global global_func
self.assertEqual(self.sc.parallelize([1]).map(lambda _: global_func()).first(), "Hi")
global_func = lambda: "Yeah"
self.assertEqual(self.sc.parallelize([1]).map(lambda _: global_func()).first(), "Yeah")
def test_to_local_iterator_failure(self):
# SPARK-27548 toLocalIterator task failure not propagated to Python driver
def fail(_):
raise RuntimeError("local iterator error")
rdd = self.sc.range(10).map(fail)
with self.assertRaisesRegexp(Exception, "local iterator error"):
for _ in rdd.toLocalIterator():
pass
def test_to_local_iterator_collects_single_partition(self):
# Test that partitions are not computed until requested by iteration
def fail_last(x):
if x == 9:
raise RuntimeError("This should not be hit")
return x
rdd = self.sc.range(12, numSlices=4).map(fail_last)
it = rdd.toLocalIterator()
        # Only consume the first 4 elements, which come from the first two partitions;
        # this should not collect the last partition, which would trigger the error
for i in range(4):
self.assertEqual(i, next(it))
def test_resourceprofile(self):
rp_builder = ResourceProfileBuilder()
ereqs = ExecutorResourceRequests().cores(2).memory("6g").memoryOverhead("1g")
ereqs.pysparkMemory("2g").resource("gpu", 2, "testGpus", "nvidia.com")
treqs = TaskResourceRequests().cpus(2).resource("gpu", 2)
def assert_request_contents(exec_reqs, task_reqs):
self.assertEqual(len(exec_reqs), 5)
self.assertEqual(exec_reqs["cores"].amount, 2)
self.assertEqual(exec_reqs["memory"].amount, 6144)
self.assertEqual(exec_reqs["memoryOverhead"].amount, 1024)
self.assertEqual(exec_reqs["pyspark.memory"].amount, 2048)
self.assertEqual(exec_reqs["gpu"].amount, 2)
self.assertEqual(exec_reqs["gpu"].discoveryScript, "testGpus")
self.assertEqual(exec_reqs["gpu"].resourceName, "gpu")
self.assertEqual(exec_reqs["gpu"].vendor, "nvidia.com")
self.assertEqual(len(task_reqs), 2)
self.assertEqual(task_reqs["cpus"].amount, 2.0)
self.assertEqual(task_reqs["gpu"].amount, 2.0)
assert_request_contents(ereqs.requests, treqs.requests)
rp = rp_builder.require(ereqs).require(treqs).build
assert_request_contents(rp.executorResources, rp.taskResources)
rdd = self.sc.parallelize(range(10)).withResources(rp)
return_rp = rdd.getResourceProfile()
assert_request_contents(return_rp.executorResources, return_rp.taskResources)
rddWithoutRp = self.sc.parallelize(range(10))
self.assertEqual(rddWithoutRp.getResourceProfile(), None)
def test_multiple_group_jobs(self):
import threading
group_a = "job_ids_to_cancel"
group_b = "job_ids_to_run"
threads = []
thread_ids = range(4)
thread_ids_to_cancel = [i for i in thread_ids if i % 2 == 0]
thread_ids_to_run = [i for i in thread_ids if i % 2 != 0]
        # A list recording whether each job was cancelled.
        # The index in the list is the index of the thread the job runs in.
is_job_cancelled = [False for _ in thread_ids]
def run_job(job_group, index):
"""
            Executes a job with the group ``job_group``. Each job sleeps for 15 seconds
            and then exits.
"""
try:
self.sc.parallelize([15]).map(lambda x: time.sleep(x)) \
.collectWithJobGroup(job_group, "test rdd collect with setting job group")
is_job_cancelled[index] = False
except Exception:
# Assume that exception means job cancellation.
is_job_cancelled[index] = True
# Test if job succeeded when not cancelled.
run_job(group_a, 0)
self.assertFalse(is_job_cancelled[0])
# Run jobs
for i in thread_ids_to_cancel:
t = threading.Thread(target=run_job, args=(group_a, i))
t.start()
threads.append(t)
for i in thread_ids_to_run:
t = threading.Thread(target=run_job, args=(group_b, i))
t.start()
threads.append(t)
# Wait to make sure all jobs are executed.
time.sleep(3)
# And then, cancel one job group.
self.sc.cancelJobGroup(group_a)
# Wait until all threads launching jobs are finished.
for t in threads:
t.join()
for i in thread_ids_to_cancel:
self.assertTrue(
is_job_cancelled[i],
"Thread {i}: Job in group A was not cancelled.".format(i=i))
for i in thread_ids_to_run:
self.assertFalse(
is_job_cancelled[i],
"Thread {i}: Job in group B did not succeeded.".format(i=i))
if __name__ == "__main__":
import unittest
from pyspark.tests.test_rdd import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
py
|
1a580f5f19497c2d80427c99fc43f3c6ce0cdd12
|
# Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient.tests.fixture_data import agents as data
from novaclient.tests.fixture_data import client
from novaclient.tests import utils
from novaclient.v1_1 import agents
class AgentsTest(utils.FixturedTestCase):
data_fixture_class = data.Fixture
scenarios = [('original', {'client_fixture_class': client.V1}),
('session', {'client_fixture_class': client.SessionV1})]
def stub_hypervisors(self, hypervisor='kvm'):
get_os_agents = {'agents':
[
{
'hypervisor': hypervisor,
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'id': 1
},
{
'hypervisor': hypervisor,
'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'xxx://xxxx/xxx/xxx1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'id': 2
},
]
}
headers = {'Content-Type': 'application/json'}
self.requests.register_uri('GET', self.data_fixture.url(),
json=get_os_agents,
headers=headers)
def test_list_agents(self):
self.stub_hypervisors()
ags = self.cs.agents.list()
self.assert_called('GET', '/os-agents')
for a in ags:
self.assertIsInstance(a, agents.Agent)
self.assertEqual('kvm', a.hypervisor)
def test_list_agents_with_hypervisor(self):
self.stub_hypervisors('xen')
ags = self.cs.agents.list('xen')
self.assert_called('GET', '/os-agents?hypervisor=xen')
for a in ags:
self.assertIsInstance(a, agents.Agent)
self.assertEqual('xen', a.hypervisor)
def test_agents_create(self):
ag = self.cs.agents.create('win', 'x86', '7.0',
'/xxx/xxx/xxx',
'add6bb58e139be103324d04d82d8f546',
'xen')
body = {'agent': {
'url': '/xxx/xxx/xxx',
'hypervisor': 'xen',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'version': '7.0',
'architecture': 'x86',
'os': 'win'}}
self.assert_called('POST', '/os-agents', body)
self.assertEqual(1, ag._info.copy()['id'])
def test_agents_delete(self):
self.cs.agents.delete('1')
self.assert_called('DELETE', '/os-agents/1')
def _build_example_update_body(self):
return {"para": {
"url": "/yyy/yyyy/yyyy",
"version": "8.0",
"md5hash": "add6bb58e139be103324d04d82d8f546"}}
def test_agents_modify(self):
ag = self.cs.agents.update('1', '8.0',
'/yyy/yyyy/yyyy',
'add6bb58e139be103324d04d82d8f546')
body = self._build_example_update_body()
self.assert_called('PUT', '/os-agents/1', body)
self.assertEqual(1, ag.id)
|
py
|
1a580ff37f380dd8825fd1e9a9e0fe760336b74d
|
# MicroPython ST7735 TFT display driver
from machine import Pin
from machine import SPI
import font
import time
from micropython import const
class CMD_TFT(object):
# command definitions
CMD_NOP = const(0x00) # No Operation
CMD_SWRESET = const(0x01) # Software reset
CMD_RDDID = const(0x04) # Read Display ID
CMD_RDDST = const(0x09) # Read Display Status
CMD_SLPIN = const(0x10) # Sleep in & booster off
CMD_SLPOUT = const(0x11) # Sleep out & booster on
CMD_PTLON = const(0x12) # Partial mode on
CMD_NORON = const(0x13) # Partial off (Normal)
CMD_INVOFF = const(0x20) # Display inversion off
CMD_INVON = const(0x21) # Display inversion on
CMD_DISPOFF = const(0x28) # Display off
CMD_DISPON = const(0x29) # Display on
CMD_CASET = const(0x2A) # Column address set
CMD_RASET = const(0x2B) # Row address set
CMD_RAMWR = const(0x2C) # Memory write
CMD_RAMRD = const(0x2E) # Memory read
CMD_PTLAR = const(0x30) # Partial start/end address set
CMD_COLMOD = const(0x3A) # Interface pixel format
CMD_MADCTL = const(0x36) # Memory data access control
CMD_RDID1 = const(0xDA) # Read ID1
CMD_RDID2 = const(0xDB) # Read ID2
CMD_RDID3 = const(0xDC) # Read ID3
CMD_RDID4 = const(0xDD) # Read ID4
# panel function commands
CMD_FRMCTR1 = const(0xB1) # In normal mode (Full colors)
CMD_FRMCTR2 = const(0xB2) # In Idle mode (8-colors)
CMD_FRMCTR3 = const(0xB3) # In partial mode + Full colors
CMD_INVCTR = const(0xB4) # Display inversion control
CMD_PWCTR1 = const(0xC0) # Power control settings
CMD_PWCTR2 = const(0xC1) # Power control settings
CMD_PWCTR3 = const(0xC2) # In normal mode (Full colors
CMD_PWCTR4 = const(0xC3) # In Idle mode (8-colors)
CMD_PWCTR5 = const(0xC4) # In partial mode + Full colors
CMD_VMCTR1 = const(0xC5) # VCOM control
CMD_GMCTRP1 = const(0xE0)
CMD_GMCTRN1 = const(0xE1)
def __init__(self):
"""
SPI - SPI Bus (CLK/MOSI/MISO)
DC - RS/DC data/command flag
CS - Chip Select, enable communication
RST/RES - Reset
BL/Lite - Backlight control
"""
# self.tab = tab
self.spi = SPI(1, baudrate=8000000, polarity=1, phase=0)
self.dc = Pin('D6', Pin.OUT, Pin.PULL_DOWN)
self.cs = Pin('A15', Pin.OUT, Pin.PULL_DOWN)
self.rst = Pin('D7', Pin.OUT, Pin.PULL_DOWN)
self.bl = Pin('A7', Pin.OUT, Pin.PULL_DOWN)
#self.spi, self.dc, self.cs, self.rst, self.bl
super().__init__()
# self.tab = tab
self.power_on = True
self.inverted = False
self.backlight_on = True
# default margins, set yours in HAL init
self.margin_row = 0
self.margin_col = 0
def _set_window(self, x0, y0, x1, y1):
"""
Set window frame boundaries.
Any pixels written to the display will start from this area.
"""
# set row XSTART/XEND
self.write_cmd(CMD_RASET)
self.write_data(bytearray([0x00, y0 + self.margin_row, 0x00, y1 + self.margin_row]))
# set column XSTART/XEND
self.write_cmd(CMD_CASET)
self.write_data(bytearray([0x00, x0 + self.margin_col, 0x00, x1 + self.margin_col]))
# write addresses to RAM
self.write_cmd(CMD_RAMWR)
class ST7735(CMD_TFT):
# colors
COLOR_BLACK = const(0x0000)
COLOR_BLUE = const(0x001F)
COLOR_RED = const(0xF800)
COLOR_GREEN = const(0x07E0)
COLOR_CYAN = const(0x07FF)
COLOR_MAGENTA = const(0xF81F)
COLOR_YELLOW = const(0xFFE0)
COLOR_WHITE = const(0xFFFF)
def init(self, orient=None):
# hard reset first
self.reset()
self.write_cmd(CMD_SWRESET)
time.sleep_ms(150)
self.write_cmd(CMD_SLPOUT)
time.sleep_ms(255)
# TODO: optimize data streams and delays
self.write_cmd(CMD_FRMCTR1)
self.write_data(bytearray([0x01, 0x2C, 0x2D]))
self.write_cmd(CMD_FRMCTR2)
self.write_data(bytearray([0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D]))
time.sleep_ms(10)
self.write_cmd(CMD_INVCTR)
self.write_data(bytearray([0x07]))
self.write_cmd(CMD_PWCTR1)
self.write_data(bytearray([0xA2, 0x02, 0x84]))
self.write_cmd(CMD_PWCTR2)
self.write_data(bytearray([0xC5]))
self.write_cmd(CMD_PWCTR3)
self.write_data(bytearray([0x8A, 0x00]))
self.write_cmd(CMD_PWCTR4)
self.write_data(bytearray([0x8A, 0x2A]))
self.write_cmd(CMD_PWCTR5)
self.write_data(bytearray([0x8A, 0xEE]))
self.write_cmd(CMD_VMCTR1)
self.write_data(bytearray([0x0E]))
self.write_cmd(CMD_INVOFF)
self.write_cmd(CMD_MADCTL)
        if orient is None:  # if None, the orientation is landscape
            self.write_data(bytearray([0xA0]))  # RGB; switch to landscape: MV=1 MX=0 MY=1
            self.width = 160  # screen size for the controller
self.height = 128
else:
self.write_data(bytearray([0x00]))
self.width = 128
self.height = 160
self.write_cmd(CMD_COLMOD)
self.write_data(bytearray([0x05]))
self.write_cmd(CMD_CASET)
self.write_data(bytearray([0x00, 0x01, 0x00, 127]))
self.write_cmd(CMD_RASET)
self.write_data(bytearray([0x00, 0x01, 0x00, 159]))
self.write_cmd(CMD_GMCTRP1)
self.write_data(bytearray([0x02, 0x1c, 0x07, 0x12, 0x37, 0x32,
0x29, 0x2d, 0x29, 0x25, 0x2b, 0x39, 0x00, 0x01, 0x03, 0x10]))
self.write_cmd(CMD_GMCTRN1)
self.write_data(bytearray([0x03, 0x1d, 0x07, 0x06, 0x2e, 0x2c,
0x29, 0x2d, 0x2e, 0x2e, 0x37, 0x3f, 0x00, 0x00, 0x02, 0x10]))
self.write_cmd(CMD_NORON)
time.sleep_ms(10)
self.write_cmd(CMD_DISPON)
time.sleep_ms(100)
    def show_image(self, path, x, y):
        """
        Draw an image from the filesystem at the given position.
        Assumes an uncompressed 24-bit BMP file (rows stored bottom-up).
        """
        with open(path, 'rb') as f:
            if f.read(2) != b'BM':
                return  # not a BMP file
            f.seek(10)
            offset = int.from_bytes(f.read(4), 'little')  # start of pixel data
            f.seek(18)
            width = int.from_bytes(f.read(4), 'little')
            height = int.from_bytes(f.read(4), 'little')
            row_size = (width * 3 + 3) & ~3  # rows are padded to 4 bytes
            self._set_window(x, y, x + width - 1, y + height - 1)
            self.dc.value(1)
            self.cs.value(0)
            for row in range(height):
                # BMP rows are stored bottom-up, so read the last row first
                f.seek(offset + (height - 1 - row) * row_size)
                data = f.read(width * 3)
                line = bytearray()
                for col in range(width):
                    b, g, r = data[col * 3], data[col * 3 + 1], data[col * 3 + 2]
                    color = self.rgbcolor(r, g, b)
                    line.append(color >> 8)
                    line.append(color & 0xFF)
                self.spi.write(line)
            self.cs.value(1)
def power(self, state=None):
"""
Get/set display power.
"""
if state is None:
return self.power_on
self.write_cmd(CMD_DISPON if state else CMD_DISPOFF)
self.power_on = state
def clear(self, color):
"""
Clear the display filling it with color.
"""
self.rect(0, 0, self.width, self.height, color)
def invert(self, state=None):
"""
Get/set display color inversion.
"""
if state is None:
return self.inverted
self.write_cmd(CMD_INVON if state else CMD_INVOFF)
self.inverted = state
def rgbcolor(self, r, g, b):
"""
Pack 24-bit RGB into 16-bit value.
"""
return ((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3)
def pixel(self, x, y, color):
"""
Draw a single pixel on the display with given color.
"""
self._set_window(x, y, x + 1, y + 1)
self.write_pixels(1, bytearray([color >> 8, color]))
def rect(self, x, y, w, h, color):
"""
Draw a rectangle with specified coordinates/size and fill with color.
"""
# check the coordinates and trim if necessary
if (x >= self.width) or (y >= self.height):
return
if (x + w - 1) >= self.width:
w = self.width - x
if (y + h - 1) >= self.height:
h = self.height - y
self._set_window(x, y, x + w - 1, y + h - 1)
self.write_pixels((w*h), bytearray([color >> 8, color]))
def line(self, x0, y0, x1, y1, color):
# line is vertical
if x0 == x1:
# use the smallest y
start, end = (x1, y1) if y1 < y0 else (x0, y0)
self.vline(start, end, abs(y1 - y0) + 1, color)
# line is horizontal
elif y0 == y1:
# use the smallest x
start, end = (x1, y1) if x1 < x0 else (x0, y0)
self.hline(start, end, abs(x1 - x0) + 1, color)
else:
# Bresenham's algorithm
dx = abs(x1 - x0)
dy = abs(y1 - y0)
inx = 1 if x1 - x0 > 0 else -1
iny = 1 if y1 - y0 > 0 else -1
# steep line
if (dx >= dy):
dy <<= 1
e = dy - dx
dx <<= 1
while (x0 != x1):
# draw pixels
self.pixel(x0, y0, color)
if (e >= 0):
y0 += iny
e -= dx
e += dy
x0 += inx
# not steep line
else:
dx <<= 1
e = dx - dy
dy <<= 1
while(y0 != y1):
# draw pixels
self.pixel(x0, y0, color)
if (e >= 0):
x0 += inx
e -= dy
e += dx
y0 += iny
def hline(self, x, y, w, color):
if (x >= self.width) or (y >= self.height):
return
if (x + w - 1) >= self.width:
w = self.width - x
self._set_window(x, y, x + w - 1, y)
        # the window spans w pixels (columns x .. x+w-1)
        self.write_pixels(w, bytearray([color >> 8, color]))
def vline(self, x, y, h, color):
if (x >= self.width) or (y >= self.height):
return
if (y + h -1) >= self.height:
h = self.height - y
self._set_window(x, y, x, y + h - 1)
        # the window spans h pixels (rows y .. y+h-1)
        self.write_pixels(h, bytearray([color >> 8, color]))
def text(self, x, y, string, color):
"""
Draw text at a given position using the user font.
Font can be scaled with the size parameter.
"""
z=font.terminalfont
width = z['width'] + 1
px = x
for c in string:
self.char(px, y, c, z, color, 1, 1)
px += width
# wrap the text to the next line if it reaches the end
if px + width > self.width:
y += z['height'] + 1
px = x
def char(self, x, y, char, font, color, sizex=1, sizey=1):
"""
Draw a character at a given position using the user font.
Font is a data dictionary, can be scaled with sizex and sizey.
"""
if font is None:
return
startchar = font['start']
endchar = font['end']
ci = ord(char)
if (startchar <= ci <= endchar):
width = font['width']
height = font['height']
ci = (ci - startchar) * width
ch = font['data'][ci:ci + width]
# no font scaling
px = x
if (sizex <= 1 and sizey <= 1):
for c in ch:
py = y
for _ in range(height):
if c & 0x01:
self.pixel(px, py, color)
py += 1
c >>= 1
px += 1
# scale to given sizes
else:
for c in ch:
py = y
for _ in range(height):
if c & 0x01:
self.rect(px, py, sizex, sizey, color)
py += sizey
c >>= 1
px += sizex
else:
# character not found in this font
return
def reset(self):
"""
Hard reset the display.
"""
self.dc.value(0)
self.rst.value(1)
time.sleep_ms(500)
self.rst.value(0)
time.sleep_ms(500)
self.rst.value(1)
time.sleep_ms(500)
def backlight(self, state=None):
"""
Get or set the backlight status if the pin is available.
"""
if self.bl is None:
return None
else:
if state is None:
return self.backlight_on
self.bl.value(1 if state else 0)
self.backlight_on = state
def write_pixels(self, count, color):
"""
Write pixels to the display.
count - total number of pixels
        color - 16-bit RGB565 value packed as a 2-byte bytearray (high byte first)
"""
self.dc.value(1)
self.cs.value(0)
for _ in range(count):
self.spi.write(color)
self.cs.value(1)
def write_cmd(self, cmd):
"""
Display command write implementation using SPI.
"""
self.dc.value(0)
self.cs.value(0)
self.spi.write(bytearray([cmd]))
self.cs.value(1)
def write_data(self, data):
"""
Display data write implementation using SPI.
"""
self.dc.value(1)
self.cs.value(0)
self.spi.write(data)
self.cs.value(1)
|
py
|
1a58100459ef1e4058b1227845aa678a9803f9f3
|
''' Conditional assignment is a structure used to simplify code,
 where the assigned value is the one that satisfies the condition.
<variable> = <value1> if <condition> else <value2>
var = 10 if (True) else 20
x = 10
texto = 'yes' if x == 10 else 'no'
print(texto)
x = 9
texto = 'yes' if x == 10 else 'no'
print(texto)'''
###################### PROGRAM 1 ###############
num1 = int(input("Enter a number:"))
s = 'even' if num1 % 2 == 0 else 'odd'
print('The number entered is:', s)
|
py
|
1a581088789105bec7a8ba41bee0db41148ce56d
|
from requests import HTTPError
from ..client_base import AuthClient
class ApiClient(AuthClient):
def get_status(self) -> str:
response = self.get("/status")
return response["message"]
def get_scheduler_address(self) -> str:
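        # Prefer the /scheduler_address endpoint; fall back to /scheduler_info
        # if the endpoint is missing or the response lacks the expected key.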
try:
response = self.get("/scheduler_address")
return response["message"]
except (KeyError, HTTPError):
response = self.get("/scheduler_info")
return response["address"]
def get_dashboard_link(self) -> str:
response = self.get("/dashboard_link")
return response["message"]
def get_scale(self) -> int:
response = self.get("/scale")
return int(response["message"])
def set_scale(self, n: int) -> None:
self.post("/scale", params={"n": n})
def set_adapt(self, minimum: int, maximum: int) -> None:
self.post("/adapt", params={"minimum": minimum, "maximum": maximum})
|
py
|
1a5811356864e0d1c14cecd62847ba58cee7fe8f
|
from django.contrib import admin
from .models import Order
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
list_display = ["name", "phone", "date"]
|
py
|
1a5812aa7bde89c72dcc53d2d08ad8f45ac1c743
|
from .base_general import BaseGeneral
from mltoolkit.mldp.steps.collectors import UnitCollector,\
BaseChunkCollector
class ChunkAccumulator(BaseGeneral):
"""
    The ChunkAccumulator step groups or changes the size of the data-chunks
    passed along the pipeline. The step does not alter the format of
    data-chunks, only their size.
    For example, one might want to use larger chunks (e.g. size of 500) for
    computational purposes (fast vectorized operations on large numpy arrays)
    but to train a model on smaller data-chunks (e.g. size of 64). In that case,
    the step should be added after all computationally intensive ones.
    It works in both directions: accumulating smaller upstream data-chunks into
    larger downstream data-chunks, and splitting larger upstream data-chunks
    into smaller downstream data-chunks.
    The adjuster uses chunk collectors, which can have different notions of size.
    For example, UnitCollector works as described above, while a more exotic
    collector could accumulate data-units that share the same 'id' field value
    and output a new chunk only after a sufficient number of unique ids has
    been collected.
"""
def __init__(self, collector=None, new_size=2, **kwargs):
"""
        :param collector: an object that accumulates data-chunks and yields
            data-chunks when it gets full.
        :param new_size: a parameter passed to the default collector object
            if no collector is provided.
        :param kwargs: self-explanatory.
"""
super(ChunkAccumulator, self).__init__(**kwargs)
if collector is None:
self.coll = UnitCollector(max_size=new_size)
else:
if not isinstance(collector, BaseChunkCollector):
raise TypeError("Please provide a valid collector that extends"
" the BaseChunkCollector class.")
self.coll = collector
def iter(self, data_chunk_iter):
"""
Wraps the data-chunk iterable into a generator that yields data-chunks
with the adjusted size.
"""
# in case iteration was not performed until the end, reset the collector
self.coll.reset()
for data_chunk in data_chunk_iter:
for adjusted_dc in self.coll.absorb_and_yield_if_full(data_chunk):
yield adjusted_dc
# yield the last (incomplete) chunk(s)
for adjusted_dc in self.coll.yield_remaining():
yield adjusted_dc
self.coll.reset()
def reset(self):
self.coll.reset()
|
py
|
1a5812b3f8b494d1ae0bdbdc53a81b7eda61d1a4
|
# -*- coding: utf-8 -*-
from paver.easy import *
@task
def test(options):
info("Running tests for Python 2")
sh('python2 tests.py')
info("Running tests for Python 3")
sh('python3 tests.py')
@task
def coverage(options):
info("Running coverage for Python 2")
sh('coverage2 run --source ldapom ./tests.py')
sh('coverage2 report')
info("Running coverage for Python 3")
sh('coverage3 run --source ldapom ./tests.py')
sh('coverage3 report')
|
py
|
1a5815adb232eaaf93d45ab388d61628d86e3b00
|
import json
import pytest
@pytest.mark.usefixtures("testapp")
class TestBuild:
def test_build_controller(self, testapp):
data = {
'user_name': 'root',
'repo_name': 'test',
'repo_provider': 'gitlab',
'gitlab_addr': 'http://localhost',
}
rv = testapp.post(
'/build/add',
data=json.dumps(data),
content_type='application/json')
assert rv.status_code == 200
assert b'{\n "message": "Build added successfully"\n}\n' in rv.data
def test_build_controller_fail(self, testapp):
data = {
'user_name': 'root',
'repo_provider': 'gitlab',
}
rv = testapp.post(
'/build/add',
data=json.dumps(data),
content_type='application/json')
assert rv.status_code == 401
assert b'{\n "message": "Invalid request"\n}\n' in rv.data
def test_build_controller_branch(self, testapp):
data = {
'repo_branch': 'testing',
}
rv = testapp.post(
'/build/add',
data=json.dumps(data),
content_type='application/json')
assert rv.status_code == 401
|
py
|
1a5815ced81b75723866ed7a073f0de84bd2133a
|
# Copyright 2003-2009 by Bartek Wilczynski. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Module containing different tools for sequence motif analysis.
It contains the core Motif class with various I/O methods
as well as methods for motif comparisons and motif searching in sequences.
It also includes functionality for parsing AlignACE and MEME programs.
"""
from Bio.Motif._Motif import Motif
from Bio.Motif.Parsers.AlignAce import read as _AlignAce_read
from Bio.Motif.Parsers.MEME import read as _MEME_read
from Bio.Motif.Thresholds import ScoreDistribution
_parsers={"AlignAce" : _AlignAce_read,
"MEME" : _MEME_read,
}
def _from_pfm(handle):
return Motif()._from_jaspar_pfm(handle)
def _from_sites(handle):
return Motif()._from_jaspar_sites(handle)
_readers={"jaspar-pfm": _from_pfm,
"jaspar-sites": _from_sites
}
def parse(handle,format):
"""Parses an output file of motif finding programs.
Currently supported formats:
- AlignAce
- MEME
You can also use single-motif formats, although the Bio.Motif.read()
function is simpler to use in this situation.
- jaspar-pfm
- jaspar-sites
For example:
>>> from Bio import Motif
>>> for motif in Motif.parse(open("Motif/alignace.out"),"AlignAce"):
... print motif.consensus()
TCTACGATTGAG
CTGCACCTAGCTACGAGTGAG
GTGCCCTAAGCATACTAGGCG
GCCACTAGCAGAGCAGGGGGC
CGACTCAGAGGTT
CCACGCTAAGAGAAGTGCCGGAG
GCACGTCCCTGAGCA
GTCCATCGCAAAGCGTGGGGC
GAGATCAGAGGGCCG
TGGACGCGGGG
GACCAGAGCCTCGCATGGGGG
AGCGCGCGTG
GCCGGTTGCTGTTCATTAGG
ACCGACGGCAGCTAAAAGGG
GACGCCGGGGAT
CGACTCGCGCTTACAAGG
"""
try:
parser=_parsers[format]
except KeyError:
try: #not a true parser, try reader formats
reader=_readers[format]
except:
raise ValueError("Wrong parser format")
else: #we have a proper reader
yield reader(handle)
else: # we have a proper reader
for m in parser(handle).motifs:
yield m
def read(handle,format):
"""Reads a motif from a handle using a specified file-format.
This supports the same formats as Bio.Motif.parse(), but
only for files containing exactly one record. For example,
reading a pfm file:
>>> from Bio import Motif
>>> motif = Motif.read(open("Motif/SRF.pfm"),"jaspar-pfm")
>>> motif.consensus()
Seq('GCCCATATATGG', IUPACUnambiguousDNA())
Or a single-motif MEME file,
>>> from Bio import Motif
>>> motif = Motif.read(open("Motif/meme.out"),"MEME")
>>> motif.consensus()
Seq('CTCAATCGTA', IUPACUnambiguousDNA())
If the handle contains no records, or more than one record,
an exception is raised:
>>> from Bio import Motif
>>> motif = Motif.read(open("Motif/alignace.out"),"AlignAce")
Traceback (most recent call last):
...
ValueError: More than one motif found in handle
If however you want the first record from a file containing
multiple records this function would raise an exception (as
shown in the example above). Instead use:
>>> from Bio import Motif
>>> motif = Motif.parse(open("Motif/alignace.out"),"AlignAce").next()
>>> motif.consensus()
Seq('TCTACGATTGAG', IUPACUnambiguousDNA())
Use the Bio.Motif.parse(handle, format) function if you want
to read multiple records from the handle.
"""
iterator = parse(handle, format)
try:
first = iterator.next()
except StopIteration:
first = None
if first is None:
raise ValueError("No motifs found in handle")
try:
second = iterator.next()
except StopIteration:
second = None
if second is not None:
raise ValueError("More than one motif found in handle")
return first
def _test():
"""Run the Bio.Motif module's doctests.
This will try and locate the unit tests directory, and run the doctests
from there in order that the relative paths used in the examples work.
"""
import doctest
import os
if os.path.isdir(os.path.join("..","..","Tests")):
print "Runing doctests..."
cur_dir = os.path.abspath(os.curdir)
os.chdir(os.path.join("..","..","Tests"))
doctest.testmod()
os.chdir(cur_dir)
del cur_dir
print "Done"
if __name__ == "__main__":
#Run the doctests
_test()
|
py
|
1a581808d0d1fb06087081040565fb67ecd7d577
|
import os
import subprocess
import time
import signal
__author__ = 'thurley'
def wait_timeout(proc, seconds):
"""Wait for a process to finish, or raise exception after timeout"""
start = time.time()
end = start + seconds
interval = 0.01
while True:
result = proc.poll()
#print "waiting"
if result is not None:
return result
if time.time() >= end:
os.killpg(proc.pid, signal.SIGTERM)
raise RuntimeError("Process timed out")
time.sleep(interval)
def run_with_timeout(seconds, *popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE,
preexec_fn=os.setsid, *popenargs, **kwargs)
retcode = wait_timeout(process, seconds)
output, unused_err = process.communicate()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
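# Illustrative usage (assumes a POSIX environment, since preexec_fn/os.setsid
# are used): run a command with a 5-second limit; a RuntimeError is raised if
# it does not finish in time.
#
#   out = run_with_timeout(5, ["echo", "hello"])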
|
py
|
1a5818ff3dbe292e5e6000aad4cf880469bf509b
|
# Generated by Django 2.2.8 on 2020-05-27 11:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('StreamServerApp', '0018_auto_20200207_1049'),
]
operations = [
migrations.CreateModel(
name='UserVideoHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.IntegerField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('video', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='StreamServerApp.Video')),
],
),
migrations.AddField(
model_name='video',
name='history',
field=models.ManyToManyField(through='StreamServerApp.UserVideoHistory', to=settings.AUTH_USER_MODEL),
),
]
|
py
|
1a58193dace8db49f4655ff2329bbe711e364f2c
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
# >.>.>.>.>.>.>.>.>.>.>.>.>.>.>.>.
# Licensed under the Apache License, Version 2.0 (the "License")
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# --- File Name: collect_results_tcvae.py
# --- Creation Date: 14-09-2020
# --- Last Modified: Mon 14 Sep 2020 01:56:01 AEST
# --- Author: Xinqi Zhu
# .<.<.<.<.<.<.<.<.<.<.<.<.<.<.<.<
"""
Collect results of tc_vae.
"""
import os
import json
import numpy as np
import argparse
import pandas as pd
from collections import OrderedDict
METRICS_TEMPLATE = {
'beta_vae_sklearn': {
"train_accuracy": None,
"eval_accuracy": None
},
'dci': {
"informativeness_train": None,
"informativeness_test": None,
"disentanglement": None,
"completeness": None
},
'downstream_task_boosted_trees': {},
'factor_vae_metric': {
"train_accuracy": None,
"eval_accuracy": None,
# "num_active_dims": None # disentanglement_lib wrong implementation.
},
'mig': {
"discrete_mig": None
},
'modularity_explicitness': {
"modularity_score": None,
"explicitness_score_train": None,
"explicitness_score_test": None
},
'sap_score': {
"SAP_score": None
},
'unsupervised': {
"gaussian_total_correlation": None,
"gaussian_wasserstein_correlation": None,
"gaussian_wasserstein_correlation_norm": None,
"mutual_info_score": None
}
}
def get_mean_std_for_config(v_ls, target):
'''
v_ls: [{'eval':0.8, ..}, {'eval': 0.7, ...}, ...]
target: 'eval'
'''
pure_ls = []
for item in v_ls:
if item is not None:
pure_ls.append(item[target])
return (None, None) if len(pure_ls) == 0 else (np.mean(pure_ls),
np.std(pure_ls))
def count_samples(x):
x = list(filter(None, x))
return len(x)
def get_moments(res_dict, template):
'''
Args: result dict for each config and seed:
{'0_0_0_0': [{'eval':0.8}, {'eval': 0.7}, ...]}
template of collected results:
{'eval': None, ...}
Return: mean and std of each config:
{'0_0_0_0': {'eval.mean': 0.75, 'eval.std': 0.05}, ...}
'''
res_dict_moments = {}
for k, v in res_dict.items():
res_dict_moments[k] = {}
for res_k in template.keys():
res_dict_moments[k][res_k+'.mean'], \
res_dict_moments[k][res_k+'.std'] \
= get_mean_std_for_config(v, res_k)
res_dict_moments[k]['n_samples'] = count_samples(v)
return res_dict_moments
def get_metric_result(subdir, metric, representation):
result_json = os.path.join(subdir, 'metrics', representation, metric,
'results/json/evaluation_results.json')
if os.path.exists(result_json):
with open(result_json, 'r') as f:
data = json.load(f)
return data
else:
return None
def get_hyps_seed(sub_path):
config_json = os.path.join(sub_path, 'model/results/json/train_config.json')
if os.path.exists(config_json):
with open(config_json, 'r') as f:
data = json.load(f)
return data['beta_tc_vae.beta'], data['model.random_seed']
else:
return None, None
def main():
parser = argparse.ArgumentParser(description='Project description.')
parser.add_argument('--results_dir',
help='Results directory.',
type=str,
default='/mnt/hdd/repo_results/Ramiel/sweep')
parser.add_argument('--metric',
help='Name of the collect metric.',
type=str,
default='factor_vae_metric',
choices=[
'beta_vae_sklearn', 'dci',
'downstream_task_boosted_trees',
'factor_vae_metric', 'mig',
'modularity_explicitness', 'sap_score',
'unsupervised'
])
parser.add_argument('--representation',
help='Representation used.',
type=str,
default='mean',
choices=['mean', 'sampled'])
# parser.add_argument('--overwrite',
# help='Whether to overwrite output directory.',
# type=_str_to_bool,
# default=False)
args = parser.parse_args()
subdirs = os.listdir(args.results_dir)
res_dict = {}
key_template = METRICS_TEMPLATE[args.metric]
for subdir in subdirs:
sub_path = os.path.join(args.results_dir, subdir)
if not os.path.isdir(sub_path):
continue
# parse_subdir = subdir.split('-')
# hyps = '-'.join(parse_subdir[1:-1])
# seed = parse_subdir[-1]
hyps, seed = get_hyps_seed(sub_path)
if hyps not in res_dict:
res_dict[hyps] = [None] * 10
# get result for this seed, a dictionary.
res_dict[hyps][int(seed)] = get_metric_result(sub_path, args.metric,
args.representation)
# {'0_0_0_0': {'eval.mean': 0.75, 'eval.std': 0.05, 'n_samples': 2}, ...}
res_dict = get_moments(res_dict, key_template)
col_heads = ['_config'] + list(res_dict[list(res_dict.keys())[0]].keys())
col_dicts = {k: [] for k in col_heads}
for k, v in res_dict.items():
col_dicts['_config'].append(k)
for k in col_dicts.keys():
if k != '_config':
col_dicts[k].append(v[k])
new_results = OrderedDict(sorted(col_dicts.items()))
results_df = pd.DataFrame(new_results)
print('results_df:', results_df)
results_df.to_csv(os.path.join(
args.results_dir,
'collected-' + args.metric + '-' + args.representation + '.csv'),
na_rep='-',
index=False,
float_format='%.3f')
if __name__ == "__main__":
main()
|
py
|
1a581a01a718d2c08b43e2083afddc4b8b501d65
|
from django.contrib.auth.base_user import BaseUserManager
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, usuario, password, **extra_fields):
if not usuario:
            raise ValueError('No email was provided')
usuario = self.normalize_email(usuario)
user = self.model(usuario=usuario, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, usuario, password=None, **extra_fields):
extra_fields.setdefault('is_superuser', False)
return self._create_user(usuario, password, **extra_fields)
def create_superuser(self, usuario, password, **extra_fields):
extra_fields.setdefault('is_superuser', True)
extra_fields.setdefault('is_staff', True)
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(usuario, password, **extra_fields)
|
py
|
1a581a7026c6790a32969fbaae3d28b278348448
|
import os
import random
import typing
from airports.airport import Airport, AirportType
from airports.airportstable import AirportsTable
from airports.download import download
from airports.runwaystable import RunwaysTable
from airports.wikipediahelper import get_wikipedia_articles
class DB:
def __init__(self) -> None:
self._airports: typing.Dict[str, Airport] = {}
self._large: typing.List[str] = []
self._medium: typing.List[str] = []
self._small: typing.List[str] = []
self._other: typing.List[str] = []
def load(self, cache_dir: str, reset_cache: bool) -> None:
airports_csv = os.path.join(cache_dir, "airports.csv")
runways_csv = os.path.join(cache_dir, "runways.csv")
wikipedia_json = os.path.join(cache_dir, "wikipedia_json")
if reset_cache:
for file_name in [airports_csv, runways_csv, wikipedia_json]:
if os.path.exists(file_name):
os.remove(file_name)
airports = AirportsTable(download("https://ourairports.com/data/airports.csv", airports_csv))
runways = RunwaysTable(download("https://ourairports.com/data/runways.csv", runways_csv))
articles = get_wikipedia_articles(wikipedia_json)
airports.add_wikipedia(articles)
airports.compute_bounds(runways.to_dict())
airports.check()
for airport in airports.good_airports():
self._airports[airport.icao_code()] = airport
if airport.airport_type() == AirportType.LARGE_AIRPORT:
self._large.append(airport.icao_code())
elif airport.airport_type() == AirportType.MEDIUM_AIRPORT:
self._medium.append(airport.icao_code())
elif airport.airport_type() == AirportType.SMALL_AIRPORT:
self._small.append(airport.icao_code())
else:
self._other.append(airport.icao_code())
def get_all_icaos(self) -> typing.List[str]:
return list(self._airports.keys())
def get(self, icao: str) -> typing.Optional[Airport]:
icao = icao.strip().upper()
if icao in self._airports:
return self._airports[icao]
return None
def get_random(self) -> Airport:
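        # Bias the pick toward bigger airports: ~50% large, ~25% medium,
        # ~12.5% small, and the remainder drawn from the full airport list.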
if random.choice([True, False]):
return self._airports[random.choice(self._large)]
if random.choice([True, False]):
return self._airports[random.choice(self._medium)]
if random.choice([True, False]):
return self._airports[random.choice(self._small)]
return self._airports[random.choice(list(self._airports.keys()))]
def get_random_list(self, count: int) -> typing.List[Airport]:
return random.sample(list(self._airports.values()), count)
def search(self, needle: str) -> typing.Optional[Airport]:
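        # Match priority: airport code first, then airport name, then location.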
needle = needle.strip().upper()
for airport in self._airports.values():
if airport.matches_code(needle):
return airport
for airport in self._airports.values():
if airport.matches_name(needle):
return airport
for airport in self._airports.values():
if airport.matches_location(needle):
return airport
return None
|
py
|
1a581bc010c694748309b815e7c9da83a76f111a
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Dict, Any, List, Optional
import torch.optim
from fairseq.dataclass import FairseqDataclass
from fairseq.optim import FairseqOptimizer, register_optimizer, _build_optimizer
from fairseq.optim.lr_scheduler import FairseqLRScheduler, build_lr_scheduler
from omegaconf import II, open_dict
logger = logging.getLogger(__name__)
@dataclass
class OptimizerAndSchedulerConfig(FairseqDataclass):
optimizer: Any = None
lr_scheduler: Optional[Any] = None
lr: List[float] = II("optimization.lr")
@dataclass
class CompositeOptimizerConfig(FairseqDataclass):
groups: Dict[str, OptimizerAndSchedulerConfig] = field(
default_factory=lambda: {},
metadata={
"help": "optimizer name -> optimizer OptimizerAndSchedulerConfig. "
"Configures a different optimizer and (optionally) lr scheduler for each parameter group"
},
)
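# Note (derived from FairseqCompositeOptimizer.__init__ below): parameters are
# routed to groups via their `param_group` attribute; a parameter without that
# attribute falls into the "default" group, and the group names must match the
# keys configured under `groups`.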
@register_optimizer("composite", dataclass=CompositeOptimizerConfig)
class FairseqCompositeOptimizer(FairseqOptimizer):
optimizers: Dict[str, FairseqOptimizer] = {}
lr_schedulers: Dict[str, FairseqLRScheduler] = {}
lr_scheduler: FairseqLRScheduler = None
_optimizer: torch.optim.Optimizer
def __init__(self, cfg: CompositeOptimizerConfig, params):
super().__init__(cfg)
assert (
len(params) > 1
), "Composite optimizer only works when there are multiple parameter groups (try fp16_no_flatten_grads: true)"
groupped_params = defaultdict(list)
for p in params:
group = getattr(p, "param_group", "default")
groupped_params[group].append(p)
assert groupped_params.keys() == cfg.groups.keys(), (
f"Parameter groups {groupped_params.keys()} and optimizer groups {cfg.groups.keys()} are not the same! "
"Try setting 'param_group' on your parameters in the model."
)
for group, group_params in groupped_params.items():
group_cfg = cfg.groups[group]
with open_dict(group_cfg):
group_cfg.optimizer.lr = group_cfg.lr
group_cfg.lr_scheduler.lr = group_cfg.lr
self.optimizers[group] = _build_optimizer(group_cfg.optimizer, group_params)
if group_cfg.lr_scheduler is not None:
self.lr_schedulers[group] = build_lr_scheduler(
group_cfg.lr_scheduler, self.optimizers[group]
)
if len(self.lr_schedulers) > 0:
assert len(self.lr_schedulers) == len(self.optimizers), (
f"Please provide an lr scheduler for each optimizer to use pass_through scheduler. "
f"Optimizers: {self.optimizers}; Lr scheds: {self.lr_schedulers}"
)
self.lr_scheduler = CompositeLRScheduler(self.lr_schedulers)
self._optimizer = CompositeOptimizer(self.optimizers)
@property
def supports_groups(self):
return True
@property
def param_groups(self):
for opt in self.optimizers.values():
for group in opt.param_groups:
yield group
def get_lr(self):
"""Return the current learning rate."""
k = (
"default"
if "default" in self.optimizers
else next(iter(self.optimizers.keys()))
)
return self.optimizers[k].param_groups[0]["lr"]
def state_dict(self):
"""Return the LR scheduler state dict."""
return {k: s.state_dict() for k, s in self.optimizers.items()}
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an LR scheduler state dict."""
for k, state in state_dict.items():
if k not in self.optimizers:
# skip extra keys like "loss_scale" added by fp16 optimizer
continue
overrides = (
optimizer_overrides[k]
if isinstance(optimizer_overrides, dict) and k in optimizer_overrides
else None
)
self.optimizers[k].load_state_dict(state, optimizer_overrides=overrides)
class CompositeOptimizer(torch.optim.Optimizer):
def __init__(self, optimizers: Dict[str, FairseqOptimizer]):
self.optimizers = optimizers
@property
def supports_memory_efficient_fp16(self):
return all(o.supports_memory_efficient_fp16 for o in self.optimizers.values())
@property
def supports_flat_params(self):
return all(o.supports_flat_params for o in self.optimizers.values())
def step(self, closure=None, groups=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for k, opt in self.optimizers.items():
if groups is None or k in groups:
opt.step()
return loss
def zero_grad(self):
for opt in self.optimizers.values():
opt.zero_grad()
class CompositeLRScheduler(FairseqLRScheduler):
def __init__(self, lr_schedulers):
super().__init__(None, None)
self.lr_schedulers = lr_schedulers
def state_dict(self):
"""Return the LR scheduler state dict."""
return {k: s.state_dict() for k, s in self.lr_schedulers.items()}
def load_state_dict(self, state_dict):
"""Load an LR scheduler state dict."""
for k, state in state_dict.items():
self.lr_schedulers[k].load_state_dict(state)
def step_begin_epoch(self, epoch):
"""Update the learning rate at the beginning of the given epoch."""
for s in self.lr_schedulers.values():
s.step_begin_epoch(epoch)
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
for s in self.lr_schedulers.values():
s.step(epoch)
def step_update(self, num_updates):
"""Update the learning rate after each update."""
return {k: s.step_update(num_updates) for k, s in self.lr_schedulers.items()}
|
py
|
1a581d1c7070c7f687e868215ddc71ef43cefb31
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Initialize the package for all test scripts."""
import logging
from logging import NullHandler
import os
import sys
logging.getLogger(__name__).addHandler(NullHandler())
sys.path.insert(0, os.path.dirname(os.path.abspath(__name__)))
import pyutil # noqa F401
|
py
|
1a581e9090c42b62d7db72c2453922fb110cf066
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from past.builtins import basestring
from collections import defaultdict, Counter
from datetime import datetime
import getpass
import logging
import socket
import multiprocessing
import os
import signal
import sys
import threading
import time
from time import sleep
import psutil
from sqlalchemy import Column, Integer, String, DateTime, func, Index, or_
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm.session import make_transient
from tabulate import tabulate
from airflow import executors, models, settings
from airflow import configuration as conf
from airflow.exceptions import AirflowException
from airflow.models import DagRun
from airflow.settings import Stats
from airflow.task_runner import get_task_runner
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, RUN_DEPS
from airflow.utils.state import State
from airflow.utils.db import provide_session, pessimistic_connection_handling
from airflow.utils.dag_processing import (AbstractDagFileProcessor,
DagFileProcessorManager,
SimpleDag,
SimpleDagBag,
list_py_file_paths)
from airflow.utils.email import send_email
from airflow.utils.logging import LoggingMixin
from airflow.utils import asciiart
Base = models.Base
ID_LEN = models.ID_LEN
class BaseJob(Base, LoggingMixin):
"""
Abstract class to be derived for jobs. Jobs are processing items with state
    and duration that aren't task instances. For instance, a BackfillJob is
    a collection of task instance runs, but should have its own state, start
    and end time.
"""
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN),)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(DateTime())
end_date = Column(DateTime())
latest_heartbeat = Column(DateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__mapper_args__ = {
'polymorphic_on': job_type,
'polymorphic_identity': 'BaseJob'
}
__table_args__ = (
Index('job_type_heart', job_type, latest_heartbeat),
)
def __init__(
self,
executor=executors.DEFAULT_EXECUTOR,
heartrate=conf.getfloat('scheduler', 'JOB_HEARTBEAT_SEC'),
*args, **kwargs):
self.hostname = socket.getfqdn()
self.executor = executor
self.executor_class = executor.__class__.__name__
self.start_date = datetime.now()
self.latest_heartbeat = datetime.now()
self.heartrate = heartrate
self.unixname = getpass.getuser()
super(BaseJob, self).__init__(*args, **kwargs)
def is_alive(self):
return (
            (datetime.now() - self.latest_heartbeat).total_seconds() <
(conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
)
def kill(self):
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.end_date = datetime.now()
try:
self.on_kill()
except:
self.logger.error('on_kill() method failed')
session.merge(job)
session.commit()
session.close()
raise AirflowException("Job shut down externally.")
def on_kill(self):
'''
Will be called when an external kill command is received
'''
pass
def heartbeat_callback(self, session=None):
pass
def heartbeat(self):
'''
        Heartbeats update the job's entry in the database with a timestamp
        for the latest_heartbeat and allow the job to be killed
        externally. This makes it possible to monitor, at the system level,
        what is actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
'''
session = settings.Session()
job = session.query(BaseJob).filter_by(id=self.id).one()
make_transient(job)
session.commit()
session.close()
if job.state == State.SHUTDOWN:
self.kill()
# Figure out how long to sleep for
sleep_for = 0
if job.latest_heartbeat:
sleep_for = max(
0,
self.heartrate - (datetime.now() - job.latest_heartbeat).total_seconds())
# Don't keep session open while sleeping as it leaves a connection open
session.close()
sleep(sleep_for)
# Update last heartbeat time
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.latest_heartbeat = datetime.now()
session.merge(job)
session.commit()
self.heartbeat_callback(session=session)
session.close()
self.logger.debug('[heart] Boom.')
def run(self):
Stats.incr(self.__class__.__name__.lower() + '_start', 1, 1)
# Adding an entry in the DB
session = settings.Session()
self.state = State.RUNNING
session.add(self)
session.commit()
id_ = self.id
make_transient(self)
self.id = id_
# Run
self._execute()
# Marking the success in the DB
self.end_date = datetime.now()
self.state = State.SUCCESS
session.merge(self)
session.commit()
session.close()
Stats.incr(self.__class__.__name__.lower() + '_end', 1, 1)
def _execute(self):
raise NotImplementedError("This method needs to be overridden")
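# Illustration only (a minimal sketch, not used anywhere in this module): the
# sleep rule described in BaseJob.heartbeat(), i.e. sleep only for whatever is
# left of the heartrate window, and not at all if the window has already passed.
def _example_heartbeat_sleep(heartrate, seconds_since_last_heartbeat):
    # e.g. heartrate=60 and 10s of work since the last beat: sleep 50s;
    # 60s or more of work since the last beat: don't sleep at all.
    return max(0, heartrate - seconds_since_last_heartbeat)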
class DagFileProcessor(AbstractDagFileProcessor):
"""Helps call SchedulerJob.process_file() in a separate process."""
    # Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_id_white_list, log_file):
"""
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: unicode
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
        :param dag_id_white_list: If specified, only look at these DAG IDs
        :type dag_id_white_list: list[unicode]
:param log_file: the path to the file where log lines should be output
:type log_file: unicode
"""
self._file_path = file_path
self._log_file = log_file
# Queue that's used to pass results from the child process.
self._result_queue = multiprocessing.Queue()
        # The process that was launched to process the given file.
self._process = None
self._dag_id_white_list = dag_id_white_list
self._pickle_dags = pickle_dags
# The result of Scheduler.process_file(file_path).
self._result = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time = None
# This ID is use to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessor.class_creation_counter
DagFileProcessor.class_creation_counter += 1
@property
def file_path(self):
return self._file_path
@property
def log_file(self):
return self._log_file
@staticmethod
def _launch_process(result_queue,
file_path,
pickle_dags,
dag_id_white_list,
thread_name,
log_file):
"""
Launch a process to process the given file.
:param result_queue: the queue to use for passing back the result
:type result_queue: multiprocessing.Queue
:param file_path: the file to process
:type file_path: unicode
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_id_white_list: if specified, only examine DAG ID's that are
in this list
:type dag_id_white_list: list[unicode]
:param thread_name: the name to use for the process that is launched
:type thread_name: unicode
:param log_file: the logging output for the process should be directed
to this file
:type log_file: unicode
:return: the process that was launched
:rtype: multiprocessing.Process
"""
def helper():
# This helper runs in the newly created process
# Re-direct stdout and stderr to a separate log file. Otherwise,
# the main log becomes too hard to read. No buffering to enable
# responsive file tailing
parent_dir, _ = os.path.split(log_file)
# Create the parent directory for the log file if necessary.
if not os.path.isdir(parent_dir):
os.makedirs(parent_dir)
f = open(log_file, "a")
original_stdout = sys.stdout
original_stderr = sys.stderr
sys.stdout = f
sys.stderr = f
try:
# Re-configure logging to use the new output streams
log_format = settings.LOG_FORMAT_WITH_THREAD_NAME
settings.configure_logging(log_format=log_format)
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
logging.info("Started process (PID=%s) to work on %s",
os.getpid(),
file_path)
scheduler_job = SchedulerJob(dag_ids=dag_id_white_list)
result = scheduler_job.process_file(file_path,
pickle_dags)
result_queue.put(result)
end_time = time.time()
logging.info("Processing %s took %.3f seconds",
file_path,
end_time - start_time)
except:
# Log exceptions through the logging framework.
logging.exception("Got an exception! Propagating...")
raise
finally:
sys.stdout = original_stdout
sys.stderr = original_stderr
f.close()
p = multiprocessing.Process(target=helper,
args=(),
name="{}-Process".format(thread_name))
p.start()
return p
def start(self):
"""
Launch the process and start processing the DAG.
"""
self._process = DagFileProcessor._launch_process(
self._result_queue,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self.log_file)
self._start_time = datetime.now()
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call stop before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._process.terminate()
# Arbitrarily wait 5s for the process to die
self._process.join(5)
if sigkill and self._process.is_alive():
logging.warn("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
self._done = True
logging.debug("Waiting for %s", self._process)
self._process.join()
return True
# Potential error case when process dies
if not self._process.is_alive():
self._done = True
# Get the object from the queue or else join() can hang.
if not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
logging.debug("Waiting for %s", self._process)
self._process.join()
return True
return False
@property
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: SimpleDag
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
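# Illustration only (a minimal sketch, not called by Airflow itself): the
# lifecycle the scheduler drives for each DagFileProcessor: start it, poll the
# `done` property, then read `result` and `exit_code`. The file and log paths
# below are hypothetical placeholders.
def _example_process_one_file():
    processor = DagFileProcessor(
        file_path="/path/to/dags/example_dag.py",
        pickle_dags=False,
        dag_id_white_list=[],
        log_file="/tmp/dag_processing/example_dag.log")
    processor.start()
    while not processor.done:
        sleep(0.5)
    return processor.result, processor.exit_code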
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=models.DAGS_FOLDER,
num_runs=-1,
file_process_interval=conf.getint('scheduler',
'min_file_process_interval'),
processor_poll_interval=1.0,
run_duration=None,
do_pickle=False,
*args, **kwargs):
"""
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: unicode
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[unicode]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: unicode
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited within the run_duration.
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:param run_duration: how long to run (in seconds) before exiting
:type run_duration: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
self.num_runs = num_runs
self.run_duration = run_duration
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super(SchedulerJob, self).__init__(*args, **kwargs)
self.heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
self.max_threads = min(conf.getint('scheduler', 'max_threads'), multiprocessing.cpu_count())
self.using_sqlite = False
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
if self.max_threads > 1:
self.logger.error("Cannot use more than 1 thread when using sqlite. Setting max_threads to 1")
self.max_threads = 1
self.using_sqlite = True
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler',
'dag_dir_list_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler',
'print_stats_interval')
# Parse and schedule each file no faster than this interval. Default
# to 3 minutes.
self.file_process_interval = file_process_interval
# Directory where log files for the processes that scheduled the DAGs reside
self.child_process_log_directory = conf.get('scheduler',
'child_process_log_directory')
if run_duration is None:
self.run_duration = conf.getint('scheduler',
'run_duration')
@provide_session
def manage_slas(self, dag, session=None):
"""
        Finds all tasks that have SLAs defined and sends alert emails where
        needed. New SLA misses are also recorded in the database.
        We assume that the scheduler runs often, so we only check for
        tasks that should have succeeded in the past hour.
"""
if not any([ti.sla for ti in dag.tasks]):
self.logger.info("Skipping SLA check for {} because "
"no tasks in DAG have SLAs".format(dag))
return
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(TI.state == State.SUCCESS)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = datetime.now()
SlaMiss = models.SlaMiss
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if task.sla:
dttm = dag.following_schedule(dttm)
while dttm < datetime.now():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < datetime.now():
session.merge(models.SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(or_(SlaMiss.email_sent == False,
SlaMiss.notification_sent == False))
.filter(SlaMiss.dag_id == dag.dag_id)
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(TI.state != State.SUCCESS)
.filter(TI.execution_date.in_(sla_dates))
.filter(TI.dag_id == dag.dag_id)
.all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.logger.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
notification_sent = True
email_content = """\
            Here's a list of tasks that missed their SLAs:
            <pre><code>{task_list}\n</code></pre>
            Blocking tasks:
            <pre><code>{blocking_task_list}\n{bug}</code></pre>
""".format(bug=asciiart.bug, **locals())
emails = []
for t in dag.tasks:
if t.email:
if isinstance(t.email, basestring):
l = [t.email]
elif isinstance(t.email, (list, tuple)):
l = t.email
for email in l:
if email not in emails:
emails.append(email)
if emails and len(slas):
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
session.close()
@staticmethod
@provide_session
def clear_nonexistent_import_errors(session, known_file_paths):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param known_file_paths: The list of existing files that are parsed for DAGs
:type known_file_paths: list[unicode]
"""
session.query(models.ImportError).filter(
~models.ImportError.filename.in_(known_file_paths)
).delete(synchronize_session='fetch')
session.commit()
@staticmethod
def update_import_errors(session, dagbag):
"""
        For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: models.Dagbag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(models.ImportError).filter(
models.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
        for filename, stacktrace in dagbag.import_errors.items():
session.add(models.ImportError(
filename=filename,
stacktrace=stacktrace))
session.commit()
@provide_session
def create_dag_run(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval:
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < datetime.now() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = datetime.now()
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False,
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
))
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
            # don't do scheduler catchup for DAGs that don't have dag.catchup = True
if not dag.catchup:
# The logic is that we move start_date up until
# one period before, so that datetime.now() is AFTER
# the period end, and the job can be created...
now = datetime.now()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now:
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.logger.debug("Next run date based on tasks {}"
.format(next_run_date))
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.logger.debug("Dag start date: {}. Next run date: {}"
.format(dag.start_date, next_run_date))
# don't ever schedule in the future
if next_run_date > datetime.now():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= datetime.now():
next_run = dag.create_dagrun(
run_id='scheduled__' + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=datetime.now(),
state=State.RUNNING,
external_trigger=False
)
return next_run
def _process_task_instances(self, dag, queue):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
session = settings.Session()
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.logger.info("Examining DAG run {}".format(run))
# don't consider runs that are executed in the future
if run.execution_date > datetime.now():
self.logger.error("Execution date is in future: {}"
.format(run.execution_date))
continue
if len(active_dag_runs) >= dag.max_active_runs:
self.logger.info("Active dag runs > max_active_run.")
continue
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
run.update_state(session=session)
if run.state == State.RUNNING:
make_transient(run)
active_dag_runs.append(run)
for run in active_dag_runs:
self.logger.debug("Examining active DAG run {}".format(run))
            # this needs a fresh session; sometimes TIs get detached
tis = run.get_task_instances(state=(State.NONE,
State.UP_FOR_RETRY))
# this loop is quite slow as it uses are_dependencies_met for
# every task (in ti.is_runnable). This is also called in
# update_state above which has already checked these tasks
for ti in tis:
task = dag.get_task(ti.task_id)
# fixme: ti.task is transient but needs to be set
ti.task = task
# future: remove adhoc
if task.adhoc:
continue
if ti.are_dependencies_met(
dep_context=DepContext(flag_upstream_failed=True),
session=session):
self.logger.debug('Queuing task: {}'.format(ti))
queue.append(ti.key)
session.close()
@provide_session
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
exists but is not in the running state. This normally should not
        happen, but it can if the state of a DagRun is changed manually.
        :param old_states: examine TaskInstances in these states
        :type old_states: list[State]
:param new_state: set TaskInstances to this state
:type new_state: State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_state will be examined
:type simple_dag_bag: SimpleDagBag
"""
task_instances_to_change = (
session
.query(models.TaskInstance)
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids))
.filter(models.TaskInstance.state.in_(old_states))
.with_for_update()
.all()
)
""":type: list[TaskInstance]"""
for task_instance in task_instances_to_change:
dag_runs = DagRun.find(dag_id=task_instance.dag_id,
execution_date=task_instance.execution_date,
)
if len(dag_runs) == 0:
self.logger.warn("DagRun for %s %s does not exist",
task_instance.dag_id,
task_instance.execution_date)
continue
# There should only be one DAG run. Add some logging info if this
# is not the case for later debugging.
if len(dag_runs) > 1:
self.logger.warn("Multiple DagRuns found for {} {}: {}"
.format(task_instance.dag_id,
task_instance.execution_date,
dag_runs))
if not any(dag_run.state == State.RUNNING for dag_run in dag_runs):
self.logger.warn("Setting {} to state={} as it does not have "
"a DagRun in the {} state"
.format(task_instance,
new_state,
State.RUNNING))
task_instance.state = new_state
session.merge(task_instance)
session.commit()
@provide_session
def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Fetches task instances from ORM in the specified states, figures
out pool limits, and sends them to the executor for execution.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: SimpleDagBag
:param executor: the executor that runs task instances
:type executor: BaseExecutor
:param states: Execute TaskInstances in these states
:type states: Tuple[State]
:return: None
"""
        # Get all the task instances in the given states that are associated
        # with scheduled DagRuns.
TI = models.TaskInstance
task_instances_to_examine = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.filter(TI.state.in_(states))
.all()
)
# Put one task instance on each line
if len(task_instances_to_examine) == 0:
self.logger.info("No tasks to send to the executor")
return
task_instance_str = "\n\t".join(
["{}".format(x) for x in task_instances_to_examine])
self.logger.info("Tasks up for execution:\n\t{}".format(task_instance_str))
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
for pool, task_instances in pool_to_task_instances.items():
if not pool:
# Arbitrary:
# If queued outside of a pool, trigger no more than
# non_pooled_task_slot_count per run
open_slots = conf.getint('core', 'non_pooled_task_slot_count')
else:
open_slots = pools[pool].open_slots(session=session)
num_queued = len(task_instances)
self.logger.info("Figuring out tasks to run in Pool(name={pool}) "
"with {open_slots} open slots and {num_queued} "
"task instances in queue".format(**locals()))
if open_slots <= 0:
continue
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
# DAG IDs with running tasks that equal the concurrency limit of the dag
dag_id_to_running_task_count = {}
for task_instance in priority_sorted_task_instances:
if open_slots <= 0:
self.logger.info("No more slots free")
# Can't schedule any more since there are no more open slots.
break
if self.executor.has_task(task_instance):
self.logger.debug("Not handling task {} as the executor reports it is running"
.format(task_instance.key))
continue
if simple_dag_bag.get_dag(task_instance.dag_id).is_paused:
self.logger.info("Not executing queued {} since {} is paused"
.format(task_instance, task_instance.dag_id))
continue
# todo: remove this logic when backfills will be part of the scheduler
dag_run = task_instance.get_dagrun()
if dag_run and dag_run.is_backfill:
continue
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
if dag_id not in dag_id_to_running_task_count:
dag_id_to_running_task_count[dag_id] = \
DagRun.get_running_tasks(
session,
dag_id,
simple_dag_bag.get_dag(dag_id).task_ids)
current_task_concurrency = dag_id_to_running_task_count[dag_id]
task_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.logger.info("DAG {} has {}/{} running tasks"
.format(dag_id,
current_task_concurrency,
task_concurrency_limit))
if current_task_concurrency > task_concurrency_limit:
self.logger.info("Not executing {} since the number "
"of tasks running from DAG {} is >= to the "
"DAG's task concurrency limit of {}"
.format(task_instance,
dag_id,
task_concurrency_limit))
continue
command = " ".join(TI.generate_command(
task_instance.dag_id,
task_instance.task_id,
task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=task_instance.pool,
file_path=simple_dag_bag.get_dag(task_instance.dag_id).full_filepath,
pickle_id=simple_dag_bag.get_dag(task_instance.dag_id).pickle_id))
priority = task_instance.priority_weight
queue = task_instance.queue
self.logger.info("Sending to executor {} with priority {} and queue {}"
.format(task_instance.key, priority, queue))
# Set the state to queued
task_instance.refresh_from_db(lock_for_update=True, session=session)
if task_instance.state not in states:
self.logger.info("Task {} was set to {} outside this scheduler."
.format(task_instance.key, task_instance.state))
session.commit()
continue
self.logger.info("Setting state of {} to {}".format(
task_instance.key, State.QUEUED))
task_instance.state = State.QUEUED
task_instance.queued_dttm = (datetime.now()
if not task_instance.queued_dttm
else task_instance.queued_dttm)
session.merge(task_instance)
session.commit()
# These attributes will be lost after the object expires, so save them.
task_id_ = task_instance.task_id
dag_id_ = task_instance.dag_id
execution_date_ = task_instance.execution_date
make_transient(task_instance)
task_instance.task_id = task_id_
task_instance.dag_id = dag_id_
task_instance.execution_date = execution_date_
self.executor.queue_command(
task_instance,
command,
priority=priority,
queue=queue)
open_slots -= 1
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs.
:param dagbag: a collection of DAGs to process
:type dagbag: models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: DAG
:param tis_out: A queue to add generated TaskInstance objects
:type tis_out: multiprocessing.Queue[TaskInstance]
:return: None
"""
        for dag in dags:
            dag_id = dag.dag_id
            dag = dagbag.get_dag(dag_id)
            if not dag:
                self.logger.error("DAG ID {} was not found in the DagBag"
                                  .format(dag_id))
                continue
            if dag.is_paused:
                self.logger.info("Not processing DAG {} since it's paused"
                                 .format(dag_id))
                continue
self.logger.info("Processing {}".format(dag.dag_id))
dag_run = self.create_dag_run(dag)
if dag_run:
self.logger.info("Created {}".format(dag_run))
self._process_task_instances(dag, tis_out)
self.manage_slas(dag)
models.DagStat.clean_dirty([d.dag_id for d in dags])
def _process_executor_events(self):
"""
Respond to executor events.
:param executor: the executor that's running the task instances
:type executor: BaseExecutor
:return: None
"""
for key, executor_state in list(self.executor.get_event_buffer().items()):
dag_id, task_id, execution_date = key
self.logger.info("Executor reports {}.{} execution_date={} as {}"
.format(dag_id,
task_id,
execution_date,
executor_state))
def _log_file_processing_stats(self,
known_file_paths,
processor_manager):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:param processor_manager: manager for the file processors
        :type processor_manager: DagFileProcessorManager
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"Last Runtime",
"Last Run"]
rows = []
for file_path in known_file_paths:
last_runtime = processor_manager.get_last_runtime(file_path)
processor_pid = processor_manager.get_pid(file_path)
processor_start_time = processor_manager.get_start_time(file_path)
runtime = ((datetime.now() - processor_start_time).total_seconds()
if processor_start_time else None)
last_run = processor_manager.get_last_finish_time(file_path)
rows.append((file_path,
processor_pid,
runtime,
last_runtime,
last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[3] or 0.0)
formatted_rows = []
for file_path, pid, runtime, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime)
if runtime else None,
"{:.2f}s".format(last_runtime)
if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S")
if last_run else None))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.logger.info(log_str)
@provide_session
def _reset_state_for_orphaned_tasks(self, dag_run, session=None):
"""
This function checks for a DagRun if there are any tasks
that have a scheduled state but are not known by the
executor. If it finds those it will reset the state to None
so they will get picked up again.
"""
queued_tis = self.executor.queued_tasks
# also consider running as the state might not have changed in the db yet
running = self.executor.running
tis = list()
tis.extend(dag_run.get_task_instances(state=State.SCHEDULED, session=session))
tis.extend(dag_run.get_task_instances(state=State.QUEUED, session=session))
for ti in tis:
if ti.key not in queued_tis and ti.key not in running:
self.logger.debug("Rescheduling orphaned task {}".format(ti))
ti.state = State.NONE
session.commit()
def _execute(self):
self.logger.info("Starting the scheduler")
pessimistic_connection_handling()
logging.basicConfig(level=logging.DEBUG)
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = False
if self.do_pickle and self.executor.__class__ not in \
(executors.LocalExecutor, executors.SequentialExecutor):
pickle_dags = True
# Use multiple processes to parse and generate tasks for the
# DAGs in parallel. By processing them in separate processes,
# we can get parallelism and isolation from potentially harmful
# user code.
self.logger.info("Processing files using up to {} processes at a time "
.format(self.max_threads))
self.logger.info("Running execute loop for {} seconds"
.format(self.run_duration))
self.logger.info("Processing each file at most {} times"
.format(self.num_runs))
self.logger.info("Process each file at most once every {} seconds"
.format(self.file_process_interval))
self.logger.info("Checking for new files in {} every {} seconds"
.format(self.subdir, self.dag_dir_list_interval))
# Build up a list of Python files that could contain DAGs
self.logger.info("Searching for files in {}".format(self.subdir))
known_file_paths = list_py_file_paths(self.subdir)
self.logger.info("There are {} files in {}"
.format(len(known_file_paths), self.subdir))
def processor_factory(file_path, log_file_path):
return DagFileProcessor(file_path,
pickle_dags,
self.dag_ids,
log_file_path)
processor_manager = DagFileProcessorManager(self.subdir,
known_file_paths,
self.max_threads,
self.file_process_interval,
self.child_process_log_directory,
self.num_runs,
processor_factory)
try:
self._execute_helper(processor_manager)
finally:
self.logger.info("Exited execute loop")
# Kill all child processes on exit since we don't want to leave
# them as orphaned.
pids_to_kill = processor_manager.get_all_pids()
if len(pids_to_kill) > 0:
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
for child in child_processes:
self.logger.info("Terminating child PID: {}".format(child.pid))
child.terminate()
timeout = 5
self.logger.info("Waiting up to {}s for processes to exit..."
.format(timeout))
try:
psutil.wait_procs(child_processes, timeout)
except psutil.TimeoutExpired:
self.logger.debug("Ran out of time while waiting for "
"processes to exit")
# Then SIGKILL
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
if len(child_processes) > 0:
for child in child_processes:
self.logger.info("Killing child PID: {}".format(child.pid))
child.kill()
child.wait()
def _execute_helper(self, processor_manager):
"""
:param processor_manager: manager to use
:type processor_manager: DagFileProcessorManager
:return: None
"""
self.executor.start()
session = settings.Session()
self.logger.info("Resetting state for orphaned tasks")
# grab orphaned tasks and make sure to reset their state
active_runs = DagRun.find(
state=State.RUNNING,
external_trigger=False,
session=session
)
for dr in active_runs:
self.logger.info("Resetting {} {}".format(dr.dag_id,
dr.execution_date))
self._reset_state_for_orphaned_tasks(dr, session=session)
session.close()
execute_start_time = datetime.now()
# Last time stats were printed
last_stat_print_time = datetime(2000, 1, 1)
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = datetime.now()
# Last time that the DAG dir was traversed to look for files
last_dag_dir_refresh_time = datetime.now()
# Use this value initially
known_file_paths = processor_manager.file_paths
# For the execute duration, parse and schedule DAGs
while (datetime.now() - execute_start_time).total_seconds() < \
self.run_duration or self.run_duration < 0:
self.logger.debug("Starting Loop...")
loop_start_time = time.time()
# Traverse the DAG directory for Python files containing DAGs
# periodically
elapsed_time_since_refresh = (datetime.now() -
last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.logger.info("Searching for files in {}".format(self.subdir))
known_file_paths = list_py_file_paths(self.subdir)
last_dag_dir_refresh_time = datetime.now()
self.logger.info("There are {} files in {}"
.format(len(known_file_paths), self.subdir))
processor_manager.set_file_paths(known_file_paths)
self.logger.debug("Removing old import errors")
self.clear_nonexistent_import_errors(known_file_paths=known_file_paths)
            # Kick off new processes and collect results from finished ones
self.logger.info("Heartbeating the process manager")
simple_dags = processor_manager.heartbeat()
if self.using_sqlite:
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.logger.debug("Waiting for processors to finish since we're "
"using sqlite")
processor_manager.wait_until_finished()
# Send tasks for execution if available
if len(simple_dags) > 0:
simple_dag_bag = SimpleDagBag(simple_dags)
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued, but the corresponding
# DAG run isn't running, set the state to NONE so we don't try to
# re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED],
State.NONE)
self._execute_task_instances(simple_dag_bag,
(State.SCHEDULED,))
            # Call heartbeats
self.logger.info("Heartbeating the executor")
self.executor.heartbeat()
# Process events from the executor
self._process_executor_events()
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (datetime.now() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.logger.info("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = datetime.now()
# Occasionally print out stats about how fast the files are getting processed
if ((datetime.now() - last_stat_print_time).total_seconds() >
self.print_stats_interval):
if len(known_file_paths) > 0:
self._log_file_processing_stats(known_file_paths,
processor_manager)
last_stat_print_time = datetime.now()
loop_end_time = time.time()
self.logger.debug("Ran scheduling loop in {:.2f}s"
.format(loop_end_time - loop_start_time))
self.logger.debug("Sleeping for {:.2f}s"
.format(self._processor_poll_interval))
time.sleep(self._processor_poll_interval)
            # Exit early if all files have hit their max run count (used in test mode)
if processor_manager.max_runs_reached():
self.logger.info("Exiting loop as all files have been processed "
"{} times".format(self.num_runs))
break
# Stop any processors
processor_manager.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
all_files_processed = True
for file_path in known_file_paths:
if processor_manager.get_last_finish_time(file_path) is None:
all_files_processed = False
break
if all_files_processed:
self.logger.info("Deactivating DAGs that haven't been touched since {}"
.format(execute_start_time.isoformat()))
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
@provide_session
def process_file(self, file_path, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: unicode
        :param pickle_dags: whether to serialize the DAGs found in the file and
            save them to the DB
:type pickle_dags: bool
:return: a list of SimpleDags made from the Dags found in the file
:rtype: list[SimpleDag]
"""
self.logger.info("Processing file {} for tasks to queue".format(file_path))
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path)
except Exception:
self.logger.exception("Failed at reloading the DAG file {}".format(file_path))
Stats.incr('dag_file_refresh_error', 1, 1)
return []
if len(dagbag.dags) > 0:
self.logger.info("DAG(s) {} retrieved from {}"
.format(dagbag.dags.keys(),
file_path))
else:
self.logger.warn("No viable dags retrieved from {}".format(file_path))
self.update_import_errors(session, dagbag)
return []
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
sync_time = datetime.now()
for dag in dagbag.dags.values():
models.DAG.sync_to_db(dag, dag.owner, sync_time)
paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values()
if dag.is_paused]
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id in dagbag.dags:
dag = dagbag.get_dag(dag_id)
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
task_ids = [task.task_id for task in dag.tasks]
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
simple_dags.append(SimpleDag(dag.dag_id,
task_ids,
dag.full_filepath,
dag.concurrency,
dag.is_paused,
pickle_id))
if len(self.dag_ids) > 0:
dags = [dag for dag in dagbag.dags.values()
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dagbag.dags.values()
if not dag.parent_dag and
dag.dag_id not in paused_dag_ids]
        # Not using multiprocessing.Queue() since this is no longer a separate
        # process, and because of some unusual behavior (empty() incorrectly
        # returns True?)
ti_keys_to_schedule = []
self._process_dags(dagbag, dags, ti_keys_to_schedule)
for ti_key in ti_keys_to_schedule:
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = models.TaskInstance(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
# We can defer checking the task dependency checks to the worker themselves
# since they can be expensive to run in the scheduler.
dep_context = DepContext(deps=QUEUE_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
            # a task that recently got its state changed to RUNNING from somewhere
            # other than the scheduler from getting its state overwritten.
# TODO(aoen): It's not great that we have to check all the task instance
# dependencies twice; once to get the task scheduled, and again to actually
# run the task. We should try to come up with a way to only check them once.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
self.logger.info("Creating / updating {} in ORM".format(ti))
session.merge(ti)
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.logger.exception("Error logging import errors!")
try:
dagbag.kill_zombies()
except Exception:
self.logger.exception("Error killing zombies!")
return simple_dags
@provide_session
def heartbeat_callback(self, session=None):
Stats.gauge('scheduler_heartbeat', 1, 1)
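# Illustration only (a minimal sketch, not part of this module): this is
# roughly what the `airflow scheduler` command does, i.e. build a SchedulerJob
# and call run(), which drives _execute()/_execute_helper() above. The subdir
# below is a hypothetical placeholder.
def _example_run_scheduler():
    job = SchedulerJob(
        subdir="/path/to/dags",
        num_runs=1,       # parse each file at most once, handy for smoke tests
        run_duration=60)  # leave the execute loop after roughly a minute
    job.run()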
class BackfillJob(BaseJob):
"""
A backfill job consists of a dag or subdag for a specific time range. It
triggers a set of task instance runs, in the right order and lasts for
as long as it takes for the set of task instance to be completed.
"""
__mapper_args__ = {
'polymorphic_identity': 'BackfillJob'
}
def __init__(
self,
dag,
start_date=None,
end_date=None,
mark_success=False,
include_adhoc=False,
donot_pickle=False,
ignore_first_depends_on_past=False,
ignore_task_deps=False,
pool=None,
*args, **kwargs):
self.dag = dag
self.dag_id = dag.dag_id
self.bf_start_date = start_date
self.bf_end_date = end_date
self.mark_success = mark_success
self.include_adhoc = include_adhoc
self.donot_pickle = donot_pickle
self.ignore_first_depends_on_past = ignore_first_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.pool = pool
super(BackfillJob, self).__init__(*args, **kwargs)
def _execute(self):
"""
Runs a dag for a specified date range.
"""
session = settings.Session()
DagRun = models.DagRun
# consider max_active_runs but ignore when running subdags
# "parent.child" as a dag_id is by convention a subdag
if self.dag.schedule_interval and "." not in self.dag.dag_id:
active_runs = DagRun.find(
dag_id=self.dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs
if len(active_runs) >= self.dag.max_active_runs:
self.logger.info("Dag {} has reached maximum amount of {} dag runs"
.format(self.dag.dag_id, self.dag.max_active_runs))
return
start_date = self.bf_start_date
end_date = self.bf_end_date
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = models.DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
executor_fails = Counter()
# Build a list of all instances to run
tasks_to_run = {}
failed = set()
succeeded = set()
started = set()
skipped = set()
not_ready = set()
deadlocked = set()
# create dag runs
dr_start_date = start_date or min([t.start_date for t in self.dag.tasks])
next_run_date = self.dag.normalize_schedule(dr_start_date)
end_date = end_date or datetime.now()
active_dag_runs = []
while next_run_date and next_run_date <= end_date:
run_id = 'backfill_' + next_run_date.isoformat()
            # check if we are scheduling on top of an already existing dag_run
# we could find a "scheduled" run instead of a "backfill"
run = DagRun.find(dag_id=self.dag.dag_id,
execution_date=next_run_date,
session=session)
if not run:
run = self.dag.create_dagrun(
run_id=run_id,
execution_date=next_run_date,
start_date=datetime.now(),
state=State.RUNNING,
external_trigger=False,
session=session,
)
else:
run = run[0]
# set required transient field
run.dag = self.dag
            # explicitly mark the run as RUNNING since we can fill gaps
run.state = State.RUNNING
run.verify_integrity(session=session)
            # for some reason, if we don't refresh, the reference to the run is lost
run.refresh_from_db()
make_transient(run)
active_dag_runs.append(run)
next_run_date = self.dag.following_schedule(next_run_date)
run_count = 0
for run in active_dag_runs:
logging.info("Checking run {}".format(run))
run_count = run_count + 1
def get_task_instances_for_dag_run(dag_run):
                # this needs a fresh session; sometimes TIs get detached
                # could be more fine-grained (excluding success or skipped)
tasks = {}
for ti in dag_run.get_task_instances():
tasks[ti.key] = ti
return tasks
# Triggering what is ready to get triggered
while not deadlocked:
tasks_to_run = get_task_instances_for_dag_run(run)
self.logger.debug("Clearing out not_ready list")
not_ready.clear()
for key, ti in list(tasks_to_run.items()):
task = self.dag.get_task(ti.task_id)
ti.task = task
ignore_depends_on_past = (
self.ignore_first_depends_on_past and
ti.execution_date == (start_date or ti.start_date))
self.logger.debug("Task instance to run {} state {}"
.format(ti, ti.state))
# The task was already marked successful or skipped by a
# different Job. Don't rerun it.
if ti.state == State.SUCCESS:
succeeded.add(key)
self.logger.debug("Task instance {} succeeded. "
"Don't rerun.".format(ti))
tasks_to_run.pop(key)
continue
elif ti.state == State.SKIPPED:
skipped.add(key)
self.logger.debug("Task instance {} skipped. "
"Don't rerun.".format(ti))
tasks_to_run.pop(key)
continue
elif ti.state == State.FAILED:
self.logger.error("Task instance {} failed".format(ti))
failed.add(key)
tasks_to_run.pop(key)
continue
backfill_context = DepContext(
deps=RUN_DEPS,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
flag_upstream_failed=True)
# Is the task runnable? -- then run it
if ti.are_dependencies_met(
dep_context=backfill_context,
session=session,
verbose=True):
self.logger.debug('Sending {} to executor'.format(ti))
if ti.state == State.NONE:
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
pickle_id=pickle_id,
ignore_task_deps=self.ignore_task_deps,
ignore_depends_on_past=ignore_depends_on_past,
pool=self.pool)
started.add(key)
# Mark the task as not ready to run
elif ti.state in (State.NONE, State.UPSTREAM_FAILED):
self.logger.debug('Adding {} to not_ready'.format(ti))
not_ready.add(key)
session.commit()
self.heartbeat()
executor.heartbeat()
# If the set of tasks that aren't ready ever equals the set of
# tasks to run, then the backfill is deadlocked
if not_ready and not_ready == set(tasks_to_run):
self.logger.warn("Deadlock discovered for tasks_to_run={}"
.format(tasks_to_run.values()))
deadlocked.update(tasks_to_run.values())
tasks_to_run.clear()
# Reacting to events
for key, state in list(executor.get_event_buffer().items()):
if key not in tasks_to_run:
self.logger.warn("{} state {} not in tasks_to_run={}"
.format(key, state,
tasks_to_run.values()))
continue
ti = tasks_to_run[key]
ti.refresh_from_db()
logging.info("Executor state: {} task {}".format(state, ti))
# executor reports failure
if state == State.FAILED:
# task reports running
if ti.state == State.RUNNING:
msg = (
'Executor reports that task instance {} failed '
'although the task says it is running.'.format(ti))
self.logger.error(msg)
ti.handle_failure(msg)
tasks_to_run.pop(key)
# task reports skipped
elif ti.state == State.SKIPPED:
self.logger.error("Skipping {} ".format(ti))
skipped.add(key)
tasks_to_run.pop(key)
# anything else is a failure
else:
self.logger.error("Task instance {} failed".format(ti))
failed.add(key)
tasks_to_run.pop(key)
# executor reports success
elif state == State.SUCCESS:
# task reports success
if ti.state == State.SUCCESS:
self.logger.info(
'Task instance {} succeeded'.format(ti))
succeeded.add(key)
tasks_to_run.pop(key)
# task reports failure
elif ti.state == State.FAILED:
self.logger.error("Task instance {} failed".format(ti))
failed.add(key)
tasks_to_run.pop(key)
# task reports skipped
elif ti.state == State.SKIPPED:
self.logger.info("Task instance {} skipped".format(ti))
skipped.add(key)
tasks_to_run.pop(key)
# this probably won't ever be triggered
elif ti in not_ready:
self.logger.info(
"{} wasn't expected to run, but it did".format(ti))
# executor reports success but task does not - this is weird
elif ti.state not in (
State.SCHEDULED,
State.QUEUED,
State.UP_FOR_RETRY):
self.logger.error(
"The airflow run command failed "
"at reporting an error. This should not occur "
"in normal circumstances. Task state is '{}',"
"reported state is '{}'. TI is {}"
"".format(ti.state, state, ti))
# if the executor fails 3 or more times, stop trying to
# run the task
executor_fails[key] += 1
if executor_fails[key] >= 3:
msg = (
'The airflow run command failed to report an '
'error for task {} three or more times. The '
'task is being marked as failed. This is very '
'unusual and probably means that an error is '
'taking place before the task even '
'starts.'.format(key))
self.logger.error(msg)
ti.handle_failure(msg)
tasks_to_run.pop(key)
msg = ' | '.join([
"[backfill progress]",
"dag run {6} of {7}",
"tasks waiting: {0}",
"succeeded: {1}",
"kicked_off: {2}",
"failed: {3}",
"skipped: {4}",
"deadlocked: {5}"
]).format(
len(tasks_to_run),
len(succeeded),
len(started),
len(failed),
len(skipped),
len(deadlocked),
run_count,
len(active_dag_runs))
self.logger.info(msg)
self.logger.debug("Finished dag run loop iteration. "
"Remaining tasks {}"
.format(tasks_to_run.values()))
if len(tasks_to_run) == 0:
break
# update dag run state
run.update_state(session=session)
if run.dag.is_paused:
models.DagStat.clean_dirty([run.dag_id], session=session)
executor.end()
session.commit()
session.close()
err = ''
if failed:
err += (
"---------------------------------------------------\n"
"Some task instances failed:\n{}\n".format(failed))
if deadlocked:
err += (
'---------------------------------------------------\n'
'BackfillJob is deadlocked.')
deadlocked_depends_on_past = any(
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=False),
session=session,
verbose=True) !=
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=True),
session=session,
verbose=True)
for t in deadlocked)
if deadlocked_depends_on_past:
err += (
'Some of the deadlocked tasks were unable to run because '
'of "depends_on_past" relationships. Try running the '
'backfill with the option '
'"ignore_first_depends_on_past=True" or passing "-I" at '
'the command line.')
err += ' These tasks have succeeded:\n{}\n'.format(succeeded)
err += ' These tasks have started:\n{}\n'.format(started)
err += ' These tasks have failed:\n{}\n'.format(failed)
err += ' These tasks are skipped:\n{}\n'.format(skipped)
err += ' These tasks are deadlocked:\n{}\n'.format(deadlocked)
if err:
raise AirflowException(err)
self.logger.info("Backfill done. Exiting.")
class LocalTaskJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'LocalTaskJob'
}
def __init__(
self,
task_instance,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
mark_success=False,
pickle_id=None,
pool=None,
*args, **kwargs):
self.task_instance = task_instance
self.ignore_all_deps = ignore_all_deps
self.ignore_depends_on_past = ignore_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.ignore_ti_state = ignore_ti_state
self.pool = pool
self.pickle_id = pickle_id
self.mark_success = mark_success
        # terminating state is used so that a job doesn't try to
# terminate multiple times
self.terminating = False
# Keeps track of the fact that the task instance has been observed
# as running at least once
self.was_running = False
super(LocalTaskJob, self).__init__(*args, **kwargs)
def _execute(self):
self.task_runner = get_task_runner(self)
try:
self.task_runner.start()
ti = self.task_instance
session = settings.Session()
if self.task_runner.process:
ti.pid = self.task_runner.process.pid
ti.hostname = socket.getfqdn()
session.merge(ti)
session.commit()
session.close()
last_heartbeat_time = time.time()
heartbeat_time_limit = conf.getint('scheduler',
'scheduler_zombie_task_threshold')
while True:
# Monitor the task to see if it's done
return_code = self.task_runner.return_code()
if return_code is not None:
self.logger.info("Task exited with return code {}"
.format(return_code))
return
# Periodically heartbeat so that the scheduler doesn't think this
# is a zombie
try:
self.heartbeat()
last_heartbeat_time = time.time()
except OperationalError:
Stats.incr('local_task_job_heartbeat_failure', 1, 1)
self.logger.exception("Exception while trying to heartbeat! "
"Sleeping for {}s".format(self.heartrate))
time.sleep(self.heartrate)
# If it's been too long since the last heartbeat, it's possible that
# the scheduler rescheduled this task, so kill launched processes.
time_since_last_heartbeat = time.time() - last_heartbeat_time
if time_since_last_heartbeat > heartbeat_time_limit:
Stats.incr('local_task_job_prolonged_heartbeat_failure', 1, 1)
self.logger.error("Heartbeat time limited exceeded!")
raise AirflowException("Time since last heartbeat({:.2f}s) "
"exceeded limit ({}s)."
.format(time_since_last_heartbeat,
heartbeat_time_limit))
finally:
self.on_kill()
def on_kill(self):
self.task_runner.terminate()
self.task_runner.on_finish()
@provide_session
def heartbeat_callback(self, session=None):
"""Self destruct task if state has been moved away from running externally"""
if self.terminating:
# task is already terminating, let it breathe
return
self.task_instance.refresh_from_db()
ti = self.task_instance
if ti.state == State.RUNNING:
self.was_running = True
fqdn = socket.getfqdn()
if not (fqdn == ti.hostname and
self.task_runner.process.pid == ti.pid):
logging.warning("Recorded hostname and pid of {ti.hostname} "
"and {ti.pid} do not match this instance's "
"which are {fqdn} and "
"{self.task_runner.process.pid}. "
"Taking the poison pill. So long."
.format(**locals()))
raise AirflowException("Another worker/process is running this job")
elif (self.was_running
and self.task_runner.return_code() is None
and hasattr(self.task_runner, 'process')):
logging.warning(
"State of this instance has been externally set to "
"{}. Taking the poison pill. So long.".format(ti.state))
self.task_runner.terminate()
self.terminating = True
|
py
|
1a581ecb7c2b6e542432521e6dc13c679cee3981
|
from collections import OrderedDict
def get_field_keys(fields, path=""):
previous = path + "." if path else ""
results = []
if hasattr(fields, "_meta"):
fields = OrderedDict(
[
(field.name, field)
for field in fields._meta.get_fields()
# don't want to go backwards
if (field.__class__.__name__ != "ManyToOneRel") and
# avoid recursive self references
not (
field.__class__.__name__ == "ForeignKey"
and field.related_model == fields
)
]
)
for field_name, field in fields.items():
if field.__class__.__name__ in [
"NestedSerializer",
"OrderedDict",
"dict",
"ForeignKey",
]:
subobj = None
if hasattr(field, "fields"):
subobj = field.fields
elif hasattr(field, "related_model"):
subobj = field.related_model
else:
subobj = field
for result in get_field_keys(subobj, previous + field_name):
results.append(result)
else:
results.append(previous + field_name)
return results
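# Hedged usage sketch (added for illustration, not part of the original module):
# plain nested dicts exercise the "dict" branch above, so nested structures
# flatten into dotted key paths.
if __name__ == "__main__":
    _example = {"title": "text", "author": {"name": "text", "email": "text"}}
    assert get_field_keys(_example) == ["title", "author.name", "author.email"]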
|
py
|
1a581eecf028de8cf4a4afd3b9f04cebe9c7c900
|
#!/usr/bin/env python3
#
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Read more details from go/dram-init-chromebook."""
import argparse
import logging
from cros.factory.device import device_utils
from cros.factory.utils.type_utils import Enum
ARCH = Enum(['x86', 'arm'])
MRC_CACHE_SECTIONS = (
'RECOVERY_MRC_CACHE', # For x86 recovery mode
'RW_MRC_CACHE', # For x86 normal mode
'RW_DDR_TRAINING', # For ARM (Mediatek)
'RO_DDR_TRAINING', # For ARM (Qualcomm)
)
def GetMRCSections(dut):
with dut.temp.TempFile() as temp_file:
dut.CheckCall('flashrom -p host -r %s -i FMAP' % temp_file, log=True)
fmap_sections = dut.CheckOutput('dump_fmap -p %s' % temp_file, log=True)
mrc_sections = []
for section_info in fmap_sections.splitlines():
section_name = section_info.split()[0]
if section_name in MRC_CACHE_SECTIONS:
mrc_sections.append(section_name)
return mrc_sections
def EraseTrainingData(dut):
mrc_sections = GetMRCSections(dut)
if mrc_sections:
cmd = ['flashrom', '-p', 'host', '-E']
for section in mrc_sections:
cmd += ['-i', section]
dut.CheckCall(cmd, log=True)
if 'RECOVERY_MRC_CACHE' in mrc_sections:
# Set next boot to recovery mode to retrain RECOVERY_MRC_CACHE first.
# And it'll reboot automatically and retrain RW_MRC_CACHE.
dut.CheckCall('crossystem recovery_request=0xC4', log=True)
def VerifyTrainingData(dut):
arch = dut.CheckOutput('crossystem arch').strip()
# Currently we don't have a tool to verify training data on ARM platforms,
# but the system should run a memory test after DRAM calibration.
if arch == ARCH.arm:
return
mrc_sections = GetMRCSections(dut)
with dut.temp.TempFile() as temp_file:
for section in mrc_sections:
dut.CheckCall(
'flashrom -p host -r /dev/null -i %s:%s' % (section, temp_file),
log=True)
dut.CheckCall('futility validate_rec_mrc %s' % temp_file, log=True)
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(
description='MRC cache tool for memory training and verification.',
formatter_class=argparse.RawDescriptionHelpFormatter)
group = parser.add_mutually_exclusive_group()
group.add_argument(
'--erase',
action='store_true',
help='Erase old training data; you need to reboot to trigger retraining')
group.add_argument(
'--verify', action='store_true', help='Verify the training data')
args = parser.parse_args()
dut = device_utils.CreateDUTInterface()
if args.erase:
EraseTrainingData(dut)
elif args.verify:
VerifyTrainingData(dut)
if __name__ == '__main__':
main()
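# Hedged usage sketch (the script filename is hypothetical; the flags come from
# the argparse setup above):
#   python mrc_cache.py --erase    # erase training data, then reboot to retrain
#   python mrc_cache.py --verify   # validate the retrained MRC cache sections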
|
py
|
1a581f1cf70a29c0859324e4bef5e2cce472e2af
|
class Solution:
def isPalindrome(self, x: int) -> bool:
# A number is a palindrome iff it equals its own digit reversal; negative
# numbers never match because the reversal below is always non-negative.
return x == self.reverseNumber(x)
def reverseNumber(self, x: int) -> int:
result = 0
remaining = abs(x)
while remaining != 0:
result *= 10
result += remaining % 10
remaining //= 10
return result
x = -121
test = Solution()
res = test.isPalindrome(x)
print(res)
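# Hedged extra checks (added for illustration, not in the original snippet):
# negatives and numbers ending in 0 (other than 0 itself) are never palindromes.
assert Solution().isPalindrome(121) is True
assert Solution().isPalindrome(-121) is False
assert Solution().isPalindrome(10) is False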
|
py
|
1a58203c26b41b8661e63aca21eff1648f96b651
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/xavier_ssd/TrekBot/TrekBot_WS/install_isolated;/opt/ros/melodic".split(';'):
python_path = os.path.join(workspace, 'lib/python3/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/xavier_ssd/TrekBot/TrekBot_WS/devel_isolated/cartographer_ros/env.sh')
output_filename = '/xavier_ssd/TrekBot/TrekBot_WS/build_isolated/cartographer_ros/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
py
|
1a58204625088a75760e6a97169eb952fa9b4999
|
#!/usr/bin/env nemesis
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ======================================================================
from pylith.testing.FullTestApp import TestDriver, FullTestCase
import unittest
class TestApp(TestDriver):
"""Driver application for full-scale tests.
"""
def __init__(self):
"""Constructor.
"""
TestDriver.__init__(self)
return
def _suite(self):
"""Create test suite.
"""
suite = unittest.TestSuite()
import TestTerzaghi
for test in TestTerzaghi.test_cases():
suite.addTest(unittest.makeSuite(test))
import TestTerzaghiCompaction
for test in TestTerzaghiCompaction.test_cases():
suite.addTest(unittest.makeSuite(test))
return suite
# ----------------------------------------------------------------------
if __name__ == '__main__':
FullTestCase.parse_args()
TestApp().main()
# End of file
|
py
|
1a5820f434577e854cbbd4e06ee7c924377f179b
|
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.testing import (TestCase, assert_array_almost_equal,
assert_array_equal, assert_array_less,
assert_raises, assert_equal, assert_,
run_module_suite, assert_allclose, assert_warns)
from numpy import array, spacing, sin, pi, sort
from scipy.signal import (tf2zpk, zpk2tf, tf2sos, sos2tf, sos2zpk, zpk2sos,
BadCoefficients, freqz, normalize,
buttord, cheby1, cheby2, ellip, cheb1ord, cheb2ord,
ellipord, butter, bessel, buttap, besselap,
cheb1ap, cheb2ap, ellipap, iirfilter, freqs,
lp2lp, lp2hp, lp2bp, lp2bs, bilinear, group_delay,
firwin)
from scipy.signal.filter_design import _cplxreal, _cplxpair
class TestCplxPair(TestCase):
def test_trivial_input(self):
assert_equal(_cplxpair([]).size, 0)
assert_equal(_cplxpair(1), 1)
def test_output_order(self):
assert_allclose(_cplxpair([1+1j, 1-1j]), [1-1j, 1+1j])
a = [1+1j, 1+1j, 1, 1-1j, 1-1j, 2]
b = [1-1j, 1+1j, 1-1j, 1+1j, 1, 2]
assert_allclose(_cplxpair(a), b)
# points spaced around the unit circle
z = np.exp(2j*pi*array([4, 3, 5, 2, 6, 1, 0])/7)
z1 = np.copy(z)
np.random.shuffle(z)
assert_allclose(_cplxpair(z), z1)
np.random.shuffle(z)
assert_allclose(_cplxpair(z), z1)
np.random.shuffle(z)
assert_allclose(_cplxpair(z), z1)
# Should be able to pair up all the conjugates
x = np.random.rand(10000) + 1j * np.random.rand(10000)
y = x.conj()
z = np.random.rand(10000)
x = np.concatenate((x, y, z))
np.random.shuffle(x)
c = _cplxpair(x)
# Every other element of head should be conjugates:
assert_allclose(c[0:20000:2], np.conj(c[1:20000:2]))
# Real parts of head should be in sorted order:
assert_allclose(c[0:20000:2].real, np.sort(c[0:20000:2].real))
# Tail should be sorted real numbers:
assert_allclose(c[20000:], np.sort(c[20000:]))
def test_real_integer_input(self):
assert_array_equal(_cplxpair([2, 0, 1]), [0, 1, 2])
def test_tolerances(self):
eps = spacing(1)
assert_allclose(_cplxpair([1j, -1j, 1+1j*eps], tol=2*eps),
[-1j, 1j, 1+1j*eps])
# sorting close to 0
assert_allclose(_cplxpair([-eps+1j, +eps-1j]), [-1j, +1j])
assert_allclose(_cplxpair([+eps+1j, -eps-1j]), [-1j, +1j])
assert_allclose(_cplxpair([+1j, -1j]), [-1j, +1j])
def test_unmatched_conjugates(self):
# 1+2j is unmatched
assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j])
# 1+2j and 1-3j are unmatched
assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j, 1-3j])
# 1+3j is unmatched
assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+3j])
# Not conjugates
assert_raises(ValueError, _cplxpair, [4+5j, 4+5j])
assert_raises(ValueError, _cplxpair, [1-7j, 1-7j])
# No pairs
assert_raises(ValueError, _cplxpair, [1+3j])
assert_raises(ValueError, _cplxpair, [1-3j])
class TestCplxReal(TestCase):
def test_trivial_input(self):
assert_equal(_cplxreal([]), ([], []))
assert_equal(_cplxreal(1), ([], [1]))
def test_output_order(self):
zc, zr = _cplxreal(np.roots(array([1, 0, 0, 1])))
assert_allclose(np.append(zc, zr), [1/2 + 1j*sin(pi/3), -1])
eps = spacing(1)
a = [0+1j, 0-1j, eps + 1j, eps - 1j, -eps + 1j, -eps - 1j,
1, 4, 2, 3, 0, 0,
2+3j, 2-3j,
1-eps + 1j, 1+2j, 1-2j, 1+eps - 1j, # sorts out of order
3+1j, 3+1j, 3+1j, 3-1j, 3-1j, 3-1j,
2-3j, 2+3j]
zc, zr = _cplxreal(a)
assert_allclose(zc, [1j, 1j, 1j, 1+1j, 1+2j, 2+3j, 2+3j, 3+1j, 3+1j,
3+1j])
assert_allclose(zr, [0, 0, 1, 2, 3, 4])
z = array([1-eps + 1j, 1+2j, 1-2j, 1+eps - 1j, 1+eps+3j, 1-2*eps-3j,
0+1j, 0-1j, 2+4j, 2-4j, 2+3j, 2-3j, 3+7j, 3-7j, 4-eps+1j,
4+eps-2j, 4-1j, 4-eps+2j])
zc, zr = _cplxreal(z)
assert_allclose(zc, [1j, 1+1j, 1+2j, 1+3j, 2+3j, 2+4j, 3+7j, 4+1j,
4+2j])
assert_equal(zr, [])
def test_unmatched_conjugates(self):
# 1+2j is unmatched
assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j])
# 1+2j and 1-3j are unmatched
assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j, 1-3j])
# 1+3j is unmatched
assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+3j])
# No pairs
assert_raises(ValueError, _cplxreal, [1+3j])
assert_raises(ValueError, _cplxreal, [1-3j])
def test_real_integer_input(self):
zc, zr = _cplxreal([2, 0, 1, 4])
assert_array_equal(zc, [])
assert_array_equal(zr, [0, 1, 2, 4])
class TestTf2zpk(TestCase):
def test_simple(self):
z_r = np.array([0.5, -0.5])
p_r = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
# Sort the zeros/poles so that we don't fail the test if the order
# changes
z_r.sort()
p_r.sort()
b = np.poly(z_r)
a = np.poly(p_r)
z, p, k = tf2zpk(b, a)
z.sort()
p.sort()
assert_array_almost_equal(z, z_r)
assert_array_almost_equal(p, p_r)
def test_bad_filter(self):
"""Regression test for #651: better handling of badly conditioned
filter coefficients."""
warnings.simplefilter("error", BadCoefficients)
try:
assert_raises(BadCoefficients, tf2zpk, [1e-15], [1.0, 1.0])
finally:
warnings.simplefilter("always", BadCoefficients)
class TestZpk2Tf(TestCase):
def test_identity(self):
"""Test the identity transfer function."""
z = []
p = []
k = 1.
b, a = zpk2tf(z, p, k)
b_r = np.array([1.]) # desired result
a_r = np.array([1.]) # desired result
# The test for the *type* of the return values is a regression
# test for ticket #1095. In the case p=[], zpk2tf used to
# return the scalar 1.0 instead of array([1.0]).
assert_array_equal(b, b_r)
assert_(isinstance(b, np.ndarray))
assert_array_equal(a, a_r)
assert_(isinstance(a, np.ndarray))
class TestSos2Zpk(TestCase):
def test_basic(self):
sos = [[1, 0, 1, 1, 0, -0.81],
[1, 0, 0, 1, 0, +0.49]]
z, p, k = sos2zpk(sos)
z2 = [1j, -1j, 0, 0]
p2 = [0.9, -0.9, 0.7j, -0.7j]
k2 = 1
assert_array_almost_equal(sort(z), sort(z2), decimal=4)
assert_array_almost_equal(sort(p), sort(p2), decimal=4)
assert_array_almost_equal(k, k2)
sos = [[1.00000, +0.61803, 1.0000, 1.00000, +0.60515, 0.95873],
[1.00000, -1.61803, 1.0000, 1.00000, -1.58430, 0.95873],
[1.00000, +1.00000, 0.0000, 1.00000, +0.97915, 0.00000]]
z, p, k = sos2zpk(sos)
z2 = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j,
0.8090 - 0.5878j, -1.0000 + 0.0000j, 0]
p2 = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j,
0.7922 - 0.5755j, -0.9791 + 0.0000j, 0]
k2 = 1
assert_array_almost_equal(sort(z), sort(z2), decimal=4)
assert_array_almost_equal(sort(p), sort(p2), decimal=4)
sos = array([[1, 2, 3, 1, 0.2, 0.3],
[4, 5, 6, 1, 0.4, 0.5]])
z = array([-1 - 1.41421356237310j, -1 + 1.41421356237310j,
-0.625 - 1.05326872164704j, -0.625 + 1.05326872164704j])
p = array([-0.2 - 0.678232998312527j, -0.2 + 0.678232998312527j,
-0.1 - 0.538516480713450j, -0.1 + 0.538516480713450j])
k = 4
z2, p2, k2 = sos2zpk(sos)
assert_allclose(_cplxpair(z2), z)
assert_allclose(_cplxpair(p2), p)
assert_allclose(k2, k)
class TestSos2Tf(TestCase):
def test_basic(self):
sos = [[1, 1, 1, 1, 0, -1],
[-2, 3, 1, 1, 10, 1]]
b, a = sos2tf(sos)
assert_array_almost_equal(b, [-2, 1, 2, 4, 1])
assert_array_almost_equal(a, [1, 10, 0, -10, -1])
class TestTf2Sos(TestCase):
def test_basic(self):
num = [2, 16, 44, 56, 32]
den = [3, 3, -15, 18, -12]
sos = tf2sos(num, den)
sos2 = [[0.6667, 4.0000, 5.3333, 1.0000, +2.0000, -4.0000],
[1.0000, 2.0000, 2.0000, 1.0000, -1.0000, +1.0000]]
assert_array_almost_equal(sos, sos2, decimal=4)
b = [1, -3, 11, -27, 18]
a = [16, 12, 2, -4, -1]
sos = tf2sos(b, a)
sos2 = [[0.0625, -0.1875, 0.1250, 1.0000, -0.2500, -0.1250],
[1.0000, +0.0000, 9.0000, 1.0000, +1.0000, +0.5000]]
# assert_array_almost_equal(sos, sos2, decimal=4)
class TestZpk2Sos(TestCase):
def test_basic(self):
for pairing in ('nearest', 'keep_odd'):
#
# Cases that match octave
#
z = [-1, -1]
p = [0.57149 + 0.29360j, 0.57149 - 0.29360j]
k = 1
sos = zpk2sos(z, p, k, pairing=pairing)
sos2 = [[1, 2, 1, 1, -1.14298, 0.41280]] # octave & MATLAB
assert_array_almost_equal(sos, sos2, decimal=4)
z = [1j, -1j]
p = [0.9, -0.9, 0.7j, -0.7j]
k = 1
sos = zpk2sos(z, p, k, pairing=pairing)
sos2 = [[1, 0, 1, 1, 0, +0.49],
[1, 0, 0, 1, 0, -0.81]] # octave
# sos2 = [[0, 0, 1, 1, -0.9, 0],
# [1, 0, 1, 1, 0.9, 0]] # MATLAB
assert_array_almost_equal(sos, sos2, decimal=4)
z = []
p = [0.8, -0.5+0.25j, -0.5-0.25j]
k = 1.
sos = zpk2sos(z, p, k, pairing=pairing)
sos2 = [[1., 0., 0., 1., 1., 0.3125],
[1., 0., 0., 1., -0.8, 0.]] # octave, MATLAB fails
assert_array_almost_equal(sos, sos2, decimal=4)
z = [1., 1., 0.9j, -0.9j]
p = [0.99+0.01j, 0.99-0.01j, 0.1+0.9j, 0.1-0.9j]
k = 1
sos = zpk2sos(z, p, k, pairing=pairing)
sos2 = [[1, 0, 0.81, 1, -0.2, 0.82],
[1, -2, 1, 1, -1.98, 0.9802]] # octave
# sos2 = [[1, -2, 1, 1, -0.2, 0.82],
# [1, 0, 0.81, 1, -1.98, 0.9802]] # MATLAB
assert_array_almost_equal(sos, sos2, decimal=4)
z = [0.9+0.1j, 0.9-0.1j, -0.9]
p = [0.75+0.25j, 0.75-0.25j, 0.9]
k = 1
sos = zpk2sos(z, p, k, pairing=pairing)
if pairing == 'keep_odd':
sos2 = [[1, -1.8, 0.82, 1, -1.5, 0.625],
[1, 0.9, 0, 1, -0.9, 0]] # octave; MATLAB fails
assert_array_almost_equal(sos, sos2, decimal=4)
else: # pairing == 'nearest'
sos2 = [[1, 0.9, 0, 1, -1.5, 0.625],
[1, -1.8, 0.82, 1, -0.9, 0]] # our algorithm
assert_array_almost_equal(sos, sos2, decimal=4)
#
# Cases that differ from octave:
#
z = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j,
+0.8090 - 0.5878j, -1.0000 + 0.0000j]
p = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j,
+0.7922 - 0.5755j, -0.9791 + 0.0000j]
k = 1
sos = zpk2sos(z, p, k, pairing=pairing)
# sos2 = [[1, 0.618, 1, 1, 0.6052, 0.95870],
# [1, -1.618, 1, 1, -1.5844, 0.95878],
# [1, 1, 0, 1, 0.9791, 0]] # octave, MATLAB fails
sos2 = [[1, 1, 0, 1, +0.97915, 0],
[1, 0.61803, 1, 1, +0.60515, 0.95873],
[1, -1.61803, 1, 1, -1.58430, 0.95873]]
assert_array_almost_equal(sos, sos2, decimal=4)
z = [-1 - 1.4142j, -1 + 1.4142j,
-0.625 - 1.0533j, -0.625 + 1.0533j]
p = [-0.2 - 0.6782j, -0.2 + 0.6782j,
-0.1 - 0.5385j, -0.1 + 0.5385j]
k = 4
sos = zpk2sos(z, p, k, pairing=pairing)
sos2 = [[4, 8, 12, 1, 0.2, 0.3],
[1, 1.25, 1.5, 1, 0.4, 0.5]] # MATLAB
# sos2 = [[4, 8, 12, 1, 0.4, 0.5],
# [1, 1.25, 1.5, 1, 0.2, 0.3]] # octave
assert_allclose(sos, sos2, rtol=1e-4, atol=1e-4)
z = []
p = [0.2, -0.5+0.25j, -0.5-0.25j]
k = 1.
sos = zpk2sos(z, p, k, pairing=pairing)
sos2 = [[1., 0., 0., 1., -0.2, 0.],
[1., 0., 0., 1., 1., 0.3125]]
# sos2 = [[1., 0., 0., 1., 1., 0.3125],
# [1., 0., 0., 1., -0.2, 0]] # octave, MATLAB fails
assert_array_almost_equal(sos, sos2, decimal=4)
# The next two examples are adapted from Leland B. Jackson,
# "Digital Filters and Signal Processing (1995) p.400:
# http://books.google.com/books?id=VZ8uabI1pNMC&lpg=PA400&ots=gRD9pi8Jua&dq=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&pg=PA400#v=onepage&q=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&f=false
deg2rad = np.pi / 180.
k = 1.
# first example
thetas = [22.5, 45, 77.5]
mags = [0.8, 0.6, 0.9]
z = np.array([np.exp(theta * deg2rad * 1j) for theta in thetas])
z = np.concatenate((z, np.conj(z)))
p = np.array([mag * np.exp(theta * deg2rad * 1j)
for theta, mag in zip(thetas, mags)])
p = np.concatenate((p, np.conj(p)))
sos = zpk2sos(z, p, k)
# sos2 = [[1, -0.43288, 1, 1, -0.38959, 0.81], # octave,
# [1, -1.41421, 1, 1, -0.84853, 0.36], # MATLAB fails
# [1, -1.84776, 1, 1, -1.47821, 0.64]]
# Note that pole-zero pairing matches, but ordering is different
sos2 = [[1, -1.41421, 1, 1, -0.84853, 0.36],
[1, -1.84776, 1, 1, -1.47821, 0.64],
[1, -0.43288, 1, 1, -0.38959, 0.81]]
assert_array_almost_equal(sos, sos2, decimal=4)
# second example
z = np.array([np.exp(theta * deg2rad * 1j)
for theta in (85., 10.)])
z = np.concatenate((z, np.conj(z), [1, -1]))
sos = zpk2sos(z, p, k)
# sos2 = [[1, -0.17431, 1, 1, -0.38959, 0.81], # octave "wrong",
# [1, -1.96962, 1, 1, -0.84853, 0.36], # MATLAB fails
# [1, 0, -1, 1, -1.47821, 0.64000]]
# Our pole-zero pairing matches the text, Octave does not
sos2 = [[1, 0, -1, 1, -0.84853, 0.36],
[1, -1.96962, 1, 1, -1.47821, 0.64],
[1, -0.17431, 1, 1, -0.38959, 0.81]]
assert_array_almost_equal(sos, sos2, decimal=4)
class TestFreqz(TestCase):
def test_ticket1441(self):
"""Regression test for ticket 1441."""
# Because freqz previously used arange instead of linspace,
# when N was large, it would return one more point than
# requested.
N = 100000
w, h = freqz([1.0], worN=N)
assert_equal(w.shape, (N,))
def test_basic(self):
w, h = freqz([1.0], worN=8)
assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8)
assert_array_almost_equal(h, np.ones(8))
def test_basic_whole(self):
w, h = freqz([1.0], worN=8, whole=True)
assert_array_almost_equal(w, 2 * np.pi * np.arange(8.0) / 8)
assert_array_almost_equal(h, np.ones(8))
def test_plot(self):
def plot(w, h):
assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8)
assert_array_almost_equal(h, np.ones(8))
assert_raises(ZeroDivisionError,
freqz, [1.0], worN=8, plot=lambda w, h: 1 / 0)
freqz([1.0], worN=8, plot=plot)
class TestNormalize(TestCase):
def test_allclose(self):
"""Test for false positive on allclose in normalize() in
filter_design.py"""
# Test to make sure the allclose call within signal.normalize does not
# report false positives. Then check against a known output from MATLAB
# to make sure the fix doesn't break anything.
# These are the coefficients returned from
# `[b,a] = cheby1(8, 0.5, 0.048)'
# in MATLAB. There are at least 15 significant figures in each
# coefficient, so it makes sense to test for errors on the order of
# 1e-13 (this can always be relaxed if different platforms have
# different rounding errors)
b_matlab = np.array([2.150733144728282e-11, 1.720586515782626e-10,
6.022052805239190e-10, 1.204410561047838e-09,
1.505513201309798e-09, 1.204410561047838e-09,
6.022052805239190e-10, 1.720586515782626e-10,
2.150733144728282e-11])
a_matlab = np.array([1.000000000000000e+00, -7.782402035027959e+00,
2.654354569747454e+01, -5.182182531666387e+01,
6.334127355102684e+01, -4.963358186631157e+01,
2.434862182949389e+01, -6.836925348604676e+00,
8.412934944449140e-01])
# This is the input to signal.normalize after passing through the
# equivalent steps in signal.iirfilter as was done for MATLAB
b_norm_in = np.array([1.5543135865293012e-06, 1.2434508692234413e-05,
4.3520780422820447e-05, 8.7041560845640893e-05,
1.0880195105705122e-04, 8.7041560845640975e-05,
4.3520780422820447e-05, 1.2434508692234413e-05,
1.5543135865293012e-06])
a_norm_in = np.array([7.2269025909127173e+04, -5.6242661430467968e+05,
1.9182761917308895e+06, -3.7451128364682454e+06,
4.5776121393762771e+06, -3.5869706138592605e+06,
1.7596511818472347e+06, -4.9409793515707983e+05,
6.0799461347219651e+04])
b_output, a_output = normalize(b_norm_in, a_norm_in)
# The test on b works for decimal=14 but the one for a does not. For
# the sake of consistency, both of these are decimal=13. If something
# breaks on another platform, it is probably fine to relax this lower.
assert_array_almost_equal(b_matlab, b_output, decimal=13)
assert_array_almost_equal(a_matlab, a_output, decimal=13)
class TestLp2lp(TestCase):
def test_basic(self):
b = [1]
a = [1, np.sqrt(2), 1]
b_lp, a_lp = lp2lp(b, a, 0.38574256627112119)
assert_array_almost_equal(b_lp, [0.1488], decimal=4)
assert_array_almost_equal(a_lp, [1, 0.5455, 0.1488], decimal=4)
class TestLp2hp(TestCase):
def test_basic(self):
b = [0.25059432325190018]
a = [1, 0.59724041654134863, 0.92834805757524175, 0.25059432325190018]
b_hp, a_hp = lp2hp(b, a, 2*np.pi*5000)
assert_allclose(b_hp, [1, 0, 0, 0])
assert_allclose(a_hp, [1, 1.1638e5, 2.3522e9, 1.2373e14], rtol=1e-4)
class TestLp2bp(TestCase):
def test_basic(self):
b = [1]
a = [1, 2, 2, 1]
b_bp, a_bp = lp2bp(b, a, 2*np.pi*4000, 2*np.pi*2000)
assert_allclose(b_bp, [1.9844e12, 0, 0, 0], rtol=1e-6)
assert_allclose(a_bp, [1, 2.5133e4, 2.2108e9, 3.3735e13,
1.3965e18, 1.0028e22, 2.5202e26], rtol=1e-4)
class TestLp2bs(TestCase):
def test_basic(self):
b = [1]
a = [1, 1]
b_bs, a_bs = lp2bs(b, a, 0.41722257286366754, 0.18460575326152251)
assert_array_almost_equal(b_bs, [1, 0, 0.17407], decimal=5)
assert_array_almost_equal(a_bs, [1, 0.18461, 0.17407], decimal=5)
class TestBilinear(TestCase):
def test_basic(self):
b = [0.14879732743343033]
a = [1, 0.54552236880522209, 0.14879732743343033]
b_z, a_z = bilinear(b, a, 0.5)
assert_array_almost_equal(b_z, [0.087821, 0.17564, 0.087821],
decimal=5)
assert_array_almost_equal(a_z, [1, -1.0048, 0.35606], decimal=4)
b = [1, 0, 0.17407467530697837]
a = [1, 0.18460575326152251, 0.17407467530697837]
b_z, a_z = bilinear(b, a, 0.5)
assert_array_almost_equal(b_z, [0.86413, -1.2158, 0.86413],
decimal=4)
assert_array_almost_equal(a_z, [1, -1.2158, 0.72826],
decimal=4)
class TestPrototypeType(TestCase):
def test_output_type(self):
# Prototypes should consistently output arrays, not lists
# https://github.com/scipy/scipy/pull/441
for func in (buttap,
besselap,
lambda N: cheb1ap(N, 1),
lambda N: cheb2ap(N, 20),
lambda N: ellipap(N, 1, 20)):
for N in range(7):
z, p, k = func(N)
assert_(isinstance(z, np.ndarray))
assert_(isinstance(p, np.ndarray))
def dB(x):
# Return magnitude in decibels
return 20 * np.log10(abs(x))
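# Hedged sanity note (added for illustration, not part of the original test
# file): halving the amplitude is about -6.02 dB; the order-selection tests
# below use dB() to check passband/stopband attenuation against rp/rs.
assert abs(dB(0.5) + 6.0206) < 1e-3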
class TestButtord(TestCase):
def test_lowpass(self):
wp = 0.2
ws = 0.3
rp = 3
rs = 60
N, Wn = buttord(wp, ws, rp, rs, False)
b, a = butter(N, Wn, 'lowpass', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp, dB(h[w <= wp]))
assert_array_less(dB(h[ws <= w]), -rs)
assert_equal(N, 16)
assert_allclose(Wn, 2.0002776782743284e-01, rtol=1e-15)
def test_highpass(self):
wp = 0.3
ws = 0.2
rp = 3
rs = 70
N, Wn = buttord(wp, ws, rp, rs, False)
b, a = butter(N, Wn, 'highpass', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp, dB(h[wp <= w]))
assert_array_less(dB(h[w <= ws]), -rs)
assert_equal(N, 18)
assert_allclose(Wn, 2.9996603079132672e-01, rtol=1e-15)
def test_bandpass(self):
wp = [0.2, 0.5]
ws = [0.1, 0.6]
rp = 3
rs = 80
N, Wn = buttord(wp, ws, rp, rs, False)
b, a = butter(N, Wn, 'bandpass', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1,
dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
-rs + 0.1)
assert_equal(N, 18)
assert_allclose(Wn, [1.9998742411409134e-01, 5.0002139595676276e-01],
rtol=1e-15)
def test_bandstop(self):
wp = [0.1, 0.6]
ws = [0.2, 0.5]
rp = 3
rs = 90
N, Wn = buttord(wp, ws, rp, rs, False)
b, a = butter(N, Wn, 'bandstop', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp,
dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
-rs)
assert_equal(N, 20)
assert_allclose(Wn, [1.4759432329294042e-01, 5.9997365985276407e-01],
rtol=1e-6)
def test_analog(self):
wp = 200
ws = 600
rp = 3
rs = 60
N, Wn = buttord(wp, ws, rp, rs, True)
b, a = butter(N, Wn, 'lowpass', True)
w, h = freqs(b, a)
assert_array_less(-rp, dB(h[w <= wp]))
assert_array_less(dB(h[ws <= w]), -rs)
assert_equal(N, 7)
assert_allclose(Wn, 2.0006785355671877e+02, rtol=1e-15)
n, Wn = buttord(1, 550/450, 1, 26, analog=True)
assert_equal(n, 19)
assert_allclose(Wn, 1.0361980524629517, rtol=1e-15)
assert_equal(buttord(1, 1.2, 1, 80, analog=True)[0], 55)
class TestCheb1ord(TestCase):
def test_lowpass(self):
wp = 0.2
ws = 0.3
rp = 3
rs = 60
N, Wn = cheb1ord(wp, ws, rp, rs, False)
b, a = cheby1(N, rp, Wn, 'low', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1, dB(h[w <= wp]))
assert_array_less(dB(h[ws <= w]), -rs + 0.1)
assert_equal(N, 8)
assert_allclose(Wn, 0.2, rtol=1e-15)
def test_highpass(self):
wp = 0.3
ws = 0.2
rp = 3
rs = 70
N, Wn = cheb1ord(wp, ws, rp, rs, False)
b, a = cheby1(N, rp, Wn, 'high', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1, dB(h[wp <= w]))
assert_array_less(dB(h[w <= ws]), -rs + 0.1)
assert_equal(N, 9)
assert_allclose(Wn, 0.3, rtol=1e-15)
def test_bandpass(self):
wp = [0.2, 0.5]
ws = [0.1, 0.6]
rp = 3
rs = 80
N, Wn = cheb1ord(wp, ws, rp, rs, False)
b, a = cheby1(N, rp, Wn, 'band', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1,
dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
-rs + 0.1)
assert_equal(N, 9)
assert_allclose(Wn, [0.2, 0.5], rtol=1e-15)
def test_bandstop(self):
wp = [0.1, 0.6]
ws = [0.2, 0.5]
rp = 3
rs = 90
N, Wn = cheb1ord(wp, ws, rp, rs, False)
b, a = cheby1(N, rp, Wn, 'stop', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1,
dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
-rs + 0.1)
assert_equal(N, 10)
assert_allclose(Wn, [0.14758232569947785, 0.6], rtol=1e-5)
def test_analog(self):
wp = 700
ws = 100
rp = 3
rs = 70
N, Wn = cheb1ord(wp, ws, rp, rs, True)
b, a = cheby1(N, rp, Wn, 'high', True)
w, h = freqs(b, a)
assert_array_less(-rp - 0.1, dB(h[wp <= w]))
assert_array_less(dB(h[w <= ws]), -rs + 0.1)
assert_equal(N, 4)
assert_allclose(Wn, 700, rtol=1e-15)
assert_equal(cheb1ord(1, 1.2, 1, 80, analog=True)[0], 17)
class TestCheb2ord(TestCase):
def test_lowpass(self):
wp = 0.2
ws = 0.3
rp = 3
rs = 60
N, Wn = cheb2ord(wp, ws, rp, rs, False)
b, a = cheby2(N, rs, Wn, 'lp', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1, dB(h[w <= wp]))
assert_array_less(dB(h[ws <= w]), -rs + 0.1)
assert_equal(N, 8)
assert_allclose(Wn, 0.28647639976553163, rtol=1e-15)
def test_highpass(self):
wp = 0.3
ws = 0.2
rp = 3
rs = 70
N, Wn = cheb2ord(wp, ws, rp, rs, False)
b, a = cheby2(N, rs, Wn, 'hp', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1, dB(h[wp <= w]))
assert_array_less(dB(h[w <= ws]), -rs + 0.1)
assert_equal(N, 9)
assert_allclose(Wn, 0.20697492182903282, rtol=1e-15)
def test_bandpass(self):
wp = [0.2, 0.5]
ws = [0.1, 0.6]
rp = 3
rs = 80
N, Wn = cheb2ord(wp, ws, rp, rs, False)
b, a = cheby2(N, rs, Wn, 'bp', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1,
dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
-rs + 0.1)
assert_equal(N, 9)
assert_allclose(Wn, [0.14876937565923479, 0.59748447842351482],
rtol=1e-15)
def test_bandstop(self):
wp = [0.1, 0.6]
ws = [0.2, 0.5]
rp = 3
rs = 90
N, Wn = cheb2ord(wp, ws, rp, rs, False)
b, a = cheby2(N, rs, Wn, 'bs', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1,
dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
-rs + 0.1)
assert_equal(N, 10)
assert_allclose(Wn, [0.19926249974781743, 0.50125246585567362],
rtol=1e-6)
def test_analog(self):
wp = [20, 50]
ws = [10, 60]
rp = 3
rs = 80
N, Wn = cheb2ord(wp, ws, rp, rs, True)
b, a = cheby2(N, rs, Wn, 'bp', True)
w, h = freqs(b, a)
assert_array_less(-rp - 0.1,
dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
-rs + 0.1)
assert_equal(N, 11)
assert_allclose(Wn, [1.673740595370124e+01, 5.974641487254268e+01],
rtol=1e-15)
class TestEllipord(TestCase):
def test_lowpass(self):
wp = 0.2
ws = 0.3
rp = 3
rs = 60
N, Wn = ellipord(wp, ws, rp, rs, False)
b, a = ellip(N, rp, rs, Wn, 'lp', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1, dB(h[w <= wp]))
assert_array_less(dB(h[ws <= w]), -rs + 0.1)
assert_equal(N, 5)
assert_allclose(Wn, 0.2, rtol=1e-15)
def test_highpass(self):
wp = 0.3
ws = 0.2
rp = 3
rs = 70
N, Wn = ellipord(wp, ws, rp, rs, False)
b, a = ellip(N, rp, rs, Wn, 'hp', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1, dB(h[wp <= w]))
assert_array_less(dB(h[w <= ws]), -rs + 0.1)
assert_equal(N, 6)
assert_allclose(Wn, 0.3, rtol=1e-15)
def test_bandpass(self):
wp = [0.2, 0.5]
ws = [0.1, 0.6]
rp = 3
rs = 80
N, Wn = ellipord(wp, ws, rp, rs, False)
b, a = ellip(N, rp, rs, Wn, 'bp', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1,
dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
-rs + 0.1)
assert_equal(N, 6)
assert_allclose(Wn, [0.2, 0.5], rtol=1e-15)
def test_bandstop(self):
wp = [0.1, 0.6]
ws = [0.2, 0.5]
rp = 3
rs = 90
N, Wn = ellipord(wp, ws, rp, rs, False)
b, a = ellip(N, rp, rs, Wn, 'bs', False)
w, h = freqz(b, a)
w /= np.pi
assert_array_less(-rp - 0.1,
dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
-rs + 0.1)
assert_equal(N, 7)
assert_allclose(Wn, [0.14758232794342988, 0.6], rtol=1e-5)
def test_analog(self):
wp = [1000, 6000]
ws = [2000, 5000]
rp = 3
rs = 90
N, Wn = ellipord(wp, ws, rp, rs, True)
b, a = ellip(N, rp, rs, Wn, 'bs', True)
w, h = freqs(b, a)
assert_array_less(-rp - 0.1,
dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
-rs + 0.1)
assert_equal(N, 8)
assert_allclose(Wn, [1666.6666, 6000])
assert_equal(ellipord(1, 1.2, 1, 80, analog=True)[0], 9)
class TestBessel(TestCase):
def test_degenerate(self):
# 0-order filter is just a passthrough
b, a = bessel(0, 1, analog=True)
assert_array_equal(b, [1])
assert_array_equal(a, [1])
# 1-order filter is same for all types
b, a = bessel(1, 1, analog=True)
assert_array_equal(b, [1])
assert_array_equal(a, [1, 1])
z, p, k = bessel(1, 0.3, analog=True, output='zpk')
assert_array_equal(z, [])
assert_allclose(p, [-0.3], rtol=1e-14)
assert_allclose(k, 0.3, rtol=1e-14)
def test_high_order(self):
# high even order
z, p, k = bessel(24, 100, analog=True, output='zpk')
z2 = []
p2 = [
-9.055312334014323e+01 + 4.844005815403969e+00j,
-9.055312334014323e+01 - 4.844005815403969e+00j,
-8.983105162681878e+01 + 1.454056170018573e+01j,
-8.983105162681878e+01 - 1.454056170018573e+01j,
-8.837357994162065e+01 + 2.426335240122282e+01j,
-8.837357994162065e+01 - 2.426335240122282e+01j,
-8.615278316179575e+01 + 3.403202098404543e+01j,
-8.615278316179575e+01 - 3.403202098404543e+01j,
-8.312326467067703e+01 + 4.386985940217900e+01j,
-8.312326467067703e+01 - 4.386985940217900e+01j,
-7.921695461084202e+01 + 5.380628489700191e+01j,
-7.921695461084202e+01 - 5.380628489700191e+01j,
-7.433392285433246e+01 + 6.388084216250878e+01j,
-7.433392285433246e+01 - 6.388084216250878e+01j,
-6.832565803501586e+01 + 7.415032695116071e+01j,
-6.832565803501586e+01 - 7.415032695116071e+01j,
-6.096221567378025e+01 + 8.470292433074425e+01j,
-6.096221567378025e+01 - 8.470292433074425e+01j,
-5.185914574820616e+01 + 9.569048385258847e+01j,
-5.185914574820616e+01 - 9.569048385258847e+01j,
-4.027853855197555e+01 + 1.074195196518679e+02j,
-4.027853855197555e+01 - 1.074195196518679e+02j,
-2.433481337524861e+01 + 1.207298683731973e+02j,
-2.433481337524861e+01 - 1.207298683731973e+02j,
]
k2 = 9.999999999999989e+47
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag))
assert_allclose(k, k2, rtol=1e-14)
# high odd order
z, p, k = bessel(23, 1000, analog=True, output='zpk')
z2 = []
p2 = [
-2.497697202208956e+02 + 1.202813187870698e+03j,
-2.497697202208956e+02 - 1.202813187870698e+03j,
-4.126986617510172e+02 + 1.065328794475509e+03j,
-4.126986617510172e+02 - 1.065328794475509e+03j,
-5.304922463809596e+02 + 9.439760364018479e+02j,
-5.304922463809596e+02 - 9.439760364018479e+02j,
-9.027564978975828e+02 + 1.010534334242318e+02j,
-9.027564978975828e+02 - 1.010534334242318e+02j,
-8.909283244406079e+02 + 2.023024699647598e+02j,
-8.909283244406079e+02 - 2.023024699647598e+02j,
-8.709469394347836e+02 + 3.039581994804637e+02j,
-8.709469394347836e+02 - 3.039581994804637e+02j,
-8.423805948131370e+02 + 4.062657947488952e+02j,
-8.423805948131370e+02 - 4.062657947488952e+02j,
-8.045561642249877e+02 + 5.095305912401127e+02j,
-8.045561642249877e+02 - 5.095305912401127e+02j,
-7.564660146766259e+02 + 6.141594859516342e+02j,
-7.564660146766259e+02 - 6.141594859516342e+02j,
-6.965966033906477e+02 + 7.207341374730186e+02j,
-6.965966033906477e+02 - 7.207341374730186e+02j,
-6.225903228776276e+02 + 8.301558302815096e+02j,
-6.225903228776276e+02 - 8.301558302815096e+02j,
-9.066732476324988e+02]
k2 = 9.999999999999983e+68
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag))
assert_allclose(k, k2, rtol=1e-14)
class TestButter(TestCase):
def test_degenerate(self):
# 0-order filter is just a passthrough
b, a = butter(0, 1, analog=True)
assert_array_equal(b, [1])
assert_array_equal(a, [1])
# 1-order filter is same for all types
b, a = butter(1, 1, analog=True)
assert_array_almost_equal(b, [1])
assert_array_almost_equal(a, [1, 1])
z, p, k = butter(1, 0.3, output='zpk')
assert_array_equal(z, [-1])
assert_allclose(p, [3.249196962329063e-01], rtol=1e-14)
assert_allclose(k, 3.375401518835469e-01, rtol=1e-14)
def test_basic(self):
# analog s-plane
for N in range(25):
wn = 0.01
z, p, k = butter(N, wn, 'low', analog=True, output='zpk')
assert_array_almost_equal([], z)
assert_(len(p) == N)
# All poles should be at distance wn from origin
assert_array_almost_equal(wn, abs(p))
assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane
assert_array_almost_equal(wn**N, k)
# digital z-plane
for N in range(25):
wn = 0.01
z, p, k = butter(N, wn, 'high', analog=False, output='zpk')
assert_array_equal(np.ones(N), z) # All zeros exactly at DC
assert_(all(np.abs(p) <= 1)) # No poles outside unit circle
b1, a1 = butter(2, 1, analog=True)
assert_array_almost_equal(b1, [1])
assert_array_almost_equal(a1, [1, np.sqrt(2), 1])
b2, a2 = butter(5, 1, analog=True)
assert_array_almost_equal(b2, [1])
assert_array_almost_equal(a2, [1, 3.2361, 5.2361,
5.2361, 3.2361, 1], decimal=4)
b3, a3 = butter(10, 1, analog=True)
assert_array_almost_equal(b3, [1])
assert_array_almost_equal(a3, [1, 6.3925, 20.4317, 42.8021, 64.8824,
74.2334, 64.8824, 42.8021, 20.4317,
6.3925, 1], decimal=4)
b2, a2 = butter(19, 1.0441379169150726, analog=True)
assert_array_almost_equal(b2, [2.2720], decimal=4)
assert_array_almost_equal(a2, 1.0e+004 * np.array([
0.0001, 0.0013, 0.0080, 0.0335, 0.1045, 0.2570,
0.5164, 0.8669, 1.2338, 1.5010, 1.5672, 1.4044,
1.0759, 0.6986, 0.3791, 0.1681, 0.0588, 0.0153,
0.0026, 0.0002]), decimal=0)
b, a = butter(5, 0.4)
assert_array_almost_equal(b, [0.0219, 0.1097, 0.2194,
0.2194, 0.1097, 0.0219], decimal=4)
assert_array_almost_equal(a, [1.0000, -0.9853, 0.9738,
-0.3864, 0.1112, -0.0113], decimal=4)
def test_highpass(self):
# highpass, high even order
z, p, k = butter(28, 0.43, 'high', output='zpk')
z2 = np.ones(28)
p2 = [
2.068257195514592e-01 + 9.238294351481734e-01j,
2.068257195514592e-01 - 9.238294351481734e-01j,
1.874933103892023e-01 + 8.269455076775277e-01j,
1.874933103892023e-01 - 8.269455076775277e-01j,
1.717435567330153e-01 + 7.383078571194629e-01j,
1.717435567330153e-01 - 7.383078571194629e-01j,
1.588266870755982e-01 + 6.564623730651094e-01j,
1.588266870755982e-01 - 6.564623730651094e-01j,
1.481881532502603e-01 + 5.802343458081779e-01j,
1.481881532502603e-01 - 5.802343458081779e-01j,
1.394122576319697e-01 + 5.086609000582009e-01j,
1.394122576319697e-01 - 5.086609000582009e-01j,
1.321840881809715e-01 + 4.409411734716436e-01j,
1.321840881809715e-01 - 4.409411734716436e-01j,
1.262633413354405e-01 + 3.763990035551881e-01j,
1.262633413354405e-01 - 3.763990035551881e-01j,
1.214660449478046e-01 + 3.144545234797277e-01j,
1.214660449478046e-01 - 3.144545234797277e-01j,
1.104868766650320e-01 + 2.771505404367791e-02j,
1.104868766650320e-01 - 2.771505404367791e-02j,
1.111768629525075e-01 + 8.331369153155753e-02j,
1.111768629525075e-01 - 8.331369153155753e-02j,
1.125740630842972e-01 + 1.394219509611784e-01j,
1.125740630842972e-01 - 1.394219509611784e-01j,
1.147138487992747e-01 + 1.963932363793666e-01j,
1.147138487992747e-01 - 1.963932363793666e-01j,
1.176516491045901e-01 + 2.546021573417188e-01j,
1.176516491045901e-01 - 2.546021573417188e-01j,
]
k2 = 1.446671081817286e-06
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-7)
assert_allclose(k, k2, rtol=1e-10)
# highpass, high odd order
z, p, k = butter(27, 0.56, 'high', output='zpk')
z2 = np.ones(27)
p2 = [
-1.772572785680147e-01 + 9.276431102995948e-01j,
-1.772572785680147e-01 - 9.276431102995948e-01j,
-1.600766565322114e-01 + 8.264026279893268e-01j,
-1.600766565322114e-01 - 8.264026279893268e-01j,
-1.461948419016121e-01 + 7.341841939120078e-01j,
-1.461948419016121e-01 - 7.341841939120078e-01j,
-1.348975284762046e-01 + 6.493235066053785e-01j,
-1.348975284762046e-01 - 6.493235066053785e-01j,
-1.256628210712206e-01 + 5.704921366889227e-01j,
-1.256628210712206e-01 - 5.704921366889227e-01j,
-1.181038235962314e-01 + 4.966120551231630e-01j,
-1.181038235962314e-01 - 4.966120551231630e-01j,
-1.119304913239356e-01 + 4.267938916403775e-01j,
-1.119304913239356e-01 - 4.267938916403775e-01j,
-1.069237739782691e-01 + 3.602914879527338e-01j,
-1.069237739782691e-01 - 3.602914879527338e-01j,
-1.029178030691416e-01 + 2.964677964142126e-01j,
-1.029178030691416e-01 - 2.964677964142126e-01j,
-9.978747500816100e-02 + 2.347687643085738e-01j,
-9.978747500816100e-02 - 2.347687643085738e-01j,
-9.743974496324025e-02 + 1.747028739092479e-01j,
-9.743974496324025e-02 - 1.747028739092479e-01j,
-9.580754551625957e-02 + 1.158246860771989e-01j,
-9.580754551625957e-02 - 1.158246860771989e-01j,
-9.484562207782568e-02 + 5.772118357151691e-02j,
-9.484562207782568e-02 - 5.772118357151691e-02j,
-9.452783117928215e-02
]
k2 = 9.585686688851069e-09
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-8)
assert_allclose(k, k2)
def test_bandpass(self):
z, p, k = butter(8, [0.25, 0.33], 'band', output='zpk')
z2 = [1, 1, 1, 1, 1, 1, 1, 1,
-1, -1, -1, -1, -1, -1, -1, -1]
p2 = [
4.979909925436156e-01 + 8.367609424799387e-01j,
4.979909925436156e-01 - 8.367609424799387e-01j,
4.913338722555539e-01 + 7.866774509868817e-01j,
4.913338722555539e-01 - 7.866774509868817e-01j,
5.035229361778706e-01 + 7.401147376726750e-01j,
5.035229361778706e-01 - 7.401147376726750e-01j,
5.307617160406101e-01 + 7.029184459442954e-01j,
5.307617160406101e-01 - 7.029184459442954e-01j,
5.680556159453138e-01 + 6.788228792952775e-01j,
5.680556159453138e-01 - 6.788228792952775e-01j,
6.100962560818854e-01 + 6.693849403338664e-01j,
6.100962560818854e-01 - 6.693849403338664e-01j,
6.904694312740631e-01 + 6.930501690145245e-01j,
6.904694312740631e-01 - 6.930501690145245e-01j,
6.521767004237027e-01 + 6.744414640183752e-01j,
6.521767004237027e-01 - 6.744414640183752e-01j,
]
k2 = 3.398854055800844e-08
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-13)
assert_allclose(k, k2, rtol=1e-13)
# bandpass analog
z, p, k = butter(4, [90.5, 110.5], 'bp', analog=True, output='zpk')
z2 = np.zeros(4)
p2 = [
-4.179137760733086e+00 + 1.095935899082837e+02j,
-4.179137760733086e+00 - 1.095935899082837e+02j,
-9.593598668443835e+00 + 1.034745398029734e+02j,
-9.593598668443835e+00 - 1.034745398029734e+02j,
-8.883991981781929e+00 + 9.582087115567160e+01j,
-8.883991981781929e+00 - 9.582087115567160e+01j,
-3.474530886568715e+00 + 9.111599925805801e+01j,
-3.474530886568715e+00 - 9.111599925805801e+01j,
]
k2 = 1.600000000000001e+05
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag))
assert_allclose(k, k2, rtol=1e-15)
def test_bandstop(self):
z, p, k = butter(7, [0.45, 0.56], 'stop', output='zpk')
z2 = [-1.594474531383421e-02 + 9.998728744679880e-01j,
-1.594474531383421e-02 - 9.998728744679880e-01j,
-1.594474531383421e-02 + 9.998728744679880e-01j,
-1.594474531383421e-02 - 9.998728744679880e-01j,
-1.594474531383421e-02 + 9.998728744679880e-01j,
-1.594474531383421e-02 - 9.998728744679880e-01j,
-1.594474531383421e-02 + 9.998728744679880e-01j,
-1.594474531383421e-02 - 9.998728744679880e-01j,
-1.594474531383421e-02 + 9.998728744679880e-01j,
-1.594474531383421e-02 - 9.998728744679880e-01j,
-1.594474531383421e-02 + 9.998728744679880e-01j,
-1.594474531383421e-02 - 9.998728744679880e-01j,
-1.594474531383421e-02 + 9.998728744679880e-01j,
-1.594474531383421e-02 - 9.998728744679880e-01j]
p2 = [-1.766850742887729e-01 + 9.466951258673900e-01j,
-1.766850742887729e-01 - 9.466951258673900e-01j,
1.467897662432886e-01 + 9.515917126462422e-01j,
1.467897662432886e-01 - 9.515917126462422e-01j,
-1.370083529426906e-01 + 8.880376681273993e-01j,
-1.370083529426906e-01 - 8.880376681273993e-01j,
1.086774544701390e-01 + 8.915240810704319e-01j,
1.086774544701390e-01 - 8.915240810704319e-01j,
-7.982704457700891e-02 + 8.506056315273435e-01j,
-7.982704457700891e-02 - 8.506056315273435e-01j,
5.238812787110331e-02 + 8.524011102699969e-01j,
5.238812787110331e-02 - 8.524011102699969e-01j,
-1.357545000491310e-02 + 8.382287744986582e-01j,
-1.357545000491310e-02 - 8.382287744986582e-01j]
k2 = 4.577122512960063e-01
assert_allclose(sorted(z, key=np.imag), sorted(z2, key=np.imag))
assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag))
assert_allclose(k, k2, rtol=1e-14)
def test_ba_output(self):
b, a = butter(4, [100, 300], 'bandpass', analog=True)
b2 = [1.6e+09, 0, 0, 0, 0]
a2 = [1.000000000000000e+00, 5.226251859505511e+02,
2.565685424949238e+05, 6.794127417357160e+07,
1.519411254969542e+10, 2.038238225207147e+12,
2.309116882454312e+14, 1.411088002066486e+16,
8.099999999999991e+17]
assert_allclose(b, b2, rtol=1e-14)
assert_allclose(a, a2, rtol=1e-14)
class TestCheby1(TestCase):
def test_degenerate(self):
# 0-order filter is just a passthrough
# Even-order filters have DC gain of -rp dB
b, a = cheby1(0, 10*np.log10(2), 1, analog=True)
assert_array_almost_equal(b, [1/np.sqrt(2)])
assert_array_equal(a, [1])
# 1-order filter is same for all types
b, a = cheby1(1, 10*np.log10(2), 1, analog=True)
assert_array_almost_equal(b, [1])
assert_array_almost_equal(a, [1, 1])
z, p, k = cheby1(1, 0.1, 0.3, output='zpk')
assert_array_equal(z, [-1])
assert_allclose(p, [-5.390126972799615e-01], rtol=1e-14)
assert_allclose(k, 7.695063486399808e-01, rtol=1e-14)
def test_basic(self):
for N in range(25):
wn = 0.01
z, p, k = cheby1(N, 1, wn, 'low', analog=True, output='zpk')
assert_array_almost_equal([], z)
assert_(len(p) == N)
assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane
for N in range(25):
wn = 0.01
z, p, k = cheby1(N, 1, wn, 'high', analog=False, output='zpk')
assert_array_equal(np.ones(N), z) # All zeros exactly at DC
assert_(all(np.abs(p) <= 1)) # No poles outside unit circle
# Same test as TestNormalize
b, a = cheby1(8, 0.5, 0.048)
assert_array_almost_equal(b, [
2.150733144728282e-11, 1.720586515782626e-10,
6.022052805239190e-10, 1.204410561047838e-09,
1.505513201309798e-09, 1.204410561047838e-09,
6.022052805239190e-10, 1.720586515782626e-10,
2.150733144728282e-11], decimal=14)
assert_array_almost_equal(a, [
1.000000000000000e+00, -7.782402035027959e+00,
2.654354569747454e+01, -5.182182531666387e+01,
6.334127355102684e+01, -4.963358186631157e+01,
2.434862182949389e+01, -6.836925348604676e+00,
8.412934944449140e-01], decimal=14)
b, a = cheby1(4, 1, [0.4, 0.7], btype='band')
assert_array_almost_equal(b, [0.0084, 0, -0.0335, 0, 0.0502, 0,
-0.0335, 0, 0.0084], decimal=4)
assert_array_almost_equal(a, [1.0, 1.1191, 2.862, 2.2986, 3.4137,
1.8653, 1.8982, 0.5676, 0.4103],
decimal=4)
b2, a2 = cheby1(5, 3, 1, analog=True)
assert_array_almost_equal(b2, [0.0626], decimal=4)
assert_array_almost_equal(a2, [1, 0.5745, 1.4150, 0.5489, 0.4080,
0.0626], decimal=4)
b, a = cheby1(8, 0.5, 0.1)
assert_array_almost_equal(b, 1.0e-006 * np.array([
0.00703924326028, 0.05631394608227, 0.19709881128793,
0.39419762257586, 0.49274702821983, 0.39419762257586,
0.19709881128793, 0.05631394608227, 0.00703924326028]),
decimal=13)
assert_array_almost_equal(a, [
1.00000000000000, -7.44912258934158, 24.46749067762108,
-46.27560200466141, 55.11160187999928, -42.31640010161038,
20.45543300484147, -5.69110270561444, 0.69770374759022],
decimal=13)
b, a = cheby1(8, 0.5, 0.25)
assert_array_almost_equal(b, 1.0e-003 * np.array([
0.00895261138923, 0.07162089111382, 0.25067311889837,
0.50134623779673, 0.62668279724591, 0.50134623779673,
0.25067311889837, 0.07162089111382, 0.00895261138923]),
decimal=13)
assert_array_almost_equal(a, [1.00000000000000, -5.97529229188545,
16.58122329202101, -27.71423273542923,
30.39509758355313, -22.34729670426879,
10.74509800434910, -3.08924633697497,
0.40707685889802], decimal=13)
def test_highpass(self):
# high even order
z, p, k = cheby1(24, 0.7, 0.2, 'high', output='zpk')
z2 = np.ones(24)
p2 = [-6.136558509657073e-01 + 2.700091504942893e-01j,
-6.136558509657073e-01 - 2.700091504942893e-01j,
-3.303348340927516e-01 + 6.659400861114254e-01j,
-3.303348340927516e-01 - 6.659400861114254e-01j,
8.779713780557169e-03 + 8.223108447483040e-01j,
8.779713780557169e-03 - 8.223108447483040e-01j,
2.742361123006911e-01 + 8.356666951611864e-01j,
2.742361123006911e-01 - 8.356666951611864e-01j,
4.562984557158206e-01 + 7.954276912303594e-01j,
4.562984557158206e-01 - 7.954276912303594e-01j,
5.777335494123628e-01 + 7.435821817961783e-01j,
5.777335494123628e-01 - 7.435821817961783e-01j,
6.593260977749194e-01 + 6.955390907990932e-01j,
6.593260977749194e-01 - 6.955390907990932e-01j,
7.149590948466562e-01 + 6.559437858502012e-01j,
7.149590948466562e-01 - 6.559437858502012e-01j,
7.532432388188739e-01 + 6.256158042292060e-01j,
7.532432388188739e-01 - 6.256158042292060e-01j,
7.794365244268271e-01 + 6.042099234813333e-01j,
7.794365244268271e-01 - 6.042099234813333e-01j,
7.967253874772997e-01 + 5.911966597313203e-01j,
7.967253874772997e-01 - 5.911966597313203e-01j,
8.069756417293870e-01 + 5.862214589217275e-01j,
8.069756417293870e-01 - 5.862214589217275e-01j]
k2 = 6.190427617192018e-04
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-10)
assert_allclose(k, k2, rtol=1e-10)
# high odd order
z, p, k = cheby1(23, 0.8, 0.3, 'high', output='zpk')
z2 = np.ones(23)
p2 = [-7.676400532011010e-01,
-6.754621070166477e-01 + 3.970502605619561e-01j,
-6.754621070166477e-01 - 3.970502605619561e-01j,
-4.528880018446727e-01 + 6.844061483786332e-01j,
-4.528880018446727e-01 - 6.844061483786332e-01j,
-1.986009130216447e-01 + 8.382285942941594e-01j,
-1.986009130216447e-01 - 8.382285942941594e-01j,
2.504673931532608e-02 + 8.958137635794080e-01j,
2.504673931532608e-02 - 8.958137635794080e-01j,
2.001089429976469e-01 + 9.010678290791480e-01j,
2.001089429976469e-01 - 9.010678290791480e-01j,
3.302410157191755e-01 + 8.835444665962544e-01j,
3.302410157191755e-01 - 8.835444665962544e-01j,
4.246662537333661e-01 + 8.594054226449009e-01j,
4.246662537333661e-01 - 8.594054226449009e-01j,
4.919620928120296e-01 + 8.366772762965786e-01j,
4.919620928120296e-01 - 8.366772762965786e-01j,
5.385746917494749e-01 + 8.191616180796720e-01j,
5.385746917494749e-01 - 8.191616180796720e-01j,
5.855636993537203e-01 + 8.060680937701062e-01j,
5.855636993537203e-01 - 8.060680937701062e-01j,
5.688812849391721e-01 + 8.086497795114683e-01j,
5.688812849391721e-01 - 8.086497795114683e-01j]
k2 = 1.941697029206324e-05
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-10)
assert_allclose(k, k2, rtol=1e-10)
z, p, k = cheby1(10, 1, 1000, 'high', analog=True, output='zpk')
z2 = np.zeros(10)
p2 = [-3.144743169501551e+03 + 3.511680029092744e+03j,
-3.144743169501551e+03 - 3.511680029092744e+03j,
-5.633065604514602e+02 + 2.023615191183945e+03j,
-5.633065604514602e+02 - 2.023615191183945e+03j,
-1.946412183352025e+02 + 1.372309454274755e+03j,
-1.946412183352025e+02 - 1.372309454274755e+03j,
-7.987162953085479e+01 + 1.105207708045358e+03j,
-7.987162953085479e+01 - 1.105207708045358e+03j,
-2.250315039031946e+01 + 1.001723931471477e+03j,
-2.250315039031946e+01 - 1.001723931471477e+03j]
k2 = 8.912509381337453e-01
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-13)
assert_allclose(k, k2, rtol=1e-15)
def test_bandpass(self):
z, p, k = cheby1(8, 1, [0.3, 0.4], 'bp', output='zpk')
z2 = [1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1]
p2 = [3.077784854851463e-01 + 9.453307017592942e-01j,
3.077784854851463e-01 - 9.453307017592942e-01j,
3.280567400654425e-01 + 9.272377218689016e-01j,
3.280567400654425e-01 - 9.272377218689016e-01j,
3.677912763284301e-01 + 9.038008865279966e-01j,
3.677912763284301e-01 - 9.038008865279966e-01j,
4.194425632520948e-01 + 8.769407159656157e-01j,
4.194425632520948e-01 - 8.769407159656157e-01j,
4.740921994669189e-01 + 8.496508528630974e-01j,
4.740921994669189e-01 - 8.496508528630974e-01j,
5.234866481897429e-01 + 8.259608422808477e-01j,
5.234866481897429e-01 - 8.259608422808477e-01j,
5.844717632289875e-01 + 8.052901363500210e-01j,
5.844717632289875e-01 - 8.052901363500210e-01j,
5.615189063336070e-01 + 8.100667803850766e-01j,
5.615189063336070e-01 - 8.100667803850766e-01j]
k2 = 5.007028718074307e-09
assert_array_equal(z, z2)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-13)
assert_allclose(k, k2, rtol=1e-13)
def test_bandstop(self):
z, p, k = cheby1(7, 1, [0.5, 0.6], 'stop', output='zpk')
z2 = [-1.583844403245361e-01 + 9.873775210440450e-01j,
-1.583844403245361e-01 - 9.873775210440450e-01j,
-1.583844403245361e-01 + 9.873775210440450e-01j,
-1.583844403245361e-01 - 9.873775210440450e-01j,
-1.583844403245361e-01 + 9.873775210440450e-01j,
-1.583844403245361e-01 - 9.873775210440450e-01j,
-1.583844403245361e-01 + 9.873775210440450e-01j,
-1.583844403245361e-01 - 9.873775210440450e-01j,
-1.583844403245361e-01 + 9.873775210440450e-01j,
-1.583844403245361e-01 - 9.873775210440450e-01j,
-1.583844403245361e-01 + 9.873775210440450e-01j,
-1.583844403245361e-01 - 9.873775210440450e-01j,
-1.583844403245361e-01 + 9.873775210440450e-01j,
-1.583844403245361e-01 - 9.873775210440450e-01j]
p2 = [-8.942974551472813e-02 + 3.482480481185926e-01j,
-8.942974551472813e-02 - 3.482480481185926e-01j,
1.293775154041798e-01 + 8.753499858081858e-01j,
1.293775154041798e-01 - 8.753499858081858e-01j,
3.399741945062013e-02 + 9.690316022705607e-01j,
3.399741945062013e-02 - 9.690316022705607e-01j,
4.167225522796539e-04 + 9.927338161087488e-01j,
4.167225522796539e-04 - 9.927338161087488e-01j,
-3.912966549550960e-01 + 8.046122859255742e-01j,
-3.912966549550960e-01 - 8.046122859255742e-01j,
-3.307805547127368e-01 + 9.133455018206508e-01j,
-3.307805547127368e-01 - 9.133455018206508e-01j,
-3.072658345097743e-01 + 9.443589759799366e-01j,
-3.072658345097743e-01 - 9.443589759799366e-01j]
k2 = 3.619438310405028e-01
assert_allclose(sorted(z, key=np.imag),
sorted(z2, key=np.imag), rtol=1e-13)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-13)
assert_allclose(k, k2, rtol=1e-15)
def test_ba_output(self):
# with transfer function conversion, without digital conversion
b, a = cheby1(5, 0.9, [210, 310], 'stop', analog=True)
b2 = [1.000000000000006e+00, 0,
3.255000000000020e+05, 0,
4.238010000000026e+10, 0,
2.758944510000017e+15, 0,
8.980364380050052e+19, 0,
1.169243442282517e+24
]
a2 = [1.000000000000000e+00, 4.630555945694342e+02,
4.039266454794788e+05, 1.338060988610237e+08,
5.844333551294591e+10, 1.357346371637638e+13,
3.804661141892782e+15, 5.670715850340080e+17,
1.114411200988328e+20, 8.316815934908471e+21,
1.169243442282517e+24
]
assert_allclose(b, b2, rtol=1e-14)
assert_allclose(a, a2, rtol=1e-14)
class TestCheby2(TestCase):
def test_degenerate(self):
# 0-order filter is just a passthrough
# Stopband ripple factor doesn't matter
b, a = cheby2(0, 123.456, 1, analog=True)
assert_array_equal(b, [1])
assert_array_equal(a, [1])
# 1-order filter is same for all types
b, a = cheby2(1, 10*np.log10(2), 1, analog=True)
assert_array_almost_equal(b, [1])
assert_array_almost_equal(a, [1, 1])
z, p, k = cheby2(1, 50, 0.3, output='zpk')
assert_array_equal(z, [-1])
assert_allclose(p, [9.967826460175649e-01], rtol=1e-14)
assert_allclose(k, 1.608676991217512e-03, rtol=1e-14)
def test_basic(self):
for N in range(25):
wn = 0.01
z, p, k = cheby2(N, 40, wn, 'low', analog=True, output='zpk')
assert_(len(p) == N)
assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane
for N in range(25):
wn = 0.01
z, p, k = cheby2(N, 40, wn, 'high', analog=False, output='zpk')
assert_(all(np.abs(p) <= 1)) # No poles outside unit circle
B, A = cheby2(18, 100, 0.5)
assert_array_almost_equal(B, [
0.00167583914216, 0.01249479541868, 0.05282702120282,
0.15939804265706, 0.37690207631117, 0.73227013789108,
1.20191856962356, 1.69522872823393, 2.07598674519837,
2.21972389625291, 2.07598674519838, 1.69522872823395,
1.20191856962359, 0.73227013789110, 0.37690207631118,
0.15939804265707, 0.05282702120282, 0.01249479541868,
0.00167583914216], decimal=13)
assert_array_almost_equal(A, [
1.00000000000000, -0.27631970006174, 3.19751214254060,
-0.15685969461355, 4.13926117356269, 0.60689917820044,
2.95082770636540, 0.89016501910416, 1.32135245849798,
0.51502467236824, 0.38906643866660, 0.15367372690642,
0.07255803834919, 0.02422454070134, 0.00756108751837,
0.00179848550988, 0.00033713574499, 0.00004258794833,
0.00000281030149], decimal=13)
def test_highpass(self):
# high even order
z, p, k = cheby2(26, 60, 0.3, 'high', output='zpk')
z2 = [9.981088955489852e-01 + 6.147058341984388e-02j,
9.981088955489852e-01 - 6.147058341984388e-02j,
9.832702870387426e-01 + 1.821525257215483e-01j,
9.832702870387426e-01 - 1.821525257215483e-01j,
9.550760158089112e-01 + 2.963609353922882e-01j,
9.550760158089112e-01 - 2.963609353922882e-01j,
9.162054748821922e-01 + 4.007087817803773e-01j,
9.162054748821922e-01 - 4.007087817803773e-01j,
8.700619897368064e-01 + 4.929423232136168e-01j,
8.700619897368064e-01 - 4.929423232136168e-01j,
5.889791753434985e-01 + 8.081482110427953e-01j,
5.889791753434985e-01 - 8.081482110427953e-01j,
5.984900456570295e-01 + 8.011302423760501e-01j,
5.984900456570295e-01 - 8.011302423760501e-01j,
6.172880888914629e-01 + 7.867371958365343e-01j,
6.172880888914629e-01 - 7.867371958365343e-01j,
6.448899971038180e-01 + 7.642754030030161e-01j,
6.448899971038180e-01 - 7.642754030030161e-01j,
6.804845629637927e-01 + 7.327624168637228e-01j,
6.804845629637927e-01 - 7.327624168637228e-01j,
8.202619107108660e-01 + 5.719881098737678e-01j,
8.202619107108660e-01 - 5.719881098737678e-01j,
7.228410452536148e-01 + 6.910143437705678e-01j,
7.228410452536148e-01 - 6.910143437705678e-01j,
7.702121399578629e-01 + 6.377877856007792e-01j,
7.702121399578629e-01 - 6.377877856007792e-01j]
p2 = [7.365546198286450e-01 + 4.842085129329526e-02j,
7.365546198286450e-01 - 4.842085129329526e-02j,
7.292038510962885e-01 + 1.442201672097581e-01j,
7.292038510962885e-01 - 1.442201672097581e-01j,
7.151293788040354e-01 + 2.369925800458584e-01j,
7.151293788040354e-01 - 2.369925800458584e-01j,
6.955051820787286e-01 + 3.250341363856910e-01j,
6.955051820787286e-01 - 3.250341363856910e-01j,
6.719122956045220e-01 + 4.070475750638047e-01j,
6.719122956045220e-01 - 4.070475750638047e-01j,
6.461722130611300e-01 + 4.821965916689270e-01j,
6.461722130611300e-01 - 4.821965916689270e-01j,
5.528045062872224e-01 + 8.162920513838372e-01j,
5.528045062872224e-01 - 8.162920513838372e-01j,
5.464847782492791e-01 + 7.869899955967304e-01j,
5.464847782492791e-01 - 7.869899955967304e-01j,
5.488033111260949e-01 + 7.520442354055579e-01j,
5.488033111260949e-01 - 7.520442354055579e-01j,
6.201874719022955e-01 + 5.500894392527353e-01j,
6.201874719022955e-01 - 5.500894392527353e-01j,
5.586478152536709e-01 + 7.112676877332921e-01j,
5.586478152536709e-01 - 7.112676877332921e-01j,
5.958145844148228e-01 + 6.107074340842115e-01j,
5.958145844148228e-01 - 6.107074340842115e-01j,
5.747812938519067e-01 + 6.643001536914696e-01j,
5.747812938519067e-01 - 6.643001536914696e-01j]
k2 = 9.932997786497189e-02
assert_allclose(sorted(z, key=np.angle),
sorted(z2, key=np.angle), rtol=1e-13)
assert_allclose(sorted(p, key=np.angle),
sorted(p2, key=np.angle), rtol=1e-12)
assert_allclose(k, k2, rtol=1e-11)
# high odd order
z, p, k = cheby2(25, 80, 0.5, 'high', output='zpk')
z2 = [9.690690376586687e-01 + 2.467897896011971e-01j,
9.690690376586687e-01 - 2.467897896011971e-01j,
9.999999999999492e-01,
8.835111277191199e-01 + 4.684101698261429e-01j,
8.835111277191199e-01 - 4.684101698261429e-01j,
7.613142857900539e-01 + 6.483830335935022e-01j,
7.613142857900539e-01 - 6.483830335935022e-01j,
6.232625173626231e-01 + 7.820126817709752e-01j,
6.232625173626231e-01 - 7.820126817709752e-01j,
4.864456563413621e-01 + 8.737108351316745e-01j,
4.864456563413621e-01 - 8.737108351316745e-01j,
3.618368136816749e-01 + 9.322414495530347e-01j,
3.618368136816749e-01 - 9.322414495530347e-01j,
2.549486883466794e-01 + 9.669545833752675e-01j,
2.549486883466794e-01 - 9.669545833752675e-01j,
1.676175432109457e-01 + 9.858520980390212e-01j,
1.676175432109457e-01 - 9.858520980390212e-01j,
1.975218468277521e-03 + 9.999980492540941e-01j,
1.975218468277521e-03 - 9.999980492540941e-01j,
1.786959496651858e-02 + 9.998403260399917e-01j,
1.786959496651858e-02 - 9.998403260399917e-01j,
9.967933660557139e-02 + 9.950196127985684e-01j,
9.967933660557139e-02 - 9.950196127985684e-01j,
5.013970951219547e-02 + 9.987422137518890e-01j,
5.013970951219547e-02 - 9.987422137518890e-01j]
p2 = [4.218866331906864e-01,
4.120110200127552e-01 + 1.361290593621978e-01j,
4.120110200127552e-01 - 1.361290593621978e-01j,
3.835890113632530e-01 + 2.664910809911026e-01j,
3.835890113632530e-01 - 2.664910809911026e-01j,
3.399195570456499e-01 + 3.863983538639875e-01j,
3.399195570456499e-01 - 3.863983538639875e-01j,
2.855977834508353e-01 + 4.929444399540688e-01j,
2.855977834508353e-01 - 4.929444399540688e-01j,
2.255765441339322e-01 + 5.851631870205766e-01j,
2.255765441339322e-01 - 5.851631870205766e-01j,
1.644087535815792e-01 + 6.637356937277153e-01j,
1.644087535815792e-01 - 6.637356937277153e-01j,
-7.293633845273095e-02 + 9.739218252516307e-01j,
-7.293633845273095e-02 - 9.739218252516307e-01j,
1.058259206358626e-01 + 7.304739464862978e-01j,
1.058259206358626e-01 - 7.304739464862978e-01j,
-5.703971947785402e-02 + 9.291057542169088e-01j,
-5.703971947785402e-02 - 9.291057542169088e-01j,
5.263875132656864e-02 + 7.877974334424453e-01j,
5.263875132656864e-02 - 7.877974334424453e-01j,
-3.007943405982616e-02 + 8.846331716180016e-01j,
-3.007943405982616e-02 - 8.846331716180016e-01j,
6.857277464483946e-03 + 8.383275456264492e-01j,
6.857277464483946e-03 - 8.383275456264492e-01j]
k2 = 6.507068761705037e-03
assert_allclose(sorted(z, key=np.angle),
sorted(z2, key=np.angle), rtol=1e-13)
assert_allclose(sorted(p, key=np.angle),
sorted(p2, key=np.angle), rtol=1e-12)
assert_allclose(k, k2, rtol=1e-11)
def test_bandpass(self):
z, p, k = cheby2(9, 40, [0.07, 0.2], 'pass', output='zpk')
z2 = [-9.999999999999999e-01,
3.676588029658514e-01 + 9.299607543341383e-01j,
3.676588029658514e-01 - 9.299607543341383e-01j,
7.009689684982283e-01 + 7.131917730894889e-01j,
7.009689684982283e-01 - 7.131917730894889e-01j,
7.815697973765858e-01 + 6.238178033919218e-01j,
7.815697973765858e-01 - 6.238178033919218e-01j,
8.063793628819866e-01 + 5.913986160941200e-01j,
8.063793628819866e-01 - 5.913986160941200e-01j,
1.000000000000001e+00,
9.944493019920448e-01 + 1.052168511576739e-01j,
9.944493019920448e-01 - 1.052168511576739e-01j,
9.854674703367308e-01 + 1.698642543566085e-01j,
9.854674703367308e-01 - 1.698642543566085e-01j,
9.762751735919308e-01 + 2.165335665157851e-01j,
9.762751735919308e-01 - 2.165335665157851e-01j,
9.792277171575134e-01 + 2.027636011479496e-01j,
9.792277171575134e-01 - 2.027636011479496e-01j]
p2 = [8.143803410489621e-01 + 5.411056063397541e-01j,
8.143803410489621e-01 - 5.411056063397541e-01j,
7.650769827887418e-01 + 5.195412242095543e-01j,
7.650769827887418e-01 - 5.195412242095543e-01j,
6.096241204063443e-01 + 3.568440484659796e-01j,
6.096241204063443e-01 - 3.568440484659796e-01j,
6.918192770246239e-01 + 4.770463577106911e-01j,
6.918192770246239e-01 - 4.770463577106911e-01j,
6.986241085779207e-01 + 1.146512226180060e-01j,
6.986241085779207e-01 - 1.146512226180060e-01j,
8.654645923909734e-01 + 1.604208797063147e-01j,
8.654645923909734e-01 - 1.604208797063147e-01j,
9.164831670444591e-01 + 1.969181049384918e-01j,
9.164831670444591e-01 - 1.969181049384918e-01j,
9.630425777594550e-01 + 2.317513360702271e-01j,
9.630425777594550e-01 - 2.317513360702271e-01j,
9.438104703725529e-01 + 2.193509900269860e-01j,
9.438104703725529e-01 - 2.193509900269860e-01j]
k2 = 9.345352824659604e-03
assert_allclose(sorted(z, key=np.angle),
sorted(z2, key=np.angle), rtol=1e-13)
assert_allclose(sorted(p, key=np.angle),
sorted(p2, key=np.angle), rtol=1e-13)
assert_allclose(k, k2, rtol=1e-11)
def test_bandstop(self):
z, p, k = cheby2(6, 55, [0.1, 0.9], 'stop', output='zpk')
z2 = [6.230544895101009e-01 + 7.821784343111114e-01j,
6.230544895101009e-01 - 7.821784343111114e-01j,
9.086608545660115e-01 + 4.175349702471991e-01j,
9.086608545660115e-01 - 4.175349702471991e-01j,
9.478129721465802e-01 + 3.188268649763867e-01j,
9.478129721465802e-01 - 3.188268649763867e-01j,
-6.230544895100982e-01 + 7.821784343111109e-01j,
-6.230544895100982e-01 - 7.821784343111109e-01j,
-9.086608545660116e-01 + 4.175349702472088e-01j,
-9.086608545660116e-01 - 4.175349702472088e-01j,
-9.478129721465784e-01 + 3.188268649763897e-01j,
-9.478129721465784e-01 - 3.188268649763897e-01j]
p2 = [-9.464094036167638e-01 + 1.720048695084344e-01j,
-9.464094036167638e-01 - 1.720048695084344e-01j,
-8.715844103386737e-01 + 1.370665039509297e-01j,
-8.715844103386737e-01 - 1.370665039509297e-01j,
-8.078751204586425e-01 + 5.729329866682983e-02j,
-8.078751204586425e-01 - 5.729329866682983e-02j,
9.464094036167665e-01 + 1.720048695084332e-01j,
9.464094036167665e-01 - 1.720048695084332e-01j,
8.078751204586447e-01 + 5.729329866683007e-02j,
8.078751204586447e-01 - 5.729329866683007e-02j,
8.715844103386721e-01 + 1.370665039509331e-01j,
8.715844103386721e-01 - 1.370665039509331e-01j]
k2 = 2.917823332763358e-03
assert_allclose(sorted(z, key=np.angle),
sorted(z2, key=np.angle), rtol=1e-13)
assert_allclose(sorted(p, key=np.angle),
sorted(p2, key=np.angle), rtol=1e-13)
assert_allclose(k, k2, rtol=1e-11)
def test_ba_output(self):
# with transfer function conversion, without digital conversion
b, a = cheby2(5, 20, [2010, 2100], 'stop', True)
b2 = [1.000000000000000e+00, 0, # Matlab: 6.683253076978249e-12,
2.111512500000000e+07, 0, # Matlab: 1.134325604589552e-04,
1.782966433781250e+14, 0, # Matlab: 7.216787944356781e+02,
7.525901316990656e+20, 0, # Matlab: 2.039829265789886e+09,
1.587960565565748e+27, 0, # Matlab: 2.161236218626134e+15,
1.339913493808585e+33]
a2 = [1.000000000000000e+00, 1.849550755473371e+02,
2.113222918998538e+07, 3.125114149732283e+09,
1.785133457155609e+14, 1.979158697776348e+16,
7.535048322653831e+20, 5.567966191263037e+22,
1.589246884221346e+27, 5.871210648525566e+28,
1.339913493808590e+33]
assert_allclose(b, b2, rtol=1e-14)
assert_allclose(a, a2, rtol=1e-14)
class TestEllip(TestCase):
def test_degenerate(self):
# 0-order filter is just a passthrough
# Even-order filters have DC gain of -rp dB
# Stopband ripple factor doesn't matter
b, a = ellip(0, 10*np.log10(2), 123.456, 1, analog=True)
assert_array_almost_equal(b, [1/np.sqrt(2)])
assert_array_equal(a, [1])
# 1-order filter is same for all types
b, a = ellip(1, 10*np.log10(2), 1, 1, analog=True)
assert_array_almost_equal(b, [1])
assert_array_almost_equal(a, [1, 1])
z, p, k = ellip(1, 1, 55, 0.3, output='zpk')
assert_allclose(z, [-9.999999999999998e-01], rtol=1e-14)
assert_allclose(p, [-6.660721153525525e-04], rtol=1e-10)
assert_allclose(k, 5.003330360576763e-01, rtol=1e-14)
def test_basic(self):
for N in range(25):
wn = 0.01
z, p, k = ellip(N, 1, 40, wn, 'low', analog=True, output='zpk')
assert_(len(p) == N)
assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane
for N in range(25):
wn = 0.01
z, p, k = ellip(N, 1, 40, wn, 'high', analog=False, output='zpk')
assert_(all(np.abs(p) <= 1)) # No poles outside unit circle
b3, a3 = ellip(5, 3, 26, 1, analog=True)
assert_array_almost_equal(b3, [0.1420, 0, 0.3764, 0,
0.2409], decimal=4)
assert_array_almost_equal(a3, [1, 0.5686, 1.8061, 0.8017, 0.8012,
0.2409], decimal=4)
b, a = ellip(3, 1, 60, [0.4, 0.7], 'stop')
assert_array_almost_equal(b, [0.3310, 0.3469, 1.1042, 0.7044, 1.1042,
0.3469, 0.3310], decimal=4)
assert_array_almost_equal(a, [1.0000, 0.6973, 1.1441, 0.5878, 0.7323,
0.1131, -0.0060], decimal=4)
def test_highpass(self):
# high even order
z, p, k = ellip(24, 1, 80, 0.3, 'high', output='zpk')
z2 = [9.761875332501075e-01 + 2.169283290099910e-01j,
9.761875332501075e-01 - 2.169283290099910e-01j,
8.413503353963494e-01 + 5.404901600661900e-01j,
8.413503353963494e-01 - 5.404901600661900e-01j,
7.160082576305009e-01 + 6.980918098681732e-01j,
7.160082576305009e-01 - 6.980918098681732e-01j,
6.456533638965329e-01 + 7.636306264739803e-01j,
6.456533638965329e-01 - 7.636306264739803e-01j,
6.127321820971366e-01 + 7.902906256703928e-01j,
6.127321820971366e-01 - 7.902906256703928e-01j,
5.983607817490196e-01 + 8.012267936512676e-01j,
5.983607817490196e-01 - 8.012267936512676e-01j,
5.922577552594799e-01 + 8.057485658286990e-01j,
5.922577552594799e-01 - 8.057485658286990e-01j,
5.896952092563588e-01 + 8.076258788449631e-01j,
5.896952092563588e-01 - 8.076258788449631e-01j,
5.886248765538837e-01 + 8.084063054565607e-01j,
5.886248765538837e-01 - 8.084063054565607e-01j,
5.881802711123132e-01 + 8.087298490066037e-01j,
5.881802711123132e-01 - 8.087298490066037e-01j,
5.879995719101164e-01 + 8.088612386766461e-01j,
5.879995719101164e-01 - 8.088612386766461e-01j,
5.879354086709576e-01 + 8.089078780868164e-01j,
5.879354086709576e-01 - 8.089078780868164e-01j]
p2 = [-3.184805259081650e-01 + 4.206951906775851e-01j,
-3.184805259081650e-01 - 4.206951906775851e-01j,
1.417279173459985e-01 + 7.903955262836452e-01j,
1.417279173459985e-01 - 7.903955262836452e-01j,
4.042881216964651e-01 + 8.309042239116594e-01j,
4.042881216964651e-01 - 8.309042239116594e-01j,
5.128964442789670e-01 + 8.229563236799665e-01j,
5.128964442789670e-01 - 8.229563236799665e-01j,
5.569614712822724e-01 + 8.155957702908510e-01j,
5.569614712822724e-01 - 8.155957702908510e-01j,
5.750478870161392e-01 + 8.118633973883931e-01j,
5.750478870161392e-01 - 8.118633973883931e-01j,
5.825314018170804e-01 + 8.101960910679270e-01j,
5.825314018170804e-01 - 8.101960910679270e-01j,
5.856397379751872e-01 + 8.094825218722543e-01j,
5.856397379751872e-01 - 8.094825218722543e-01j,
5.869326035251949e-01 + 8.091827531557583e-01j,
5.869326035251949e-01 - 8.091827531557583e-01j,
5.874697218855733e-01 + 8.090593298213502e-01j,
5.874697218855733e-01 - 8.090593298213502e-01j,
5.876904783532237e-01 + 8.090127161018823e-01j,
5.876904783532237e-01 - 8.090127161018823e-01j,
5.877753105317594e-01 + 8.090050577978136e-01j,
5.877753105317594e-01 - 8.090050577978136e-01j]
k2 = 4.918081266957108e-02
assert_allclose(sorted(z, key=np.angle),
sorted(z2, key=np.angle), rtol=1e-4)
assert_allclose(sorted(p, key=np.angle),
sorted(p2, key=np.angle), rtol=1e-4)
assert_allclose(k, k2, rtol=1e-3)
# high odd order
z, p, k = ellip(23, 1, 70, 0.5, 'high', output='zpk')
z2 = [9.999999999998661e-01,
6.603717261750994e-01 + 7.509388678638675e-01j,
6.603717261750994e-01 - 7.509388678638675e-01j,
2.788635267510325e-01 + 9.603307416968041e-01j,
2.788635267510325e-01 - 9.603307416968041e-01j,
1.070215532544218e-01 + 9.942567008268131e-01j,
1.070215532544218e-01 - 9.942567008268131e-01j,
4.049427369978163e-02 + 9.991797705105507e-01j,
4.049427369978163e-02 - 9.991797705105507e-01j,
1.531059368627931e-02 + 9.998827859909265e-01j,
1.531059368627931e-02 - 9.998827859909265e-01j,
5.808061438534933e-03 + 9.999831330689181e-01j,
5.808061438534933e-03 - 9.999831330689181e-01j,
2.224277847754599e-03 + 9.999975262909676e-01j,
2.224277847754599e-03 - 9.999975262909676e-01j,
8.731857107534554e-04 + 9.999996187732845e-01j,
8.731857107534554e-04 - 9.999996187732845e-01j,
3.649057346914968e-04 + 9.999999334218996e-01j,
3.649057346914968e-04 - 9.999999334218996e-01j,
1.765538109802615e-04 + 9.999999844143768e-01j,
1.765538109802615e-04 - 9.999999844143768e-01j,
1.143655290967426e-04 + 9.999999934602630e-01j,
1.143655290967426e-04 - 9.999999934602630e-01j]
p2 = [-6.322017026545028e-01,
-4.648423756662754e-01 + 5.852407464440732e-01j,
-4.648423756662754e-01 - 5.852407464440732e-01j,
-2.249233374627773e-01 + 8.577853017985717e-01j,
-2.249233374627773e-01 - 8.577853017985717e-01j,
-9.234137570557621e-02 + 9.506548198678851e-01j,
-9.234137570557621e-02 - 9.506548198678851e-01j,
-3.585663561241373e-02 + 9.821494736043981e-01j,
-3.585663561241373e-02 - 9.821494736043981e-01j,
-1.363917242312723e-02 + 9.933844128330656e-01j,
-1.363917242312723e-02 - 9.933844128330656e-01j,
-5.131505238923029e-03 + 9.975221173308673e-01j,
-5.131505238923029e-03 - 9.975221173308673e-01j,
-1.904937999259502e-03 + 9.990680819857982e-01j,
-1.904937999259502e-03 - 9.990680819857982e-01j,
-6.859439885466834e-04 + 9.996492201426826e-01j,
-6.859439885466834e-04 - 9.996492201426826e-01j,
-2.269936267937089e-04 + 9.998686250679161e-01j,
-2.269936267937089e-04 - 9.998686250679161e-01j,
-5.687071588789117e-05 + 9.999527573294513e-01j,
-5.687071588789117e-05 - 9.999527573294513e-01j,
-6.948417068525226e-07 + 9.999882737700173e-01j,
-6.948417068525226e-07 - 9.999882737700173e-01j]
k2 = 1.220910020289434e-02
assert_allclose(sorted(z, key=np.angle),
sorted(z2, key=np.angle), rtol=1e-4)
assert_allclose(sorted(p, key=np.angle),
sorted(p2, key=np.angle), rtol=1e-4)
assert_allclose(k, k2, rtol=1e-3)
def test_bandpass(self):
z, p, k = ellip(7, 1, 40, [0.07, 0.2], 'pass', output='zpk')
z2 = [-9.999999999999991e-01,
6.856610961780020e-01 + 7.279209168501619e-01j,
6.856610961780020e-01 - 7.279209168501619e-01j,
7.850346167691289e-01 + 6.194518952058737e-01j,
7.850346167691289e-01 - 6.194518952058737e-01j,
7.999038743173071e-01 + 6.001281461922627e-01j,
7.999038743173071e-01 - 6.001281461922627e-01j,
9.999999999999999e-01,
9.862938983554124e-01 + 1.649980183725925e-01j,
9.862938983554124e-01 - 1.649980183725925e-01j,
9.788558330548762e-01 + 2.045513580850601e-01j,
9.788558330548762e-01 - 2.045513580850601e-01j,
9.771155231720003e-01 + 2.127093189691258e-01j,
9.771155231720003e-01 - 2.127093189691258e-01j]
p2 = [8.063992755498643e-01 + 5.858071374778874e-01j,
8.063992755498643e-01 - 5.858071374778874e-01j,
8.050395347071724e-01 + 5.639097428109795e-01j,
8.050395347071724e-01 - 5.639097428109795e-01j,
8.113124936559144e-01 + 4.855241143973142e-01j,
8.113124936559144e-01 - 4.855241143973142e-01j,
8.665595314082394e-01 + 3.334049560919331e-01j,
8.665595314082394e-01 - 3.334049560919331e-01j,
9.412369011968871e-01 + 2.457616651325908e-01j,
9.412369011968871e-01 - 2.457616651325908e-01j,
9.679465190411238e-01 + 2.228772501848216e-01j,
9.679465190411238e-01 - 2.228772501848216e-01j,
9.747235066273385e-01 + 2.178937926146544e-01j,
9.747235066273385e-01 - 2.178937926146544e-01j]
k2 = 8.354782670263239e-03
assert_allclose(sorted(z, key=np.angle),
sorted(z2, key=np.angle), rtol=1e-4)
assert_allclose(sorted(p, key=np.angle),
sorted(p2, key=np.angle), rtol=1e-4)
assert_allclose(k, k2, rtol=1e-3)
z, p, k = ellip(5, 1, 75, [90.5, 110.5], 'pass', True, 'zpk')
z2 = [-5.583607317695175e-14 + 1.433755965989225e+02j,
-5.583607317695175e-14 - 1.433755965989225e+02j,
5.740106416459296e-14 + 1.261678754570291e+02j,
5.740106416459296e-14 - 1.261678754570291e+02j,
-2.199676239638652e-14 + 6.974861996895196e+01j,
-2.199676239638652e-14 - 6.974861996895196e+01j,
-3.372595657044283e-14 + 7.926145989044531e+01j,
-3.372595657044283e-14 - 7.926145989044531e+01j,
0]
p2 = [-8.814960004852743e-01 + 1.104124501436066e+02j,
-8.814960004852743e-01 - 1.104124501436066e+02j,
-2.477372459140184e+00 + 1.065638954516534e+02j,
-2.477372459140184e+00 - 1.065638954516534e+02j,
-3.072156842945799e+00 + 9.995404870405324e+01j,
-3.072156842945799e+00 - 9.995404870405324e+01j,
-2.180456023925693e+00 + 9.379206865455268e+01j,
-2.180456023925693e+00 - 9.379206865455268e+01j,
-7.230484977485752e-01 + 9.056598800801140e+01j,
-7.230484977485752e-01 - 9.056598800801140e+01j]
k2 = 3.774571622827070e-02
assert_allclose(sorted(z, key=np.imag),
sorted(z2, key=np.imag), rtol=1e-4)
assert_allclose(sorted(p, key=np.imag),
sorted(p2, key=np.imag), rtol=1e-6)
assert_allclose(k, k2, rtol=1e-3)
def test_bandstop(self):
z, p, k = ellip(8, 1, 65, [0.2, 0.4], 'stop', output='zpk')
z2 = [3.528578094286510e-01 + 9.356769561794296e-01j,
3.528578094286510e-01 - 9.356769561794296e-01j,
3.769716042264783e-01 + 9.262248159096587e-01j,
3.769716042264783e-01 - 9.262248159096587e-01j,
4.406101783111199e-01 + 8.976985411420985e-01j,
4.406101783111199e-01 - 8.976985411420985e-01j,
5.539386470258847e-01 + 8.325574907062760e-01j,
5.539386470258847e-01 - 8.325574907062760e-01j,
6.748464963023645e-01 + 7.379581332490555e-01j,
6.748464963023645e-01 - 7.379581332490555e-01j,
7.489887970285254e-01 + 6.625826604475596e-01j,
7.489887970285254e-01 - 6.625826604475596e-01j,
7.913118471618432e-01 + 6.114127579150699e-01j,
7.913118471618432e-01 - 6.114127579150699e-01j,
7.806804740916381e-01 + 6.249303940216475e-01j,
7.806804740916381e-01 - 6.249303940216475e-01j]
p2 = [-1.025299146693730e-01 + 5.662682444754943e-01j,
-1.025299146693730e-01 - 5.662682444754943e-01j,
1.698463595163031e-01 + 8.926678667070186e-01j,
1.698463595163031e-01 - 8.926678667070186e-01j,
2.750532687820631e-01 + 9.351020170094005e-01j,
2.750532687820631e-01 - 9.351020170094005e-01j,
3.070095178909486e-01 + 9.457373499553291e-01j,
3.070095178909486e-01 - 9.457373499553291e-01j,
7.695332312152288e-01 + 2.792567212705257e-01j,
7.695332312152288e-01 - 2.792567212705257e-01j,
8.083818999225620e-01 + 4.990723496863960e-01j,
8.083818999225620e-01 - 4.990723496863960e-01j,
8.066158014414928e-01 + 5.649811440393374e-01j,
8.066158014414928e-01 - 5.649811440393374e-01j,
8.062787978834571e-01 + 5.855780880424964e-01j,
8.062787978834571e-01 - 5.855780880424964e-01j]
k2 = 2.068622545291259e-01
assert_allclose(sorted(z, key=np.angle),
sorted(z2, key=np.angle), rtol=1e-6)
assert_allclose(sorted(p, key=np.angle),
sorted(p2, key=np.angle), rtol=1e-5)
assert_allclose(k, k2, rtol=1e-5)
def test_ba_output(self):
# with transfer function conversion, without digital conversion
b, a = ellip(5, 1, 40, [201, 240], 'stop', True)
b2 = [
1.000000000000000e+00, 0, # Matlab: 1.743506051190569e-13,
2.426561778314366e+05, 0, # Matlab: 3.459426536825722e-08,
2.348218683400168e+10, 0, # Matlab: 2.559179747299313e-03,
1.132780692872241e+15, 0, # Matlab: 8.363229375535731e+01,
2.724038554089566e+19, 0, # Matlab: 1.018700994113120e+06,
2.612380874940186e+23
]
a2 = [
1.000000000000000e+00, 1.337266601804649e+02,
2.486725353510667e+05, 2.628059713728125e+07,
2.436169536928770e+10, 1.913554568577315e+12,
1.175208184614438e+15, 6.115751452473410e+16,
2.791577695211466e+19, 7.241811142725384e+20,
2.612380874940182e+23
]
assert_allclose(b, b2, rtol=1e-6)
assert_allclose(a, a2, rtol=1e-4)
def test_sos_consistency():
# Consistency checks of output='sos' for the specialized IIR filter
# design functions.
design_funcs = [(bessel, (0.1,)),
(butter, (0.1,)),
(cheby1, (45.0, 0.1)),
(cheby2, (0.087, 0.1)),
(ellip, (0.087, 45, 0.1))]
for func, args in design_funcs:
name = func.__name__
b, a = func(2, *args, output='ba')
sos = func(2, *args, output='sos')
assert_allclose(sos, [np.hstack((b, a))], err_msg="%s(2,...)" % name)
zpk = func(3, *args, output='zpk')
sos = func(3, *args, output='sos')
assert_allclose(sos, zpk2sos(*zpk), err_msg="%s(3,...)" % name)
zpk = func(4, *args, output='zpk')
sos = func(4, *args, output='sos')
assert_allclose(sos, zpk2sos(*zpk), err_msg="%s(4,...)" % name)
class TestIIRFilter(TestCase):
def test_symmetry(self):
# All built-in IIR filters are real, so should have perfectly
# symmetrical poles and zeros. Then ba representation (using
# numpy.poly) will be purely real instead of having negligible
# imaginary parts.
for N in np.arange(1, 26):
for ftype in ('butter', 'bessel', 'cheby1', 'cheby2', 'ellip'):
z, p, k = iirfilter(N, 1.1, 1, 20, 'low', analog=True,
ftype=ftype, output='zpk')
assert_array_equal(sorted(z), sorted(z.conj()))
assert_array_equal(sorted(p), sorted(p.conj()))
assert_equal(k, np.real(k))
b, a = iirfilter(N, 1.1, 1, 20, 'low', analog=True,
ftype=ftype, output='ba')
assert_(issubclass(b.dtype.type, np.floating))
assert_(issubclass(a.dtype.type, np.floating))
def test_int_inputs(self):
# Using integer frequency arguments and large N should not produce
        # np.ints that wrap around to negative numbers
k = iirfilter(24, 100, btype='low', analog=True, ftype='bessel',
output='zpk')[2]
k2 = 9.999999999999989e+47
assert_allclose(k, k2)
def test_invalid_wn_size(self):
# low and high have 1 Wn, band and stop have 2 Wn
assert_raises(ValueError, iirfilter, 1, [0.1, 0.9], btype='low')
assert_raises(ValueError, iirfilter, 1, [0.2, 0.5], btype='high')
assert_raises(ValueError, iirfilter, 1, 0.2, btype='bp')
assert_raises(ValueError, iirfilter, 1, 400, btype='bs', analog=True)
def test_invalid_wn_range(self):
# For digital filters, 0 <= Wn <= 1
assert_raises(ValueError, iirfilter, 1, 2, btype='low')
assert_raises(ValueError, iirfilter, 1, -1, btype='high')
assert_raises(ValueError, iirfilter, 1, [1, 2], btype='band')
assert_raises(ValueError, iirfilter, 1, [10, 20], btype='stop')
class TestGroupDelay(TestCase):
def test_identity_filter(self):
w, gd = group_delay((1, 1))
assert_array_almost_equal(w, pi * np.arange(512) / 512)
assert_array_almost_equal(gd, np.zeros(512))
w, gd = group_delay((1, 1), whole=True)
assert_array_almost_equal(w, 2 * pi * np.arange(512) / 512)
assert_array_almost_equal(gd, np.zeros(512))
def test_fir(self):
        # Let's design a linear-phase FIR filter and check that the group
        # delay is constant.
N = 100
b = firwin(N + 1, 0.1)
w, gd = group_delay((b, 1))
assert_allclose(gd, 0.5 * N)
def test_iir(self):
        # Let's design a Butterworth filter and test the group delay at
        # some points against the MATLAB answer.
b, a = butter(4, 0.1)
w = np.linspace(0, pi, num=10, endpoint=False)
w, gd = group_delay((b, a), w=w)
matlab_gd = np.array([8.249313898506037, 11.958947880907104,
2.452325615326005, 1.048918665702008,
0.611382575635897, 0.418293269460578,
0.317932917836572, 0.261371844762525,
0.229038045801298, 0.212185774208521])
assert_array_almost_equal(gd, matlab_gd)
def test_singular(self):
# Let's create a filter with zeros and poles on the unit circle and
        # check that a warning is raised and the group delay is set to zero at
# these frequencies.
z1 = np.exp(1j * 0.1 * pi)
z2 = np.exp(1j * 0.25 * pi)
p1 = np.exp(1j * 0.5 * pi)
p2 = np.exp(1j * 0.8 * pi)
b = np.convolve([1, -z1], [1, -z2])
a = np.convolve([1, -p1], [1, -p2])
w = np.array([0.1 * pi, 0.25 * pi, -0.5 * pi, -0.8 * pi])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
assert_warns(UserWarning, group_delay, (b, a), w=w)
w, gd = group_delay((b, a), w=w)
assert_allclose(gd, 0)
if __name__ == "__main__":
run_module_suite()
|
py
|
1a5821c4b5c97e8f8694cfb31d3133ec3f893e01
|
import sys
import os
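# Make the project root importable so the homebrew_calc package can be found
# when the tests are run from this directory.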
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import homebrew_calc
|
py
|
1a5822405dd52e54f4dc36baae08f893bdb599c6
|
import torch
from torch import nn
import torch.nn.functional as F
from .utils import SQRT2, deg2rad
class Stackgram(nn.Module):
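    # Turns a sinogram into a "stackgram": for every projection angle the
    # corresponding detector line is tiled into a square image and rotated to
    # its angle with grid_sample, producing one plane per angle.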
def __init__(self, out_size, theta=None, circle=True, mode='nearest'):
super(Stackgram, self).__init__()
self.circle = circle
self.theta = theta
if theta is None:
self.theta = torch.arange(180)
self.out_size = out_size
self.in_size = in_size = out_size if circle else int((SQRT2*out_size).ceil())
self.all_grids = self._create_grids(self.theta, in_size)
self.mode = mode
def forward(self, x):
stackgram = torch.zeros(x.shape[0], len(self.theta), self.in_size, self.in_size).to(x.device)
for i_theta in range(len(self.theta)):
repline = x[...,i_theta]
repline = repline.unsqueeze(-1).repeat(1,1,1,repline.shape[2])
linogram = F.grid_sample(repline, self.all_grids[i_theta].repeat(x.shape[0],1,1,1).to(x.device), mode=self.mode)
stackgram[:,i_theta] = linogram
return stackgram
def _create_grids(self, angles, grid_size):
all_grids = []
for i_theta in range(len(angles)):
t = deg2rad(angles[i_theta])
R = torch.tensor([[t.sin(), t.cos(), 0.],[t.cos(), -t.sin(), 0.]]).unsqueeze(0)
all_grids.append(F.affine_grid(R, torch.Size([1,1,grid_size,grid_size])))
return all_grids
class IStackgram(nn.Module):
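    # Inverse of Stackgram: each per-angle plane is rotated back with
    # grid_sample and its centre column (along the last dimension) is read out
    # as one line of the reconstructed sinogram.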
def __init__(self, out_size, theta=None, circle=True, mode='bilinear'):
super(IStackgram, self).__init__()
self.circle = circle
self.theta = theta
if theta is None:
self.theta = torch.arange(180)
self.out_size = out_size
self.in_size = in_size = out_size if circle else int((SQRT2*out_size).ceil())
self.all_grids = self._create_grids(self.theta, in_size)
self.mode = mode
def forward(self, x):
sinogram = torch.zeros(x.shape[0], 1, self.in_size, len(self.theta)).to(x.device)
for i_theta in range(len(self.theta)):
linogram = x[:,i_theta].unsqueeze(1)
repline = F.grid_sample(linogram, self.all_grids[i_theta].repeat(x.shape[0],1,1,1).to(x.device), mode=self.mode)
repline = repline[...,repline.shape[-1]//2]
sinogram[...,i_theta] = repline
return sinogram
def _create_grids(self, angles, grid_size):
all_grids = []
for i_theta in range(len(angles)):
t = deg2rad(angles[i_theta])
R = torch.tensor([[t.sin(), t.cos(), 0.],[t.cos(), -t.sin(), 0.]]).unsqueeze(0)
all_grids.append(F.affine_grid(R, torch.Size([1,1,grid_size,grid_size])))
return all_grids
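# Minimal usage sketch (an assumption, not part of this module): with a
# single-channel sinogram `sino` of intended shape [B, 1, S, len(theta)] and
# out_size=S,
#   stackgram = Stackgram(out_size=S)(sino)        # -> [B, len(theta), S, S]
#   sino_back = IStackgram(out_size=S)(stackgram)  # -> [B, 1, S, len(theta)]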
|
py
|
1a5822511a8dcf66a08b8169e864df1fc9f94f5e
|
import pygame
import sys; sys.path.insert(0, "..")
import tools_for_pygame as pgt
pygame.init()
__test_name__ = "animations.TextureAni"
screen = pygame.display.set_mode((800, 600))
pygame.display.set_caption(__test_name__)
clock = pygame.time.Clock()
fps = pgt.gui.Label(pos=0, font="consolas", text_size=20, color=pgt.WHITE)
t1 = pgt.filled_surface((100, 100), pgt.RED)
t2 = pgt.filled_surface((100, 100), pgt.GREEN)
t3 = pgt.filled_surface((100, 100), pgt.BLUE)
base = pygame.Surface((100, 100))
base.fill(pgt.SALMON)
e = pgt.AniElement(
pos=(100, 100),
size=(100, 100),
image=base,
animations=[
pgt.ani.TextureAni(
name="flash",
frames=[t1, t2, t3],
time=.5,
loop=False,
queued_ani=pgt.ani.TextureAni(
name="queued_flash",
frames=[t1, t2, t3],
time=1
)
)
],
rotation=45,
alpha=152
)
e.flash.start()
while True:
clock.tick()
fps.text = int(clock.get_fps())
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
screen.fill(pgt.GRAY(50))
fps.draw(screen)
e.draw(screen)
pygame.display.update()
|
py
|
1a582290f1d08294f8b59f9aa4d41138267d9e73
|
import pickle
import pandas as pd
swbd=pd.read_csv('swbd_DB_disf1_tags_table.csv')
# df1=pickle.load(open('DB_disf1_tags.pkl', "rb"))
# df1=pickle.load(open('swbd_DB_disf1_tags.pkl', "rb"))
# print(df1)
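# Re-save the DataFrame loaded from the CSV as a pickle for faster reloading.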
f = open("swbd_DB_disf1_tags.pkl",'wb')
pickle.dump(swbd,f)
f.close()
# df2=pickle.load(open('DB_disf1.pkl', "rb"))
# print(df2)
|
py
|
1a5822a62d6f320c10435e5c2ec589d98e7445a5
|
import contextlib
import os
import shutil
import subprocess
from tests import constants
def file_is_immutable(path):
"""Whether a file has the immutable attribute set.
Parameters
----------
path : str
An absolute path to a file.
Returns
-------
bool
        True if the file's immutable attribute is set, False if it is not.
Raises
------
CalledProcessError
If the exit status of the chattr command is non-zero.
"""
# Run the lsattr command.
lsattr_result = subprocess.run(
["lsattr", path],
check=True,
stderr=subprocess.DEVNULL,
stdout=subprocess.PIPE,
universal_newlines=True,
)
# Extract the immutable attribute from the command output.
attributes = lsattr_result.stdout.split()[0]
immutable_flag = list(attributes)[4]
return immutable_flag == "i"
def set_file_immutable_attribute(path, immutable):
"""Set or unset the immutable attribute for a file.
Parameters
----------
path : str
The absolute path of a file.
immutable: bool
Set immutable attribute if True, unset immutable attribute if False.
Returns
-------
None
Raises
------
CalledProcessError
If the exit status of the chattr command is non-zero.
"""
operation = "+i" if immutable else "-i"
subprocess.run(
["sudo", "chattr", operation, path],
check=True,
stderr=subprocess.DEVNULL,
)
def set_up():
"""Create temporary directories and files.
Returns
-------
None
"""
# Ensure that tests start with a clean slate.
tear_down()
# Create testing directories.
os.makedirs(constants.EMPTY_SUBDIRECTORY_PATH)
os.makedirs(constants.GIT_SUBDIRECTORY_PATH)
os.makedirs(constants.SUBDIRECTORY_PATH)
# Create testing files.
open(constants.GIT_DIRECTORY_MUTABLE_FILE_PATH, "x").close()
open(constants.GIT_SUBDIRECTORY_MUTABLE_FILE_PATH, "x").close()
open(constants.IMMUTABLE_FILE_PATH, "x").close()
open(constants.MUTABLE_FILE_PATH, "x").close()
open(constants.SUBDIRECTORY_IMMUTABLE_FILE_PATH, "x").close()
open(constants.SUBDIRECTORY_MUTABLE_FILE_PATH, "x").close()
open(constants.READABLE_BY_ROOT_FILE_PATH, "x").close()
# Create testing named pipe.
os.mkfifo(constants.NAMED_PIPE_PATH)
# Create testing links.
os.symlink(constants.MUTABLE_FILE_PATH, constants.LINK_PATH)
os.symlink(
constants.SUBDIRECTORY_MUTABLE_FILE_PATH,
constants.SUBDIRECTORY_LINK_PATH,
)
# Set immutability for some testing files.
set_file_immutable_attribute(constants.IMMUTABLE_FILE_PATH, immutable=True)
set_file_immutable_attribute(
constants.SUBDIRECTORY_IMMUTABLE_FILE_PATH,
immutable=True,
)
# Set ownership and permissions of the file which is readable only by root.
os.chmod(constants.READABLE_BY_ROOT_FILE_PATH, 0o400)
subprocess.run(
["sudo", "chown", "root:root", constants.READABLE_BY_ROOT_FILE_PATH],
check=True,
)
def tear_down():
"""Delete temporary directories and files.
Returns
-------
None
"""
    # Ensure all testing files are mutable, or they won't be able to be deleted.
for root_dir, _, filenames in os.walk(constants.DIRECTORY_PATH):
for filename in filenames:
file_path = os.path.join(root_dir, filename)
with contextlib.suppress(subprocess.CalledProcessError):
set_file_immutable_attribute(file_path, immutable=False)
# Remove the testing directory.
try:
shutil.rmtree(constants.DIRECTORY_PATH)
except FileNotFoundError:
pass
|
py
|
1a5822d85489c041d117a747027747a11d9d3ab5
|
import numpy as np
import yt
from yt.data_objects.level_sets.api import Clump, find_clumps
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
data_source = ds.disk([0.5, 0.5, 0.5], [0.0, 0.0, 1.0], (8, "kpc"), (1, "kpc"))
# the field to be used for contouring
field = ("gas", "density")
# This is the multiplicative interval between contours.
step = 2.0
# Now we set some sane min/max values between which we want to find contours.
# This is how we tell the clump finder what to look for -- it won't look for
# contours connected below or above these threshold values.
c_min = 10 ** np.floor(np.log10(data_source[field]).min())
c_max = 10 ** np.floor(np.log10(data_source[field]).max() + 1)
# Now get our 'base' clump -- this one just covers the whole domain.
master_clump = Clump(data_source, field)
# Add a "validator" to weed out clumps with less than 20 cells.
# As many validators can be added as you want.
master_clump.add_validator("min_cells", 20)
# Calculate center of mass for all clumps.
master_clump.add_info_item("center_of_mass")
# Begin clump finding.
find_clumps(master_clump, c_min, c_max, step)
# Save the clump tree as a reloadable dataset
fn = master_clump.save_as_dataset(fields=["density", "particle_mass"])
# We can traverse the clump hierarchy to get a list of all of the 'leaf' clumps
leaf_clumps = master_clump.leaves
# Get total cell and particle masses for each leaf clump
leaf_masses = [leaf.quantities.total_mass() for leaf in leaf_clumps]
# If you'd like to visualize these clumps, a list of clumps can be supplied to
# the "clumps" callback on a plot. First, we create a projection plot:
prj = yt.ProjectionPlot(ds, 2, field, center="c", width=(20, "kpc"))
# Next we annotate the plot with contours on the borders of the clumps
prj.annotate_clumps(leaf_clumps)
# Save the plot to disk.
prj.save("clumps")
# Reload the clump dataset.
cds = yt.load(fn)
# Clump annotation can also be done with the reloaded clump dataset.
# Remove the original clump annotation
prj.clear_annotations()
# Get the leaves and add the callback.
leaf_clumps_reloaded = cds.leaves
prj.annotate_clumps(leaf_clumps_reloaded)
prj.save("clumps_reloaded")
# Query fields for clumps in the tree.
print(cds.tree["clump", "center_of_mass"])
print(cds.tree.children[0]["grid", "density"])
print(cds.tree.children[1]["all", "particle_mass"])
# Get all of the leaf clumps.
print(cds.leaves)
print(cds.leaves[0]["clump", "cell_mass"])
|
py
|
1a58231b681762bcc7553a69717443049d5e197c
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import binascii
from collections import defaultdict
import re
import subprocess
from typing import Optional, Dict, List, TYPE_CHECKING
# Prevent circular import
if TYPE_CHECKING:
from magma.pipelined.service_manager import Tables
class DatapathLookupError(Exception):
pass
class BridgeTools:
"""
BridgeTools
    Use ovs-vsctl commands to get bridge info and set up bridges for testing.
"""
TABLE_NUM_REGEX = r'table=(\d+)'
@staticmethod
def get_datapath_id(bridge_name):
"""
Gets the datapath_id by bridge_name
Hacky, call vsctl, decode output to str, strip '\n', remove '' around
the output, convert to int.
        This gives the integer datapath_id that we want to run apps on; it is
        needed when two bridges are set up: gtp_br0 (the main bridge) and testing_br.
"""
try:
output = subprocess.check_output(["ovs-vsctl", "get", "bridge",
bridge_name, "datapath_id"])
output_str = str(output, 'utf-8').strip()[1:-1]
output_hex = int(output_str, 16)
except subprocess.CalledProcessError as e:
raise DatapathLookupError(
'Error: ovs-vsctl bridge({}) datapath id lookup: {}'.format(
bridge_name, e
)
)
return output_hex
@staticmethod
def get_ofport(interface_name):
"""
        Gets the ofport number of an interface
"""
try:
port_num = subprocess.check_output(["ovs-vsctl", "get", "interface",
interface_name, "ofport"])
except subprocess.CalledProcessError as e:
raise DatapathLookupError(
'Error: ovs-vsctl interface({}) of port lookup: {}'.format(
interface_name, e
)
)
return int(port_num)
@staticmethod
def create_internal_iface(bridge_name, iface_name, ip):
"""
        Adds an internal interface to the given bridge and, if an IP is supplied, configures it on the interface.
Used when running unit tests
"""
subprocess.Popen(["ovs-vsctl", "add-port", bridge_name, iface_name,
"--", "set", "Interface", iface_name,
"type=internal"]).wait()
if ip is not None:
subprocess.Popen(["ifconfig", iface_name, ip]).wait()
@staticmethod
def create_bridge(bridge_name, iface_name):
"""
Creates a simple bridge, sets up an interface.
Used when running unit tests
"""
subprocess.Popen(["ovs-vsctl", "--if-exists", "del-br",
bridge_name]).wait()
subprocess.Popen(["ovs-vsctl", "add-br", bridge_name]).wait()
subprocess.Popen(["ovs-vsctl", "set", "bridge", bridge_name,
"protocols=OpenFlow10,OpenFlow13,OpenFlow14",
"other-config:disable-in-band=true"]).wait()
subprocess.Popen(["ovs-vsctl", "set-controller", bridge_name,
"tcp:127.0.0.1:6633", "tcp:127.0.0.1:6654"]).wait()
subprocess.Popen(["ifconfig", iface_name, "192.168.1.1/24"]).wait()
@staticmethod
def flush_conntrack():
"""
Cleanup the conntrack state
"""
subprocess.Popen(["ovs-dpctl", "flush-conntrack"]).wait()
@staticmethod
def destroy_bridge(bridge_name):
"""
Removes the bridge.
Used when unit test finishes
"""
subprocess.Popen(["ovs-vsctl", "del-br", bridge_name]).wait()
@staticmethod
def get_controllers_for_bridge(bridge_name):
curr_controllers = subprocess.check_output(
["ovs-vsctl", "get-controller", bridge_name],
).decode("utf-8").replace(' ', '').split('\n')
return list(filter(None, curr_controllers))
@staticmethod
def add_controller_to_bridge(bridge_name, port_num):
curr_controllers = BridgeTools.get_controllers_for_bridge(bridge_name)
ctlr_ip = "tcp:127.0.0.1:{}".format(port_num)
if ctlr_ip in curr_controllers:
return
curr_controllers.append(ctlr_ip)
BridgeTools.set_controllers_for_bridge(bridge_name, curr_controllers)
@staticmethod
def remove_controller_from_bridge(bridge_name, port_num):
curr_controllers = BridgeTools.get_controllers_for_bridge(bridge_name)
ctlr_ip = 'tcp:127.0.0.1:{}'.format(port_num)
curr_controllers.remove(ctlr_ip)
BridgeTools.set_controllers_for_bridge(bridge_name, curr_controllers)
@staticmethod
def set_controllers_for_bridge(bridge_name, ctlr_list):
set_cmd = ["ovs-vsctl", "set-controller", bridge_name]
set_cmd.extend(ctlr_list)
subprocess.Popen(set_cmd).wait()
@staticmethod
def get_flows_for_bridge(bridge_name, table_num=None, include_stats=True):
"""
Returns a flow dump of the given bridge from ovs-ofctl. If table_num is
specified, then only the flows for the table will be returned.
"""
if include_stats:
set_cmd = ["ovs-ofctl", "dump-flows", bridge_name]
else:
set_cmd = ["ovs-ofctl", "dump-flows", bridge_name, "--no-stats"]
if table_num:
set_cmd.append("table=%s" % table_num)
flows = \
subprocess.check_output(set_cmd).decode('utf-8').split('\n')
flows = list(filter(lambda x: (x is not None and
x != '' and
x.find("NXST_FLOW") == -1),
flows))
return flows
@staticmethod
def _get_annotated_name_by_table_num(
table_assignments: 'Dict[str, Tables]') -> Dict[int, str]:
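        # Build a map from OVS table number to a human-readable label such as
        # "app_name(main_table)" or "app_name(scratch_table_0)".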
annotated_tables = {}
# A main table may be used by multiple apps
apps_by_main_table_num = defaultdict(list)
for name in table_assignments:
apps_by_main_table_num[table_assignments[name].main_table].append(
name)
# Scratch tables are used for only one app
for ind, scratch_num in enumerate(
table_assignments[name].scratch_tables):
annotated_tables[scratch_num] = '{}(scratch_table_{})'.format(
name,
ind)
for table, apps in apps_by_main_table_num.items():
annotated_tables[table] = '{}(main_table)'.format(
'/'.join(sorted(apps)))
return annotated_tables
@classmethod
def get_annotated_flows_for_bridge(cls, bridge_name: str,
table_assignments: 'Dict[str, Tables]',
apps: Optional[List[str]] = None,
include_stats: bool = True
) -> List[str]:
"""
Returns an annotated flow dump of the given bridge from ovs-ofctl.
table_assignments is used to annotate table number with its
corresponding app. If a note exists, the note will be decoded.
If apps is not None, then only the flows for the given apps will be
returned.
"""
annotated_tables = cls._get_annotated_name_by_table_num(
table_assignments)
def annotated_table_num(num):
if int(num) in annotated_tables:
return annotated_tables[int(num)]
return num
def parse_resubmit_action(match):
"""
resubmit(port,1) => resubmit(port,app_name(main_table))
"""
ret = ''
# We can have more than one resubmit per flow
actions = [a for a in match.group().split('resubmit') if a]
for action in actions:
resubmit_tokens = re.search(r'\((.*?)\)', action)\
.group(1).split(',')
in_port, table = resubmit_tokens[0], resubmit_tokens[1]
if ret:
ret += ','
ret += 'resubmit({},{})'.format(in_port,
annotated_table_num(table))
return ret
def parse_flow(flow):
sub_rules = [
# Annotate table number with app name
(cls.TABLE_NUM_REGEX,
lambda match: 'table={}'.format(annotated_table_num(
match.group(1)))),
(r'resubmit\((.*)\)', parse_resubmit_action),
# Decode the note
(r'note:([\d\.a-fA-F]*)',
lambda match: 'note:{}'.format(
str(binascii.unhexlify(match.group(1)
.replace('00', '')
.replace('.', ''))))),
]
for rule in sub_rules:
flow = re.sub(rule[0], rule[1], flow)
return flow
def filter_apps(flows):
if apps is None:
yield from flows
return
selected_tables = []
for app in apps:
selected_tables.append(table_assignments[app].main_table)
selected_tables.extend(table_assignments[app].scratch_tables)
for flow in flows:
table_num = int(re.search(cls.TABLE_NUM_REGEX, flow).group(1))
if table_num in selected_tables or not selected_tables:
yield flow
return [parse_flow(flow) for flow in
filter_apps(cls.get_flows_for_bridge(bridge_name,
include_stats=include_stats))]
|
py
|
1a5823bf87a3f849348248d825ce090b65803767
|
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.30
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from clients.ctm_saas_client.configuration import Configuration
class SystemSettingLdap(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'ldap_enabled': 'bool',
'default_domain': 'str',
'domains': 'list[LdapDomainSettings]'
}
attribute_map = {
'ldap_enabled': 'ldapEnabled',
'default_domain': 'defaultDomain',
'domains': 'domains'
}
def __init__(self, ldap_enabled=None, default_domain=None, domains=None, _configuration=None): # noqa: E501
"""SystemSettingLdap - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._ldap_enabled = None
self._default_domain = None
self._domains = None
self.discriminator = None
self.ldap_enabled = ldap_enabled
if default_domain is not None:
self.default_domain = default_domain
if domains is not None:
self.domains = domains
@property
def ldap_enabled(self):
"""Gets the ldap_enabled of this SystemSettingLdap. # noqa: E501
:return: The ldap_enabled of this SystemSettingLdap. # noqa: E501
:rtype: bool
"""
return self._ldap_enabled
@ldap_enabled.setter
def ldap_enabled(self, ldap_enabled):
"""Sets the ldap_enabled of this SystemSettingLdap.
:param ldap_enabled: The ldap_enabled of this SystemSettingLdap. # noqa: E501
:type: bool
"""
if self._configuration.client_side_validation and ldap_enabled is None:
raise ValueError("Invalid value for `ldap_enabled`, must not be `None`") # noqa: E501
self._ldap_enabled = ldap_enabled
@property
def default_domain(self):
"""Gets the default_domain of this SystemSettingLdap. # noqa: E501
:return: The default_domain of this SystemSettingLdap. # noqa: E501
:rtype: str
"""
return self._default_domain
@default_domain.setter
def default_domain(self, default_domain):
"""Sets the default_domain of this SystemSettingLdap.
:param default_domain: The default_domain of this SystemSettingLdap. # noqa: E501
:type: str
"""
self._default_domain = default_domain
@property
def domains(self):
"""Gets the domains of this SystemSettingLdap. # noqa: E501
:return: The domains of this SystemSettingLdap. # noqa: E501
:rtype: list[LdapDomainSettings]
"""
return self._domains
@domains.setter
def domains(self, domains):
"""Sets the domains of this SystemSettingLdap.
:param domains: The domains of this SystemSettingLdap. # noqa: E501
:type: list[LdapDomainSettings]
"""
self._domains = domains
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SystemSettingLdap, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SystemSettingLdap):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SystemSettingLdap):
return True
return self.to_dict() != other.to_dict()
|
py
|
1a5824362abfc1e4ddf769dd54e4e83a0c8b6f8d
|
# -*- coding: utf-8 -*-
#
# Copyright 2014 Danny Goodall
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import input
from codefurther.directions import GetDirections
# Ask for starting point, destination and the mode of travel
starting_point = input("What is the starting point of your journey? (Southampton) : ")
end_point = input("What is the destination of your journey? (Winchester) : ")
travel_mode = input("What is mode of travel ? (walking) : ")
# Set the defaults for the starting and end points
starting_point = starting_point if starting_point else "southampton, uk"
end_point = end_point if end_point else "winchester, uk"
# Default the travel mode to walking unless a valid mode was entered
travel_mode = travel_mode if travel_mode and travel_mode.lower() in GetDirections.valid_modes else "walking"
# Let's create a directions object that we can then interact with
directions = GetDirections(starting_point, end_point, travel_mode)
# Was this route found?
if directions.found:
# Yes, so let's print out a heading...
print(directions.heading)
# Followed by each of the steps...
for step in directions.steps:
print(step)
# Followed by a footer
print(directions.footer)
else:
# If the route wasn't found, then explain to the user.
print("We couldn't find a ({}) route from {}, to {}.".format(travel_mode, starting_point, end_point))
from codefurther.directions import GetDirections
directions = GetDirections("123l123", "345345l34")
print(directions.heading)
for step in directions.steps:
print(step)
print(directions.footer)
|
py
|
1a5824f0f67bbd4e92b595a704ce2c5fff59a29e
|
# Copyright 2021 Beijing DP Technology Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data pipeline for model features."""
|
py
|
1a582504a67fd71b2c82852a70771866f361894c
|
from datetime import date
from decimal import Decimal
from django.utils.six import text_type
from silver.tests.api.specs.utils import ResourceDefinition
unaltered = lambda input_value: input_value
# required is True by default, (a default must be specified otherwise)
# read_only is False by default,
# write_only is False by default,
document_entry_definition = ResourceDefinition("document_entry", {
'id': {
'read_only': True,
'output': lambda entry: int(entry.id),
},
'description': {
'required': False,
'expected_input_types': text_type,
'output': lambda entry: entry.description
},
'unit': {
'required': False,
'expected_input_types': text_type,
'output': lambda entry: entry.unit,
},
'unit_price': {
'expected_input_types': (int, float, text_type),
'output': lambda entry: "%.4f" % Decimal(entry.unit_price)
},
'quantity': {
'expected_input_types': (int, float, text_type),
'output': lambda entry: "%.4f" % Decimal(entry.quantity)
},
'total_before_tax': {
'read_only': True,
'output': lambda entry: "%.2f" % (entry.unit_price * entry.quantity)
},
'total': {
'read_only': True,
'output': lambda entry: "%.2f" % (
entry.total_before_tax * Decimal(1 + entry.document.sales_tax_percent / 100)
)
},
'start_date': {
'required': False,
'expected_input_types': date,
'output': lambda entry: entry.start_date,
},
'end_date': {
'required': False,
'expected_input_types': date,
'output': lambda entry: entry.end_date,
},
'prorated': {
'required': False,
'expected_input_types': bool,
'output': lambda entry: entry.prorated,
},
'product_code': {
'required': False,
'expected_input_types': text_type,
'output': lambda entry: entry.product_code,
}
})
def spec_document_entry(entry):
return document_entry_definition.generate(entry)
|
py
|
1a58268017fe7d24c2714e4276d8a4ae54193831
|
import os
import unittest
import shutil
import yaml
from sherlock import transient_classifier, cl_utils
from sherlock.utKit import utKit
from fundamentals import tools
su = tools(
arguments={"settingsFile": None},
docString=__doc__,
logLevel="DEBUG",
options_first=False,
projectName="sherlock"
)
arguments, settings, log, dbConn = su.setup()
# # load settings
# stream = file(
# "/Users/Dave/.config/sherlock/sherlock.yaml", 'r')
# settings = yaml.load(stream)
# stream.close()
# SETUP AND TEARDOWN FIXTURE FUNCTIONS FOR THE ENTIRE MODULE
moduleDirectory = os.path.dirname(__file__)
utKit = utKit(moduleDirectory)
log, dbConn, pathToInputDir, pathToOutputDir = utKit.setupModule()
utKit.tearDownModule()
# load settings
stream = open(
    pathToInputDir + "/example_settings2.yaml", 'r')
stream = open(
    "/Users/Dave/Dropbox/config/dave-macbook/sherlock/sherlock_mac_marshall.yaml")
stream = open(
    "/Users/Dave/Desktop/sherlock_ps1_mops.yaml")
settings = yaml.load(stream)
stream.close()
import shutil
try:
shutil.rmtree(pathToOutputDir)
except:
pass
# COPY INPUT TO OUTPUT DIR
shutil.copytree(pathToInputDir, pathToOutputDir)
# Recursively create missing directories
if not os.path.exists(pathToOutputDir):
os.makedirs(pathToOutputDir)
# from fundamentals.mysql import directory_script_runner
# directory_script_runner(
# log=log,
# pathToScriptDirectory=pathToInputDir.replace(
# "/input", "/resources") + "/transient_database",
# databaseName=settings["database settings"]["db"],
# loginPath=settings["database settings"]["loginPath"],
# successRule="delete",
# failureRule="failed"
# )
# xt-setup-unit-testing-files-and-folders
class test_transient_classifier(unittest.TestCase):
def test_transient_update_classified_annotations_function(self):
from sherlock import transient_classifier
this = transient_classifier(
log=log,
settings=settings,
update=True,
fast=True
)
# this.update_peak_magnitudes()
this.update_classification_annotations_and_summaries()
def test_transient_classifier_function(self):
from sherlock import transient_classifier
this = transient_classifier(
log=log,
settings=settings,
update=True,
updateNed=False,
oneRun=True
)
this.classify()
def test_transient_classifier_single_source_function(self):
from sherlock import transient_classifier
this = transient_classifier(
log=log,
settings=settings,
ra="08:57:57.19",
dec="+43:25:44.1",
name="PS17gx",
verbose=0
)
classifications, crossmatches = this.classify()
def test_get_transient_metadata_from_database_list(self):
from sherlock import transient_classifier
classifier = transient_classifier(
log=log,
settings=settings
)
transientsMetadataList = classifier._get_transient_metadata_from_database_list()
classifier._update_ned_stream(
transientsMetadataList=transientsMetadataList
)
def test_crossmatching(self):
# SETUP ALL DATABASE CONNECTIONS
from sherlock import database
db = database(
log=log,
settings=settings
)
dbConns, dbVersions = db.connect()
transientsDbConn = dbConns["transients"]
cataloguesDbConn = dbConns["catalogues"]
pmDbConn = dbConns["marshall"]
from sherlock.commonutils import get_crossmatch_catalogues_column_map
colMaps = get_crossmatch_catalogues_column_map(
log=log,
dbConn=cataloguesDbConn
)
from sherlock import transient_classifier
classifier = transient_classifier(
log=log,
settings=settings
)
transientsMetadataList = classifier._get_transient_metadata_from_database_list()
crossmatches = classifier._crossmatch_transients_against_catalogues(
colMaps=colMaps,
transientsMetadataList=transientsMetadataList
)
classifications, crossmatches = classifier._rank_classifications(
colMaps=colMaps,
crossmatches=crossmatches
)
classifier._update_transient_database(
classifications=classifications,
transientsMetadataList=transientsMetadataList,
colMaps=colMaps,
crossmatches=crossmatches)
def test_classification_annotations(self):
from sherlock import database
db = database(
log=log,
settings=settings
)
dbConns, dbVersions = db.connect()
transientsDbConn = dbConns["transients"]
cataloguesDbConn = dbConns["catalogues"]
pmDbConn = dbConns["marshall"]
from sherlock.commonutils import get_crossmatch_catalogues_column_map
colMaps = get_crossmatch_catalogues_column_map(
log=log,
dbConn=cataloguesDbConn
)
from sherlock import transient_classifier
classifier = transient_classifier(
log=log,
settings=settings
)
classifier.classification_annotations()
def test_transient_classifier_function_exception(self):
from sherlock import transient_classifier
try:
this = transient_classifier(
log=log,
settings=settings,
fakeKey="break the code"
)
this.get()
assert False
except Exception, e:
assert True
print str(e)
# x-print-testpage-for-pessto-marshall-web-object
# x-class-to-test-named-worker-function
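# Minimal runner guard, assuming this test module is executed directly with the
# example settings loaded above; the project's real test runner may differ.
if __name__ == "__main__":
    unittest.main()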
|
py
|
1a5828d5a46cf81cfac7b2ae2b0a072dcde83d11
|
#!/usr/bin/python
#
# Copyright (c) 2018 Yunge Zhu, (@yungezz)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_roledefinition
version_added: "2.8"
short_description: Manage Azure Role Definition.
description:
- Create, update and delete instance of Azure Role Definition.
options:
name:
description:
- Unique name of role definition.
required: True
permissions:
description:
- Set of role definition permissions.
- See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
suboptions:
actions:
description:
- List of allowed actions.
type: list
not_actions:
description:
- List of denied actions.
type: list
data_actions:
description:
- List of allowed data actions.
type: list
not_data_actions:
description:
- List of denied data actions.
type: list
assignable_scopes:
description: List of assignable scopes of this definition.
scope:
description: The scope of the role definition.
description:
description:
- The role definition description.
state:
description:
- Assert the state of the role definition.
- Use 'present' to create or update a role definition and 'absent' to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- "Yunge Zhu(@yungezz)"
'''
EXAMPLES = '''
- name: Create a role definition
azure_rm_roledefinition:
name: myTestRole
scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myresourceGroup
permissions:
- actions:
- "Microsoft.Compute/virtualMachines/read"
data_actions:
- "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write"
assignable_scopes:
- "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
'''
RETURN = '''
id:
description: Id of current role definition.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleDefinitions/roleDefinitionId"
'''
import uuid
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils._text import to_native
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from msrest.polling import LROPoller
from msrest.serialization import Model
from azure.mgmt.authorization import AuthorizationManagementClient
from azure.mgmt.authorization.models import RoleDefinition, Permission
except ImportError:
# This is handled in azure_rm_common
pass
permission_spec = dict(
actions=dict(
type='list',
options=dict(type='str')
),
not_actions=dict(
type='list',
options=dict(type='str')
),
data_actions=dict(
type='list',
options=dict(type='str')
),
not_data_actions=dict(
type='list',
options=dict(type='str')
),
)
def roledefinition_to_dict(role):
result = dict(
id=role.id,
name=role.name,
type=role.role_type,
assignable_scopes=role.assignable_scopes,
description=role.description,
role_name=role.role_name
)
if role.permissions:
result['permissions'] = [dict(
actions=p.actions,
not_actions=p.not_actions,
data_actions=p.data_actions,
not_data_actions=p.not_data_actions
) for p in role.permissions]
return result
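# For illustration, roledefinition_to_dict() produces a plain dict such as the
# following (all values are placeholders, not real Azure identifiers):
#
#   {
#       'id': '/subscriptions/<sub-id>/providers/Microsoft.Authorization/roleDefinitions/<def-id>',
#       'name': '<def-id>',
#       'type': 'CustomRole',
#       'assignable_scopes': ['/subscriptions/<sub-id>'],
#       'description': 'My custom role',
#       'role_name': 'myTestRole',
#       'permissions': [{'actions': [...], 'not_actions': [...],
#                        'data_actions': [...], 'not_data_actions': [...]}]
#   }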
class Actions:
NoAction, CreateOrUpdate, Delete = range(3)
class AzureRMRoleDefinition(AzureRMModuleBase):
"""Configuration class for an Azure RM Role definition resource"""
def __init__(self):
self.module_arg_spec = dict(
name=dict(
type='str',
required=True
),
scope=dict(
type='str'
),
permissions=dict(
type='list',
elements='dict',
options=permission_spec
),
assignable_scopes=dict(
type='list',
elements='str'
),
description=dict(
type='str'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.name = None
self.scope = None
self.permissions = None
self.description = None
self.assignable_scopes = None
self.results = dict(
changed=False,
id=None,
)
self.state = None
self.to_do = Actions.NoAction
self.role = None
self._client = None
super(AzureRMRoleDefinition, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=False)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
old_response = None
response = None
# get management client
self._client = self.get_mgmt_svc_client(AuthorizationManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version="2018-01-01-preview")
self.scope = self.build_scope()
# get existing role definition
old_response = self.get_roledefinition()
if old_response:
self.results['id'] = old_response['id']
self.role = old_response
if self.state == 'present':
# check if the role definition exists
if not old_response:
self.log("Role definition doesn't exist in this scope")
self.to_do = Actions.CreateOrUpdate
else:
# existing role definition, do update
self.log("Role definition already exists")
self.log('Result: {0}'.format(old_response))
# compare if role definition changed
if self.check_update(old_response):
self.to_do = Actions.CreateOrUpdate
elif self.state == 'absent':
if old_response:
self.log("Delete role defintion")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_roledefinition(old_response['name'])
self.log('role definition deleted')
else:
self.log("role definition {0} not exists.".format(self.name))
if self.to_do == Actions.CreateOrUpdate:
self.log('Need to Create/Update role definition')
self.results['changed'] = True
if self.check_mode:
return self.results
response = self.create_update_roledefinition()
self.results['id'] = response['id']
return self.results
# build scope
def build_scope(self):
subscription_scope = '/subscriptions/' + self.subscription_id
if self.scope is None:
return subscription_scope
return self.scope
# check update
def check_update(self, old_definition):
if self.description and self.description != old_definition['description']:
return True
if self.permissions:
if len(self.permissions) != len(old_definition['permissions']):
return True
existing_permissions = self.permissions_to_set(old_definition['permissions'])
new_permissions = self.permissions_to_set(self.permissions)
if existing_permissions != new_permissions:
return True
if self.assignable_scopes and self.assignable_scopes != old_definition['assignable_scopes']:
return True
return False
def permissions_to_set(self, permissions):
new_permissions = [str(dict(
actions=(set([to_native(a) for a in item.get('actions')]) if item.get('actions') else None),
not_actions=(set([to_native(a) for a in item.get('not_actions')]) if item.get('not_actions') else None),
data_actions=(set([to_native(a) for a in item.get('data_actions')]) if item.get('data_actions') else None),
not_data_actions=(set([to_native(a) for a in item.get('not_data_actions')]) if item.get('not_data_actions') else None),
)) for item in permissions]
return set(new_permissions)
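# For illustration: the helper above normalises each permission block into a
# hashable string of sets, so the comparison in check_update() ignores the order
# in which permission blocks are listed (the example input is hypothetical):
#
#   permissions_to_set([{'actions': ['Microsoft.Compute/virtualMachines/read']}])
#
# returns a one-element set whose member encodes those actions as a set and the
# remaining fields as None.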
def create_update_roledefinition(self):
'''
Creates or updates role definition.
:return: deserialized role definition
'''
self.log("Creating / Updating role definition {0}".format(self.name))
try:
permissions = None
if self.permissions:
permissions = [AuthorizationManagementClient.models("2018-01-01-preview").Permission(
actions=p.get('actions', None),
not_actions=p.get('not_actions', None),
data_actions=p.get('data_actions', None),
not_data_actions=p.get('not_data_actions', None)
) for p in self.permissions]
role_definition = AuthorizationManagementClient.models("2018-01-01-preview").RoleDefinition(
role_name=self.name,
description=self.description,
permissions=permissions,
assignable_scopes=self.assignable_scopes,
role_type='CustomRole')
if self.role:
role_definition.name = self.role['name']
response = self._client.role_definitions.create_or_update(role_definition_id=self.role['name'] if self.role else str(uuid.uuid4()),
scope=self.scope,
role_definition=role_definition)
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create role definition.')
self.fail("Error creating role definition: {0}".format(str(exc)))
return roledefinition_to_dict(response)
def delete_roledefinition(self, role_definition_id):
'''
Deletes specified role definition.
:return: True
'''
self.log("Deleting the role definition {0}".format(self.name))
scope = self.build_scope()
try:
response = self._client.role_definitions.delete(scope=scope,
role_definition_id=role_definition_id)
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
except CloudError as e:
self.log('Error attempting to delete the role definition.')
self.fail("Error deleting the role definition: {0}".format(str(e)))
return True
def get_roledefinition(self):
'''
Gets the properties of the specified role definition.
:return: deserialized role definition state dictionary
'''
self.log("Checking if the role definition {0} is present".format(self.name))
response = None
try:
response = list(self._client.role_definitions.list(scope=self.scope))
if len(response) > 0:
self.log("Response : {0}".format(response))
roles = []
for r in response:
if r.role_name == self.name:
roles.append(r)
if len(roles) == 1:
self.log("role definition : {0} found".format(self.name))
return roledefinition_to_dict(roles[0])
if len(roles) > 1:
self.fail("Found multiple role definitions: {0}".format(roles))
except CloudError as ex:
self.log("Didn't find role definition {0}".format(self.name))
return False
def main():
"""Main execution"""
AzureRMRoleDefinition()
if __name__ == '__main__':
main()
|
py
|
1a5828e8f4f841c47eebcfd3990bca7cbe871be9
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import widgets, ModelChoiceField
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from filer.models.imagemodels import Image
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.fields import GlossaryField
from cmsplugin_cascade.mixins import ImagePropertyMixin
from cmsplugin_cascade.widgets import MultipleCascadingSizeWidget
from cmsplugin_cascade.link.config import LinkPluginBase, LinkElementMixin, LinkForm
from .image import ImageFormMixin, ImageAnnotationMixin
from .utils import BS3_BREAKPOINT_KEYS, reduce_breakpoints, get_picture_elements
class BootstrapPicturePlugin(ImageAnnotationMixin, LinkPluginBase):
name = _("Picture")
model_mixins = (ImagePropertyMixin, LinkElementMixin,)
module = 'Bootstrap'
parent_classes = ['BootstrapColumnPlugin', 'SimpleWrapperPlugin']
require_parent = True
allow_children = False
raw_id_fields = ('image_file',)
admin_preview = False
ring_plugin = 'PicturePlugin'
render_template = 'cascade/bootstrap3/linked-picture.html'
default_css_class = 'img-responsive'
default_css_attributes = ('image_shapes',)
html_tag_attributes = {'image_title': 'title', 'alt_tag': 'tag'}
fields = ('image_file',) + LinkPluginBase.fields
RESIZE_OPTIONS = (('upscale', _("Upscale image")), ('crop', _("Crop image")),
('subject_location', _("With subject location")),
('high_resolution', _("Optimized for Retina")),)
responsive_heights = GlossaryField(
MultipleCascadingSizeWidget(BS3_BREAKPOINT_KEYS, allowed_units=['px', '%'], required=False),
label=_("Adapt Picture Heights"),
initial={'xs': '100%', 'sm': '100%', 'md': '100%', 'lg': '100%'},
help_text=_("Heights of picture in percent or pixels for distinct Bootstrap's breakpoints."),
)
responsive_zoom = GlossaryField(
MultipleCascadingSizeWidget(BS3_BREAKPOINT_KEYS, allowed_units=['%'], required=False),
label=_("Adapt Picture Zoom"),
initial={'xs': '0%', 'sm': '0%', 'md': '0%', 'lg': '0%'},
help_text=_("Magnification of picture in percent for distinct Bootstrap's breakpoints."),
)
resize_options = GlossaryField(
widgets.CheckboxSelectMultiple(choices=RESIZE_OPTIONS),
label=_("Resize Options"),
help_text=_("Options to use when resizing the image."),
initial=['subject_location', 'high_resolution']
)
class Media:
js = ['cascade/js/admin/pictureplugin.js']
def get_form(self, request, obj=None, **kwargs):
reduce_breakpoints(self, 'responsive_heights')
image_file = ModelChoiceField(queryset=Image.objects.all(), required=False, label=_("Image"))
Form = type(str('ImageForm'), (ImageFormMixin, getattr(LinkForm, 'get_form_class')(),),
{'LINK_TYPE_CHOICES': ImageFormMixin.LINK_TYPE_CHOICES, 'image_file': image_file})
kwargs.update(form=Form)
return super(BootstrapPicturePlugin, self).get_form(request, obj, **kwargs)
def render(self, context, instance, placeholder):
# image shall be rendered in a responsive context using the picture element
elements = get_picture_elements(context, instance)
fluid = instance.get_complete_glossary().get('fluid') == 'on'
context.update({
'is_responsive': True,
'instance': instance,
'is_fluid': fluid,
'placeholder': placeholder,
'elements': elements,
})
return context
@classmethod
def get_css_classes(cls, obj):
css_classes = cls.super(BootstrapPicturePlugin, cls).get_css_classes(obj)
css_class = obj.glossary.get('css_class')
if css_class:
css_classes.append(css_class)
return css_classes
@classmethod
def get_identifier(cls, obj):
identifier = super(BootstrapPicturePlugin, cls).get_identifier(obj)
try:
content = force_text(obj.image)
except AttributeError:
content = _("No Picture")
return format_html('{0}{1}', identifier, content)
plugin_pool.register_plugin(BootstrapPicturePlugin)
|
py
|
1a5829338cfc334ef56d733accdd03e4ca2d6141
|
"""
WSGI config for jarvis_server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'jarvis_server.settings')
application = get_wsgi_application()
|
py
|
1a58297f7f41dc30bfa4b462b74c2cb744b79cab
|
import os
import sys
import webbrowser
from contextlib import contextmanager
import six
class CodeHandler(object):
"""
Generic handler for the code returned by the Native App Auth Flow in
Globus Auth. It's intended to be subclassed to define the behavior for
how the code gets from Globus Auth to the Native App.
"""
def __init__(self, paste_url_in_browser_msg=None):
self.paste_url_in_browser_msg = (
paste_url_in_browser_msg or
'Please paste the following URL in a browser: \n{}'
)
@contextmanager
def start(self):
"""
A hook for any extra startup work needed before calling authenticate().
For local_server, this is the time to start a thread running a local TCP
server for handling the auth code. For simple handlers like the
InputCodeHandler, this can be safely ignored.
"""
yield
def get_redirect_uri(self):
"""
For use with code handlers that don't know their redirect_uri until
start() is called. For local_server, this is needed to find an open
port number to return something like http://localhost:<PORT>/
Return None to use the default Globus helper page
"""
return None
def set_app_name(self, app_name):
"""
Optional method for setting the app name, if this is useful to the
code handler. For local server, this is displayed on the local server's
page.
:param app_name: String to use for the app name.
"""
pass
def authenticate(self, url, no_browser=False):
"""
Use the given url to direct the user to Globus Auth so they can login.
:param url: URL to Globus Auth.
:param no_browser: Don't automatically open the user's browser.
:return:
"""
if no_browser is False and not self.is_remote_session():
webbrowser.open(url, new=1)
else:
self.write_message(self.paste_url_in_browser_msg.format(url))
try:
return self.get_code()
except KeyboardInterrupt:
self.write_message('Interrupt Received. '
'Canceling authentication...')
sys.exit(-1)
def write_message(self, message):
"""
This will likely be the only place where output needs to be directly
written to the user. Some CLIs like Click may prefer click.echo here
rather than print.
:param message: Direct Standard Output message for user consumption
"""
print(message)
def get_code(self):
"""
Override in child. Get the code returned by Globus Auth to complete
the Native App Auth Flow.
:return: Code returned by Globus Auth
"""
raise NotImplementedError()
def is_remote_session(self):
"""
Check if this is being run from an ssh shell.
:return: True if ssh shell, false otherwise
"""
return os.environ.get('SSH_TTY', os.environ.get('SSH_CONNECTION'))
class InputCodeHandler(CodeHandler):
def get_code(self):
self.write_message('Please Paste your Auth Code Below: ')
return six.moves.input()
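# Usage sketch, assuming an authorization URL has already been built elsewhere;
# the URL below is a placeholder, not a real Globus Auth request.
if __name__ == '__main__':
    handler = InputCodeHandler()
    with handler.start():
        code = handler.authenticate('https://auth.globus.org/...placeholder...')
    handler.write_message('Received auth code: {0}'.format(code))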
|
py
|
1a582980a364847ba51ee8a719edf5efff3c8b5f
|
from .models import *
from .serializers import *
from rest_framework.permissions import IsAuthenticated
from rest_framework import viewsets
from research.filter import IsOwnerFilterBackend
class THLIC_CertificateView(viewsets.ModelViewSet):
serializer_class = THLIC_CertificateSerializer
permission_classes = [IsAuthenticated]
queryset = THLIC_Certificate.objects.all()
filter_backends = [IsOwnerFilterBackend]
class Teaching_portofolioView(viewsets.ModelViewSet):
serializer_class = Teaching_portofolioSerializer
permission_classes = [IsAuthenticated]
queryset = Teaching_portofolio.objects.all()
filter_backends = [IsOwnerFilterBackend]
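# Wiring sketch: these viewsets are typically exposed through a DRF router in the
# project's urls.py; the app label and route names below are assumptions.
#
#   from rest_framework.routers import DefaultRouter
#   from research import views
#
#   router = DefaultRouter()
#   router.register(r'thlic-certificates', views.THLIC_CertificateView)
#   router.register(r'teaching-portofolios', views.Teaching_portofolioView)
#   urlpatterns = router.urls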
|
py
|
1a582a2432461aa927ec2826840ea03ceb93787f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sms_service.Errors import ErrorTypeError, ErrorTokenExpired, ErrorWebServiceError
from sms_service.Interfaces import InterfaceSmsSenderAdapter
from .EnumCallturkEndpoints import EnumCallturkEndpoint
from .xml.authentication.AuthenticationXmlController import AuthenticationXmlController
from .xml.sms_send.SmsSendController import SmsSendXmlController
class CallturkSmsSender(InterfaceSmsSenderAdapter):
error_auth_instance = "`self.call_turk_auth` should be an instance of `AuthenticationXmlController`, not {}"
error_number_list_instance = '`number_list` should be of type `list`, not {}'
REGISTERED_TOKENS = {}
def __init__(self, username=None, password=None, organization_name=None):
"""
(string, string, string) -> instance
:param username: Callturk Username
:param password: Callturk Userpassword
:param organization_name: Calltruk Company Name
"""
self.call_turk_auth = AuthenticationXmlController(username, password, organization_name)
self.registered_token_key = "{}-{}".format(username, organization_name)
self.sms_sender_ctrl = None
self.response_tuple = {}, -1
def send_sms(self):
try:
self.auth_token = CallturkSmsSender.REGISTERED_TOKENS.get(self.registered_token_key, None)
self.sms_sender_ctrl = SmsSendXmlController(self.auth_token, self.number_list, self.content)
sms_send_request_id = self.sms_sender_ctrl.send_sms()
self.response_tuple = self.handle_sms_response(sms_send_request_id), 201
except ErrorTokenExpired:
del self.auth_token
return self.send_sms()
except ErrorWebServiceError as e:
self.response_tuple = self.handle_sms_error_response(e), 200
return self.response_tuple
def handle_sms_response(self, response):
return dict(id=response)
def handle_sms_error_response(self, exception):
return dict(
message=exception.__class__.__name__,
description=str(exception),
errors=list(exception.args),
status_code=200
)
def get_log_data(self):
""" VOID
Log sms actions
"""
return dict(
method='POST',
url='{}'.format(EnumCallturkEndpoint.SMS_SEND.value),
body=dict(number_list=self.number_list, content=self.content),
is_failed=False if self.response_tuple[1] == 201 else True,
status_code=self.response_tuple[1],
response=[self.response_tuple[0]]
)
@property
def auth_token(self):
return self.__auth_token
@auth_token.setter
def auth_token(self, value):
if isinstance(value, str):
self.__auth_token = value
CallturkSmsSender.REGISTERED_TOKENS.update({self.registered_token_key: value})
else:
self.auth_token = self.call_turk_auth.get_auth_token()
@auth_token.deleter
def auth_token(self):
del self.__auth_token
del CallturkSmsSender.REGISTERED_TOKENS[self.registered_token_key]
@property
def call_turk_auth(self):
return self.__auth
@call_turk_auth.setter
def call_turk_auth(self, value):
if not isinstance(value, AuthenticationXmlController):
raise TypeError(CallturkSmsSender.error_auth_instance.format(type(value)))
self.__auth = value
@property
def number_list(self):
return self.__number_list
@number_list.setter
def number_list(self, number_list):
if not isinstance(number_list, list):
raise ErrorTypeError(CallturkSmsSender.error_number_list_instance.format(type(number_list)))
self.__number_list = number_list
@property
def content(self):
return self.__content
@content.setter
def content(self, sms_content):
self.__content = sms_content
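# Usage sketch: the credentials, phone number and message below are placeholders.
if __name__ == '__main__':
    sender = CallturkSmsSender(
        username='example-user',
        password='example-password',
        organization_name='example-org')
    sender.number_list = ['5551112233']
    sender.content = 'Test message from the usage sketch'
    response_body, status_code = sender.send_sms()
    print(sender.get_log_data())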
|
py
|
1a582a62eef524a9731500e7dadb7d8042e68c68
|
import re
import copy
import time
import json
import requests
from unshortenit.module import UnshortenModule
from unshortenit.exceptions import UnshortenFailed
class ShorteSt(UnshortenModule):
name = 'shortest'
domains = ['sh.st', 'festyy.com', 'ceesty.com']
def __init__(self, headers: dict = None, timeout: int = 30):
super().__init__(headers, timeout)
def unshorten(self, uri: str) -> str:
res = self.get(uri)
session_id = re.findall(r'sessionId\:(.*?)\"\,', res.text)
if len(session_id) == 0:
raise UnshortenFailed('No sessionId variable found.')
if len(session_id) > 0:
session_id = re.sub(r'\s\"', '', session_id[0])
http_header = copy.copy(self.headers or {})
http_header["Content-Type"] = "application/x-www-form-urlencoded"
http_header["Host"] = "sh.st"
http_header["Referer"] = uri
http_header["Origin"] = "http://sh.st"
http_header["X-Requested-With"] = "XMLHttpRequest"
time.sleep(5)
payload = {'adSessionId': session_id, 'callback': 'c'}
r = requests.get(
'http://sh.st/shortest-url/end-adsession',
params=payload,
headers=http_header,
timeout=self.timeout
)
response = r.content[6:-2].decode('utf-8')
if r.status_code == 200:
resp_uri = json.loads(response)['destinationUrl']
if resp_uri is not None:
uri = resp_uri
else:
raise UnshortenFailed('Error extracting url.')
else:
raise UnshortenFailed('Error extracting url.')
return uri
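# Usage sketch: the short link below is a placeholder, not a live sh.st URL.
if __name__ == '__main__':
    resolver = ShorteSt()
    print(resolver.unshorten('http://sh.st/XXXXX'))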
|
py
|
1a582ac3f923481fdadc32fca55fb54f3e8dab63
|
import itertools
import os
from collections import defaultdict
import dbt.utils
import dbt.include
import dbt.tracking
from dbt.utils import get_materialization, NodeType, is_type
from dbt.linker import Linker
import dbt.context.runtime
import dbt.contracts.project
import dbt.exceptions
import dbt.flags
import dbt.loader
import dbt.config
import dbt.clients.jinja
import dbt.clients.system
import dbt.writer
from dbt.contracts.graph.compiled import InjectedCTE, COMPILED_TYPES
from dbt.contracts.graph.parsed import ParsedNode
from dbt.logger import GLOBAL_LOGGER as logger
graph_file_name = 'graph.gpickle'
def _compiled_type_for(model: ParsedNode):
if model.resource_type not in COMPILED_TYPES:
raise dbt.exceptions.InternalException(
'Asked to compile {} node, but it has no compiled form'
.format(model.resource_type)
)
return COMPILED_TYPES[model.resource_type]
def print_compile_stats(stats):
names = {
NodeType.Model: 'model',
NodeType.Test: 'test',
NodeType.Snapshot: 'snapshot',
NodeType.Analysis: 'analyse',
NodeType.Macro: 'macro',
NodeType.Operation: 'operation',
NodeType.Seed: 'seed file',
NodeType.Source: 'source',
}
results = {k: 0 for k in names.keys()}
results.update(stats)
stat_line = ", ".join(
[dbt.utils.pluralize(ct, names.get(t)) for t, ct in results.items()])
logger.info("Found {}".format(stat_line))
def _add_prepended_cte(prepended_ctes, new_cte):
for cte in prepended_ctes:
if cte.id == new_cte.id:
cte.sql = new_cte.sql
return
prepended_ctes.append(new_cte)
def _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):
for new_cte in new_prepended_ctes:
_add_prepended_cte(prepended_ctes, new_cte)
def prepend_ctes(model, manifest):
model, _, manifest = recursively_prepend_ctes(model, manifest)
return (model, manifest)
def recursively_prepend_ctes(model, manifest):
if model.extra_ctes_injected:
return (model, model.extra_ctes, manifest)
if dbt.flags.STRICT_MODE:
assert isinstance(model, tuple(COMPILED_TYPES.values())), \
'Bad model type: {}'.format(type(model))
prepended_ctes = []
for cte in model.extra_ctes:
cte_id = cte.id
cte_to_add = manifest.nodes.get(cte_id)
cte_to_add, new_prepended_ctes, manifest = recursively_prepend_ctes(
cte_to_add, manifest)
_extend_prepended_ctes(prepended_ctes, new_prepended_ctes)
new_cte_name = '__dbt__CTE__{}'.format(cte_to_add.name)
sql = ' {} as (\n{}\n)'.format(new_cte_name, cte_to_add.compiled_sql)
_add_prepended_cte(prepended_ctes, InjectedCTE(id=cte_id, sql=sql))
model.prepend_ctes(prepended_ctes)
manifest.update_node(model)
return (model, prepended_ctes, manifest)
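# For illustration, an ephemeral upstream node named 'my_cte' is injected into its
# consumer as a prepended CTE of the form (SQL body abbreviated):
#
#   __dbt__CTE__my_cte as (
#   select ...
#   )
#
# and the consumer's compiled SQL refers to it by that generated name.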
class Compiler:
def __init__(self, config):
self.config = config
def initialize(self):
dbt.clients.system.make_directory(self.config.target_path)
dbt.clients.system.make_directory(self.config.modules_path)
def compile_node(self, node, manifest, extra_context=None):
if extra_context is None:
extra_context = {}
logger.debug("Compiling {}".format(node.unique_id))
data = node.to_dict()
data.update({
'compiled': False,
'compiled_sql': None,
'extra_ctes_injected': False,
'extra_ctes': [],
'injected_sql': None,
})
compiled_node = _compiled_type_for(node).from_dict(data)
context = dbt.context.runtime.generate(
compiled_node, self.config, manifest)
context.update(extra_context)
compiled_node.compiled_sql = dbt.clients.jinja.get_rendered(
node.raw_sql,
context,
node)
compiled_node.compiled = True
injected_node, _ = prepend_ctes(compiled_node, manifest)
should_wrap = {NodeType.Test, NodeType.Operation}
if injected_node.resource_type in should_wrap:
# data tests get wrapped in count(*)
# TODO : move this somewhere more reasonable
if 'data' in injected_node.tags and \
is_type(injected_node, NodeType.Test):
injected_node.wrapped_sql = (
"select count(*) as errors "
"from (\n{test_sql}\n) sbq").format(
test_sql=injected_node.injected_sql)
else:
# don't wrap schema tests or analyses.
injected_node.wrapped_sql = injected_node.injected_sql
elif is_type(injected_node, NodeType.Snapshot):
# unfortunately we do everything automagically for
# snapshots. in the future it'd be nice to generate
# the SQL at the parser level.
pass
elif(is_type(injected_node, NodeType.Model) and
get_materialization(injected_node) == 'ephemeral'):
pass
else:
injected_node.wrapped_sql = None
return injected_node
def write_graph_file(self, linker, manifest):
filename = graph_file_name
graph_path = os.path.join(self.config.target_path, filename)
if dbt.flags.WRITE_JSON:
linker.write_graph(graph_path, manifest)
def link_node(self, linker, node, manifest):
linker.add_node(node.unique_id)
for dependency in node.depends_on_nodes:
if manifest.nodes.get(dependency):
linker.dependency(
node.unique_id,
(manifest.nodes.get(dependency).unique_id))
else:
dbt.exceptions.dependency_not_found(node, dependency)
def link_graph(self, linker, manifest):
for node in manifest.nodes.values():
self.link_node(linker, node, manifest)
cycle = linker.find_cycles()
if cycle:
raise RuntimeError("Found a cycle: {}".format(cycle))
def compile(self, manifest, write=True):
linker = Linker()
self.link_graph(linker, manifest)
stats = defaultdict(int)
for node_name, node in itertools.chain(
manifest.nodes.items(),
manifest.macros.items()):
stats[node.resource_type] += 1
if write:
self.write_graph_file(linker, manifest)
print_compile_stats(stats)
return linker
def compile_manifest(config, manifest, write=True):
compiler = Compiler(config)
compiler.initialize()
return compiler.compile(manifest, write=write)
def _is_writable(node):
if not node.injected_sql:
return False
if dbt.utils.is_type(node, NodeType.Snapshot):
return False
return True
def compile_node(adapter, config, node, manifest, extra_context, write=True):
compiler = Compiler(config)
node = compiler.compile_node(node, manifest, extra_context)
node = _inject_runtime_config(adapter, node, extra_context)
if write and _is_writable(node):
logger.debug('Writing injected SQL for node "{}"'.format(
node.unique_id))
written_path = dbt.writer.write_node(
node,
config.target_path,
'compiled',
node.injected_sql)
node.build_path = written_path
return node
def _inject_runtime_config(adapter, node, extra_context):
wrapped_sql = node.wrapped_sql
context = _node_context(adapter, node)
context.update(extra_context)
sql = dbt.clients.jinja.get_rendered(wrapped_sql, context)
node.wrapped_sql = sql
return node
def _node_context(adapter, node):
return {
"run_started_at": dbt.tracking.active_user.run_started_at,
"invocation_id": dbt.tracking.active_user.invocation_id,
}
|
py
|
1a582d4a5ae6a7eded948f3239938dbbcb19f6e3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: __init__.py
Description: View components for Python SDK sample.
"""
import wx
import wx.lib.agw.labelbook as LB
from wx.lib.agw.fmresources import INB_FIT_LABELTEXT
from wx.lib.agw.fmresources import INB_LEFT
from wx.lib.agw.fmresources import INB_NO_RESIZE
from view.panel_detection import DetectionPanel
from view.panel_subscription import SubscriptionPanel
from view.panel_find_similar import FindSimilarPanel
from view.panel_group import GroupPanel
from view.panel_identification import IdentificationPanel
from view.panel_verification import VerificationPanel
TITLE = u"Microsoft Cognitive Services Face Samples"
class MyLabelBook(LB.LabelBook):
"""LabelBook part in Main Frame."""
def __init__(self, parent):
agw_style = INB_LEFT | INB_FIT_LABELTEXT | INB_NO_RESIZE
super(MyLabelBook, self).__init__(parent, agwStyle=agw_style)
subscription_panel = SubscriptionPanel(self)
subscription_text = u"Subscription Key Management"
self.AddPage(subscription_panel, subscription_text, True)
self.AddPage(wx.Panel(self), u"Select a scenario:")
self.EnableTab(1, False)
self.AddPage(DetectionPanel(self), u" - Face Detection")
self.AddPage(FindSimilarPanel(self), u" - Face Find Similar")
self.AddPage(GroupPanel(self), u" - Face Grouping")
self.AddPage(IdentificationPanel(self), u" - Face Identification")
self.AddPage(VerificationPanel(self), u" - Face Verification")
class MyTitle(wx.Panel):
"""Title part in Main Frame."""
def __init__(self, parent):
super(MyTitle, self).__init__(parent)
self.SetBackgroundColour('#00b294')
self.SetMinSize((-1, 80))
sizer = wx.BoxSizer()
sizer.AddStretchSpacer()
family = wx.FONTFAMILY_DEFAULT
style = wx.FONTSTYLE_NORMAL
weight = wx.FONTWEIGHT_NORMAL
font = wx.Font(20, family, style, weight)
self.text = wx.StaticText(self, label=TITLE, style=wx.ALIGN_CENTER)
self.text.SetFont(font)
sizer.Add(self.text, flag=wx.ALIGN_CENTER_VERTICAL)
sizer.AddStretchSpacer()
self.SetSizer(sizer)
class MyFrame(wx.Frame):
"""Main Frame."""
def __init__(self, parent):
super(MyFrame, self).__init__(parent, title=TITLE, size=(1280, 768))
icon_path = 'Assets/Microsoft-logo_rgb_c-gray.png'
self.SetIcon(wx.Icon(icon_path))
sizer = wx.BoxSizer(wx.VERTICAL)
self.title = MyTitle(self)
sizer.Add(self.title, flag=wx.EXPAND)
self.book = MyLabelBook(self)
sizer.Add(self.book, 1, flag=wx.EXPAND)
status_text = (
'Microsoft will receive the images you upload and may use them to '
'improve Face API and related services. By submitting an image, '
'you confirm you have consent from everyone in it.'
)
self.status = wx.StatusBar(self)
self.status.SetStatusText(status_text)
sizer.Add(self.status, flag=wx.EXPAND)
self.SetSizer(sizer)
self.Layout()
class MyApp(wx.App):
"""The whole app."""
def OnInit(self):
"""Show main frame."""
frame = MyFrame(None)
frame.Show()
return True
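# Entry-point sketch, assuming this module is run directly; the original sample
# may launch the app from a separate script.
if __name__ == '__main__':
    app = MyApp()
    app.MainLoop()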
|
py
|
1a582d831c273542464dacde66dc1d0c5f173c3e
|
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from faker import Factory
from app_dir.factories import OrderFactory
faker = Factory.create()
class CreateOrderRevolut(TestCase):
def setUp(self):
self.order = OrderFactory()
self.client = APIClient()
self.namespace = 'revolut_integration_api'
self.body = {
'amount': faker.amount(),
'capture_mode': faker.capture_mode(),
'merchant_order_ext_ref': faker.merchant_order_ext_ref(),
'email': faker.email(),
'currency': faker.currency(),
}
self.create_url = reverse(self.namespace + ':create-order')
def test_create_order(self):
response = self.client.post(self.create_url, self.body, format='json')
self.assertEqual(201, response.status_code)
|
py
|
1a582db43012c4299882f92c0fb1b981a9b70752
|
import numpy as np
import time, sys, math
from collections import deque
import sounddevice as sd
from src.utils import *
class Stream_Reader:
"""
The Stream_Reader continuously reads data from a selected sound source using sounddevice.
Arguments:
device: int or None: Select which audio stream to read.
rate: float or None: Sample rate to use. Defaults to something supported.
updates_per_second: int: How often to record new data.
"""
def __init__(self,
device = None,
rate = None,
updates_per_second = 1000,
FFT_window_size = None,
verbose = False):
print("Available audio devices:")
device_dict = sd.query_devices()
print(device_dict)
try:
sd.check_input_settings(device=device, channels=1, dtype=np.float32, extra_settings=None, samplerate=rate)
except:
print("Input sound settings for device %s and samplerate %s Hz not supported, using defaults..." %(str(device), str(rate)))
rate = None
device = None
self.rate = rate
if rate is not None:
sd.default.samplerate = rate
self.device = device
if device is not None:
sd.default.device = device
self.verbose = verbose
self.data_buffer = None
# This part is a bit hacky, need better solution for this:
# Determine what the optimal buffer shape is by streaming some test audio
self.optimal_data_lengths = []
with sd.InputStream(samplerate=self.rate,
blocksize=0,
device=self.device,
channels=1,
dtype=np.float32,
latency='low',
callback=self.test_stream_read):
time.sleep(0.2)
self.update_window_n_frames = max(self.optimal_data_lengths)
del self.optimal_data_lengths
#Alternative:
#self.update_window_n_frames = round_up_to_even(44100 / updates_per_second)
self.stream = sd.InputStream(
samplerate=self.rate,
blocksize=self.update_window_n_frames,
device=self.device,
channels=1,
dtype=np.float32,
latency='low',
extra_settings=None,
callback=self.non_blocking_stream_read)
self.rate = self.stream.samplerate
self.device = self.stream.device
self.updates_per_second = self.rate / self.update_window_n_frames
self.info = ''
self.data_capture_delays = deque(maxlen=20)
self.new_data = False
if self.verbose:
self.data_capture_delays = deque(maxlen=20)
self.num_data_captures = 0
self.device_latency = device_dict[self.device]['default_low_input_latency']
print("\n##################################################################################################")
print("\nDefaulted to using first working mic, Running on mic %s with properties:" %str(self.device))
print(device_dict[self.device])
print('Which has a latency of %.2f ms' %(1000*self.device_latency))
print("\n##################################################################################################")
print('Recording audio at %d Hz\nUsing (non-overlapping) data-windows of %d samples (updating at %.2ffps)'
%(self.rate, self.update_window_n_frames, self.updates_per_second))
def non_blocking_stream_read(self, indata, frames, time_info, status):
if self.verbose:
start = time.time()
if status:
print(status)
if self.data_buffer is not None:
self.data_buffer.append_data(indata[:,0])
self.new_data = True
if self.verbose:
self.num_data_captures += 1
self.data_capture_delays.append(time.time() - start)
return
def test_stream_read(self, indata, frames, time_info, status):
'''
Dummy function to determine what blocksize the stream is using
'''
self.optimal_data_lengths.append(len(indata[:,0]))
return
def stream_start(self, data_windows_to_buffer = None):
self.data_windows_to_buffer = data_windows_to_buffer
if data_windows_to_buffer is None:
self.data_windows_to_buffer = int(self.updates_per_second / 2) #By default, buffer 0.5 second of audio
else:
self.data_windows_to_buffer = data_windows_to_buffer
self.data_buffer = numpy_data_buffer(self.data_windows_to_buffer, self.update_window_n_frames)
print("\n--🎙 -- Starting live audio stream...\n")
self.stream.start()
self.stream_start_time = time.time()
def terminate(self):
print("👋 Sending stream termination command...")
self.stream.stop()
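# Usage sketch: captures roughly one second of audio with default settings; the
# available input device and sample rate are assumptions about the local machine.
if __name__ == '__main__':
    reader = Stream_Reader(verbose=True)
    reader.stream_start()
    time.sleep(1.0)
    reader.terminate()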
|
py
|
1a582df849123670edd78dd1c6dc75073c6c3dc4
|
"""
Stack-In-A-Box: Add Credentials to User
"""
import json
import unittest
import requests
import stackinabox.util.requests_mock.core
from stackinabox.stack import StackInABox
from openstackinabox.services.keystone import KeystoneV2Service
class TestKeystoneV2UserAddCredentials(unittest.TestCase):
def setUp(self):
super(TestKeystoneV2UserAddCredentials, self).setUp()
self.keystone = KeystoneV2Service()
self.headers = {
'x-auth-token': self.keystone.model.tokens.admin_token
}
self.tenant_id = self.keystone.model.tenants.add(
tenant_name='neo',
description='The One'
)
self.user_info = {
'user': {
'username': 'trinity',
'enabled': True,
'email': '[email protected]',
'password': 'Inl0veWithNeo'
}
}
self.user_info['user']['userid'] = self.keystone.model.users.add(
tenant_id=self.tenant_id,
username=self.user_info['user']['username'],
email=self.user_info['user']['email'],
password=self.user_info['user']['password'],
enabled=self.user_info['user']['enabled']
)
self.keystone.model.tokens.add(
tenant_id=self.tenant_id,
user_id=self.user_info['user']['userid']
)
self.keystone.model.roles.add_user_role_by_role_name(
tenant_id=self.tenant_id,
user_id=self.user_info['user']['userid'],
role_name=self.keystone.model.roles.IDENTITY_ADMIN_ROLE
)
StackInABox.register_service(self.keystone)
def tearDown(self):
super(TestKeystoneV2UserAddCredentials, self).tearDown()
StackInABox.reset_services()
@staticmethod
def get_userid_url(host, userid):
return 'http://{0}/keystone/v2.0/users/{1}/OS-KSADM/credentials'\
.format(host, userid)
def test_user_add_credentials_basic(self):
with stackinabox.util.requests_mock.core.activate():
stackinabox.util.requests_mock.core.requests_mock_registration(
'localhost')
user_data = self.keystone.model.tokens.get_by_user_id(
user_id=self.user_info['user']['userid']
)
url = TestKeystoneV2UserAddCredentials.get_userid_url(
'localhost',
self.user_info['user']['userid']
)
user_info = {
'passwordCredentials': {
'username': self.user_info['user']['username'],
'password': 'Tr1n1tyR0ck$'
}
}
json_data = json.dumps(user_info)
self.headers['x-auth-token'] = user_data['token']
res = requests.post(url,
headers=self.headers,
data=json_data)
self.assertEqual(res.status_code, 201)
def test_user_add_credentials_too_many_parameters(self):
with stackinabox.util.requests_mock.core.activate():
stackinabox.util.requests_mock.core.requests_mock_registration(
'localhost')
user_data = self.keystone.model.tokens.get_by_user_id(
self.user_info['user']['userid']
)
url = TestKeystoneV2UserAddCredentials.get_userid_url(
'localhost',
self.user_info['user']['userid'])
user_info = {
'passwordCredentials': {
'enabled': False,
'username': self.user_info['user']['username'],
'password': 'Tr1n1tyR0ck$'
}
}
json_data = json.dumps(user_info)
self.headers['x-auth-token'] = user_data['token']
res = requests.post(url,
headers=self.headers,
data=json_data)
self.assertEqual(res.status_code, 201)
def test_user_add_credentials_no_token(self):
with stackinabox.util.requests_mock.core.activate():
stackinabox.util.requests_mock.core.requests_mock_registration(
'localhost')
url = TestKeystoneV2UserAddCredentials.get_userid_url(
'localhost',
self.user_info['user']['userid'])
user_info = {
'passwordCredentials': {
'username': self.user_info['user']['username'],
'password': 'Tr1n1tyR0ck$'
}
}
json_data = json.dumps(user_info)
res = requests.post(url, headers=None, data=json_data)
self.assertEqual(res.status_code, 403)
def test_user_add_credentials_invalid_token(self):
with stackinabox.util.requests_mock.core.activate():
stackinabox.util.requests_mock.core.requests_mock_registration(
'localhost'
)
url = TestKeystoneV2UserAddCredentials.get_userid_url(
'localhost',
self.user_info['user']['userid']
)
user_info = {
'passwordCredentials': {
'username': self.user_info['user']['username'],
'password': 'Tr1n1tyR0ck$'
}
}
json_data = json.dumps(user_info)
self.headers['x-auth-token'] = 'new_token'
res = requests.post(
url,
headers=self.headers,
data=json_data
)
self.assertEqual(res.status_code, 401)
def test_user_add_credentials_invalid_token_2(self):
with stackinabox.util.requests_mock.core.activate():
stackinabox.util.requests_mock.core.requests_mock_registration(
'localhost'
)
url = TestKeystoneV2UserAddCredentials.get_userid_url(
'localhost',
self.user_info['user']['userid']
)
user_info = {
'credentials': {
'username': self.user_info['user']['username'],
'password': 'Tr1n1tyR0ck$'
}
}
json_data = json.dumps(user_info)
res = requests.post(url,
headers=self.headers,
data=json_data)
self.assertEqual(res.status_code, 400)
def test_user_add_credentials_invalid_user_id(self):
with stackinabox.util.requests_mock.core.activate():
stackinabox.util.requests_mock.core.requests_mock_registration(
'localhost')
user_data = self.keystone.model.tokens.get_by_user_id(
self.user_info['user']['userid']
)
url = TestKeystoneV2UserAddCredentials.get_userid_url(
'localhost',
(int(self.user_info['user']['userid']) + 1)
)
user_info = {
'passwordCredentials': {
'username': self.user_info['user']['username'],
'password': 'Tr1n1tyR0ck$'
}
}
json_data = json.dumps(user_info)
self.headers['x-auth-token'] = user_data['token']
res = requests.post(url,
headers=self.headers,
data=json_data)
self.assertEqual(res.status_code, 404)
|