max_stars_repo_path (stringlengths 4-286) | max_stars_repo_name (stringlengths 5-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.03M) | content_cleaned (stringlengths 6-1.03M) | language (stringclasses, 111 values) | language_score (float64 0.03-1) | comments (stringlengths 0-556k) | edu_score (float64 0.32-5.03) | edu_int_score (int64 0-5) |
---|---|---|---|---|---|---|---|---|---|---|
program/program/trackers/TrackerCorrelation.py | JankaSvK/thesis | 1 | 8100 | <filename>program/program/trackers/TrackerCorrelation.py
import dlib
class CorrelationTracker(object):
def init(self, image, bbox):
self.tracker = dlib.correlation_tracker()
x, y, x2, y2 = bbox
x2 += x
y2 += y
self.tracker.start_track(image, dlib.rectangle(x, y, x2, y2))
return True
def update(self, image):
self.tracker.update(image)
out = self.tracker.get_position()
return True, (out.left(), out.top(), out.right() - out.left(), out.bottom() - out.top())
| <filename>program/program/trackers/TrackerCorrelation.py
import dlib
class CorrelationTracker(object):
def init(self, image, bbox):
self.tracker = dlib.correlation_tracker()
x, y, x2, y2 = bbox
x2 += x
y2 += y
self.tracker.start_track(image, dlib.rectangle(x, y, x2, y2))
return True
def update(self, image):
self.tracker.update(image)
out = self.tracker.get_position()
return True, (out.left(), out.top(), out.right() - out.left(), out.bottom() - out.top())
| none | 1 | 2.937498 | 3 |
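A minimal usage sketch for the `CorrelationTracker` wrapper above, assuming dlib and numpy are installed and the class is in scope; the blank frames stand in for real video frames and the bounding box values are arbitrary.

```python
import numpy as np

# Two stand-in frames; in practice these would come from a video stream.
frame = np.zeros((240, 320, 3), dtype=np.uint8)
next_frame = np.zeros((240, 320, 3), dtype=np.uint8)

tracker = CorrelationTracker()
tracker.init(frame, (50, 60, 40, 30))         # bbox is (x, y, width, height)
ok, (x, y, w, h) = tracker.update(next_frame)
print(ok, x, y, w, h)
```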
|
examples/nlp/language_modeling/megatron_gpt_ckpt_to_nemo.py | rilango/NeMo | 0 | 8101 | <reponame>rilango/NeMo<filename>examples/nlp/language_modeling/megatron_gpt_ckpt_to_nemo.py<gh_stars>0
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentParser
import torch.multiprocessing as mp
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector
from nemo.utils import AppState, logging
def get_args():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint_folder",
type=str,
default=None,
required=True,
help="Path to PTL checkpoints saved during training. Ex: /raid/nemo_experiments/megatron_gpt/checkpoints",
)
parser.add_argument(
"--checkpoint_name",
type=str,
default=None,
required=True,
help="Name of checkpoint to be used. Ex: megatron_gpt--val_loss=6.34-step=649-last.ckpt",
)
parser.add_argument(
"--hparams_file",
type=str,
default=None,
required=False,
help="Path config for restoring. It's created during training and may need to be modified during restore if restore environment is different than training. Ex: /raid/nemo_experiments/megatron_gpt/hparams.yaml",
)
parser.add_argument("--nemo_file_path", type=str, default=None, required=True, help="Path to output .nemo file.")
parser.add_argument("--tensor_model_parallel_size", type=int, required=True, default=None)
args = parser.parse_args()
return args
def convert(rank, world_size, args):
app_state = AppState()
app_state.data_parallel_rank = 0
trainer = Trainer(gpus=args.tensor_model_parallel_size)
# TODO: reach out to PTL For an API-safe local rank override
trainer.accelerator.training_type_plugin._local_rank = rank
if args.tensor_model_parallel_size is not None and args.tensor_model_parallel_size > 1:
# inject model parallel rank
checkpoint_path = os.path.join(args.checkpoint_folder, f'mp_rank_{rank:02d}', args.checkpoint_name)
else:
checkpoint_path = os.path.join(args.checkpoint_folder, args.checkpoint_name)
model = MegatronGPTModel.load_from_checkpoint(checkpoint_path, hparams_file=args.hparams_file, trainer=trainer)
model._save_restore_connector = NLPSaveRestoreConnector()
model.save_to(args.nemo_file_path)
logging.info(f'NeMo model saved to: {args.nemo_file_path}')
def main() -> None:
args = get_args()
world_size = args.tensor_model_parallel_size
mp.spawn(convert, args=(world_size, args), nprocs=world_size, join=True)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentParser
import torch.multiprocessing as mp
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector
from nemo.utils import AppState, logging
def get_args():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint_folder",
type=str,
default=None,
required=True,
help="Path to PTL checkpoints saved during training. Ex: /raid/nemo_experiments/megatron_gpt/checkpoints",
)
parser.add_argument(
"--checkpoint_name",
type=str,
default=None,
required=True,
help="Name of checkpoint to be used. Ex: megatron_gpt--val_loss=6.34-step=649-last.ckpt",
)
parser.add_argument(
"--hparams_file",
type=str,
default=None,
required=False,
help="Path config for restoring. It's created during training and may need to be modified during restore if restore environment is different than training. Ex: /raid/nemo_experiments/megatron_gpt/hparams.yaml",
)
parser.add_argument("--nemo_file_path", type=str, default=None, required=True, help="Path to output .nemo file.")
parser.add_argument("--tensor_model_parallel_size", type=int, required=True, default=None)
args = parser.parse_args()
return args
def convert(rank, world_size, args):
app_state = AppState()
app_state.data_parallel_rank = 0
trainer = Trainer(gpus=args.tensor_model_parallel_size)
# TODO: reach out to PTL For an API-safe local rank override
trainer.accelerator.training_type_plugin._local_rank = rank
if args.tensor_model_parallel_size is not None and args.tensor_model_parallel_size > 1:
# inject model parallel rank
checkpoint_path = os.path.join(args.checkpoint_folder, f'mp_rank_{rank:02d}', args.checkpoint_name)
else:
checkpoint_path = os.path.join(args.checkpoint_folder, args.checkpoint_name)
model = MegatronGPTModel.load_from_checkpoint(checkpoint_path, hparams_file=args.hparams_file, trainer=trainer)
model._save_restore_connector = NLPSaveRestoreConnector()
model.save_to(args.nemo_file_path)
logging.info(f'NeMo model saved to: {args.nemo_file_path}')
def main() -> None:
args = get_args()
world_size = args.tensor_model_parallel_size
mp.spawn(convert, args=(world_size, args), nprocs=world_size, join=True)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter | en | 0.820553 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # TODO: reach out to PTL For an API-safe local rank override # inject model parallel rank # noqa pylint: disable=no-value-for-parameter | 1.900189 | 2 |
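For reference, a hedged sketch of driving the same conversion programmatically instead of through the command line, mirroring `main()` above; the paths are placeholders based on the argument help strings and assume a working NeMo/PyTorch Lightning environment.

```python
from argparse import Namespace
import torch.multiprocessing as mp

# Placeholder paths; tensor_model_parallel_size must match the training run.
args = Namespace(
    checkpoint_folder="/raid/nemo_experiments/megatron_gpt/checkpoints",
    checkpoint_name="megatron_gpt--val_loss=6.34-step=649-last.ckpt",
    hparams_file="/raid/nemo_experiments/megatron_gpt/hparams.yaml",
    nemo_file_path="/raid/nemo_experiments/megatron_gpt/megatron_gpt.nemo",
    tensor_model_parallel_size=2,
)
mp.spawn(convert, args=(args.tensor_model_parallel_size, args),
         nprocs=args.tensor_model_parallel_size, join=True)
```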
sdk/python/pulumi_aws/apigateway/api_key.py | dixler/pulumi-aws | 0 | 8102 | <filename>sdk/python/pulumi_aws/apigateway/api_key.py<gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class ApiKey(pulumi.CustomResource):
arn: pulumi.Output[str]
"""
Amazon Resource Name (ARN)
"""
created_date: pulumi.Output[str]
"""
The creation date of the API key
"""
description: pulumi.Output[str]
"""
The API key description. Defaults to "Managed by Pulumi".
"""
enabled: pulumi.Output[bool]
"""
Specifies whether the API key can be used by callers. Defaults to `true`.
"""
last_updated_date: pulumi.Output[str]
"""
The last update date of the API key
"""
name: pulumi.Output[str]
"""
The name of the API key
"""
tags: pulumi.Output[dict]
"""
Key-value mapping of resource tags
"""
value: pulumi.Output[str]
"""
The value of the API key. If not specified, it will be automatically generated by AWS on creation.
"""
def __init__(__self__, resource_name, opts=None, description=None, enabled=None, name=None, tags=None, value=None, __props__=None, __name__=None, __opts__=None):
"""
Provides an API Gateway API Key.
> **NOTE:** Since the API Gateway usage plans feature was launched on August 11, 2016, usage plans are now **required** to associate an API key with an API stage.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The API key description. Defaults to "Managed by Pulumi".
:param pulumi.Input[bool] enabled: Specifies whether the API key can be used by callers. Defaults to `true`.
:param pulumi.Input[str] name: The name of the API key
:param pulumi.Input[dict] tags: Key-value mapping of resource tags
:param pulumi.Input[str] value: The value of the API key. If not specified, it will be automatically generated by AWS on creation.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/api_gateway_api_key.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if description is None:
description = 'Managed by Pulumi'
__props__['description'] = description
__props__['enabled'] = enabled
__props__['name'] = name
__props__['tags'] = tags
__props__['value'] = value
__props__['arn'] = None
__props__['created_date'] = None
__props__['last_updated_date'] = None
super(ApiKey, __self__).__init__(
'aws:apigateway/apiKey:ApiKey',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, arn=None, created_date=None, description=None, enabled=None, last_updated_date=None, name=None, tags=None, value=None):
"""
Get an existing ApiKey resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN)
:param pulumi.Input[str] created_date: The creation date of the API key
:param pulumi.Input[str] description: The API key description. Defaults to "Managed by Pulumi".
:param pulumi.Input[bool] enabled: Specifies whether the API key can be used by callers. Defaults to `true`.
:param pulumi.Input[str] last_updated_date: The last update date of the API key
:param pulumi.Input[str] name: The name of the API key
:param pulumi.Input[dict] tags: Key-value mapping of resource tags
:param pulumi.Input[str] value: The value of the API key. If not specified, it will be automatically generated by AWS on creation.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/api_gateway_api_key.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["created_date"] = created_date
__props__["description"] = description
__props__["enabled"] = enabled
__props__["last_updated_date"] = last_updated_date
__props__["name"] = name
__props__["tags"] = tags
__props__["value"] = value
return ApiKey(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| <filename>sdk/python/pulumi_aws/apigateway/api_key.py<gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class ApiKey(pulumi.CustomResource):
arn: pulumi.Output[str]
"""
Amazon Resource Name (ARN)
"""
created_date: pulumi.Output[str]
"""
The creation date of the API key
"""
description: pulumi.Output[str]
"""
The API key description. Defaults to "Managed by Pulumi".
"""
enabled: pulumi.Output[bool]
"""
Specifies whether the API key can be used by callers. Defaults to `true`.
"""
last_updated_date: pulumi.Output[str]
"""
The last update date of the API key
"""
name: pulumi.Output[str]
"""
The name of the API key
"""
tags: pulumi.Output[dict]
"""
Key-value mapping of resource tags
"""
value: pulumi.Output[str]
"""
The value of the API key. If not specified, it will be automatically generated by AWS on creation.
"""
def __init__(__self__, resource_name, opts=None, description=None, enabled=None, name=None, tags=None, value=None, __props__=None, __name__=None, __opts__=None):
"""
Provides an API Gateway API Key.
> **NOTE:** Since the API Gateway usage plans feature was launched on August 11, 2016, usage plans are now **required** to associate an API key with an API stage.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The API key description. Defaults to "Managed by Pulumi".
:param pulumi.Input[bool] enabled: Specifies whether the API key can be used by callers. Defaults to `true`.
:param pulumi.Input[str] name: The name of the API key
:param pulumi.Input[dict] tags: Key-value mapping of resource tags
:param pulumi.Input[str] value: The value of the API key. If not specified, it will be automatically generated by AWS on creation.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/api_gateway_api_key.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if description is None:
description = 'Managed by Pulumi'
__props__['description'] = description
__props__['enabled'] = enabled
__props__['name'] = name
__props__['tags'] = tags
__props__['value'] = value
__props__['arn'] = None
__props__['created_date'] = None
__props__['last_updated_date'] = None
super(ApiKey, __self__).__init__(
'aws:apigateway/apiKey:ApiKey',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, arn=None, created_date=None, description=None, enabled=None, last_updated_date=None, name=None, tags=None, value=None):
"""
Get an existing ApiKey resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN)
:param pulumi.Input[str] created_date: The creation date of the API key
:param pulumi.Input[str] description: The API key description. Defaults to "Managed by Pulumi".
:param pulumi.Input[bool] enabled: Specifies whether the API key can be used by callers. Defaults to `true`.
:param pulumi.Input[str] last_updated_date: The last update date of the API key
:param pulumi.Input[str] name: The name of the API key
:param pulumi.Input[dict] tags: Key-value mapping of resource tags
:param pulumi.Input[str] value: The value of the API key. If not specified, it will be automatically generated by AWS on creation.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/api_gateway_api_key.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["created_date"] = created_date
__props__["description"] = description
__props__["enabled"] = enabled
__props__["last_updated_date"] = last_updated_date
__props__["name"] = name
__props__["tags"] = tags
__props__["value"] = value
return ApiKey(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| en | 0.673187 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** Amazon Resource Name (ARN) The creation date of the API key The API key description. Defaults to "Managed by Pulumi". Specifies whether the API key can be used by callers. Defaults to `true`. The last update date of the API key The name of the API key Key-value mapping of resource tags The value of the API key. If not specified, it will be automatically generated by AWS on creation. Provides an API Gateway API Key. > **NOTE:** Since the API Gateway usage plans feature was launched on August 11, 2016, usage plans are now **required** to associate an API key with an API stage. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] description: The API key description. Defaults to "Managed by Pulumi". :param pulumi.Input[bool] enabled: Specifies whether the API key can be used by callers. Defaults to `true`. :param pulumi.Input[str] name: The name of the API key :param pulumi.Input[dict] tags: Key-value mapping of resource tags :param pulumi.Input[str] value: The value of the API key. If not specified, it will be automatically generated by AWS on creation. > This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/api_gateway_api_key.html.markdown. Get an existing ApiKey resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param str id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] arn: Amazon Resource Name (ARN) :param pulumi.Input[str] created_date: The creation date of the API key :param pulumi.Input[str] description: The API key description. Defaults to "Managed by Pulumi". :param pulumi.Input[bool] enabled: Specifies whether the API key can be used by callers. Defaults to `true`. :param pulumi.Input[str] last_updated_date: The last update date of the API key :param pulumi.Input[str] name: The name of the API key :param pulumi.Input[dict] tags: Key-value mapping of resource tags :param pulumi.Input[str] value: The value of the API key. If not specified, it will be automatically generated by AWS on creation. > This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/api_gateway_api_key.html.markdown. | 1.765791 | 2 |
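A short usage sketch of the generated `ApiKey` class above inside a Pulumi program; the resource name, tags, and stack export are illustrative only.

```python
import pulumi
import pulumi_aws as aws

# Create an API Gateway API key; AWS generates the key value automatically.
example_key = aws.apigateway.ApiKey(
    "example-key",
    description="Managed by Pulumi",
    enabled=True,
    tags={"Environment": "dev"},
)

# Expose the generated key value as a stack output (for demonstration only).
pulumi.export("api_key_value", example_key.value)
```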
SROMPy/optimize/ObjectiveFunction.py | jwarner308/SROMPy | 23 | 8103 | <gh_stars>10-100
# Copyright 2018 United States Government as represented by the Administrator of
# the National Aeronautics and Space Administration. No copyright is claimed in
# the United States under Title 17, U.S. Code. All Other Rights Reserved.
# The Stochastic Reduced Order Models with Python (SROMPy) platform is licensed
# under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import numpy as np
from SROMPy.target import RandomVector
from SROMPy.target.RandomEntity import RandomEntity
class ObjectiveFunction:
"""
Defines the objective function for optimizing SROM parameters. Calculates
errors between the statistics of the SROM and the target random vector
being modeled by it.
Will create objective function for optimization library (e.g. scipy) that
essentially wraps this class's evaluate function
"""
def __init__(self, srom, target, obj_weights=None, error='mean',
max_moment=5, num_cdf_grid_points=100):
"""
Initialize objective function. Pass in SROM & target random vector
objects that have been previously initialized. Objective function
calculates the errors between the statistics of this SROM and the
target random vector (these objects must have compute_moments,CDF,
corr_mat functions defined).
inputs:
-SROM - initialized SROM object
-targetRV - initialized RandomVector object (either
AnalyticRandomVector or SampleRandomVector) with same
dimension as SROM
-obj_weights - array of floats defining the relative weight of the
terms in the objective function. Terms are error in moments,
CDFs, and correlation matrix in that order. Default is equal
weights ([1.0,1.0,1.0])
-error - string 'mean','max', or 'sse' defining how error is defined
between the statistics of the SROM & target
-max_moment - int, max order to evaluate moment errors up to
-num_cdf_grid_points - int, # pts to evaluate CDF errors on
"""
self.__test_init_params(srom, target, obj_weights, error,
max_moment, num_cdf_grid_points)
self._SROM = srom
self._target = target
self._x_grid = None
# Generate grids for evaluating CDFs based on target RV's range
self.generate_cdf_grids(num_cdf_grid_points)
self._metric = error.upper()
self._max_moment = max_moment
def get_moment_error(self, samples, probabilities):
"""
Returns moment error for given samples & probabilities
"""
self._SROM.set_params(samples, probabilities)
return self.compute_moment_error()
def get_cdf_error(self, samples, probabilities):
"""
Returns CDF error for given samples & probabilities
"""
self._SROM.set_params(samples, probabilities)
return self.compute_cdf_error()
def get_corr_error(self, samples, probabilities):
"""
Returns correlation error for given samples & probabilities
"""
self._SROM.set_params(samples, probabilities)
return self.compute_correlation_error()
def evaluate(self, samples, probabilities):
"""
Evaluates the objective function for the specified SROM samples &
probabilities. Calculates errors in statistics between SROM/target
"""
error = 0.0
# SROM is parameterized by the current values of samples/probabilities for stats.
self._SROM.set_params(samples, probabilities)
if self._weights[0] > 0.0:
cdf_error = self.compute_cdf_error()
error += cdf_error * self._weights[0]
if self._weights[1] > 0.0:
moment_error = self.compute_moment_error()
error += moment_error * self._weights[1]
if self._weights[2] > 0.0:
corr_error = self.compute_correlation_error()
error += corr_error * self._weights[2]
return error
def compute_moment_error(self):
"""
Calculate error in moments between SROM & target
"""
srom_moments = self._SROM.compute_moments(self._max_moment)
target_moments = self._target.compute_moments(self._max_moment)
# Reshape to 2D if returned as 1D for scalar RV.
if len(target_moments.shape) == 1:
target_moments = target_moments.reshape((self._max_moment, 1))
# Prevent divide by zero.
zero_indices = np.where(np.abs(target_moments) <= 1e-12)[0]
target_moments[zero_indices] = 1.0
# Squared relative difference:
if self._metric == "SSE":
rel_diffs = ((srom_moments-target_moments)/target_moments)**2.0
error = 0.5*np.sum(rel_diffs)
# Max absolute value:
elif self._metric == "MAX":
diffs = np.abs(srom_moments - target_moments)
error = np.max(diffs)
elif self._metric == "MEAN":
diffs = np.abs(srom_moments - target_moments)
error = np.mean(diffs)
else:
raise ValueError("Invalid error metric")
return error
def compute_cdf_error(self):
"""
Calculate error in CDFs between SROM & target at pts in x_grid
"""
srom_cdfs = self._SROM.compute_cdf(self._x_grid)
target_cdfs = self._target.compute_cdf(self._x_grid)
# Check for 0 cdf values to prevent divide by zero.
nonzero_indices = np.where(target_cdfs[:, 0] > 0)[0]
srom_cdfs = srom_cdfs[nonzero_indices, :]
target_cdfs = target_cdfs[nonzero_indices, :]
if self._metric == "SSE":
squared_diffs = (srom_cdfs - target_cdfs)**2.0
rel_diffs = squared_diffs / target_cdfs**2.0
error = 0.5*np.sum(rel_diffs)
elif self._metric == "MAX":
diffs = np.abs(srom_cdfs - target_cdfs)
error = np.max(diffs)
elif self._metric == "MEAN":
diffs = np.abs(srom_cdfs - target_cdfs)
error = np.mean(diffs)
else:
raise ValueError("Invalid error metric")
return error
def compute_correlation_error(self):
"""
Calculate error in correlation matrix between SROM & target
"""
# Neglect for 1D random variable:
if self._target._dim == 1:
return 0.0
srom_corr = self._SROM.compute_corr_mat()
target_corr = self._target.compute_correlation_matrix()
if self._metric == "SSE":
squared_diffs = (srom_corr - target_corr)**2.0
rel_diffs = squared_diffs / target_corr**2.0
error = 0.5*np.sum(rel_diffs)
elif self._metric == "MAX":
diffs = np.abs(srom_corr - target_corr)
error = np.max(diffs)
elif self._metric == "MEAN":
diffs = np.abs(srom_corr - target_corr)
error = np.mean(diffs)
else:
raise ValueError("Invalid error metric")
return error
def generate_cdf_grids(self, num_cdf_grid_points):
"""
Generate numerical grids for evaluating the CDF errors based on the
range of the target random vector. Create x_grid member variable with
num_cdf_grid_points along each dimension of the random vector.
"""
self._x_grid = np.zeros((num_cdf_grid_points, self._target._dim))
for i in range(self._target._dim):
grid = np.linspace(self._target.mins[i],
self._target.maxs[i],
num_cdf_grid_points)
self._x_grid[:, i] = grid
def __test_init_params(self, srom, target, obj_weights, error, max_moment,
num_cdf_grid_points):
"""
Due to the large numbers of parameters passed into __init__() that
need to be tested, the testing is done in this utility function
instead of __init__().
"""
# Test target.
if not (isinstance(target, RandomEntity)):
raise TypeError("target must inherit from RandomEntity.")
# Test srom.
from SROMPy.srom import SROM
if not isinstance(srom, SROM):
raise TypeError("srom must be of type SROM.")
# Ensure srom and target have same dimensions if target is RandomVector.
if isinstance(target, RandomVector):
if target._dim != srom._dim:
raise ValueError("target and srom must have same dimensions.")
# Test obj_weights.
if obj_weights is not None:
if isinstance(obj_weights, list):
obj_weights = np.array(obj_weights)
if not isinstance(obj_weights, np.ndarray):
raise TypeError("obj_weights must be of type ndarray or list.")
if len(obj_weights.shape) != 1:
raise ValueError("obj_weights must be a one dimensional array.")
if obj_weights.shape[0] != 3:
raise ValueError("obj_weights must have exactly 3 elements.")
if np.min(obj_weights) < 0.:
raise ValueError("obj_weights cannot be less than zero.")
self._weights = obj_weights
else:
self._weights = np.ones((3,))
# Test error function name.
if not isinstance(error, str):
raise TypeError("error must be a string: 'MEAN', 'MAX', or 'SSE'.")
if error.upper() not in ["MEAN", "MAX", "SSE"]:
raise ValueError("error must be either 'mean', 'max', or 'SSE'.")
# Test max_moment.
if not isinstance(max_moment, int):
raise TypeError("max_moment must be a positive integer.")
if max_moment < 1:
raise ValueError("max_moment must be a positive integer.")
# Test num_cdf_grid_points.
if not isinstance(num_cdf_grid_points, int):
raise TypeError("cf_grid_pts must be a positive integer.")
if num_cdf_grid_points < 1:
raise ValueError("num_cdf_grid_points must be a positive integer.")
| # Copyright 2018 United States Government as represented by the Administrator of
# the National Aeronautics and Space Administration. No copyright is claimed in
# the United States under Title 17, U.S. Code. All Other Rights Reserved.
# The Stochastic Reduced Order Models with Python (SROMPy) platform is licensed
# under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import numpy as np
from SROMPy.target import RandomVector
from SROMPy.target.RandomEntity import RandomEntity
class ObjectiveFunction:
"""
Defines the objective function for optimizing SROM parameters. Calculates
errors between the statistics of the SROM and the target random vector
being modeled by it.
Will create objective function for optimization library (e.g. scipy) that
essentially wraps this class's evaluate function
"""
def __init__(self, srom, target, obj_weights=None, error='mean',
max_moment=5, num_cdf_grid_points=100):
"""
Initialize objective function. Pass in SROM & target random vector
objects that have been previously initialized. Objective function
calculates the errors between the statistics of this SROM and the
target random vector (these objects must have compute_moments,CDF,
corr_mat functions defined).
inputs:
-SROM - initialized SROM object
-targetRV - initialized RandomVector object (either
AnalyticRandomVector or SampleRandomVector) with same
dimension as SROM
-obj_weights - array of floats defining the relative weight of the
terms in the objective function. Terms are error in moments,
CDFs, and correlation matrix in that order. Default is equal
weights ([1.0,1.0,1.0])
-error - string 'mean','max', or 'sse' defining how error is defined
between the statistics of the SROM & target
-max_moment - int, max order to evaluate moment errors up to
-num_cdf_grid_points - int, # pts to evaluate CDF errors on
"""
self.__test_init_params(srom, target, obj_weights, error,
max_moment, num_cdf_grid_points)
self._SROM = srom
self._target = target
self._x_grid = None
# Generate grids for evaluating CDFs based on target RV's range
self.generate_cdf_grids(num_cdf_grid_points)
self._metric = error.upper()
self._max_moment = max_moment
def get_moment_error(self, samples, probabilities):
"""
Returns moment error for given samples & probabilities
"""
self._SROM.set_params(samples, probabilities)
return self.compute_moment_error()
def get_cdf_error(self, samples, probabilities):
"""
Returns CDF error for given samples & probabilities
"""
self._SROM.set_params(samples, probabilities)
return self.compute_cdf_error()
def get_corr_error(self, samples, probabilities):
"""
Returns correlation error for given samples & probabilities
"""
self._SROM.set_params(samples, probabilities)
return self.compute_correlation_error()
def evaluate(self, samples, probabilities):
"""
Evaluates the objective function for the specified SROM samples &
probabilities. Calculates errors in statistics between SROM/target
"""
error = 0.0
# SROM is parameterized by the current values of samples/probabilities for stats.
self._SROM.set_params(samples, probabilities)
if self._weights[0] > 0.0:
cdf_error = self.compute_cdf_error()
error += cdf_error * self._weights[0]
if self._weights[1] > 0.0:
moment_error = self.compute_moment_error()
error += moment_error * self._weights[1]
if self._weights[2] > 0.0:
corr_error = self.compute_correlation_error()
error += corr_error * self._weights[2]
return error
def compute_moment_error(self):
"""
Calculate error in moments between SROM & target
"""
srom_moments = self._SROM.compute_moments(self._max_moment)
target_moments = self._target.compute_moments(self._max_moment)
# Reshape to 2D if returned as 1D for scalar RV.
if len(target_moments.shape) == 1:
target_moments = target_moments.reshape((self._max_moment, 1))
# Prevent divide by zero.
zero_indices = np.where(np.abs(target_moments) <= 1e-12)[0]
target_moments[zero_indices] = 1.0
# Squared relative difference:
if self._metric == "SSE":
rel_diffs = ((srom_moments-target_moments)/target_moments)**2.0
error = 0.5*np.sum(rel_diffs)
# Max absolute value:
elif self._metric == "MAX":
diffs = np.abs(srom_moments - target_moments)
error = np.max(diffs)
elif self._metric == "MEAN":
diffs = np.abs(srom_moments - target_moments)
error = np.mean(diffs)
else:
raise ValueError("Invalid error metric")
return error
def compute_cdf_error(self):
"""
Calculate error in CDFs between SROM & target at pts in x_grid
"""
srom_cdfs = self._SROM.compute_cdf(self._x_grid)
target_cdfs = self._target.compute_cdf(self._x_grid)
# Check for 0 cdf values to prevent divide by zero.
nonzero_indices = np.where(target_cdfs[:, 0] > 0)[0]
srom_cdfs = srom_cdfs[nonzero_indices, :]
target_cdfs = target_cdfs[nonzero_indices, :]
if self._metric == "SSE":
squared_diffs = (srom_cdfs - target_cdfs)**2.0
rel_diffs = squared_diffs / target_cdfs**2.0
error = 0.5*np.sum(rel_diffs)
elif self._metric == "MAX":
diffs = np.abs(srom_cdfs - target_cdfs)
error = np.max(diffs)
elif self._metric == "MEAN":
diffs = np.abs(srom_cdfs - target_cdfs)
error = np.mean(diffs)
else:
raise ValueError("Invalid error metric")
return error
def compute_correlation_error(self):
"""
Calculate error in correlation matrix between SROM & target
"""
# Neglect for 1D random variable:
if self._target._dim == 1:
return 0.0
srom_corr = self._SROM.compute_corr_mat()
target_corr = self._target.compute_correlation_matrix()
if self._metric == "SSE":
squared_diffs = (srom_corr - target_corr)**2.0
rel_diffs = squared_diffs / target_corr**2.0
error = 0.5*np.sum(rel_diffs)
elif self._metric == "MAX":
diffs = np.abs(srom_corr - target_corr)
error = np.max(diffs)
elif self._metric == "MEAN":
diffs = np.abs(srom_corr - target_corr)
error = np.mean(diffs)
else:
raise ValueError("Invalid error metric")
return error
def generate_cdf_grids(self, num_cdf_grid_points):
"""
Generate numerical grids for evaluating the CDF errors based on the
range of the target random vector. Create x_grid member variable with
num_cdf_grid_points along each dimension of the random vector.
"""
self._x_grid = np.zeros((num_cdf_grid_points, self._target._dim))
for i in range(self._target._dim):
grid = np.linspace(self._target.mins[i],
self._target.maxs[i],
num_cdf_grid_points)
self._x_grid[:, i] = grid
def __test_init_params(self, srom, target, obj_weights, error, max_moment,
num_cdf_grid_points):
"""
Due to the large numbers of parameters passed into __init__() that
need to be tested, the testing is done in this utility function
instead of __init__().
"""
# Test target.
if not (isinstance(target, RandomEntity)):
raise TypeError("target must inherit from RandomEntity.")
# Test srom.
from SROMPy.srom import SROM
if not isinstance(srom, SROM):
raise TypeError("srom must be of type SROM.")
# Ensure srom and target have same dimensions if target is RandomVector.
if isinstance(target, RandomVector):
if target._dim != srom._dim:
raise ValueError("target and srom must have same dimensions.")
# Test obj_weights.
if obj_weights is not None:
if isinstance(obj_weights, list):
obj_weights = np.array(obj_weights)
if not isinstance(obj_weights, np.ndarray):
raise TypeError("obj_weights must be of type ndarray or list.")
if len(obj_weights.shape) != 1:
raise ValueError("obj_weights must be a one dimensional array.")
if obj_weights.shape[0] != 3:
raise ValueError("obj_weights must have exactly 3 elements.")
if np.min(obj_weights) < 0.:
raise ValueError("obj_weights cannot be less than zero.")
self._weights = obj_weights
else:
self._weights = np.ones((3,))
# Test error function name.
if not isinstance(error, str):
raise TypeError("error must be a string: 'MEAN', 'MAX', or 'SSE'.")
if error.upper() not in ["MEAN", "MAX", "SSE"]:
raise ValueError("error must be either 'mean', 'max', or 'SSE'.")
# Test max_moment.
if not isinstance(max_moment, int):
raise TypeError("max_moment must be a positive integer.")
if max_moment < 1:
raise ValueError("max_moment must be a positive integer.")
# Test num_cdf_grid_points.
if not isinstance(num_cdf_grid_points, int):
raise TypeError("cf_grid_pts must be a positive integer.")
if num_cdf_grid_points < 1:
raise ValueError("num_cdf_grid_points must be a positive integer.") | en | 0.839372 | # Copyright 2018 United States Government as represented by the Administrator of # the National Aeronautics and Space Administration. No copyright is claimed in # the United States under Title 17, U.S. Code. All Other Rights Reserved. # The Stochastic Reduced Order Models with Python (SROMPy) platform is licensed # under the Apache License, Version 2.0 (the "License"); you may not use this # file except in compliance with the License. You may obtain a copy of the # License at http://www.apache.org/licenses/LICENSE-2.0. # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Defines the objective function for optimizing SROM parameters. Calculates errors between the statistics of the SROM and the target random vector being model by it. Will create objective function for optimization library (e.g. scipy) that essentially wraps this class's evaluate function Initialize objective function. Pass in SROM & target random vector objects that have been previously initialized. Objective function calculates the errors between the statistics of this SROM and the target random vector (these objects must have compute_moments,CDF, corr_mat functions defined). inputs: -SROM - initialized SROM object -targetRV - initialized RandomVector object (either AnalyticRandomVector or SampleRandomVector) with same dimension as SROM -obj_weights - array of floats defining the relative weight of the terms in the objective function. Terms are error in moments, CDFs, and correlation matrix in that order. Default is equal weights ([1.0,1.0,1.0]) -error - string 'mean','max', or 'sse' defining how error is defined between the statistics of the SROM & target -max_moment - int, max order to evaluate moment errors up to -num_cdf_grid_points - int, # pts to evaluate CDF errors on # Generate grids for evaluating CDFs based on target RV's range Returns moment error for given samples & probabilities Returns CDF error for given samples & probabilities Returns correlation error for given samples & probabilities Evaluates the objective function for the specified SROM samples & probabilities. Calculates errrors in statistics between SROM/target # SROM is by the current values of samples/probabilities for stats. Calculate error in moments between SROM & target # Reshape to 2D if returned as 1D for scalar RV. # Prevent divide by zero. # Squared relative difference: # Max absolute value: Calculate error in CDFs between SROM & target at pts in x_grid # Check for 0 cdf values to prevent divide by zero. Calculate error in correlation matrix between SROM & target # Neglect for 1D random variable: Generate numerical grids for evaluating the CDF errors based on the range of the target random vector. Create x_grid member variable with num_cdf_grid_points along each dimension of the random vector. Due to the large numbers of parameters passed into __init__() that need to be tested, the testing is done in this utility function instead of __init__(). # Test target. # Test srom. # Ensure srom and target have same dimensions if target is RandomVector. # Test obj_weights. # Test error function name. # Test max_moment. # Test num_cdf_grid_points. | 2.223747 | 2 |
test/utils.py | vasili-v/distcovery | 0 | 8104 | import os
import errno
import sys
def mock_directory_tree(tree):
tree = dict([(os.path.join(*key), value) \
for key, value in tree.iteritems()])
def listdir(path):
try:
names = tree[path]
except KeyError:
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), path)
if names is None:
raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path)
return names
def isfile(path):
try:
item = tree[path]
except KeyError:
return False
return item is None
def isdir(path):
try:
item = tree[path]
except KeyError:
return False
return item is not None
return listdir, isfile, isdir
class PreserveOs(object):
def setUp(self):
super(PreserveOs, self).setUp()
self.__listdir = os.listdir
self.__isfile = os.path.isfile
self.__isdir = os.path.isdir
def tearDown(self):
os.path.isdir = self.__isdir
os.path.isfile = self.__isfile
os.listdir = self.__listdir
super(PreserveOs, self).tearDown()
def full_test_tree(self):
tree = {('.',): ('__init__.py', 'test_first.py', 'test_second.py',
'test_sub_first', 't_sub_first', 'test_sub_third'),
('.', '__init__.py'): None,
('.', 'test_first.py'): None,
('.', 'test_second.py'): None,
('.', 'test_sub_first'): ('__init__.py', 'test_sub_first.py'),
('.', 'test_sub_first', '__init__.py'): None,
('.', 'test_sub_first', 'test_sub_first.py'): None,
('.', 't_sub_first'): ('__init__.py', 'test_sub_first.py'),
('.', 't_sub_first', '__init__.py'): None,
('.', 't_sub_first', 'test_sub_first.py'): None,
('.', 'test_sub_second'): ('test_sub_first.py',),
('.', 'test_sub_second', 'test_sub_first.py'): None,
('.', 'test_sub_third'): ('__init__.py', 'test_sub_first.py',
'test_sub_second'),
('.', 'test_sub_third', '__init__.py'): None,
('.', 'test_sub_third', 'test_sub_first.py'): None,
('.', 'test_sub_third', 'test_sub_second'): \
('__init__.py', 'test_sub_first.py', 't_sub_second.py'),
('.', 'test_sub_third', 'test_sub_second', '__init__.py'): None,
('.', 'test_sub_third', 'test_sub_second',
'test_sub_first.py'): None,
('.', 'test_sub_third', 'test_sub_second',
't_sub_second.py'): None}
os.listdir, os.path.isfile, os.path.isdir = mock_directory_tree(tree)
self.expected_content = {'first': 'test_first',
'second': 'test_second',
'sub_first': 'test_sub_first',
'sub_first.sub_first': \
'test_sub_first.test_sub_first',
'sub_third': 'test_sub_third',
'sub_third.sub_first': \
'test_sub_third.test_sub_first',
'sub_third.sub_second': \
'test_sub_third.test_sub_second',
'sub_third.sub_second.sub_first': \
'test_sub_third.test_sub_second.' \
'test_sub_first'}
class ImportTrash(object):
def setUp(self):
self.modules_trash = []
self.meta_path_trash = []
def tearDown(self):
for item in self.meta_path_trash:
if item in sys.meta_path:
sys.meta_path.remove(item)
for name in self.modules_trash:
if name in sys.modules:
del sys.modules[name]
| import os
import errno
import sys
def mock_directory_tree(tree):
tree = dict([(os.path.join(*key), value) \
for key, value in tree.iteritems()])
def listdir(path):
try:
names = tree[path]
except KeyError:
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), path)
if names is None:
raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path)
return names
def isfile(path):
try:
item = tree[path]
except KeyError:
return False
return item is None
def isdir(path):
try:
item = tree[path]
except KeyError:
return False
return item is not None
return listdir, isfile, isdir
class PreserveOs(object):
def setUp(self):
super(PreserveOs, self).setUp()
self.__listdir = os.listdir
self.__isfile = os.path.isfile
self.__isdir = os.path.isdir
def tearDown(self):
os.path.isdir = self.__isdir
os.path.isfile = self.__isfile
os.listdir = self.__listdir
super(PreserveOs, self).tearDown()
def full_test_tree(self):
tree = {('.',): ('__init__.py', 'test_first.py', 'test_second.py',
'test_sub_first', 't_sub_first', 'test_sub_third'),
('.', '__init__.py'): None,
('.', 'test_first.py'): None,
('.', 'test_second.py'): None,
('.', 'test_sub_first'): ('__init__.py', 'test_sub_first.py'),
('.', 'test_sub_first', '__init__.py'): None,
('.', 'test_sub_first', 'test_sub_first.py'): None,
('.', 't_sub_first'): ('__init__.py', 'test_sub_first.py'),
('.', 't_sub_first', '__init__.py'): None,
('.', 't_sub_first', 'test_sub_first.py'): None,
('.', 'test_sub_second'): ('test_sub_first.py',),
('.', 'test_sub_second', 'test_sub_first.py'): None,
('.', 'test_sub_third'): ('__init__.py', 'test_sub_first.py',
'test_sub_second'),
('.', 'test_sub_third', '__init__.py'): None,
('.', 'test_sub_third', 'test_sub_first.py'): None,
('.', 'test_sub_third', 'test_sub_second'): \
('__init__.py', 'test_sub_first.py', 't_sub_second.py'),
('.', 'test_sub_third', 'test_sub_second', '__init__.py'): None,
('.', 'test_sub_third', 'test_sub_second',
'test_sub_first.py'): None,
('.', 'test_sub_third', 'test_sub_second',
't_sub_second.py'): None}
os.listdir, os.path.isfile, os.path.isdir = mock_directory_tree(tree)
self.expected_content = {'first': 'test_first',
'second': 'test_second',
'sub_first': 'test_sub_first',
'sub_first.sub_first': \
'test_sub_first.test_sub_first',
'sub_third': 'test_sub_third',
'sub_third.sub_first': \
'test_sub_third.test_sub_first',
'sub_third.sub_second': \
'test_sub_third.test_sub_second',
'sub_third.sub_second.sub_first': \
'test_sub_third.test_sub_second.' \
'test_sub_first'}
class ImportTrash(object):
def setUp(self):
self.modules_trash = []
self.meta_path_trash = []
def tearDown(self):
for item in self.meta_path_trash:
if item in sys.meta_path:
sys.meta_path.remove(item)
for name in self.modules_trash:
if name in sys.modules:
del sys.modules[name]
| none | 1 | 2.721076 | 3 |
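A small sketch of how the helpers above fit together (the module is Python 2 era code, as the `iteritems()` call suggests); the directory tree here is invented.

```python
import os

tree = {('.',): ('__init__.py', 'test_example.py'),
        ('.', '__init__.py'): None,
        ('.', 'test_example.py'): None}

# Patch the os functions; in the test suite PreserveOs.tearDown() restores them.
os.listdir, os.path.isfile, os.path.isdir = mock_directory_tree(tree)

print(os.listdir('.'))                                        # ('__init__.py', 'test_example.py')
print(os.path.isfile(os.path.join('.', 'test_example.py')))   # True
print(os.path.isdir(os.path.join('.', 'test_example.py')))    # False
```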
|
var/spack/repos/builtin/packages/perl-ipc-run/package.py | adrianjhpc/spack | 2 | 8105 | <reponame>adrianjhpc/spack
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlIpcRun(PerlPackage):
"""IPC::Run allows you to run and interact with child processes using
files, pipes, and pseudo-ttys. Both system()-style and scripted usages are
supported and may be mixed. Likewise, functional and OO API styles are both
supported and may be mixed."""
homepage = "https://metacpan.org/pod/IPC::Run"
url = "https://cpan.metacpan.org/authors/id/T/TO/TODDR/IPC-Run-20180523.0.tar.gz"
version('20180523.0', sha256='3850d7edf8a4671391c6e99bb770698e1c45da55b323b31c76310913349b6c2f')
depends_on('perl-io-tty', type=('build', 'run'))
depends_on('perl-readonly', type='build')
| # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlIpcRun(PerlPackage):
"""IPC::Run allows you to run and interact with child processes using
files, pipes, and pseudo-ttys. Both system()-style and scripted usages are
supported and may be mixed. Likewise, functional and OO API styles are both
supported and may be mixed."""
homepage = "https://metacpan.org/pod/IPC::Run"
url = "https://cpan.metacpan.org/authors/id/T/TO/TODDR/IPC-Run-20180523.0.tar.gz"
version('20180523.0', sha256='3850d7edf8a4671391c6e99bb770698e1c45da55b323b31c76310913349b6c2f')
depends_on('perl-io-tty', type=('build', 'run'))
depends_on('perl-readonly', type='build') | en | 0.840465 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) IPC::Run allows you to run and interact with child processes using files, pipes, and pseudo-ttys. Both system()-style and scripted usages are supported and may be mixed. Likewise, functional and OO API styles are both supported and may be mixed. | 1.274021 | 1 |
tests/test_parser_create_site_users.py | WillAyd/tabcmd | 0 | 8106 | import sys
import unittest
try:
from unittest import mock
except ImportError:
import mock
import argparse
from tabcmd.parsers.create_site_users_parser import CreateSiteUsersParser
from .common_setup import *
commandname = 'createsiteusers'
class CreateSiteUsersParserTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser_under_test, manager, mock_command = initialize_test_pieces(commandname)
CreateSiteUsersParser.create_site_user_parser(manager, mock_command)
def test_create_site_users_parser_users_file(self):
with mock.patch('builtins.open', mock.mock_open(read_data='test')) as open_file:
mock_args = [commandname, "users.csv"]
args = self.parser_under_test.parse_args(mock_args)
open_file.assert_called_with('users.csv', 'r', -1, None, None)
def test_create_site_user_parser_missing_arguments(self):
mock_args = [commandname]
with self.assertRaises(SystemExit):
args = self.parser_under_test.parse_args(mock_args)
def test_create_site_user_parser_role(self):
with mock.patch('builtins.open', mock.mock_open(read_data='test')):
mock_args = [commandname, "users.csv", '--site', 'site-name']
args = self.parser_under_test.parse_args(mock_args)
assert args.site == 'site-name', args
| import sys
import unittest
try:
from unittest import mock
except ImportError:
import mock
import argparse
from tabcmd.parsers.create_site_users_parser import CreateSiteUsersParser
from .common_setup import *
commandname = 'createsiteusers'
class CreateSiteUsersParserTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser_under_test, manager, mock_command = initialize_test_pieces(commandname)
CreateSiteUsersParser.create_site_user_parser(manager, mock_command)
def test_create_site_users_parser_users_file(self):
with mock.patch('builtins.open', mock.mock_open(read_data='test')) as open_file:
mock_args = [commandname, "users.csv"]
args = self.parser_under_test.parse_args(mock_args)
open_file.assert_called_with('users.csv', 'r', -1, None, None)
def test_create_site_user_parser_missing_arguments(self):
mock_args = [commandname]
with self.assertRaises(SystemExit):
args = self.parser_under_test.parse_args(mock_args)
def test_create_site_user_parser_role(self):
with mock.patch('builtins.open', mock.mock_open(read_data='test')):
mock_args = [commandname, "users.csv", '--site', 'site-name']
args = self.parser_under_test.parse_args(mock_args)
assert args.site == 'site-name', args
| none | 1 | 2.871102 | 3 |
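The tests above rely on `mock.mock_open` to fake the users file; a standalone illustration of that pattern, independent of tabcmd:

```python
from unittest import mock

with mock.patch('builtins.open', mock.mock_open(read_data='test')) as open_file:
    with open('users.csv', 'r') as handle:
        assert handle.read() == 'test'    # no real file is touched
    open_file.assert_called_with('users.csv', 'r')
```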
|
secretsmanager_env.py | iarlyy/secretsmanager-env | 1 | 8107 | <filename>secretsmanager_env.py
#!/usr/bin/env python
import argparse
import json
import os
import boto3
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''\
Output following the defined format.
Options are:
dotenv - dotenv style [default]
export - shell export style
stdout - secret plain value style'''
)
parser.add_argument(
'--output',
default='dotenv',
choices=['stdout', 'dotenv', 'export'],
)
args = parser.parse_args()
try:
secret_id = os.environ.get("ENV_SECRET_NAME")
secretsmanager = boto3.client('secretsmanager')
secret_values = json.loads(secretsmanager.get_secret_value(SecretId=secret_id)['SecretString'])
except:
print('Error getting secret')
raise
if args.output == 'export':
prefix = 'export '
else:
prefix = ''
if args.output != 'stdout':
for envvar in secret_values:
print(prefix+envvar+"=$'"+secret_values[envvar].replace('\\n', '\n')+"'")
else:
print(json.dumps(secret_values, indent=2, sort_keys=True))
| <filename>secretsmanager_env.py
#!/usr/bin/env python
import argparse
import json
import os
import boto3
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''\
Output following the defined format.
Options are:
dotenv - dotenv style [default]
export - shell export style
stdout - secret plain value style'''
)
parser.add_argument(
'--output',
default='dotenv',
choices=['stdout', 'dotenv', 'export'],
)
args = parser.parse_args()
try:
secret_id = os.environ.get("ENV_SECRET_NAME")
secretsmanager = boto3.client('secretsmanager')
secret_values = json.loads(secretsmanager.get_secret_value(SecretId=secret_id)['SecretString'])
except:
print('Error getting secret')
raise
if args.output == 'export':
prefix = 'export '
else:
prefix = ''
if args.output != 'stdout':
for envvar in secret_values:
print(prefix+envvar+"=$'"+secret_values[envvar].replace('\\n', '\n')+"'")
else:
print(json.dumps(secret_values, indent=2, sort_keys=True))
| en | 0.279901 | #!/usr/bin/env python \ Output following the defined format. Options are: dotenv - dotenv style [default] export - shell export style stdout - secret plain value style | 2.661996 | 3 |
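A standalone sketch of just the formatting step performed above, with the Secrets Manager call replaced by a hard-coded payload; the key names and values are made up.

```python
# Pretend this dict came back from Secrets Manager via json.loads().
secret_values = {"DB_HOST": "db.example.com", "PRIVATE_KEY": "line1\\nline2"}

prefix = "export "   # what --output export would prepend
for envvar in secret_values:
    print(prefix + envvar + "=$'" + secret_values[envvar].replace('\\n', '\n') + "'")
```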
109.py | juandarr/ProjectEuler | 0 | 8108 | <reponame>juandarr/ProjectEuler
"""
Finds the number of distinct ways a player can checkout a score less than 100
Author: <NAME>
"""
import math
def checkout_solutions(checkout,sequence,idx_sq,d):
'''
returns the number of solutions for a given checkout value
'''
counter = 0
for double in d:
if double>checkout:
break
res = checkout-double
if res==0:
counter +=1
continue
if res<=60:
if res in idx_sq:
index = idx_sq[res]
else:
index = len(sequence)-1
while res>sequence[index]:
index -=1
else:
index = len(sequence)-1
for idx in range(index,-1,-1):
a = sequence[idx]
if a==res:
counter+=1
continue
for idx2 in range(idx,-1,-1):
if a+sequence[idx2]==res:
counter +=1
elif a+sequence[idx2]<res:
break
return counter
def darts_checkout(limit_value):
s = [i for i in range(1,21)]+[25]
d = [2*i for i in range(1,21)]+[50]
t = [3*i for i in range(1,21)]
sequence = sorted(s+d+t)
idx_sq = {}
for idx in range(len(sequence)-1):
if sequence[idx]!=sequence[idx+1]:
idx_sq[sequence[idx]]=idx
idx_sq[sequence[-1]]=len(sequence)-1
n = limit_value
total = 0
for checkout in range(1,limit_value+1):
total += checkout_solutions(checkout,sequence,idx_sq,d)
return total
if __name__ == "__main__":
limit_value=99
print('The number of distinct ways a player can checkout a score less than 100 is {0}'.format(darts_checkout(limit_value))) | """
Finds the number of distinct ways a player can checkout a score less than 100
Author: <NAME>
"""
import math
def checkout_solutions(checkout,sequence,idx_sq,d):
'''
returns the number of solutions for a given checkout value
'''
counter = 0
for double in d:
if double>checkout:
break
res = checkout-double
if res==0:
counter +=1
continue
if res<=60:
if res in idx_sq:
index = idx_sq[res]
else:
index = len(sequence)-1
while res>sequence[index]:
index -=1
else:
index = len(sequence)-1
for idx in range(index,-1,-1):
a = sequence[idx]
if a==res:
counter+=1
continue
for idx2 in range(idx,-1,-1):
if a+sequence[idx2]==res:
counter +=1
elif a+sequence[idx2]<res:
break
return counter
def darts_checkout(limit_value):
s = [i for i in range(1,21)]+[25]
d = [2*i for i in range(1,21)]+[50]
t = [3*i for i in range(1,21)]
sequence = sorted(s+d+t)
idx_sq = {}
for idx in range(len(sequence)-1):
if sequence[idx]!=sequence[idx+1]:
idx_sq[sequence[idx]]=idx
idx_sq[sequence[-1]]=len(sequence)-1
n = limit_value
total = 0
for checkout in range(1,limit_value+1):
total += checkout_solutions(checkout,sequence,idx_sq,d)
return total
if __name__ == "__main__":
limit_value=99
print('The number of distinct ways a player can checkout a score less than 100 is {0}'.format(darts_checkout(limit_value))) | en | 0.781295 | Finds the number of distinct ways a player can checkout a score less than 100 Author: <NAME> returns the number of solution for a given checkout value | 3.546687 | 4 |
src/tevatron/tevax/loss.py | vjeronymo2/tevatron | 95 | 8109 | <filename>src/tevatron/tevax/loss.py
import jax.numpy as jnp
from jax import lax
import optax
import chex
def _onehot(labels: chex.Array, num_classes: int) -> chex.Array:
x = labels[..., None] == jnp.arange(num_classes).reshape((1,) * labels.ndim + (-1,))
x = lax.select(x, jnp.ones(x.shape), jnp.zeros(x.shape))
return x.astype(jnp.float32)
def p_contrastive_loss(ss: chex.Array, tt: chex.Array, axis: str = 'device') -> chex.Array:
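    # In-batch contrastive loss across devices: gather the target vectors from every
    # shard along the named axis, score each local query against the full gathered set,
    # and use each query's shard-offset target position as its cross-entropy label.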
per_shard_targets = tt.shape[0]
per_sample_targets = int(tt.shape[0] / ss.shape[0])
labels = jnp.arange(0, per_shard_targets, per_sample_targets) + per_shard_targets * lax.axis_index(axis)
tt = lax.all_gather(tt, axis).reshape((-1, ss.shape[-1]))
scores = jnp.dot(ss, jnp.transpose(tt))
return optax.softmax_cross_entropy(scores, _onehot(labels, scores.shape[-1]))
| <filename>src/tevatron/tevax/loss.py
import jax.numpy as jnp
from jax import lax
import optax
import chex
def _onehot(labels: chex.Array, num_classes: int) -> chex.Array:
x = labels[..., None] == jnp.arange(num_classes).reshape((1,) * labels.ndim + (-1,))
x = lax.select(x, jnp.ones(x.shape), jnp.zeros(x.shape))
return x.astype(jnp.float32)
def p_contrastive_loss(ss: chex.Array, tt: chex.Array, axis: str = 'device') -> chex.Array:
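    # In-batch contrastive loss across devices: gather the target vectors from every
    # shard along the named axis, score each local query against the full gathered set,
    # and use each query's shard-offset target position as its cross-entropy label.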
per_shard_targets = tt.shape[0]
per_sample_targets = int(tt.shape[0] / ss.shape[0])
labels = jnp.arange(0, per_shard_targets, per_sample_targets) + per_shard_targets * lax.axis_index(axis)
tt = lax.all_gather(tt, axis).reshape((-1, ss.shape[-1]))
scores = jnp.dot(ss, jnp.transpose(tt))
return optax.softmax_cross_entropy(scores, _onehot(labels, scores.shape[-1]))
| none | 1 | 2.025411 | 2 |
|
setup.py | kinnala/gammy | 0 | 8110 | import os
from setuptools import setup, find_packages
import versioneer
if __name__ == "__main__":
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
meta = {}
base_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(base_dir, 'gammy', '_meta.py')) as fp:
exec(fp.read(), meta)
setup(
name = "gammy",
version = versioneer.get_version(),
author = meta["__author__"],
author_email = meta["__contact__"],
description = "Generalized additive models with a Bayesian twist",
url = "https://github.com/malmgrek/Gammy",
cmdclass = versioneer.get_cmdclass(),
packages = find_packages(),
install_requires = [
"attrs",
"bayespy",
"h5py",
"matplotlib",
"numpy",
"scipy"
],
extras_require = {
"dev": [
"versioneer",
"pytest",
"hypothesis",
],
},
keywords = [
"Statistical modeling",
"Bayesian statistics",
"Machine learning",
],
classifiers = [
"Programming Language :: Python :: 3 :: Only",
"Development Status :: 1 - Planning",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: {0}".format(meta["__license__"]),
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering",
],
long_description = read('README.md'),
long_description_content_type = "text/markdown",
) | import os
from setuptools import setup, find_packages
import versioneer
if __name__ == "__main__":
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
meta = {}
base_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(base_dir, 'gammy', '_meta.py')) as fp:
exec(fp.read(), meta)
setup(
name = "gammy",
version = versioneer.get_version(),
author = meta["__author__"],
author_email = meta["__contact__"],
description = "Generalized additive models with a Bayesian twist",
url = "https://github.com/malmgrek/Gammy",
cmdclass = versioneer.get_cmdclass(),
packages = find_packages(),
install_requires = [
"attrs",
"bayespy",
"h5py",
"matplotlib",
"numpy",
"scipy"
],
extras_require = {
"dev": [
"versioneer",
"pytest",
"hypothesis",
],
},
keywords = [
"Statistical modeling",
"Bayesian statistics",
"Machine learning",
],
classifiers = [
"Programming Language :: Python :: 3 :: Only",
"Development Status :: 1 - Planning",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: {0}".format(meta["__license__"]),
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering",
],
long_description = read('README.md'),
long_description_content_type = "text/markdown",
) | none | 1 | 1.437618 | 1 |
|
fast-ml/main.py | gabrielstork/fast-ml | 1 | 8111 | <gh_stars>1-10
import root
if __name__ == '__main__':
window = root.Root()
window.mainloop()
| import root
if __name__ == '__main__':
window = root.Root()
window.mainloop() | none | 1 | 1.514983 | 2 |
|
application/recommendations/__init__.py | QualiChain/qualichain_backend | 0 | 8112 | <gh_stars>0
from flask import Blueprint
recommendation_blueprint = Blueprint('recommendations', __name__)
from application.recommendations import routes | from flask import Blueprint
recommendation_blueprint = Blueprint('recommendations', __name__)
from application.recommendations import routes | none | 1 | 1.376477 | 1 |
|
predictors/scene_predictor.py | XenonLamb/higan | 83 | 8113 | # python 3.7
"""Predicts the scene category, attribute."""
import numpy as np
from PIL import Image
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from .base_predictor import BasePredictor
from .scene_wideresnet import resnet18
__all__ = ['ScenePredictor']
NUM_CATEGORIES = 365
NUM_ATTRIBUTES = 102
FEATURE_DIM = 512
class ScenePredictor(BasePredictor):
"""Defines the predictor class for scene analysis."""
def __init__(self):
super().__init__('scene')
def build(self):
self.net = resnet18(num_classes=NUM_CATEGORIES)
def load(self):
# Load category labels.
self.check_attr('category_anno_path')
self.category_name_to_idx = {}
self.category_idx_to_name = {}
with open(self.category_anno_path, 'r') as f:
for line in f:
name, idx = line.strip().split(' ')
name = name[3:].replace('/', '__')
idx = int(idx)
self.category_name_to_idx[name] = idx
self.category_idx_to_name[idx] = name
assert len(self.category_name_to_idx) == NUM_CATEGORIES
assert len(self.category_idx_to_name) == NUM_CATEGORIES
# Load attribute labels.
self.check_attr('attribute_anno_path')
self.attribute_name_to_idx = {}
self.attribute_idx_to_name = {}
with open(self.attribute_anno_path, 'r') as f:
for idx, line in enumerate(f):
name = line.strip().replace(' ', '_')
self.attribute_name_to_idx[name] = idx
self.attribute_idx_to_name[idx] = name
assert len(self.attribute_name_to_idx) == NUM_ATTRIBUTES
assert len(self.attribute_idx_to_name) == NUM_ATTRIBUTES
# Transform for input images.
self.transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Load pre-trained weights for category prediction.
checkpoint = torch.load(self.weight_path,
map_location=lambda storage, loc: storage)
state_dict = {k.replace('module.', ''): v
for k, v in checkpoint['state_dict'].items()}
self.net.load_state_dict(state_dict)
fc_weight = list(self.net.parameters())[-2].data.numpy()
fc_weight[fc_weight < 0] = 0
# Load additional weights for attribute prediction.
self.check_attr('attribute_additional_weight_path')
self.attribute_weight = np.load(self.attribute_additional_weight_path)
assert self.attribute_weight.shape == (NUM_ATTRIBUTES, FEATURE_DIM)
def _predict(self, images):
if not isinstance(images, np.ndarray):
raise ValueError(f'Images should be with type `numpy.ndarray`!')
if images.dtype != np.uint8:
raise ValueError(f'Images should be with dtype `numpy.uint8`!')
if not (len(images.shape) == 4 and
0 < images.shape[0] <= self.batch_size and
images.shape[3] == self.image_channels):
raise ValueError(f'Images should be with shape [batch_size, height '
f'width, channel], where `batch_size` no larger than '
f'{self.batch_size}, and `channel` equals to '
f'{self.image_channels}!\n'
f'But {images.shape} received!')
xs = [self.transform(Image.fromarray(img)).unsqueeze(0) for img in images]
xs = torch.cat(xs, dim=0).to(self.run_device)
logits, features = self.net(xs)
category_scores = self.get_value(F.softmax(logits, dim=1))
features = self.get_value(features).squeeze(axis=(2, 3))
attribute_scores = features.dot(self.attribute_weight.T)
assert (len(category_scores.shape) == 2 and
category_scores.shape[1] == NUM_CATEGORIES)
assert (len(attribute_scores.shape) == 2 and
attribute_scores.shape[1] == NUM_ATTRIBUTES)
results = {
'category': category_scores,
'attribute': attribute_scores,
}
if self.use_cuda:
torch.cuda.empty_cache()
return results
def predict(self, images, **kwargs):
return self.batch_run(images, self._predict)
| # python 3.7
"""Predicts the scene category, attribute."""
import numpy as np
from PIL import Image
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from .base_predictor import BasePredictor
from .scene_wideresnet import resnet18
__all__ = ['ScenePredictor']
NUM_CATEGORIES = 365
NUM_ATTRIBUTES = 102
FEATURE_DIM = 512
class ScenePredictor(BasePredictor):
"""Defines the predictor class for scene analysis."""
def __init__(self):
super().__init__('scene')
def build(self):
self.net = resnet18(num_classes=NUM_CATEGORIES)
def load(self):
# Load category labels.
self.check_attr('category_anno_path')
self.category_name_to_idx = {}
self.category_idx_to_name = {}
with open(self.category_anno_path, 'r') as f:
for line in f:
name, idx = line.strip().split(' ')
name = name[3:].replace('/', '__')
idx = int(idx)
self.category_name_to_idx[name] = idx
self.category_idx_to_name[idx] = name
assert len(self.category_name_to_idx) == NUM_CATEGORIES
assert len(self.category_idx_to_name) == NUM_CATEGORIES
# Load attribute labels.
self.check_attr('attribute_anno_path')
self.attribute_name_to_idx = {}
self.attribute_idx_to_name = {}
with open(self.attribute_anno_path, 'r') as f:
for idx, line in enumerate(f):
name = line.strip().replace(' ', '_')
self.attribute_name_to_idx[name] = idx
self.attribute_idx_to_name[idx] = name
assert len(self.attribute_name_to_idx) == NUM_ATTRIBUTES
assert len(self.attribute_idx_to_name) == NUM_ATTRIBUTES
# Transform for input images.
self.transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Load pre-trained weights for category prediction.
checkpoint = torch.load(self.weight_path,
map_location=lambda storage, loc: storage)
state_dict = {k.replace('module.', ''): v
for k, v in checkpoint['state_dict'].items()}
self.net.load_state_dict(state_dict)
fc_weight = list(self.net.parameters())[-2].data.numpy()
fc_weight[fc_weight < 0] = 0
# Load additional weights for attribute prediction.
self.check_attr('attribute_additional_weight_path')
self.attribute_weight = np.load(self.attribute_additional_weight_path)
assert self.attribute_weight.shape == (NUM_ATTRIBUTES, FEATURE_DIM)
def _predict(self, images):
if not isinstance(images, np.ndarray):
raise ValueError(f'Images should be with type `numpy.ndarray`!')
if images.dtype != np.uint8:
raise ValueError(f'Images should be with dtype `numpy.uint8`!')
if not (len(images.shape) == 4 and
0 < images.shape[0] <= self.batch_size and
images.shape[3] == self.image_channels):
raise ValueError(f'Images should be with shape [batch_size, height '
f'width, channel], where `batch_size` no larger than '
f'{self.batch_size}, and `channel` equals to '
f'{self.image_channels}!\n'
f'But {images.shape} received!')
xs = [self.transform(Image.fromarray(img)).unsqueeze(0) for img in images]
xs = torch.cat(xs, dim=0).to(self.run_device)
logits, features = self.net(xs)
category_scores = self.get_value(F.softmax(logits, dim=1))
features = self.get_value(features).squeeze(axis=(2, 3))
attribute_scores = features.dot(self.attribute_weight.T)
assert (len(category_scores.shape) == 2 and
category_scores.shape[1] == NUM_CATEGORIES)
assert (len(attribute_scores.shape) == 2 and
attribute_scores.shape[1] == NUM_ATTRIBUTES)
results = {
'category': category_scores,
'attribute': attribute_scores,
}
if self.use_cuda:
torch.cuda.empty_cache()
return results
def predict(self, images, **kwargs):
return self.batch_run(images, self._predict)
| en | 0.6677 | # python 3.7 Predicts the scene category, attribute. Defines the predictor class for scene analysis. # Load category labels. # Load attribute labels. # Transform for input images. # Load pre-trained weights for category prediction. # Load additional weights for attribute prediction. | 2.634934 | 3 |
python_test.py | jackKiZhu/mypython | 0 | 8114 | <gh_stars>0
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "mysql://root:[email protected]:3306/python_github"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_name = db.Column(db.String(64), unique=True)
user_password = db.Column(db.String(32))
def __repr__(self):
return "用户id:%s 用户名:%s" % (self.id, self.user_name)
@app.route("/", methods=["post", "get"])
def index():
index_meg = ""
if request.method == "POST":
user_name = request.form.get("user_name", "")
user_pwd = request.form.get("user_pwd", "")
if not all([user_name, user_pwd]):
index_meg = "请正确输入信息"
else:
print(request.get_data())
user_name_is_exits = User.query.filter(User.user_name == user_name).first()
if user_name_is_exits:
index_meg = "用户名已存在"
else:
user_obj = User(user_name=user_name, user_password=<PASSWORD>)
db.session.add(user_obj)
db.session.commit()
index_meg = "注册成功"
print("注册成功")
# user_name = request.args.get("user_name", "")
# user_pwd = request.args.get("user_pwd", "")
# user_is_login = User.query.filter_by(user_name=user_name, user_password=<PASSWORD>).first()
# if user_is_login:
# index_meg = "登陆成功"
# print("登陆成功")
# return render_template("login_ok.html", index_meg=index_meg)
# else:
# # index_meg = "登陆失败"
# print("登陆失败")
return render_template("index.html", index_meg=index_meg)
if __name__ == "__main__":
db.drop_all()
db.create_all()
app.run(debug=True)
| from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "mysql://root:[email protected]:3306/python_github"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_name = db.Column(db.String(64), unique=True)
user_password = db.Column(db.String(32))
def __repr__(self):
return "用户id:%s 用户名:%s" % (self.id, self.user_name)
@app.route("/", methods=["post", "get"])
def index():
index_meg = ""
if request.method == "POST":
user_name = request.form.get("user_name", "")
user_pwd = request.form.get("user_pwd", "")
if not all([user_name, user_pwd]):
index_meg = "请正确输入信息"
else:
print(request.get_data())
user_name_is_exits = User.query.filter(User.user_name == user_name).first()
if user_name_is_exits:
index_meg = "用户名已存在"
else:
user_obj = User(user_name=user_name, user_password=<PASSWORD>)
db.session.add(user_obj)
db.session.commit()
index_meg = "注册成功"
print("注册成功")
# user_name = request.args.get("user_name", "")
# user_pwd = request.args.get("user_pwd", "")
# user_is_login = User.query.filter_by(user_name=user_name, user_password=<PASSWORD>).first()
# if user_is_login:
# index_meg = "登陆成功"
# print("登陆成功")
# return render_template("login_ok.html", index_meg=index_meg)
# else:
# # index_meg = "登陆失败"
# print("登陆失败")
return render_template("index.html", index_meg=index_meg)
if __name__ == "__main__":
db.drop_all()
db.create_all()
app.run(debug=True) | en | 0.158915 | # user_name = request.args.get("user_name", "") # user_pwd = request.args.get("user_pwd", "") # user_is_login = User.query.filter_by(user_name=user_name, user_password=<PASSWORD>).first() # if user_is_login: # index_meg = "登陆成功" # print("登陆成功") # return render_template("login_ok.html", index_meg=index_meg) # else: # # index_meg = "登陆失败" # print("登陆失败") | 2.72872 | 3 |
src/etc/gec/3.py | iml1111/algorithm-study | 0 | 8115 | from collections import deque
def solution(N, bus_stop):
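    # Multi-source BFS: seed the queue with every bus stop at distance 0, then relax
    # 4-neighbours so answer[x][y] becomes the minimum number of grid steps from any
    # stop to cell (x, y); 1300 acts as the "not yet reached" sentinel.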
answer = [[1300 for _ in range(N)] for _ in range(N)]
bus_stop = [(x-1, y-1) for x,y in bus_stop]
q = deque(bus_stop)
for x,y in bus_stop:
answer[x][y] = 0
while q:
x, y = q.popleft()
for nx, ny in ((x-1, y), (x+1, y), (x, y+1), (x, y-1)):
if (
0 <= nx < N and 0 <= ny < N
and answer[nx][ny] > answer[x][y]
):
answer[nx][ny] = answer[x][y] + 1
q.append((nx, ny))
return answer
if __name__ == '__main__':
print(solution(
3, [[1,2],[3,3]],
)) | from collections import deque
def solution(N, bus_stop):
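    # Multi-source BFS: seed the queue with every bus stop at distance 0, then relax
    # 4-neighbours so answer[x][y] becomes the minimum number of grid steps from any
    # stop to cell (x, y); 1300 acts as the "not yet reached" sentinel.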
answer = [[1300 for _ in range(N)] for _ in range(N)]
bus_stop = [(x-1, y-1) for x,y in bus_stop]
q = deque(bus_stop)
for x,y in bus_stop:
answer[x][y] = 0
while q:
x, y = q.popleft()
for nx, ny in ((x-1, y), (x+1, y), (x, y+1), (x, y-1)):
if (
0 <= nx < N and 0 <= ny < N
and answer[nx][ny] > answer[x][y]
):
answer[nx][ny] = answer[x][y] + 1
q.append((nx, ny))
return answer
if __name__ == '__main__':
print(solution(
3, [[1,2],[3,3]],
)) | none | 1 | 3.29523 | 3 |
|
python/tree/0103_binary_tree_zigzag_level_order_traversal.py | linshaoyong/leetcode | 6 | 8116 | <gh_stars>1-10
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def zigzagLevelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
a = [root]
b = []
c = []
r = [[root.val]]
i = 1
while True:
for n in a:
if n.left:
b.append(n.left)
c.append(n.left.val)
if n.right:
b.append(n.right)
c.append(n.right.val)
if not b:
break
else:
a = b
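                # reverse the values collected on alternating levels to produce the zigzag order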
if i & 1 == 1:
c.reverse()
r.append(c)
b = []
c = []
i += 1
return r
def test_zigzag_level_order():
a = TreeNode(3)
b = TreeNode(9)
c = TreeNode(20)
d = TreeNode(15)
e = TreeNode(7)
a.left = b
a.right = c
c.left = d
c.right = e
assert Solution().zigzagLevelOrder(a) == [
[3],
[20, 9],
[15, 7]
]
| class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def zigzagLevelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
a = [root]
b = []
c = []
r = [[root.val]]
i = 1
while True:
for n in a:
if n.left:
b.append(n.left)
c.append(n.left.val)
if n.right:
b.append(n.right)
c.append(n.right.val)
if not b:
break
else:
a = b
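                # reverse the values collected on alternating levels to produce the zigzag order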
if i & 1 == 1:
c.reverse()
r.append(c)
b = []
c = []
i += 1
return r
def test_zigzag_level_order():
a = TreeNode(3)
b = TreeNode(9)
c = TreeNode(20)
d = TreeNode(15)
e = TreeNode(7)
a.left = b
a.right = c
c.left = d
c.right = e
assert Solution().zigzagLevelOrder(a) == [
[3],
[20, 9],
[15, 7]
] | en | 0.171152 | :type root: TreeNode :rtype: List[List[int]] | 3.713916 | 4 |
plaso/parsers/winreg_plugins/usbstor.py | berggren/plaso | 2 | 8117 | <filename>plaso/parsers/winreg_plugins/usbstor.py
# -*- coding: utf-8 -*-
"""File containing a Windows Registry plugin to parse the USBStor key."""
from __future__ import unicode_literals
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import logger
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
class USBStorEventData(events.EventData):
"""USBStor event data attribute container.
Attributes:
device_type (str): type of USB device.
display_name (str): display name of the USB device.
key_path (str): Windows Registry key path.
parent_id_prefix (str): parent identifier prefix of the USB device.
product (str): product of the USB device.
serial (str): serial number of the USB device.
revision (str): revision number of the USB device.
subkey_name (str): name of the Windows Registry subkey.
vendor (str): vendor of the USB device.
"""
DATA_TYPE = 'windows:registry:usbstor'
def __init__(self):
"""Initializes event data."""
super(USBStorEventData, self).__init__(data_type=self.DATA_TYPE)
self.device_type = None
self.display_name = None
self.key_path = None
self.parent_id_prefix = None
self.product = None
self.revision = None
self.serial = None
# TODO: rename subkey_name to something that closer matches its purpose.
self.subkey_name = None
self.vendor = None
class USBStorPlugin(interface.WindowsRegistryPlugin):
"""USBStor key plugin.
Also see:
http://www.forensicswiki.org/wiki/USB_History_Viewing
"""
NAME = 'windows_usbstor_devices'
DESCRIPTION = 'Parser for USB Plug And Play Manager USBStor Registry Key.'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Enum\\USBSTOR')])
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
for subkey in registry_key.GetSubkeys():
subkey_name = subkey.name
name_values = subkey_name.split('&')
number_of_name_values = len(name_values)
# Normally we expect 4 fields here however that is not always the case.
if number_of_name_values != 4:
logger.warning(
'Expected 4 &-separated values in: {0:s}'.format(subkey_name))
event_data = USBStorEventData()
event_data.key_path = registry_key.path
event_data.subkey_name = subkey_name
if number_of_name_values >= 1:
event_data.device_type = name_values[0]
if number_of_name_values >= 2:
event_data.vendor = name_values[1]
if number_of_name_values >= 3:
event_data.product = name_values[2]
if number_of_name_values >= 4:
event_data.revision = name_values[3]
if subkey.number_of_subkeys == 0:
# Time last USB device of this class was first inserted.
event = time_events.DateTimeValuesEvent(
subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
continue
for device_key in subkey.GetSubkeys():
event_data.serial = device_key.name
friendly_name_value = device_key.GetValueByName('FriendlyName')
if friendly_name_value:
event_data.display_name = friendly_name_value.GetDataAsObject()
# ParentIdPrefix applies to Windows XP Only.
parent_id_prefix_value = device_key.GetValueByName('ParentIdPrefix')
if parent_id_prefix_value:
event_data.parent_id_prefix = parent_id_prefix_value.GetDataAsObject()
# Time last USB device of this class was first inserted.
event = time_events.DateTimeValuesEvent(
subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
# Win7 - Last Connection.
# Vista/XP - Time of an insert.
event = time_events.DateTimeValuesEvent(
device_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
device_parameter_key = device_key.GetSubkeyByName('Device Parameters')
if device_parameter_key:
event = time_events.DateTimeValuesEvent(
device_parameter_key.last_written_time,
definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
log_configuration_key = device_key.GetSubkeyByName('LogConf')
if log_configuration_key:
event = time_events.DateTimeValuesEvent(
log_configuration_key.last_written_time,
definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
properties_key = device_key.GetSubkeyByName('Properties')
if properties_key:
event = time_events.DateTimeValuesEvent(
properties_key.last_written_time,
definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
winreg.WinRegistryParser.RegisterPlugin(USBStorPlugin)
| <filename>plaso/parsers/winreg_plugins/usbstor.py
# -*- coding: utf-8 -*-
"""File containing a Windows Registry plugin to parse the USBStor key."""
from __future__ import unicode_literals
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import logger
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
class USBStorEventData(events.EventData):
"""USBStor event data attribute container.
Attributes:
device_type (str): type of USB device.
display_name (str): display name of the USB device.
key_path (str): Windows Registry key path.
parent_id_prefix (str): parent identifier prefix of the USB device.
product (str): product of the USB device.
serial (str): serial number of the USB device.
revision (str): revision number of the USB device.
subkey_name (str): name of the Windows Registry subkey.
vendor (str): vendor of the USB device.
"""
DATA_TYPE = 'windows:registry:usbstor'
def __init__(self):
"""Initializes event data."""
super(USBStorEventData, self).__init__(data_type=self.DATA_TYPE)
self.device_type = None
self.display_name = None
self.key_path = None
self.parent_id_prefix = None
self.product = None
self.revision = None
self.serial = None
# TODO: rename subkey_name to something that closer matches its purpose.
self.subkey_name = None
self.vendor = None
class USBStorPlugin(interface.WindowsRegistryPlugin):
"""USBStor key plugin.
Also see:
http://www.forensicswiki.org/wiki/USB_History_Viewing
"""
NAME = 'windows_usbstor_devices'
DESCRIPTION = 'Parser for USB Plug And Play Manager USBStor Registry Key.'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Enum\\USBSTOR')])
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
for subkey in registry_key.GetSubkeys():
subkey_name = subkey.name
name_values = subkey_name.split('&')
number_of_name_values = len(name_values)
# Normally we expect 4 fields here however that is not always the case.
if number_of_name_values != 4:
logger.warning(
'Expected 4 &-separated values in: {0:s}'.format(subkey_name))
event_data = USBStorEventData()
event_data.key_path = registry_key.path
event_data.subkey_name = subkey_name
if number_of_name_values >= 1:
event_data.device_type = name_values[0]
if number_of_name_values >= 2:
event_data.vendor = name_values[1]
if number_of_name_values >= 3:
event_data.product = name_values[2]
if number_of_name_values >= 4:
event_data.revision = name_values[3]
if subkey.number_of_subkeys == 0:
# Time last USB device of this class was first inserted.
event = time_events.DateTimeValuesEvent(
subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
continue
for device_key in subkey.GetSubkeys():
event_data.serial = device_key.name
friendly_name_value = device_key.GetValueByName('FriendlyName')
if friendly_name_value:
event_data.display_name = friendly_name_value.GetDataAsObject()
# ParentIdPrefix applies to Windows XP Only.
parent_id_prefix_value = device_key.GetValueByName('ParentIdPrefix')
if parent_id_prefix_value:
event_data.parent_id_prefix = parent_id_prefix_value.GetDataAsObject()
# Time last USB device of this class was first inserted.
event = time_events.DateTimeValuesEvent(
subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
# Win7 - Last Connection.
# Vista/XP - Time of an insert.
event = time_events.DateTimeValuesEvent(
device_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
device_parameter_key = device_key.GetSubkeyByName('Device Parameters')
if device_parameter_key:
event = time_events.DateTimeValuesEvent(
device_parameter_key.last_written_time,
definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
log_configuration_key = device_key.GetSubkeyByName('LogConf')
if log_configuration_key:
event = time_events.DateTimeValuesEvent(
log_configuration_key.last_written_time,
definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
properties_key = device_key.GetSubkeyByName('Properties')
if properties_key:
event = time_events.DateTimeValuesEvent(
properties_key.last_written_time,
definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
winreg.WinRegistryParser.RegisterPlugin(USBStorPlugin)
| en | 0.739674 | # -*- coding: utf-8 -*- File containing a Windows Registry plugin to parse the USBStor key. USBStor event data attribute container. Attributes: device_type (str): type of USB device. display_name (str): display name of the USB device. key_path (str): Windows Registry key path. parent_id_prefix (str): parent identifier prefix of the USB device. product (str): product of the USB device. serial (str): serial number of the USB device. revision (str): revision number of the USB device. subkey_name (str): name of the Windows Registry subkey. vendor (str): vendor of the USB device. Initializes event data. # TODO: rename subkey_name to something that closer matches its purpose. USBStor key plugin. Also see: http://www.forensicswiki.org/wiki/USB_History_Viewing Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. # Normally we expect 4 fields here however that is not always the case. # Time last USB device of this class was first inserted. # ParentIdPrefix applies to Windows XP Only. # Time last USB device of this class was first inserted. # Win7 - Last Connection. # Vista/XP - Time of an insert. | 2.25435 | 2 |
damn_vulnerable_python/evil.py | CodyKochmann/damn_vulnerable_python | 1 | 8118 | ''' static analyzers are annoying so let's rename eval '''
evil = eval
| ''' static analyzers are annoying so let's rename eval '''
evil = eval
| en | 0.786376 | static analyzers are annoying so lets rename eval | 1.16442 | 1 |
rltoolkit/rltoolkit/acm/off_policy/ddpg_acm.py | MIMUW-RL/spp-rl | 7 | 8119 | import numpy as np
import torch
from torch.nn import functional as F
from rltoolkit.acm.off_policy import AcMOffPolicy
from rltoolkit.algorithms import DDPG
from rltoolkit.algorithms.ddpg.models import Actor, Critic
class DDPG_AcM(AcMOffPolicy, DDPG):
def __init__(
self, unbiased_update: bool = False, custom_loss: float = 0.0,
custom_loss_target: float = 0.0, custom_loss_lr: float = 0.0001,
refill_buffer: bool = False,
lagrangian_custom_loss: bool = False, separate_custom_loss: bool = False,
cw_cl_targets: list = None, custom_loss_target_decay: int = None,
custom_loss_target_dfactor: float = None,
*args, **kwargs,
):
f"""DDPG with AcM class
Args:
unbiased_update (bool, optional): Use next_obs as action for update.
Defaults to { False }.
            refill_buffer (bool, optional): whether the buffer should be refilled with new observations when it is full.
Defaults to {False}
"""
super().__init__(*args, **kwargs)
self.unbiased_update = unbiased_update
self.actor = Actor(
self.ob_dim, ac_lim=self.actor_ac_lim, ac_dim=self.actor_output_dim
)
if not self.acm_critic:
self.critic = Critic(self.ob_dim, ac_dim=self.actor_output_dim)
self.custom_loss = custom_loss
custom_loss_scaled = np.log(np.exp(custom_loss) - 1)
self.custom_loss_param = torch.tensor(custom_loss_scaled) if not separate_custom_loss else torch.Tensor([custom_loss_scaled] * self.actor_output_dim)
self.custom_loss_param.requires_grad = lagrangian_custom_loss
self.custom_loss_target = custom_loss_target
self.cw_cl_targets = cw_cl_targets
if lagrangian_custom_loss and cw_cl_targets:
self.custom_loss_target = cw_cl_targets
self.lagrangian_custom_loss = lagrangian_custom_loss
self.custom_loss_lr = custom_loss_lr
self.separate_custom_loss = separate_custom_loss
self.custom_loss_optimizer = self.opt([self.custom_loss_param], lr=custom_loss_lr)
self.refill_buffer = refill_buffer
self.custom_loss_target_decay = custom_loss_target_decay
self.custom_loss_target_dfactor = custom_loss_target_dfactor
if self.custom_loss:
self.loss["ddpg"] = 0.0
self.loss["dist"] = 0.0
if lagrangian_custom_loss:
if self.separate_custom_loss:
self.distances = []
for i in range(self.actor_output_dim):
self.loss[f"custom_loss_param/{i}"] = 0.0
else:
self.loss["custom_loss_param"] = 0.0
new_hparams = {
"hparams/unbiased_update": self.unbiased_update,
"hparams/custom_loss": self.custom_loss,
"hparams/lagrangian_cl": self.lagrangian_custom_loss,
"hparams/custom_loss_target_decay": self.custom_loss_target_decay,
"hparams/custom_loss_target_dfactor": self.custom_loss_target_dfactor,
}
if self.lagrangian_custom_loss:
if self.cw_cl_targets is None:
new_hparams["hparams/cl_target"] = self.custom_loss_target
new_hparams["hparams/cl_lr"] = self.custom_loss_lr
self.hparams_acm.update(new_hparams)
self.hparams.update(self.hparams_acm)
def noise_action(self, obs, act_noise, deterministic=False):
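        # Sample an action from the actor, add Gaussian exploration noise scaled by the
        # action limit, clip to 1.1x that limit, and optionally denormalize back to the
        # AcM observation range.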
action, _ = self._actor.act(obs, deterministic)
noise = act_noise * torch.randn(self.actor_output_dim, device=self.device)
action += noise * self.actor_ac_lim
action = np.clip(
action.cpu(), -1.1 * self.actor_ac_lim.cpu(), 1.1 * self.actor_ac_lim.cpu()
)
action = action.to(self.device)
if self.denormalize_actor_out:
action = self.replay_buffer.denormalize(action, self.acm_ob_idx)
return action
def custom_loss_target_decay_condition(self):
return(
self.custom_loss_target_decay is not None
and self.custom_loss_target_dfactor is not None
and self.iterations > 0
and self.stats_logger.frames % self.custom_loss_target_decay == 0
)
def acm_update_condition(self):
return (
self.iteration > 0
and self.acm_epochs > 0
and self.stats_logger.frames % self.acm_update_freq == 0
)
def make_unbiased_update(self):
if self.update_condition():
for _ in range(self.grad_steps):
batch = self.replay_buffer.sample_batch(
self.update_batch_size, self.device
)
obs, next_obs, _, reward, done, acm_action = batch
self.update(
obs=obs,
next_obs=next_obs,
action=next_obs,
reward=reward,
done=done,
acm_action=acm_action,
)
def make_update(self):
if self.unbiased_update:
self.make_unbiased_update()
else:
super().make_update()
if self.custom_loss_target_decay_condition():
self.custom_loss_target *= self.custom_loss_target_dfactor
            print(f"CUSTOM LOSS TARGET DECAY, CURRENT VALUE {self.custom_loss_target}")
if self.acm_update_condition():
if self.acm_update_batches:
self.update_acm_batches(self.acm_update_batches)
else:
self.update_acm(self.acm_epochs)
def collect_params_dict(self):
params_dict = super().collect_params_dict()
params_dict["acm"] = self.acm.state_dict()
return params_dict
def apply_params_dict(self, params_dict):
super().apply_params_dict(params_dict)
self.acm.load_state_dict(params_dict["acm"])
def save_model(self, save_path=None):
save_path = DDPG.save_model(self, save_path)
torch.save(self.acm.state_dict(), save_path + "_acm_model.pt")
def compute_qfunc_targ(
self, reward: torch.Tensor, next_obs: torch.Tensor, done: torch.Tensor
):
"""Compute targets for Q-functions
Args:
reward (torch.Tensor): batch of rewards
next_obs (torch.Tensor): batch of next observations
done (torch.Tensor): batch of done
Returns:
torch.Tensor: Q-function targets for the batch
"""
with torch.no_grad():
next_action, _ = self.actor_targ(next_obs)
next_action = self.replay_buffer.denormalize(next_action, self.acm_ob_idx)
if self.acm_critic:
acm_obs = torch.cat([next_obs, next_action], axis=1)
next_action = self.acm(acm_obs)
q_target = self.critic_targ(next_obs, next_action)
qfunc_target = reward + self.gamma * (1 - done) * q_target
return qfunc_target
def add_custom_loss(self, loss, action, denorm_action, next_obs):
if self.custom_loss:
self.loss["ddpg"] = loss.item()
if self.norm_closs:
next_obs = self.replay_buffer.normalize(next_obs, force=True)
else:
action = denorm_action
if not self.separate_custom_loss:
loss_dist = F.mse_loss(action, self.cut_obs(next_obs))
self.loss["dist"] = loss_dist.item()
if self.lagrangian_custom_loss:
loss += F.softplus(self.custom_loss_param) * (loss_dist - self.custom_loss_target)
else:
loss += self.custom_loss * loss_dist
if self.custom_loss_target_decay is not None:
self.loss["custom_loss_target"] = self.custom_loss_target
else:
distances = torch.mean(F.mse_loss(action, self.cut_obs(next_obs), reduction='none'), dim=0)
if self.cw_cl_targets is None:
loss += torch.sum(F.softplus(self.custom_loss_param) * (distances - self.custom_loss_target))
else:
loss += torch.sum(F.softplus(self.custom_loss_param) * (distances - torch.Tensor(self.custom_loss_target)))
self.loss["dist"] = distances.detach()
if self.debug_mode:
for j in range(distances.shape[0]):
self.loss[f"dist/cw/{j}"] = distances[j]
return loss
def compute_pi_loss(self, obs, next_obs):
action, _ = self._actor(obs)
denorm_action = self.replay_buffer.denormalize(action, self.acm_ob_idx)
if self.acm_critic:
acm_obs = torch.cat([obs, denorm_action], axis=1)
critic_action = self.acm(acm_obs)
else:
critic_action = denorm_action
loss = -self._critic(obs, critic_action).mean()
return self.add_custom_loss(loss, action, denorm_action, next_obs)
def update_custom_loss_param_loss(self):
if not self.lagrangian_custom_loss:
return
dist_loss = self.loss["dist"]
if self.cw_cl_targets is None:
loss = -F.softplus(self.custom_loss_param) * (dist_loss - self.custom_loss_target)
else:
loss = -F.softplus(self.custom_loss_param) * (dist_loss - torch.Tensor(self.custom_loss_target))
if self.separate_custom_loss:
for i in range(len(loss)):
self.loss[f"custom_loss_param/{i}"] = loss[i].item()
self.loss["dist"] = torch.mean(self.loss["dist"]).item()
loss = torch.sum(loss)
else:
self.loss["custom_loss_param"] = loss.item()
self.custom_loss_optimizer.zero_grad()
loss.backward()
self.custom_loss_optimizer.step()
def copy_offline_dataset(self, dataset, size):
"""copies the provided offlineRL dataset into the replay buffer.
for the moment assumes D4RL dataset format (a dictionary)
and copies elements one-by-one
"""
i = 0
traj = 0
while i < size:
traj += 1
done = torch.tensor(dataset['timeouts'][i] or dataset['terminals'][i])
obs = torch.tensor(dataset['observations'][i])
prev_idx = self.replay_buffer.add_obs(obs)
i += 1
ep_len = 0
while(not done and i < size):
nextobs = torch.tensor(dataset['observations'][i])
rew = torch.tensor( dataset['rewards'][i] )
done = torch.tensor( dataset['timeouts'][i] or dataset['terminals'][i] )
action = torch.tensor( dataset['actions'][i] )
end = torch.tensor( dataset['terminals'][i] )
next_idx = self.replay_buffer.add_obs(nextobs)
self.replay_buffer.add_timestep(
prev_idx, next_idx, nextobs, rew, done, end
)
self.replay_buffer.add_acm_action(action)
prev_idx = next_idx
i += 1
ep_len += 1
print(f"copied offline dataset with {i} samples, contains {traj} trajectories")
#sets the internal variables according to the provided offline dataset
self.acm_pre_train_samples = i
self.buffer_size = i
self.max_frames = i
self.iterations = i / self.steps_per_epoch
#updates std/dev/min/max parameters of the dataset
self.update_obs_mean_std(self.replay_buffer)
    def collect_batch_and_train(self, steps_per_epoch: int, *args, **kwargs):
        """SPP variant of rollout and sample collection. If there are enough samples
        in the replay buffer, use the existing samples to perform the actor/critic update;
        otherwise generate new samples until steps_per_epoch steps have been added
        to the replay buffer.
Args:
steps_per_epoch (int): number of samples to collect and train
*args, **kwargs: arguments for make_update
"""
collected = 0
while collected < steps_per_epoch:
# important part,
# when the replay buffer is filled stop generating new frames, just use the existing buffer
# such that the number of used experience in learning is counted correctly
if (self.stats_logger.frames >= self.buffer_size - self.acm_pre_train_samples) and not self.refill_buffer:
self.stats_logger.frames += 1
collected += 1
self.make_update(*args, **kwargs)
continue
self.stats_logger.rollouts += 1
obs = self.env.reset()
# end - end of the episode from perspective of the simulation
# done - end of the episode from perspective of the model
end = False
obs = self.process_obs(obs)
prev_idx = self.replay_buffer.add_obs(obs)
ep_len = 0
while not end:
obs = self.replay_buffer.normalize(obs)
if (self.stats_logger.frames > self.acm_pre_train_samples) and (self.stats_logger.frames <= self.acm_pre_train_samples + self.random_frames):
action = self.initial_act(obs)
else:
action = self.noise_action(obs, self.act_noise)
action_proc = self.process_action(action, obs)
prev_obs = obs
obs, rew, done, _ = self.env.step(action_proc)
ep_len += 1
end = True if ep_len == self.max_ep_len else done
done = False if ep_len == self.max_ep_len else done
obs = self.process_obs(obs)
if self.next_obs_diff is not None:
obs = self.compute_next_obs_diff(prev_obs, obs)
next_idx = self.replay_buffer.add_obs(obs)
self.replay_buffer.add_timestep(
prev_idx, next_idx, action, rew, done, end
)
prev_idx = next_idx
self.stats_logger.frames += 1
collected += 1
self.make_update(*args, **kwargs)
def update(
self,
obs: torch.Tensor,
next_obs: torch.Tensor,
action: torch.Tensor,
reward: torch.Tensor,
done: torch.Tensor,
acm_action: torch.Tensor,
):
"""DDPG update step
Args:
obs (torch.Tensor): observations tensor
next_obs (torch.Tensor): next observations tensor
action (torch.Tensor): actions tensor
reward (torch.Tensor): rewards tensor
done (torch.Tensor): dones tensor
acm_action (torch.Tensor): tensor of acm actions
"""
for param in self.acm.parameters():
param.requires_grad = False
if self.acm_critic:
action = acm_action
y = self.compute_qfunc_targ(reward, next_obs, done)
# Update Q-function by one step
y_q = self._critic(obs, action)
loss_q = F.mse_loss(y_q, y)
self.loss["critic"] = loss_q.item()
self.critic_optimizer.zero_grad()
loss_q.backward()
self.critic_optimizer.step()
# Update policy by one step
self._critic.eval()
loss = self.compute_pi_loss(obs, next_obs)
self.loss["actor"] = loss.item()
self.actor_optimizer.zero_grad()
loss.backward()
self.actor_optimizer.step()
#update temperature of Lagrangian optimization obj
self.update_custom_loss_param_loss()
# Update target networks
self.update_target_nets()
self._critic.train()
for param in self.acm.parameters():
param.requires_grad = True
def add_tensorboard_logs(self, buffer, done):
super().add_tensorboard_logs(buffer, done)
if self.lagrangian_custom_loss:
self.tensorboard_writer.log_custom_loss_param(
self.iteration, self.custom_loss_param)
if __name__ == "__main__":
#with torch.cuda.device(0):
model = DDPG_AcM(
# unbiased_update=True,
# custom_loss=True,
# acm_update_batches=50,
# denormalize_actor_out=True,
env_name="Pendulum-v0",
buffer_size=50000,
act_noise=0.05,
iterations=100,
gamma=0.99,
steps_per_epoch=200,
stats_freq=5,
test_episodes=3,
custom_loss=1,
lagrangian_custom_loss=False,
# tensorboard_dir="logs_ddpg",
# tensorboard_comment="",
acm_update_freq=200,
acm_epochs=1,
acm_pre_train_epochs=10,
acm_pre_train_samples=10000,
use_gpu=False,
render=False,
)
model.pre_train()
model.train()
| import numpy as np
import torch
from torch.nn import functional as F
from rltoolkit.acm.off_policy import AcMOffPolicy
from rltoolkit.algorithms import DDPG
from rltoolkit.algorithms.ddpg.models import Actor, Critic
class DDPG_AcM(AcMOffPolicy, DDPG):
def __init__(
self, unbiased_update: bool = False, custom_loss: float = 0.0,
custom_loss_target: float = 0.0, custom_loss_lr: float = 0.0001,
refill_buffer: bool = False,
lagrangian_custom_loss: bool = False, separate_custom_loss: bool = False,
cw_cl_targets: list = None, custom_loss_target_decay: int = None,
custom_loss_target_dfactor: float = None,
*args, **kwargs,
):
f"""DDPG with AcM class
Args:
unbiased_update (bool, optional): Use next_obs as action for update.
Defaults to { False }.
            refill_buffer (bool, optional): whether the buffer should be refilled with new observations when it is full.
Defaults to {False}
"""
super().__init__(*args, **kwargs)
self.unbiased_update = unbiased_update
self.actor = Actor(
self.ob_dim, ac_lim=self.actor_ac_lim, ac_dim=self.actor_output_dim
)
if not self.acm_critic:
self.critic = Critic(self.ob_dim, ac_dim=self.actor_output_dim)
self.custom_loss = custom_loss
custom_loss_scaled = np.log(np.exp(custom_loss) - 1)
self.custom_loss_param = torch.tensor(custom_loss_scaled) if not separate_custom_loss else torch.Tensor([custom_loss_scaled] * self.actor_output_dim)
self.custom_loss_param.requires_grad = lagrangian_custom_loss
self.custom_loss_target = custom_loss_target
self.cw_cl_targets = cw_cl_targets
if lagrangian_custom_loss and cw_cl_targets:
self.custom_loss_target = cw_cl_targets
self.lagrangian_custom_loss = lagrangian_custom_loss
self.custom_loss_lr = custom_loss_lr
self.separate_custom_loss = separate_custom_loss
self.custom_loss_optimizer = self.opt([self.custom_loss_param], lr=custom_loss_lr)
self.refill_buffer = refill_buffer
self.custom_loss_target_decay = custom_loss_target_decay
self.custom_loss_target_dfactor = custom_loss_target_dfactor
if self.custom_loss:
self.loss["ddpg"] = 0.0
self.loss["dist"] = 0.0
if lagrangian_custom_loss:
if self.separate_custom_loss:
self.distances = []
for i in range(self.actor_output_dim):
self.loss[f"custom_loss_param/{i}"] = 0.0
else:
self.loss["custom_loss_param"] = 0.0
new_hparams = {
"hparams/unbiased_update": self.unbiased_update,
"hparams/custom_loss": self.custom_loss,
"hparams/lagrangian_cl": self.lagrangian_custom_loss,
"hparams/custom_loss_target_decay": self.custom_loss_target_decay,
"hparams/custom_loss_target_dfactor": self.custom_loss_target_dfactor,
}
if self.lagrangian_custom_loss:
if self.cw_cl_targets is None:
new_hparams["hparams/cl_target"] = self.custom_loss_target
new_hparams["hparams/cl_lr"] = self.custom_loss_lr
self.hparams_acm.update(new_hparams)
self.hparams.update(self.hparams_acm)
def noise_action(self, obs, act_noise, deterministic=False):
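        # Sample an action from the actor, add Gaussian exploration noise scaled by the
        # action limit, clip to 1.1x that limit, and optionally denormalize back to the
        # AcM observation range.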
action, _ = self._actor.act(obs, deterministic)
noise = act_noise * torch.randn(self.actor_output_dim, device=self.device)
action += noise * self.actor_ac_lim
action = np.clip(
action.cpu(), -1.1 * self.actor_ac_lim.cpu(), 1.1 * self.actor_ac_lim.cpu()
)
action = action.to(self.device)
if self.denormalize_actor_out:
action = self.replay_buffer.denormalize(action, self.acm_ob_idx)
return action
def custom_loss_target_decay_condition(self):
return(
self.custom_loss_target_decay is not None
and self.custom_loss_target_dfactor is not None
and self.iterations > 0
and self.stats_logger.frames % self.custom_loss_target_decay == 0
)
def acm_update_condition(self):
return (
self.iteration > 0
and self.acm_epochs > 0
and self.stats_logger.frames % self.acm_update_freq == 0
)
def make_unbiased_update(self):
if self.update_condition():
for _ in range(self.grad_steps):
batch = self.replay_buffer.sample_batch(
self.update_batch_size, self.device
)
obs, next_obs, _, reward, done, acm_action = batch
self.update(
obs=obs,
next_obs=next_obs,
action=next_obs,
reward=reward,
done=done,
acm_action=acm_action,
)
def make_update(self):
if self.unbiased_update:
self.make_unbiased_update()
else:
super().make_update()
if self.custom_loss_target_decay_condition():
self.custom_loss_target *= self.custom_loss_target_dfactor
            print(f"CUSTOM LOSS TARGET DECAY, CURRENT VALUE {self.custom_loss_target}")
if self.acm_update_condition():
if self.acm_update_batches:
self.update_acm_batches(self.acm_update_batches)
else:
self.update_acm(self.acm_epochs)
def collect_params_dict(self):
params_dict = super().collect_params_dict()
params_dict["acm"] = self.acm.state_dict()
return params_dict
def apply_params_dict(self, params_dict):
super().apply_params_dict(params_dict)
self.acm.load_state_dict(params_dict["acm"])
def save_model(self, save_path=None):
save_path = DDPG.save_model(self, save_path)
torch.save(self.acm.state_dict(), save_path + "_acm_model.pt")
def compute_qfunc_targ(
self, reward: torch.Tensor, next_obs: torch.Tensor, done: torch.Tensor
):
"""Compute targets for Q-functions
Args:
reward (torch.Tensor): batch of rewards
next_obs (torch.Tensor): batch of next observations
done (torch.Tensor): batch of done
Returns:
torch.Tensor: Q-function targets for the batch
"""
with torch.no_grad():
next_action, _ = self.actor_targ(next_obs)
next_action = self.replay_buffer.denormalize(next_action, self.acm_ob_idx)
if self.acm_critic:
acm_obs = torch.cat([next_obs, next_action], axis=1)
next_action = self.acm(acm_obs)
q_target = self.critic_targ(next_obs, next_action)
qfunc_target = reward + self.gamma * (1 - done) * q_target
return qfunc_target
def add_custom_loss(self, loss, action, denorm_action, next_obs):
if self.custom_loss:
self.loss["ddpg"] = loss.item()
if self.norm_closs:
next_obs = self.replay_buffer.normalize(next_obs, force=True)
else:
action = denorm_action
if not self.separate_custom_loss:
loss_dist = F.mse_loss(action, self.cut_obs(next_obs))
self.loss["dist"] = loss_dist.item()
if self.lagrangian_custom_loss:
loss += F.softplus(self.custom_loss_param) * (loss_dist - self.custom_loss_target)
else:
loss += self.custom_loss * loss_dist
if self.custom_loss_target_decay is not None:
self.loss["custom_loss_target"] = self.custom_loss_target
else:
distances = torch.mean(F.mse_loss(action, self.cut_obs(next_obs), reduction='none'), dim=0)
if self.cw_cl_targets is None:
loss += torch.sum(F.softplus(self.custom_loss_param) * (distances - self.custom_loss_target))
else:
loss += torch.sum(F.softplus(self.custom_loss_param) * (distances - torch.Tensor(self.custom_loss_target)))
self.loss["dist"] = distances.detach()
if self.debug_mode:
for j in range(distances.shape[0]):
self.loss[f"dist/cw/{j}"] = distances[j]
return loss
def compute_pi_loss(self, obs, next_obs):
action, _ = self._actor(obs)
denorm_action = self.replay_buffer.denormalize(action, self.acm_ob_idx)
if self.acm_critic:
acm_obs = torch.cat([obs, denorm_action], axis=1)
critic_action = self.acm(acm_obs)
else:
critic_action = denorm_action
loss = -self._critic(obs, critic_action).mean()
return self.add_custom_loss(loss, action, denorm_action, next_obs)
def update_custom_loss_param_loss(self):
if not self.lagrangian_custom_loss:
return
dist_loss = self.loss["dist"]
if self.cw_cl_targets is None:
loss = -F.softplus(self.custom_loss_param) * (dist_loss - self.custom_loss_target)
else:
loss = -F.softplus(self.custom_loss_param) * (dist_loss - torch.Tensor(self.custom_loss_target))
if self.separate_custom_loss:
for i in range(len(loss)):
self.loss[f"custom_loss_param/{i}"] = loss[i].item()
self.loss["dist"] = torch.mean(self.loss["dist"]).item()
loss = torch.sum(loss)
else:
self.loss["custom_loss_param"] = loss.item()
self.custom_loss_optimizer.zero_grad()
loss.backward()
self.custom_loss_optimizer.step()
def copy_offline_dataset(self, dataset, size):
"""copies the provided offlineRL dataset into the replay buffer.
for the moment assumes D4RL dataset format (a dictionary)
and copies elements one-by-one
"""
i = 0
traj = 0
while i < size:
traj += 1
done = torch.tensor(dataset['timeouts'][i] or dataset['terminals'][i])
obs = torch.tensor(dataset['observations'][i])
prev_idx = self.replay_buffer.add_obs(obs)
i += 1
ep_len = 0
while(not done and i < size):
nextobs = torch.tensor(dataset['observations'][i])
rew = torch.tensor( dataset['rewards'][i] )
done = torch.tensor( dataset['timeouts'][i] or dataset['terminals'][i] )
action = torch.tensor( dataset['actions'][i] )
end = torch.tensor( dataset['terminals'][i] )
next_idx = self.replay_buffer.add_obs(nextobs)
self.replay_buffer.add_timestep(
prev_idx, next_idx, nextobs, rew, done, end
)
self.replay_buffer.add_acm_action(action)
prev_idx = next_idx
i += 1
ep_len += 1
print(f"copied offline dataset with {i} samples, contains {traj} trajectories")
#sets the internal variables according to the provided offline dataset
self.acm_pre_train_samples = i
self.buffer_size = i
self.max_frames = i
self.iterations = i / self.steps_per_epoch
#updates std/dev/min/max parameters of the dataset
self.update_obs_mean_std(self.replay_buffer)
    def collect_batch_and_train(self, steps_per_epoch: int, *args, **kwargs):
        """SPP variant of rollout and sample collection. If there are enough samples
        in the replay buffer, use the existing samples to perform the actor/critic update;
        otherwise generate new samples until steps_per_epoch steps have been added
        to the replay buffer.
Args:
steps_per_epoch (int): number of samples to collect and train
*args, **kwargs: arguments for make_update
"""
collected = 0
while collected < steps_per_epoch:
# important part,
# when the replay buffer is filled stop generating new frames, just use the existing buffer
# such that the number of used experience in learning is counted correctly
if (self.stats_logger.frames >= self.buffer_size - self.acm_pre_train_samples) and not self.refill_buffer:
self.stats_logger.frames += 1
collected += 1
self.make_update(*args, **kwargs)
continue
self.stats_logger.rollouts += 1
obs = self.env.reset()
# end - end of the episode from perspective of the simulation
# done - end of the episode from perspective of the model
end = False
obs = self.process_obs(obs)
prev_idx = self.replay_buffer.add_obs(obs)
ep_len = 0
while not end:
obs = self.replay_buffer.normalize(obs)
if (self.stats_logger.frames > self.acm_pre_train_samples) and (self.stats_logger.frames <= self.acm_pre_train_samples + self.random_frames):
action = self.initial_act(obs)
else:
action = self.noise_action(obs, self.act_noise)
action_proc = self.process_action(action, obs)
prev_obs = obs
obs, rew, done, _ = self.env.step(action_proc)
ep_len += 1
end = True if ep_len == self.max_ep_len else done
done = False if ep_len == self.max_ep_len else done
obs = self.process_obs(obs)
if self.next_obs_diff is not None:
obs = self.compute_next_obs_diff(prev_obs, obs)
next_idx = self.replay_buffer.add_obs(obs)
self.replay_buffer.add_timestep(
prev_idx, next_idx, action, rew, done, end
)
prev_idx = next_idx
self.stats_logger.frames += 1
collected += 1
self.make_update(*args, **kwargs)
def update(
self,
obs: torch.Tensor,
next_obs: torch.Tensor,
action: torch.Tensor,
reward: torch.Tensor,
done: torch.Tensor,
acm_action: torch.Tensor,
):
"""DDPG update step
Args:
obs (torch.Tensor): observations tensor
next_obs (torch.Tensor): next observations tensor
action (torch.Tensor): actions tensor
reward (torch.Tensor): rewards tensor
done (torch.Tensor): dones tensor
acm_action (torch.Tensor): tensor of acm actions
"""
for param in self.acm.parameters():
param.requires_grad = False
if self.acm_critic:
action = acm_action
y = self.compute_qfunc_targ(reward, next_obs, done)
# Update Q-function by one step
y_q = self._critic(obs, action)
loss_q = F.mse_loss(y_q, y)
self.loss["critic"] = loss_q.item()
self.critic_optimizer.zero_grad()
loss_q.backward()
self.critic_optimizer.step()
# Update policy by one step
self._critic.eval()
loss = self.compute_pi_loss(obs, next_obs)
self.loss["actor"] = loss.item()
self.actor_optimizer.zero_grad()
loss.backward()
self.actor_optimizer.step()
#update temperature of Lagrangian optimization obj
self.update_custom_loss_param_loss()
# Update target networks
self.update_target_nets()
self._critic.train()
for param in self.acm.parameters():
param.requires_grad = True
def add_tensorboard_logs(self, buffer, done):
super().add_tensorboard_logs(buffer, done)
if self.lagrangian_custom_loss:
self.tensorboard_writer.log_custom_loss_param(
self.iteration, self.custom_loss_param)
if __name__ == "__main__":
#with torch.cuda.device(0):
model = DDPG_AcM(
# unbiased_update=True,
# custom_loss=True,
# acm_update_batches=50,
# denormalize_actor_out=True,
env_name="Pendulum-v0",
buffer_size=50000,
act_noise=0.05,
iterations=100,
gamma=0.99,
steps_per_epoch=200,
stats_freq=5,
test_episodes=3,
custom_loss=1,
lagrangian_custom_loss=False,
# tensorboard_dir="logs_ddpg",
# tensorboard_comment="",
acm_update_freq=200,
acm_epochs=1,
acm_pre_train_epochs=10,
acm_pre_train_samples=10000,
use_gpu=False,
render=False,
)
model.pre_train()
model.train()
| en | 0.732886 | DDPG with AcM class Args: unbiased_update (bool, optional): Use next_obs as action for update. Defaults to { False }. refill_buffer (bool, optional): if buffer should be refilled with new observations, when its full Defaults to {False} Compute targets for Q-functions Args: reward (torch.Tensor): batch of rewards next_obs (torch.Tensor): batch of next observations done (torch.Tensor): batch of done Returns: torch.Tensor: Q-function targets for the batch copies the provided offlineRL dataset into the replay buffer. for the moment assumes D4RL dataset format (a dictionary) and copies elements one-by-one #sets the internal variables according to the provided offline dataset #updates std/dev/min/max parameters of the dataset SPP variant of rollouts and collect samples if there is enough samples in replay buffer use existing samples to perform actor/critic update otherwise generate new samples till steps_per_epoch number of steps will be added to the replay buffer Args: steps_per_epoch (int): number of samples to collect and train *args, **kwargs: arguments for make_update # important part, # when the replay buffer is filled stop generating new frames, just use the existing buffer # such that the number of used experience in learning is counted correctly # end - end of the episode from perspective of the simulation # done - end of the episode from perspective of the model DDPG update step Args: obs (torch.Tensor): observations tensor next_obs (torch.Tensor): next observations tensor action (torch.Tensor): actions tensor reward (torch.Tensor): rewards tensor done (torch.Tensor): dones tensor acm_action (torch.Tensor): tensor of acm actions # Update Q-function by one step # Update policy by one step #update temperature of Lagrangian optimization obj # Update target networks #with torch.cuda.device(0): # unbiased_update=True, # custom_loss=True, # acm_update_batches=50, # denormalize_actor_out=True, # tensorboard_dir="logs_ddpg", # tensorboard_comment="", | 2.145742 | 2 |
pyroute/poi_osm.py | ftrimble/route-grower | 0 | 8120 | #!/usr/bin/python
#----------------------------------------------------------------
# OSM POI handler for pyroute
#
#------------------------------------------------------
# Copyright 2007, <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------
from xml.sax import make_parser, handler
from poi_base import *
import os
from xml.sax._exceptions import SAXParseException
import urllib
class osmPoiModule(poiModule, handler.ContentHandler):
def __init__(self, modules):
poiModule.__init__(self, modules)
self.draw = False
self.loadPOIs("all", "amenity|shop=*")
def loadPOIs(self, name, search):
filename = os.path.join(os.path.dirname(__file__),
"data", "poi_%s.osm" % name)
url = "http://www.informationfreeway.org/api/0.5/node[%s][%s]" %(search,
self.bbox())
if(not os.path.exists(filename)):
print "Downloading POIs from OSM"
urllib.urlretrieve(url, filename)
self.load(filename, os.path.join(os.path.dirname(__file__),
"Setup", "poi.txt"))
def bbox(self):
# TODO: based on location!
return "bbox=-6,48,2.5,61"
def load(self, filename, listfile):
self.filters = []
print "Loading POIs from %s" % listfile
f = open(listfile,"r")
try:
for line in f:
if(len(line) > 1):
text = line.rstrip()
name, filter = text.split('|')
group = poiGroup(name)
self.groups.append(group)
self.filters.append({'name':name,'filter':filter,'group':group})
finally:
f.close()
if(not os.path.exists(filename)):
print "Can't load %s"%filename
return
elif not os.path.getsize(filename):
print "%s is empty"%filename
self.inNode = False
parser = make_parser()
parser.setContentHandler(self)
try:
parser.parse(filename)
except SAXParseException:
print "Error while parsing file"
      #TODO: what should happen now?
def startElement(self, name, attrs):
if name == "node":
self.currentNode = { \
'lat': float(attrs.get('lat')),
'lon': float(attrs.get('lon'))}
self.inNode = True
if name == "tag" and self.inNode:
self.currentNode[attrs.get('k')] = attrs.get('v')
def endElement(self, name):
if(name == "node"):
self.storeNode(self.currentNode)
self.inNode = False
def passesFilter(self,n,f):
parts = f.split(';')
matched = True
for part in parts:
k,v = part.split('=',1)
if(n.get(k,'') != v):
matched = False
return(matched)
def storeNode(self, n):
for f in self.filters:
if(self.passesFilter(n,f['filter'])):
x = poi(n['lat'], n['lon'])
x.title = n.get('amenity','') + ': ' + n.get('name', '?')
#print "%s matches %s" % (x.title, f['name'])
f['group'].items.append(x)
def save(self):
# Default filename if none was loaded
if(self.filename == None):
self.filename = os.path.join(os.path.dirname(__file__),
"data", "poi.osm")
self.saveAs(self.filename)
def saveAs(self,filename):
if(filename == None):
return
pass
if __name__ == "__main__":
nodes = osmPoiModule(None)
nodes.sort({'valid':True,'lat':51.3,'lon':-0.2})
#nodes.report()
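# --- Editor's sketch (not part of the original file) ---
# The filter strings read from Setup/poi.txt ("Name|key=value[;key=value]") are
# matched by passesFilter() above: every key=value pair must equal the node's tag.
# The stand-alone helper below mirrors that rule so it can be tried without
# instantiating osmPoiModule (whose __init__ downloads data); the sample node
# used in the trailing comment is hypothetical.
def _filter_matches_example(node, filter_expr):
    for part in filter_expr.split(';'):
        k, v = part.split('=', 1)
        if node.get(k, '') != v:
            return False
    return True
# _filter_matches_example({'amenity': 'pub', 'name': 'Crown'}, 'amenity=pub') -> True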
pelutils/logger.py | peleiden/pelutils | 3 | 8121 |
from __future__ import annotations
import os
import traceback as tb
from collections import defaultdict
from enum import IntEnum
from functools import update_wrapper
from itertools import chain
from typing import Any, Callable, DefaultDict, Generator, Iterable, Optional
from pelutils import get_timestamp, get_repo
from .format import RichString
class LogLevels(IntEnum):
""" Logging levels by priority. Don't set any to 0, as falsiness is used in the code """
SECTION = 6
CRITICAL = 5
ERROR = 4
WARNING = 3
INFO = 2
DEBUG = 1
_STDERR_LEVELS = { LogLevels.CRITICAL, LogLevels.ERROR, LogLevels.WARNING }
# https://rich.readthedocs.io/en/stable/appendix/colors.html
_TIMESTAMP_COLOR = "#72b9e0"
_LEVEL_FORMAT = {
LogLevels.SECTION: "bright_yellow",
LogLevels.CRITICAL: "red1",
LogLevels.ERROR: "red3",
LogLevels.WARNING: "gold3",
LogLevels.INFO: "chartreuse3",
LogLevels.DEBUG: "deep_sky_blue1",
}
class _LevelManager:
"""
Used for disabling logging below a certain level
Example:
with log.level(Levels.WARNING):
log.error("This will be logged")
log.info("This will not be logged")
"""
level: LogLevels
is_active = False
def with_level(self, level: LogLevels | int) -> _LevelManager:
self.level = level
return self
def __enter__(self):
self.is_active = True
def __exit__(self, *args):
self.is_active = False
del self.level # Prevent silent failures by having level accidentally set
class _LogErrors:
"""
Used for catching exceptions with logger and logging them before reraising them
"""
def __init__(self, log):
self._log = log
def __enter__(self):
pass
def __exit__(self, et, ev, tb_):
if et and self._log._collect:
self._log.log_collected()
if et:
self._log._throw(ev, tb_)
class LoggingException(RuntimeError):
pass
class _Logger:
"""
A simple logger which creates a log file and pushes strings both to stdout and the log file
Sections, verbosity and error logging is supported
"""
_loggers: DefaultDict[str, dict[str, Any]]
_selected_logger: str
_maxlen = max(len(l.name) for l in LogLevels)
_spacing = 4 * " "
_yes = { "j", "y" }
_no = { "n" }
@property
def _logger(self) -> dict:
return self._loggers[self._selected_logger]
@property
def _fpath(self) -> str:
return self._logger["fpath"]
@property
def _default_sep(self) -> str:
return self._logger["default_sep"]
@property
def _include_micros(self) -> bool:
return self._logger["include_micros"]
@property
def _print_level(self) -> LogLevels:
return self._logger["print_level"]
@property
def _level_mgr(self) -> _LevelManager:
return self._logger["level_mgr"]
@property
def _level(self) -> LogLevels:
return self._level_mgr.level
def __init__(self):
self._log_errors = _LogErrors(self)
self._collect = False
self._collected_log: list[RichString] = list()
self._collected_print: list[RichString] = list()
self._loggers = defaultdict(dict)
self.clean()
self.configure(logger_name="print_only", print_level=LogLevels.DEBUG)
def configure(
self,
fpath: Optional[str] = None, # Path to place logger. Any missing directories are created
title: Optional[str] = None, # Title on first line of logfile
default_seperator = "\n",
include_micros = False, # Include microseconds in timestamps
log_commit = False, # Log commit of git repository
logger_name = "default", # Name of logger
append = False, # Set to True to append to old log file instead of overwriting it
print_level = LogLevels.INFO, # Highest level that will be printed. All will be logged. None for no print
):
""" Configure a logger. If not called, the logger will act like a print statement """
if logger_name in self._loggers:
raise LoggingException("Logger '%s' already exists. Did you call log.configure(...) twice?" % logger_name)
if self._collect:
raise LoggingException("Cannot configure a new logger while using collect_logs")
self._selected_logger = logger_name
self._loggers[logger_name]["fpath"] = os.path.realpath(fpath) if fpath else None
self._loggers[logger_name]["default_sep"] = default_seperator
self._loggers[logger_name]["include_micros"] = include_micros
self._loggers[logger_name]["level_mgr"] = _LevelManager()
self._loggers[logger_name]["print_level"] = print_level or len(LogLevels) + 1
if fpath is not None:
dirs = os.path.split(fpath)[0]
if dirs:
os.makedirs(dirs, exist_ok=True)
exists = os.path.exists(fpath)
with open(fpath, "a" if append else "w", encoding="utf-8") as logfile:
logfile.write("\n\n" if append and exists else "")
if title is not None:
self.section(title + "\n")
if log_commit:
repo, commit = get_repo()
if repo is not None:
self.debug(
"Executing in repository %s" % repo,
"Commit: %s\n" % commit,
)
else:
self.debug("Unable to find repository that code was executed in")
def set_logger(self, logger: str):
if logger not in self._loggers:
raise LoggingException("Logger '%s' does not exist. Available loggers: %s" % (logger, list(self._loggers)))
if self._collect:
raise LoggingException("Cannot configure a new logger while using collect_logs")
self._selected_logger = logger
def level(self, level: LogLevels):
""" Log only at given level and above. Use with a with block """
return self._level_mgr.with_level(level)
@property
def no_log(self):
""" Disable logging inside a with block """
return self._level_mgr.with_level(max(LogLevels)+1)
@property
def log_errors(self):
return self._log_errors
def __call__(self, *tolog, with_info=True, sep=None, with_print=None, level: LogLevels=LogLevels.INFO):
self._log(*tolog, level=level, with_info=with_info, sep=sep, with_print=with_print)
def _write_to_log(self, content: RichString):
if self._fpath is not None:
with open(self._fpath, "a", encoding="utf-8") as logfile:
logfile.write(f"{content}\n")
@staticmethod
def _format(s: str, format: str) -> str:
return f"[{format}]{s}[/]"
def _log(self, *tolog, level=LogLevels.INFO, with_info=True, sep=None, with_print=None):
if not self._loggers:
return
if self._level_mgr.is_active and level < self._level_mgr.level:
return
sep = sep or self._default_sep
with_print = level >= self._print_level if with_print is None else with_print
time = get_timestamp()
tolog = sep.join([str(x) for x in tolog])
time_spaces = len(time) * " "
level_format = level.name + (self._maxlen - len(level.name)) * " "
space = self._spacing + self._maxlen * " " + self._spacing
logs = tolog.split("\n")
rs = RichString(stderr=level in _STDERR_LEVELS) # Send warning
if with_info and tolog:
rs.add_string(
f"{time}{self._spacing}{level_format}{self._spacing}",
self._format(time, _TIMESTAMP_COLOR) +\
self._spacing +\
self._format(level_format, _LEVEL_FORMAT[level]) +\
self._spacing,
)
rs.add_string(logs[0])
else:
rs.add_string(f"{time_spaces}{space}{logs[0]}".rstrip())
for i in range(1, len(logs)):
s = f"\n{time_spaces}{space}{logs[i]}".rstrip()
rs.add_string(
s if s.strip() else "\n"
)
if not self._collect:
self._write_to_log(rs)
if with_print:
rs.print()
else:
self._collected_log.append(rs)
if with_print:
self._collected_print.append(rs)
def _format_tb(self, error: Exception, tb_) -> list[str]:
stack = list(chain.from_iterable([elem.split("\n") for elem in tb.format_tb(tb_)]))
stack = [line for line in stack if line.strip()]
return [
"ERROR: %s thrown with stacktrace" % type(error).__name__,
*stack,
"%s: %s" % (type(error).__name__, error),
]
def _throw(self, error: Exception, tb_=None):
stack = list()
has_cause = error.__cause__ is not None
cur_error = error.__context__
while cur_error:
stack += self._format_tb(cur_error, cur_error.__traceback__)
if has_cause:
stack += ["", "The above exception was the direct cause of the following exception:", ""]
else:
stack += ["", "During handling of the above exception, another exception occurred:", ""]
has_cause = cur_error.__cause__ is not None
cur_error = cur_error.__context__
stack += self._format_tb(error, tb_)
self.critical(*stack, with_print=False)
raise error
def _input(self, prompt: str) -> str:
self.info("Prompt: '%s'" % prompt, with_print=False)
response = input(prompt)
self.info("Input: '%s'" % response, with_print=False)
return response
def input(self, prompt: str | Iterable[str] = "") -> str | Generator[str]:
"""
        Get user input and log both the prompt and the input
If prompt is an iterable, a generator of user inputs will be returned
"""
self._log("Waiting for user input", with_print=False)
if isinstance(prompt, str):
return self._input(prompt)
else:
return (self._input(p) for p in prompt)
@classmethod
def bool_input(cls, inp: str, default=True) -> bool:
""" Parse a yes/no user input """
inp = inp.lower()
if default:
return inp[0] not in cls._no if inp else True
else:
return inp[0] in cls._yes if inp else False
def _reset_collected(self):
self._collected_log = list()
self._collected_print = list()
def set_collect_mode(self, collect: bool):
self._collect = collect
if not collect:
self._reset_collected()
def log_collected(self):
if self._collected_log:
logs = "\n".join(str(log) for log in self._collected_log)
self._write_to_log(logs)
if self._collected_print:
RichString.multiprint(self._collected_print)
def clean(self):
""" Resets the loggers and removes all existing logger configurations """
self._loggers = defaultdict(dict)
self._selected_logger = "default"
def section(self, *tolog, with_info=True, sep=None, with_print=None, newline=True):
if newline:
self._log("")
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.SECTION)
def critical(self, *tolog, with_info=True, sep=None, with_print=None):
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.CRITICAL)
def error(self, *tolog, with_info=True, sep=None, with_print=None):
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.ERROR)
def warning(self, *tolog, with_info=True, sep=None, with_print=None):
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.WARNING)
def info(self, *tolog, with_info=True, sep=None, with_print=None):
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.INFO)
def debug(self, *tolog, with_info=True, sep=None, with_print=None):
self._log(*tolog, with_info=with_info, sep=sep, with_print=with_print, level=LogLevels.DEBUG)
log = _Logger()
class collect_logs:
"""
Wrap functions with this class to have them output all their output at once
Useful with multiprocessing, e.g.
```
with mp.Pool() as p:
p.map(collect_logs(fun), ...)
```
Loggers cannot be changed or configured during this
"""
def __init__(self, fun: Callable):
self.fun = fun
update_wrapper(self, fun)
def __call__(self, *args, **kwargs):
log.set_collect_mode(True)
return_value = self.fun(*args, **kwargs)
log.log_collected()
log.set_collect_mode(False)
return return_value
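# --- Editor's usage sketch (not part of the original module) ---
# A minimal illustration of the API defined above; the log path and messages are
# made up for the example.
if __name__ == "__main__":
    log.configure("logs/example.log", title="Example run", print_level=LogLevels.INFO)
    log.section("Setup")
    log("Run started")  # __call__ logs at INFO by default
    with log.level(LogLevels.WARNING):
        log.info("Suppressed: below the active level")
        log.warning("Still logged")
    with log.log_errors:  # exceptions raised here are logged before being re-raised
        log.debug("No error raised in this block")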
tests/test_metrics.py | aaxelb/django-elasticsearch-metrics | 5 | 8122 |
import mock
import pytest
import datetime as dt
from django.utils import timezone
from elasticsearch_metrics import metrics
from elasticsearch_dsl import IndexTemplate
from elasticsearch_metrics import signals
from elasticsearch_metrics.exceptions import (
IndexTemplateNotFoundError,
IndexTemplateOutOfSyncError,
)
from tests.dummyapp.metrics import (
DummyMetric,
DummyMetricWithExplicitTemplateName,
DummyMetricWithExplicitTemplatePattern,
)
class PreprintView(metrics.Metric):
provider_id = metrics.Keyword(index=True)
user_id = metrics.Keyword(index=True)
preprint_id = metrics.Keyword(index=True)
class Index:
settings = {"refresh_interval": "-1"}
class Meta:
app_label = "dummyapp"
template_name = "osf_metrics_preprintviews"
template = "osf_metrics_preprintviews-*"
class TestGetIndexName:
def test_get_index_name(self):
date = dt.date(2020, 2, 14)
assert (
PreprintView.get_index_name(date=date)
== "osf_metrics_preprintviews_2020.02.14"
)
def test_get_index_name_respects_date_format_setting(self, settings):
settings.ELASTICSEARCH_METRICS_DATE_FORMAT = "%Y-%m-%d"
date = dt.date(2020, 2, 14)
assert (
PreprintView.get_index_name(date=date)
== "osf_metrics_preprintviews_2020-02-14"
)
def test_get_index_name_gets_index_for_today_by_default(self):
today = timezone.now().date()
today_formatted = today.strftime("%Y.%m.%d")
assert PreprintView.get_index_name() == "osf_metrics_preprintviews_{}".format(
today_formatted
)
class TestGetIndexTemplate:
def test_get_index_template_returns_template_with_correct_name_and_pattern(self):
template = PreprintView.get_index_template()
assert isinstance(template, IndexTemplate)
assert template._template_name == "osf_metrics_preprintviews"
assert "osf_metrics_preprintviews-*" in template.to_dict()["index_patterns"]
def test_get_index_template_respects_index_settings(self):
template = PreprintView.get_index_template()
assert template._index.to_dict()["settings"] == {"refresh_interval": "-1"}
def test_get_index_template_creates_template_with_mapping(self):
template = PreprintView.get_index_template()
mappings = template.to_dict()["mappings"]
assert mappings["doc"]["_source"]["enabled"] is False
properties = mappings["doc"]["properties"]
assert "timestamp" in properties
assert properties["timestamp"] == {"doc_values": True, "type": "date"}
assert properties["provider_id"] == {"type": "keyword", "index": True}
assert properties["user_id"] == {"type": "keyword", "index": True}
assert properties["preprint_id"] == {"type": "keyword", "index": True}
# regression test
def test_mappings_are_not_shared(self):
template1 = DummyMetric.get_index_template()
template2 = DummyMetricWithExplicitTemplateName.get_index_template()
assert "my_int" in template1.to_dict()["mappings"]["doc"]["properties"]
assert "my_keyword" not in template1.to_dict()["mappings"]["doc"]["properties"]
assert "my_int" not in template2.to_dict()["mappings"]["doc"]["properties"]
assert "my_keyword" in template2.to_dict()["mappings"]["doc"]["properties"]
def test_declaring_metric_with_no_app_label_or_template_name_errors(self):
with pytest.raises(RuntimeError):
class BadMetric(metrics.Metric):
pass
with pytest.raises(RuntimeError):
class MyMetric(metrics.Metric):
class Meta:
template_name = "osf_metrics_preprintviews"
def test_get_index_template_default_template_name(self):
template = DummyMetric.get_index_template()
assert isinstance(template, IndexTemplate)
assert template._template_name == "dummyapp_dummymetric"
assert "dummyapp_dummymetric_*" in template.to_dict()["index_patterns"]
def test_get_index_template_uses_app_label_in_class_meta(self):
class MyMetric(metrics.Metric):
class Meta:
app_label = "myapp"
template = MyMetric.get_index_template()
assert template._template_name == "myapp_mymetric"
def test_template_name_defined_with_no_template_falls_back_to_default_template(
self
):
template = DummyMetricWithExplicitTemplateName.get_index_template()
# template name specified in class Meta
assert template._template_name == "dummymetric"
# template is not specified, so it's generated
assert (
"dummyapp_dummymetricwithexplicittemplatename_*"
in template.to_dict()["index_patterns"]
)
def test_template_defined_with_no_template_name_falls_back_to_default_name(self):
template = DummyMetricWithExplicitTemplatePattern.get_index_template()
# template name specified in class Meta
assert (
template._template_name == "dummyapp_dummymetricwithexplicittemplatepattern"
)
# template is not specified, so it's generated
assert "dummymetric-*" in template.to_dict()["index_patterns"]
def test_inheritance(self):
class MyBaseMetric(metrics.Metric):
user_id = metrics.Keyword(index=True)
class Index:
settings = {"number_of_shards": 2}
class Meta:
abstract = True
class ConcreteMetric(MyBaseMetric):
class Meta:
app_label = "dummyapp"
template = ConcreteMetric.get_index_template()
assert template._template_name == "dummyapp_concretemetric"
assert template._index.to_dict()["settings"] == {"number_of_shards": 2}
def test_source_may_be_enabled(self):
class MyMetric(metrics.Metric):
class Meta:
app_label = "dummyapp"
template_name = "mymetric"
template = "mymetric-*"
source = metrics.MetaField(enabled=True)
template = MyMetric.get_index_template()
template_dict = template.to_dict()
doc = template_dict["mappings"]["doc"]
assert doc["_source"]["enabled"] is True
class TestRecord:
def test_calls_save(self, mock_save):
timestamp = dt.datetime(2017, 8, 21)
p = PreprintView.record(timestamp=timestamp, provider_id="abc12")
assert mock_save.call_count == 1
assert p.timestamp == timestamp
assert p.provider_id == "abc12"
@mock.patch.object(timezone, "now")
def test_defaults_timestamp_to_now(self, mock_now, mock_save):
fake_now = dt.datetime(2016, 8, 21)
mock_now.return_value = fake_now
p = PreprintView.record(provider_id="abc12")
assert mock_save.call_count == 1
assert p.timestamp == fake_now
class TestSignals:
@mock.patch.object(PreprintView, "get_index_template")
def test_create_metric_sends_signals(self, mock_get_index_template):
mock_pre_index_template_listener = mock.Mock()
mock_post_index_template_listener = mock.Mock()
signals.pre_index_template_create.connect(mock_pre_index_template_listener)
signals.post_index_template_create.connect(mock_post_index_template_listener)
PreprintView.sync_index_template()
assert mock_pre_index_template_listener.call_count == 1
assert mock_post_index_template_listener.call_count == 1
pre_call_kwargs = mock_pre_index_template_listener.call_args[1]
assert "index_template" in pre_call_kwargs
assert "using" in pre_call_kwargs
post_call_kwargs = mock_pre_index_template_listener.call_args[1]
assert "index_template" in post_call_kwargs
assert "using" in post_call_kwargs
def test_save_sends_signals(self, mock_save):
mock_pre_save_listener = mock.Mock()
mock_post_save_listener = mock.Mock()
signals.pre_save.connect(mock_pre_save_listener, sender=PreprintView)
signals.post_save.connect(mock_post_save_listener, sender=PreprintView)
provider_id = "12345"
user_id = "abcde"
preprint_id = "zyxwv"
doc = PreprintView(
provider_id=provider_id, user_id=user_id, preprint_id=preprint_id
)
doc.save()
assert mock_pre_save_listener.call_count == 1
pre_save_kwargs = mock_pre_save_listener.call_args[1]
assert isinstance(pre_save_kwargs["instance"], PreprintView)
assert "index" in pre_save_kwargs
assert "using" in pre_save_kwargs
assert pre_save_kwargs["sender"] is PreprintView
assert mock_post_save_listener.call_count == 1
post_save_kwargs = mock_pre_save_listener.call_args[1]
assert isinstance(post_save_kwargs["instance"], PreprintView)
assert "index" in post_save_kwargs
assert "using" in post_save_kwargs
assert post_save_kwargs["sender"] is PreprintView
@pytest.mark.es
class TestIntegration:
def test_init(self, client):
PreprintView.init()
name = PreprintView.get_index_name()
mapping = client.indices.get_mapping(index=name)
properties = mapping[name]["mappings"]["doc"]["properties"]
assert properties["timestamp"] == {"type": "date"}
assert properties["provider_id"] == {"type": "keyword"}
assert properties["user_id"] == {"type": "keyword"}
assert properties["preprint_id"] == {"type": "keyword"}
def test_create_document(self, client):
provider_id = "12345"
user_id = "abcde"
preprint_id = "zyxwv"
doc = PreprintView(
provider_id=provider_id, user_id=user_id, preprint_id=preprint_id
)
doc.save()
document = PreprintView.get(id=doc.meta.id, index=PreprintView.get_index_name())
# TODO flesh out this test more. Try to query ES?
assert document is not None
def test_check_index_template(self):
with pytest.raises(IndexTemplateNotFoundError):
assert PreprintView.check_index_template() is False
PreprintView.sync_index_template()
assert PreprintView.check_index_template() is True
# When settings change, template is out of sync
PreprintView._index.settings(
**{"refresh_interval": "1s", "number_of_shards": 1, "number_of_replicas": 2}
)
with pytest.raises(IndexTemplateOutOfSyncError) as excinfo:
assert PreprintView.check_index_template() is False
error = excinfo.value
assert error.settings_in_sync is False
assert error.mappings_in_sync is True
assert error.patterns_in_sync is True
PreprintView.sync_index_template()
assert PreprintView.check_index_template() is True
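# --- Editor's note (not part of the original suite) ---
# The behaviour exercised above, condensed: index names combine the template name
# with a dated suffix, and record() saves a timestamped document. The
# PreprintDownload class below is hypothetical, only to show the Meta defaults.
#
#   PreprintView.get_index_name(date=dt.date(2020, 2, 14))
#   # -> "osf_metrics_preprintviews_2020.02.14"
#
#   class PreprintDownload(metrics.Metric):
#       preprint_id = metrics.Keyword(index=True)
#       class Meta:
#           app_label = "dummyapp"  # template name defaults to "dummyapp_preprintdownload"
#
#   PreprintDownload.record(preprint_id="zyxwv")  # timestamp defaults to timezone.now()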
6 kyu/SumFibs.py | mwk0408/codewars_solutions | 6 | 8123 |
from functools import lru_cache
@lru_cache
def fib(n):
return n if n<2 else fib(n-1)+fib(n-2)
def sum_fibs(n):
    return sum(j for j in (fib(i) for i in range(n+1)) if j%2==0)
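# Editor's note: sum_fibs(n) adds the even Fibonacci numbers among fib(0)..fib(n).
# Worked example: fib(0..10) = 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55; the even
# entries are 0, 2, 8 and 34, so sum_fibs(10) == 44. lru_cache memoizes fib, so
# the naive recursion stays linear in n.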
tests/unit/test_iris_helpers.py | jvegreg/ESMValCore | 0 | 8124 |
"""Tests for :mod:`esmvalcore.iris_helpers`."""
import datetime
import iris
import numpy as np
import pytest
from cf_units import Unit
from esmvalcore.iris_helpers import date2num, var_name_constraint
@pytest.fixture
def cubes():
"""Test cubes."""
cubes = iris.cube.CubeList([
iris.cube.Cube(0.0, var_name='a', long_name='a'),
iris.cube.Cube(0.0, var_name='a', long_name='b'),
iris.cube.Cube(0.0, var_name='c', long_name='d'),
])
return cubes
@pytest.fixture
def units():
return Unit('days since 0001-01-01', calendar='proleptic_gregorian')
@pytest.mark.parametrize("date, dtype, expected", [
(datetime.datetime(1, 1, 1), np.float64, 0.0),
(datetime.datetime(1, 1, 1), int, 0.0),
(datetime.datetime(1, 1, 2, 12), np.float64, 1.5),
])
def test_date2num_scalar(date, dtype, expected, units):
num = date2num(date, units, dtype=dtype)
assert num == expected
assert num.dtype == dtype
def test_var_name_constraint(cubes):
"""Test :func:`esmvalcore.iris_helpers.var_name_constraint`."""
out_cubes = cubes.extract(var_name_constraint('a'))
assert out_cubes == iris.cube.CubeList([
iris.cube.Cube(0.0, var_name='a', long_name='a'),
iris.cube.Cube(0.0, var_name='a', long_name='b'),
])
out_cubes = cubes.extract(var_name_constraint('b'))
assert out_cubes == iris.cube.CubeList([])
out_cubes = cubes.extract(var_name_constraint('c'))
assert out_cubes == iris.cube.CubeList([
iris.cube.Cube(0.0, var_name='c', long_name='d'),
])
with pytest.raises(iris.exceptions.ConstraintMismatchError):
cubes.extract_cube(var_name_constraint('a'))
with pytest.raises(iris.exceptions.ConstraintMismatchError):
cubes.extract_cube(var_name_constraint('b'))
out_cube = cubes.extract_cube(var_name_constraint('c'))
assert out_cube == iris.cube.Cube(0.0, var_name='c', long_name='d')
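# Editor's note (illustration only): CubeList.extract() returns every cube whose
# var_name matches the constraint (possibly an empty list), while extract_cube()
# requires exactly one match and raises ConstraintMismatchError otherwise -- which
# is why 'a' (two matches) and 'b' (no match) both raise above, but 'c' succeeds.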
geo_regions.py | saeed-moghimi-noaa/Maxelev_plot | 0 | 8125 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Geo regions for map plot
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, UCAR/NOAA"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "<EMAIL>"
import matplotlib.pyplot as plt
from collections import defaultdict
defs = defaultdict(dict)
defs['elev']['var'] = 'elev'
defs['elev']['vmin'] = -1
defs['elev']['vmax'] = 1
defs['elev']['label'] = 'Elev. [m]'
defs['elev']['format']= '%3.1g'
defs['elev']['cmap'] = plt.cm.jet_r
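# --- Editor's note (not part of the original file) ---
# `defs` carries the colormap settings above under 'elev' and, once
# get_region_extent() below has run, the map bounds under 'lim'. A typical
# (assumed) use when setting plot extents:
#
#   get_region_extent(region='ike_region')
#   lim = defs['lim']   # lim['xmin'], lim['xmax'], lim['ymin'], lim['ymax']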
def get_region_extent(region = 'hsofs_region'):
if region == 'hsofs_region':
defs['lim']['xmin'] = -99.0
defs['lim']['xmax'] = -52.8
defs['lim']['ymin'] = 5.0
defs['lim']['ymax'] = 46.3
##IKE
elif region == 'caribbean':
defs['lim']['xmin'] = -78.
defs['lim']['xmax'] = -74.
defs['lim']['ymin'] = 20.
defs['lim']['ymax'] = 24.
defs['lim']['xmin'] = -82.
defs['lim']['xmax'] = -71.
defs['lim']['ymin'] = 18.
defs['lim']['ymax'] = 26.
elif region == 'ike_region':
defs['lim']['xmin'] = -98.5
defs['lim']['xmax'] = -84.5
defs['lim']['ymin'] = 24.
defs['lim']['ymax'] = 31.5
elif region == 'caribbean_bigger':
defs['lim']['xmin'] = -78.0
defs['lim']['xmax'] = -58
defs['lim']['ymin'] = 10.0
defs['lim']['ymax'] = 28.
elif region == 'ike_local':
defs['lim']['xmin'] = -96
defs['lim']['xmax'] = -92
defs['lim']['ymin'] = 28.5
defs['lim']['ymax'] = 30.6
elif region == 'ike_wave':
defs['lim']['xmin'] = -95.63
defs['lim']['xmax'] = -88.0
defs['lim']['ymin'] = 28.37
defs['lim']['ymax'] = 30.50
elif region == 'ike_hwm':
defs['lim']['xmin'] = -96.15
defs['lim']['xmax'] = -88.5
defs['lim']['ymin'] = 28.45
defs['lim']['ymax'] = 30.7
elif region == 'ike_galv_bay':
defs['lim']['xmin'] = -95.92
defs['lim']['xmax'] = -94.81
defs['lim']['ymin'] = 29.37
defs['lim']['ymax'] = 29.96
elif region == 'ike_galv_nwm':
defs['lim']['xmin'] = -95.4
defs['lim']['xmax'] = -94.2
defs['lim']['ymin'] = 28.66
defs['lim']['ymax'] = 30.4
elif region == 'ike_wav_break':
defs['lim']['xmin'] = -95
defs['lim']['xmax'] = -94.5
defs['lim']['ymin'] = 28.7 + 0.6
defs['lim']['ymax'] = 30.4 - 0.6
elif region == 'ike_f63_timeseries':
defs['lim']['xmin'] = -94.2579 - 0.1
defs['lim']['xmax'] = -94.2579 + 0.1
defs['lim']['ymin'] = 29.88642 - 0.1
defs['lim']['ymax'] = 29.88642 + 0.1
elif region == 'ike_f63_timeseries_det':
defs['lim']['xmin'] = -94.2300
defs['lim']['xmax'] = -94.1866
defs['lim']['ymin'] = 29.82030
defs['lim']['ymax'] = 29.84397+0.05
elif region == 'ike_cpl_paper':
defs['lim']['xmin'] = -95.127481
defs['lim']['xmax'] = -93.233053
defs['lim']['ymin'] = 29.198490
defs['lim']['ymax'] = 30.132224
##IRMA
elif region == 'carib_irma':
defs['lim']['xmin'] = -84.0
defs['lim']['xmax'] = -60.
defs['lim']['ymin'] = 15.0
defs['lim']['ymax'] = 29.
elif region == 'burbuda':
defs['lim']['xmin'] = -65.0
defs['lim']['xmax'] = -60.
defs['lim']['ymin'] = 15.0
defs['lim']['ymax'] = 19.
elif region == 'burbuda_zoom':
defs['lim']['xmin'] = -63.8
defs['lim']['xmax'] = -60.8
defs['lim']['ymin'] = 16.8
defs['lim']['ymax'] = 18.65
elif region == 'puertorico':
defs['lim']['xmin'] = -67.35
defs['lim']['xmax'] = -66.531
defs['lim']['ymin'] = 18.321
defs['lim']['ymax'] = 18.674
elif region == 'puertorico_shore':
defs['lim']['xmin'] = -67.284
defs['lim']['xmax'] = -66.350
defs['lim']['ymin'] = 18.360
defs['lim']['ymax'] = 18.890
elif region == 'key_west':
defs['lim']['xmin'] = -82.7
defs['lim']['xmax'] = -74.5
defs['lim']['ymin'] = 21.3
defs['lim']['ymax'] = 27.2
elif region == 'key_west_zoom':
defs['lim']['xmin'] = -82.2
defs['lim']['xmax'] = -79.4
defs['lim']['ymin'] = 24.1
defs['lim']['ymax'] = 26.1
elif region == 'cuba_zoom':
defs['lim']['xmin'] = -82.
defs['lim']['xmax'] = -77.
defs['lim']['ymin'] = 21.5
defs['lim']['ymax'] = 23.5
elif region == 'key_west_timeseries':
defs['lim']['xmin'] = -84.62
defs['lim']['xmax'] = -79.2
defs['lim']['ymin'] = 23.6
defs['lim']['ymax'] = 30.0
elif region == 'pr_timeseries':
defs['lim']['xmin'] = -68
defs['lim']['xmax'] = -64
defs['lim']['ymin'] = 17.3
defs['lim']['ymax'] = 19.2
elif region == 'key_west_anim':
defs['lim']['xmin'] = -85.5
defs['lim']['xmax'] = -74.5
defs['lim']['ymin'] = 21.0
defs['lim']['ymax'] = 31.5
## ISABEL
elif region == 'isa_region':
defs['lim']['xmin'] = -80.2
defs['lim']['xmax'] = -71.6
defs['lim']['ymin'] = 31.9
defs['lim']['ymax'] = 41.9
elif region == 'isa_local':
# widened from the original (-77.5..-74, 34.5..40) extent; the dead first set of limits was removed
defs['lim']['xmin'] = -78.5
defs['lim']['xmax'] = -74
defs['lim']['ymin'] = 33.5
defs['lim']['ymax'] = 39.5
elif region == 'isa_hwm':
defs['lim']['xmin'] = -76.01
defs['lim']['xmax'] = -75.93
defs['lim']['ymin'] = 36.74
defs['lim']['ymax'] = 36.93
elif region == 'isa_landfall':
defs['lim']['xmin'] = -77.8
defs['lim']['xmax'] = -75.2
defs['lim']['ymin'] = 34.2
defs['lim']['ymax'] = 37.5
elif region == 'isa_landfall_zoom':
defs['lim']['xmin'] = -77.8
defs['lim']['xmax'] = -75.2
defs['lim']['ymin'] = 34.2
defs['lim']['ymax'] = 36.0
## SANDY
elif region == 'san_track':
defs['lim']['xmin'] = -82.0
defs['lim']['xmax'] = -67.0
defs['lim']['ymin'] = 23.0
defs['lim']['ymax'] = 43.6
elif region == 'san_area':
defs['lim']['xmin'] = -77.0
defs['lim']['xmax'] = -70.0
defs['lim']['ymin'] = 37.0
defs['lim']['ymax'] = 42.0
elif region == 'san_track':
defs['lim']['xmin'] = -82.0
defs['lim']['xmax'] = -67.0
defs['lim']['ymin'] = 23.0
defs['lim']['ymax'] = 43.6
elif region == 'san_area':
defs['lim']['xmin'] = -77.0
defs['lim']['xmax'] = -70.0
defs['lim']['ymin'] = 37.0
defs['lim']['ymax'] = 42.0
elif region == 'san_area2':
defs['lim']['xmin'] = -75.9
defs['lim']['xmax'] = -73.3
defs['lim']['ymin'] = 38.5
defs['lim']['ymax'] = 41.3
elif region == 'san_newyork':
defs['lim']['xmin'] = -74.5
defs['lim']['xmax'] = -73.55
defs['lim']['ymin'] = 40.35
defs['lim']['ymax'] = 41.2
elif region == 'san_delaware':
defs['lim']['xmin'] = -75.87
defs['lim']['xmax'] = -74.31
defs['lim']['ymin'] = 38.26
defs['lim']['ymax'] = 40.51
elif region == 'san_jamaica_bay':
defs['lim']['xmin'] = -73.963520
defs['lim']['xmax'] = -73.731455
defs['lim']['ymin'] = 40.518074
defs['lim']['ymax'] = 40.699618
elif region == 'irn_region':
defs['lim']['xmin'] = -78.41
defs['lim']['xmax'] = -73.48
defs['lim']['ymin'] = 33.55
defs['lim']['ymax'] = 41.31
elif region == 'irn_hwm':
defs['lim']['xmin'] = -78.64
defs['lim']['xmax'] = -69.54
defs['lim']['ymin'] = 33.80
defs['lim']['ymax'] = 41.82
## ANDREW
elif region == 'and_region':
defs['lim']['xmin'] = -98.5
defs['lim']['xmax'] = -77.5
defs['lim']['ymin'] = 23.
defs['lim']['ymax'] = 32.
elif region == 'and_fl_lu':
defs['lim']['xmin'] = -98.5
defs['lim']['xmax'] = -76.5
defs['lim']['ymin'] = 21.
defs['lim']['ymax'] = 32.
elif region == 'and_local_lu':
defs['lim']['xmin'] = -95
defs['lim']['xmax'] = -86
defs['lim']['ymin'] = 28.
defs['lim']['ymax'] = 32
elif region == 'and_local_fl':
defs['lim']['xmin'] = -86
defs['lim']['xmax'] = -79.5
defs['lim']['ymin'] = 24.
defs['lim']['ymax'] = 34
elif region == 'and_local_lu_landfall':
defs['lim']['xmin'] = -92.4
defs['lim']['xmax'] = -87.5
defs['lim']['ymin'] = 28.
defs['lim']['ymax'] = 31.
elif region == 'and_local_fl_landfall':
defs['lim']['xmin'] = -80.0
defs['lim']['xmax'] = -80.5
defs['lim']['ymin'] = 25.34
defs['lim']['ymax'] = 25.8
## operational upgrade
# NYC area: -74.027725,40.596099
elif region == 'NYC_area':
defs['lim']['xmin'] = -74.027725 - 0.25
defs['lim']['xmax'] = -74.027725 + 0.25
defs['lim']['ymin'] = 40.596099 - 0.2
defs['lim']['ymax'] = 40.596099 + 0.2
# Tampa area: -82.455511,27.921438
elif region == 'Tampa_area':
defs['lim']['xmin'] = -82.455511 - 0.25
defs['lim']['xmax'] = -82.455511 + 0.25
defs['lim']['ymin'] = 27.921438 - 0.2
defs['lim']['ymax'] = 27.921438 + 0.2
# Marshall Islands: 169.107299,7.906637
elif region == 'Marshall':
defs['lim']['xmin'] = 169.107299 - 0.25
defs['lim']['xmax'] = 169.107299 + 0.25
defs['lim']['ymin'] = 7.906637 - 0.2
defs['lim']['ymax'] = 7.906637 + 0.2
# Palau: 134.461436,7.436438
elif region == 'Palau':
defs['lim']['xmin'] = 134.461436 - 0.25
defs['lim']['xmax'] = 134.461436 + 0.25
defs['lim']['ymin'] = 7.436438 - 0.2
defs['lim']['ymax'] = 7.436438 + 0.2
elif region == 'NYC_Area_m':
defs['lim']['xmin'] = -73.55
defs['lim']['xmax'] = -74.26
defs['lim']['ymin'] = 40.55
defs['lim']['ymax'] = 40.91
elif region == 'Tampa_Area_m':
defs['lim']['xmin'] = -82.37
defs['lim']['xmax'] = -82.75
defs['lim']['ymin'] = 27.63
defs['lim']['ymax'] = 28.05
elif region == 'Marshall_Islands_m':
defs['lim']['xmin'] = 164.92
defs['lim']['xmax'] = 173.45
defs['lim']['ymin'] = 5.10
defs['lim']['ymax'] = 11.90
elif region == 'Palau_m':
defs['lim']['xmin'] = 134.01
defs['lim']['xmax'] = 134.78
defs['lim']['ymin'] = 6.78
defs['lim']['ymax'] = 8.52
elif region == 'Port_Arthur_m':
defs['lim']['xmin'] = -93.60
defs['lim']['xmax'] = -94.24
defs['lim']['ymin'] = 29.62
defs['lim']['ymax'] = 30.14
return defs['lim']
| <filename>geo_regions.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Geo regions for map plot
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, UCAR/NOAA"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "<EMAIL>"
import matplotlib.pyplot as plt
from collections import defaultdict
defs = defaultdict(dict)
defs['elev']['var'] = 'elev'
defs['elev']['vmin'] = -1
defs['elev']['vmax'] = 1
defs['elev']['label'] = 'Elev. [m]'
defs['elev']['format']= '%3.1g'
defs['elev']['cmap'] = plt.cm.jet_r
def get_region_extent(region = 'hsofs_region'):
if region == 'hsofs_region':
defs['lim']['xmin'] = -99.0
defs['lim']['xmax'] = -52.8
defs['lim']['ymin'] = 5.0
defs['lim']['ymax'] = 46.3
##IKE
elif region == 'caribbean':
# widened from the original (-78..-74, 20..24) extent; the dead first set of limits was removed
defs['lim']['xmin'] = -82.
defs['lim']['xmax'] = -71.
defs['lim']['ymin'] = 18.
defs['lim']['ymax'] = 26.
elif region == 'ike_region':
defs['lim']['xmin'] = -98.5
defs['lim']['xmax'] = -84.5
defs['lim']['ymin'] = 24.
defs['lim']['ymax'] = 31.5
elif region == 'caribbean_bigger':
defs['lim']['xmin'] = -78.0
defs['lim']['xmax'] = -58
defs['lim']['ymin'] = 10.0
defs['lim']['ymax'] = 28.
elif region == 'ike_local':
defs['lim']['xmin'] = -96
defs['lim']['xmax'] = -92
defs['lim']['ymin'] = 28.5
defs['lim']['ymax'] = 30.6
elif region == 'ike_wave':
defs['lim']['xmin'] = -95.63
defs['lim']['xmax'] = -88.0
defs['lim']['ymin'] = 28.37
defs['lim']['ymax'] = 30.50
elif region == 'ike_hwm':
defs['lim']['xmin'] = -96.15
defs['lim']['xmax'] = -88.5
defs['lim']['ymin'] = 28.45
defs['lim']['ymax'] = 30.7
elif region == 'ike_galv_bay':
defs['lim']['xmin'] = -95.92
defs['lim']['xmax'] = -94.81
defs['lim']['ymin'] = 29.37
defs['lim']['ymax'] = 29.96
elif region == 'ike_galv_nwm':
defs['lim']['xmin'] = -95.4
defs['lim']['xmax'] = -94.2
defs['lim']['ymin'] = 28.66
defs['lim']['ymax'] = 30.4
elif region == 'ike_wav_break':
defs['lim']['xmin'] = -95
defs['lim']['xmax'] = -94.5
defs['lim']['ymin'] = 28.7 + 0.6
defs['lim']['ymax'] = 30.4 - 0.6
elif region == 'ike_f63_timeseries':
defs['lim']['xmin'] = -94.2579 - 0.1
defs['lim']['xmax'] = -94.2579 + 0.1
defs['lim']['ymin'] = 29.88642 - 0.1
defs['lim']['ymax'] = 29.88642 + 0.1
elif region == 'ike_f63_timeseries_det':
defs['lim']['xmin'] = -94.2300
defs['lim']['xmax'] = -94.1866
defs['lim']['ymin'] = 29.82030
defs['lim']['ymax'] = 29.84397+0.05
elif region == 'ike_cpl_paper':
defs['lim']['xmin'] = -95.127481
defs['lim']['xmax'] = -93.233053
defs['lim']['ymin'] = 29.198490
defs['lim']['ymax'] = 30.132224
##IRMA
elif region == 'carib_irma':
defs['lim']['xmin'] = -84.0
defs['lim']['xmax'] = -60.
defs['lim']['ymin'] = 15.0
defs['lim']['ymax'] = 29.
elif region == 'burbuda':
defs['lim']['xmin'] = -65.0
defs['lim']['xmax'] = -60.
defs['lim']['ymin'] = 15.0
defs['lim']['ymax'] = 19.
elif region == 'burbuda_zoom':
defs['lim']['xmin'] = -63.8
defs['lim']['xmax'] = -60.8
defs['lim']['ymin'] = 16.8
defs['lim']['ymax'] = 18.65
elif region == 'puertorico':
defs['lim']['xmin'] = -67.35
defs['lim']['xmax'] = -66.531
defs['lim']['ymin'] = 18.321
defs['lim']['ymax'] = 18.674
elif region == 'puertorico_shore':
defs['lim']['xmin'] = -67.284
defs['lim']['xmax'] = -66.350
defs['lim']['ymin'] = 18.360
defs['lim']['ymax'] = 18.890
elif region == 'key_west':
defs['lim']['xmin'] = -82.7
defs['lim']['xmax'] = -74.5
defs['lim']['ymin'] = 21.3
defs['lim']['ymax'] = 27.2
elif region == 'key_west_zoom':
defs['lim']['xmin'] = -82.2
defs['lim']['xmax'] = -79.4
defs['lim']['ymin'] = 24.1
defs['lim']['ymax'] = 26.1
elif region == 'cuba_zoom':
defs['lim']['xmin'] = -82.
defs['lim']['xmax'] = -77.
defs['lim']['ymin'] = 21.5
defs['lim']['ymax'] = 23.5
elif region == 'key_west_timeseries':
defs['lim']['xmin'] = -84.62
defs['lim']['xmax'] = -79.2
defs['lim']['ymin'] = 23.6
defs['lim']['ymax'] = 30.0
elif region == 'pr_timeseries':
defs['lim']['xmin'] = -68
defs['lim']['xmax'] = -64
defs['lim']['ymin'] = 17.3
defs['lim']['ymax'] = 19.2
elif region == 'key_west_anim':
defs['lim']['xmin'] = -85.5
defs['lim']['xmax'] = -74.5
defs['lim']['ymin'] = 21.0
defs['lim']['ymax'] = 31.5
## ISABEL
elif region == 'isa_region':
defs['lim']['xmin'] = -80.2
defs['lim']['xmax'] = -71.6
defs['lim']['ymin'] = 31.9
defs['lim']['ymax'] = 41.9
elif region == 'isa_local':
# widened from the original (-77.5..-74, 34.5..40) extent; the dead first set of limits was removed
defs['lim']['xmin'] = -78.5
defs['lim']['xmax'] = -74
defs['lim']['ymin'] = 33.5
defs['lim']['ymax'] = 39.5
elif region == 'isa_hwm':
defs['lim']['xmin'] = -76.01
defs['lim']['xmax'] = -75.93
defs['lim']['ymin'] = 36.74
defs['lim']['ymax'] = 36.93
elif region == 'isa_landfall':
defs['lim']['xmin'] = -77.8
defs['lim']['xmax'] = -75.2
defs['lim']['ymin'] = 34.2
defs['lim']['ymax'] = 37.5
elif region == 'isa_landfall_zoom':
defs['lim']['xmin'] = -77.8
defs['lim']['xmax'] = -75.2
defs['lim']['ymin'] = 34.2
defs['lim']['ymax'] = 36.0
## SANDY
elif region == 'san_track':
defs['lim']['xmin'] = -82.0
defs['lim']['xmax'] = -67.0
defs['lim']['ymin'] = 23.0
defs['lim']['ymax'] = 43.6
elif region == 'san_area':
defs['lim']['xmin'] = -77.0
defs['lim']['xmax'] = -70.0
defs['lim']['ymin'] = 37.0
defs['lim']['ymax'] = 42.0
elif region == 'san_track':
defs['lim']['xmin'] = -82.0
defs['lim']['xmax'] = -67.0
defs['lim']['ymin'] = 23.0
defs['lim']['ymax'] = 43.6
elif region == 'san_area':
defs['lim']['xmin'] = -77.0
defs['lim']['xmax'] = -70.0
defs['lim']['ymin'] = 37.0
defs['lim']['ymax'] = 42.0
elif region == 'san_area2':
defs['lim']['xmin'] = -75.9
defs['lim']['xmax'] = -73.3
defs['lim']['ymin'] = 38.5
defs['lim']['ymax'] = 41.3
elif region == 'san_newyork':
defs['lim']['xmin'] = -74.5
defs['lim']['xmax'] = -73.55
defs['lim']['ymin'] = 40.35
defs['lim']['ymax'] = 41.2
elif region == 'san_delaware':
defs['lim']['xmin'] = -75.87
defs['lim']['xmax'] = -74.31
defs['lim']['ymin'] = 38.26
defs['lim']['ymax'] = 40.51
elif region == 'san_jamaica_bay':
defs['lim']['xmin'] = -73.963520
defs['lim']['xmax'] = -73.731455
defs['lim']['ymin'] = 40.518074
defs['lim']['ymax'] = 40.699618
elif region == 'irn_region':
defs['lim']['xmin'] = -78.41
defs['lim']['xmax'] = -73.48
defs['lim']['ymin'] = 33.55
defs['lim']['ymax'] = 41.31
elif region == 'irn_hwm':
defs['lim']['xmin'] = -78.64
defs['lim']['xmax'] = -69.54
defs['lim']['ymin'] = 33.80
defs['lim']['ymax'] = 41.82
## ANDREW
elif region == 'and_region':
defs['lim']['xmin'] = -98.5
defs['lim']['xmax'] = -77.5
defs['lim']['ymin'] = 23.
defs['lim']['ymax'] = 32.
elif region == 'and_fl_lu':
defs['lim']['xmin'] = -98.5
defs['lim']['xmax'] = -76.5
defs['lim']['ymin'] = 21.
defs['lim']['ymax'] = 32.
elif region == 'and_local_lu':
defs['lim']['xmin'] = -95
defs['lim']['xmax'] = -86
defs['lim']['ymin'] = 28.
defs['lim']['ymax'] = 32
elif region == 'and_local_fl':
defs['lim']['xmin'] = -86
defs['lim']['xmax'] = -79.5
defs['lim']['ymin'] = 24.
defs['lim']['ymax'] = 34
elif region == 'and_local_lu_landfall':
defs['lim']['xmin'] = -92.4
defs['lim']['xmax'] = -87.5
defs['lim']['ymin'] = 28.
defs['lim']['ymax'] = 31.
elif region == 'and_local_fl_landfall':
defs['lim']['xmin'] = -80.0
defs['lim']['xmax'] = -80.5
defs['lim']['ymin'] = 25.34
defs['lim']['ymax'] = 25.8
## operational upgrade
# NYC area: -74.027725,40.596099
elif region == 'NYC_area':
defs['lim']['xmin'] = -74.027725 - 0.25
defs['lim']['xmax'] = -74.027725 + 0.25
defs['lim']['ymin'] = 40.596099 - 0.2
defs['lim']['ymax'] = 40.596099 + 0.2
# Tampa area: -82.455511,27.921438
elif region == 'Tampa_area':
defs['lim']['xmin'] = -82.455511 - 0.25
defs['lim']['xmax'] = -82.455511 + 0.25
defs['lim']['ymin'] = 27.921438 - 0.2
defs['lim']['ymax'] = 27.921438 + 0.2
# Marshall Islands: 169.107299,7.906637
elif region == 'Marshall':
defs['lim']['xmin'] = 169.107299 - 0.25
defs['lim']['xmax'] = 169.107299 + 0.25
defs['lim']['ymin'] = 7.906637 - 0.2
defs['lim']['ymax'] = 7.906637 + 0.2
# Palau: 134.461436,7.436438
elif region == 'Palau':
defs['lim']['xmin'] = 134.461436 - 0.25
defs['lim']['xmax'] = 134.461436 + 0.25
defs['lim']['ymin'] = 7.436438 - 0.2
defs['lim']['ymax'] = 7.436438 + 0.2
elif region == 'NYC_Area_m':
defs['lim']['xmin'] = -73.55
defs['lim']['xmax'] = -74.26
defs['lim']['ymin'] = 40.55
defs['lim']['ymax'] = 40.91
elif region == 'Tampa_Area_m':
defs['lim']['xmin'] = -82.37
defs['lim']['xmax'] = -82.75
defs['lim']['ymin'] = 27.63
defs['lim']['ymax'] = 28.05
elif region == 'Marshall_Islands_m':
defs['lim']['xmin'] = 164.92
defs['lim']['xmax'] = 173.45
defs['lim']['ymin'] = 5.10
defs['lim']['ymax'] = 11.90
elif region == 'Palau_m':
defs['lim']['xmin'] = 134.01
defs['lim']['xmax'] = 134.78
defs['lim']['ymin'] = 6.78
defs['lim']['ymax'] = 8.52
elif region == 'Port_Arthur_m':
defs['lim']['xmin'] = -93.60
defs['lim']['xmax'] = -94.24
defs['lim']['ymin'] = 29.62
defs['lim']['ymax'] = 30.14
return defs['lim']
| en | 0.551239 | #!/usr/bin/env python # -*- coding: utf-8 -*- Geo regions for map plot ##IKE ##IRMA ## ISABEL ## SANDY ## ANDREW ## operational upgrade # NYC area: -74.027725,40.596099 # Tampa area: -82.455511,27.921438 # Marshall Islands: 169.107299,7.906637 # Palau: 134.461436,7.436438 | 2.359016 | 2 |
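Editor's note: a minimal usage sketch for the geo_regions module above. It assumes geo_regions.py is importable and matplotlib is installed; the region name and output filename are illustrative, not taken from the repo.
import matplotlib.pyplot as plt
from geo_regions import get_region_extent, defs

lim = get_region_extent('ike_region')   # returns defs['lim'] with xmin/xmax/ymin/ymax keys
fig, ax = plt.subplots(figsize=(8, 5))
ax.set_xlim(lim['xmin'], lim['xmax'])
ax.set_ylim(lim['ymin'], lim['ymax'])
ax.set_title('IKE region extent')       # defs['elev'] holds the matching colorbar settings
fig.savefig('ike_region_extent.png')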
figures/plot_log_figure_paper.py | davidADSP/deepAI_paper | 21 | 8126 | <reponame>davidADSP/deepAI_paper
import numpy
import matplotlib.pyplot as plt
fig_convergence = plt.figure(1,figsize=(12,6))
x = numpy.loadtxt('log_deepAI_paper_nonlin_action_long.txt')
plt.subplot(122)
plt.plot(x[:,0])
plt.xlim([0,500])
plt.ylim([-10,200])
plt.xlabel('Steps')
plt.ylabel('Free Action')
plt.axvline(x=230.0,linestyle=':')
plt.axvline(x=250.0,linestyle=':')
plt.axvline(x=270.0,linestyle=':')
ax = plt.subplot(121)
plt.plot(x[:,0])
plt.ylim([-10,200])
ax.axvspan(0, 500, alpha=0.3, color='red')
plt.xlim([0,30000])
plt.xlabel('Steps')
plt.ylabel('Free Action')
fig_convergence.subplots_adjust(left=0.07, bottom=0.1, right=0.95, top=0.95,
wspace=0.2, hspace=0.15)
fig_convergence.savefig('fig_convergence.pdf')
plt.show()
| import numpy
import matplotlib.pyplot as plt
fig_convergence = plt.figure(1,figsize=(12,6))
x = numpy.loadtxt('log_deepAI_paper_nonlin_action_long.txt')
plt.subplot(122)
plt.plot(x[:,0])
plt.xlim([0,500])
plt.ylim([-10,200])
plt.xlabel('Steps')
plt.ylabel('Free Action')
plt.axvline(x=230.0,linestyle=':')
plt.axvline(x=250.0,linestyle=':')
plt.axvline(x=270.0,linestyle=':')
ax = plt.subplot(121)
plt.plot(x[:,0])
plt.ylim([-10,200])
ax.axvspan(0, 500, alpha=0.3, color='red')
plt.xlim([0,30000])
plt.xlabel('Steps')
plt.ylabel('Free Action')
fig_convergence.subplots_adjust(left=0.07, bottom=0.1, right=0.95, top=0.95,
wspace=0.2, hspace=0.15)
fig_convergence.savefig('fig_convergence.pdf')
plt.show() | none | 1 | 2.475748 | 2 |
|
setup.py | matiasgrana/nagios_sql | 0 | 8127 | <gh_stars>0
#! python3
# Help from: http://www.scotttorborg.com/python-packaging/minimal.html
# https://docs.python.org/3/distutils/commandref.html#sdist-cmd
# https://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# https://docs.python.org/3.4/tutorial/modules.html
# Install it with python setup.py install
# Or use: python setup.py develop (changes to the source files will be
# immediately available)
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
from setuptools import setup, find_packages
import os
from os import path
import rstcheck
exec(open('src/version.py').read())
# __version__ is defined by executing src/version.py above
version = __version__
here = path.abspath(path.dirname(__file__))
with open(os.path.join(here, 'requirements.txt')) as f:
requires = [x.strip() for x in f if x.strip()]
def check_readme(file='README.rst'):
"""
Checks readme rst file, to ensure it will upload to pypi and be formatted
correctly.
:param file:
:return:
"""
# Get the long description from the relevant file
with open(file, encoding='utf-8') as f:
readme_content = f.read()
errors = list(rstcheck.check(readme_content))
if errors:
msg = 'There are errors in {}, errors \n {}'.format(file,
errors[0].message)
raise SystemExit(msg)
else:
msg = 'No errors in {}'.format(file)
print(msg)
readme_file = path.join(here, 'README.rst')
# Get the long description from the relevant file
with open(readme_file, encoding='utf-8') as f:
long_description = f.read()
check_readme(readme_file)
# Define setuptools specifications
setup(name='nagios_sql',
version=version,
description='Nagios plugin with sqlchecks',
long_description=long_description, # this is the file README.rst
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: SQL',
'Topic :: System :: Monitoring',
'Topic :: Database :: Database Engines/Servers',
'Topic :: System :: Systems Administration'
],
url='https://github.com/pablodav/nagios_sql',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=find_packages(),
#include_package_data=True,
#package_data={
# 'data': 'src/data/*',
#},
#data_files=[('VERSION', ['src/VERSION'])],
entry_points={
'console_scripts': [
'nagios_sql = src.nagios_sql:main'
]
},
install_requires=requires,
tests_require=['pytest',
'pytest-cov'],
zip_safe=False)
| #! python3
# Help from: http://www.scotttorborg.com/python-packaging/minimal.html
# https://docs.python.org/3/distutils/commandref.html#sdist-cmd
# https://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# https://docs.python.org/3.4/tutorial/modules.html
# Install it with python setup.py install
# Or use: python setup.py develop (changes to the source files will be
# immediately available)
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
from setuptools import setup, find_packages
import os
from os import path
import rstcheck
exec(open('src/version.py').read())
# __version__ is defined by executing src/version.py above
version = __version__
here = path.abspath(path.dirname(__file__))
with open(os.path.join(here, 'requirements.txt')) as f:
requires = [x.strip() for x in f if x.strip()]
def check_readme(file='README.rst'):
"""
Checks readme rst file, to ensure it will upload to pypi and be formatted
correctly.
:param file:
:return:
"""
# Get the long description from the relevant file
with open(file, encoding='utf-8') as f:
readme_content = f.read()
errors = list(rstcheck.check(readme_content))
if errors:
msg = 'There are errors in {}, errors \n {}'.format(file,
errors[0].message)
raise SystemExit(msg)
else:
msg = 'No errors in {}'.format(file)
print(msg)
readme_file = path.join(here, 'README.rst')
# Get the long description from the relevant file
with open(readme_file, encoding='utf-8') as f:
long_description = f.read()
check_readme(readme_file)
# Define setuptools specifications
setup(name='nagios_sql',
version=version,
description='Nagios plugin with sqlchecks',
long_description=long_description, # this is the file README.rst
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: SQL',
'Topic :: System :: Monitoring',
'Topic :: Database :: Database Engines/Servers',
'Topic :: System :: Systems Administration'
],
url='https://github.com/pablodav/nagios_sql',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=find_packages(),
#include_package_data=True,
#package_data={
# 'data': 'src/data/*',
#},
#data_files=[('VERSION', ['src/VERSION'])],
entry_points={
'console_scripts': [
'nagios_sql = src.nagios_sql:main'
]
},
install_requires=requires,
tests_require=['pytest',
'pytest-cov'],
zip_safe=False) | en | 0.539492 | #! python3 # Help from: http://www.scotttorborg.com/python-packaging/minimal.html # https://docs.python.org/3/distutils/commandref.html#sdist-cmd # https://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # https://docs.python.org/3.4/tutorial/modules.html # Install it with python setup.py install # Or use: python setup.py develop (changes to the source files will be # immediately available) # https://pypi.python.org/pypi?%3Aaction=list_classifiers # __version__ comes when execution src/version.py Checks readme rst file, to ensure it will upload to pypi and be formatted correctly. :param file: :return: # Get the long description from the relevant file # Get the long description from the relevant file # Define setuptools specifications # this is the file README.rst #include_package_data=True, #package_data={ # 'data': 'src/data/*', #}, #data_files=[('VERSION', ['src/VERSION'])], | 2.072865 | 2 |
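Editor's note: a short standalone sketch of the same rstcheck-based validation that check_readme() performs above. It assumes the rstcheck version this setup.py targets (the module-level rstcheck.check() API with .message on each error); the filename is illustrative.
import rstcheck

with open('README.rst', encoding='utf-8') as f:
    errors = list(rstcheck.check(f.read()))
if errors:
    raise SystemExit('README.rst has {} rst error(s); first: {}'.format(len(errors), errors[0].message))
print('README.rst is valid reStructuredText')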
textnn/utils/test/test_progress_iterator.py | tongr/TextNN | 1 | 8128 | import io
import sys
from textnn.utils import ProgressIterator
#inspired by https://stackoverflow.com/a/34738440
def capture_sysout(cmd):
capturedOutput = io.StringIO() # Create StringIO object
sys.stdout = capturedOutput # and redirect stdout.
cmd() # Call function.
sys.stdout = sys.__stdout__ # Reset redirect.
return capturedOutput.getvalue() # Now works as before.
def test_progress_iterator():
def progress_generator():
sum(ProgressIterator([1, 2, 3], interval=0, description=""))
report = capture_sysout(cmd=progress_generator)
lines = report.strip().split("\n")
# expected result (with changing numbers):
# 1/3 [=========>....................] - ETA: 7s
# 2/3 [===================>..........] - ETA: 1s
# 3/3 [==============================] - 4s 1s/step
assert lines[0].startswith("1/3")
assert "ETA: " in lines[0]
assert lines[1].startswith("2/3")
assert "ETA: " in lines[1]
assert lines[2].startswith("3/3")
assert lines[2].endswith("s/step")
def test_progress_iterator_with_statement():
def progress_generator():
with ProgressIterator([1,2,3], interval=0, description="") as it:
sum(it)
report = capture_sysout(cmd=progress_generator)
lines = report.strip().split("\n")
# expected result (with changing numbers):
# 1/3 [=========>....................] - ETA: 7s
# 2/3 [===================>..........] - ETA: 1s
# 3/3 [==============================] - 4s 1s/step
assert lines[0].startswith("1/3")
assert "ETA: " in lines[0]
assert lines[1].startswith("2/3")
assert "ETA: " in lines[1]
assert lines[2].startswith("3/3")
assert lines[2].endswith("s/step")
| import io
import sys
from textnn.utils import ProgressIterator
#inspired by https://stackoverflow.com/a/34738440
def capture_sysout(cmd):
capturedOutput = io.StringIO() # Create StringIO object
sys.stdout = capturedOutput # and redirect stdout.
cmd() # Call function.
sys.stdout = sys.__stdout__ # Reset redirect.
return capturedOutput.getvalue() # Now works as before.
def test_progress_iterator():
def progress_generator():
sum(ProgressIterator([1, 2, 3], interval=0, description=""))
report = capture_sysout(cmd=progress_generator)
lines = report.strip().split("\n")
# expected result (with changing numbers):
# 1/3 [=========>....................] - ETA: 7s
# 2/3 [===================>..........] - ETA: 1s
# 3/3 [==============================] - 4s 1s/step
assert lines[0].startswith("1/3")
assert "ETA: " in lines[0]
assert lines[1].startswith("2/3")
assert "ETA: " in lines[1]
assert lines[2].startswith("3/3")
assert lines[2].endswith("s/step")
def test_progress_iterator_with_statement():
def progress_generator():
with ProgressIterator([1,2,3], interval=0, description="") as it:
sum(it)
report = capture_sysout(cmd=progress_generator)
lines = report.strip().split("\n")
# expected result (with changing numbers):
# 1/3 [=========>....................] - ETA: 7s
# 2/3 [===================>..........] - ETA: 1s
# 3/3 [==============================] - 4s 1s/step
assert lines[0].startswith("1/3")
assert "ETA: " in lines[0]
assert lines[1].startswith("2/3")
assert "ETA: " in lines[1]
assert lines[2].startswith("3/3")
assert lines[2].endswith("s/step")
| en | 0.696754 | #inspired by https://stackoverflow.com/a/34738440 # Create StringIO object # and redirect stdout. # Call function. # Reset redirect. # Now works as before. # expected result (with changing numbers): # 1/3 [=========>....................] - ETA: 7s # 2/3 [===================>..........] - ETA: 1s # 3/3 [==============================] - 4s 1s/step # expected result (with changing numbers): # 1/3 [=========>....................] - ETA: 7s # 2/3 [===================>..........] - ETA: 1s # 3/3 [==============================] - 4s 1s/step | 2.653333 | 3 |
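Editor's note: the capture_sysout() helper above can also be written with the standard library's contextlib.redirect_stdout; a minimal equivalent sketch:
import contextlib
import io

def capture_stdout(cmd):
    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):   # temporarily replaces sys.stdout
        cmd()
    return buf.getvalue()

print(capture_stdout(lambda: print('hello')), end='')   # prints: hello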
reach.py | NIKH0610/class5-homework | 0 | 8129 | import os
import numpy as np
import pandas as pd
housing_df = pd.read_csv(filepath_or_buffer=r'C:\Users\nikhi\NIKH0610\class5-homework\toys-datasets\boston') | import os
import numpy as np
import pandas as pd
housing_df = pd.read_csv(filepath_or_buffer=r'C:\Users\nikhi\NIKH0610\class5-homework\toys-datasets\boston') | none | 1 | 2.407378 | 2 |
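Editor's note: a more portable way to load the same file, sketched under the assumption that the 'boston' dataset sits in a toys-datasets/ folder next to the script (the layout is an assumption, not confirmed by the repo):
from pathlib import Path
import pandas as pd

data_path = Path(__file__).resolve().parent / 'toys-datasets' / 'boston'
housing_df = pd.read_csv(data_path)   # pandas accepts Path objects directly
print(housing_df.head())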
|
queries/general_queries.py | souparvo/airflow-plugins | 0 | 8130 |
def insert_metatable():
"""SQL query to insert records from table insert into a table on a DB
"""
return """
INSERT INTO TABLE {{ params.target_schema }}.{{ params.target_table }} VALUES
('{{ params.schema }}', '{{ params.table }}', {{ ti.xcom_pull(key='hive_res', task_ids=params.count_inserts)[0][0] }}, current_timestamp(), '{{ params.type }}');
""" |
def insert_metatable():
"""SQL query to insert records from table insert into a table on a DB
"""
return """
INSERT INTO TABLE {{ params.target_schema }}.{{ params.target_table }} VALUES
('{{ params.schema }}', '{{ params.table }}', {{ ti.xcom_pull(key='hive_res', task_ids=params.count_inserts)[0][0] }}, current_timestamp(), '{{ params.type }}');
""" | en | 0.148354 | SQL query to insert records from table insert into a table on a DB INSERT INTO TABLE {{ params.target_schema }}.{{ params.target_table }} VALUES ('{{ params.schema }}', '{{ params.table }}', {{ ti.xcom_pull(key='hive_res', task_ids=params.count_inserts)[0][0] }}, current_timestamp(), '{{ params.type }}'); | 2.66333 | 3 |
pyvisa_py/highlevel.py | Handfeger/pyvisa-py | 1 | 8131 | # -*- coding: utf-8 -*-
"""Highlevel wrapper of the VISA Library.
:copyright: 2014-2020 by PyVISA-py Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import random
from collections import OrderedDict
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union, cast
from pyvisa import constants, highlevel, rname
from pyvisa.constants import StatusCode
from pyvisa.typing import VISAEventContext, VISARMSession, VISASession
from pyvisa.util import LibraryPath
from . import sessions
from .common import logger
class PyVisaLibrary(highlevel.VisaLibraryBase):
"""A pure Python backend for PyVISA.
The object is basically a dispatcher with some common functions implemented.
When a new resource object is requested to pyvisa, the library creates a
Session object (that knows how to perform low-level communication operations)
associated with a session handle (a number, usually refered just as session).
A call to a library function is handled by PyVisaLibrary if it involves a
resource agnostic function or dispatched to the correct session object
(obtained from the session id).
Importantly, the user is unaware of this. PyVisaLibrary behaves for
the user just as NIVisaLibrary.
"""
#: Live session object identified by a random session ID
sessions: Dict[int, sessions.Session]
# Try to import packages implementing lower level functionality.
try:
from .serial import SerialSession
logger.debug("SerialSession was correctly imported.")
except Exception as e:
logger.debug("SerialSession was not imported %s." % e)
try:
from .usb import USBRawSession, USBSession
logger.debug("USBSession and USBRawSession were correctly imported.")
except Exception as e:
logger.debug("USBSession and USBRawSession were not imported %s." % e)
try:
from .tcpip import TCPIPInstrSession, TCPIPSocketSession
logger.debug("TCPIPSession was correctly imported.")
except Exception as e:
logger.debug("TCPIPSession was not imported %s." % e)
try:
from .gpib import GPIBSession
logger.debug("GPIBSession was correctly imported.")
except Exception as e:
logger.debug("GPIBSession was not imported %s." % e)
@staticmethod
def get_library_paths() -> Iterable[LibraryPath]:
"""List a dummy library path to allow to create the library."""
return (LibraryPath("py"),)
@staticmethod
def get_debug_info() -> Dict[str, Union[str, List[str], Dict[str, str]]]:
"""Return a list of lines with backend info."""
from . import __version__
d: OrderedDict[str, Union[str, List[str], Dict[str, str]]] = OrderedDict()
d["Version"] = "%s" % __version__
for key, val in sessions.Session.iter_valid_session_classes():
key_name = "%s %s" % (key[0].name.upper(), key[1])
d[key_name] = "Available " + val.get_low_level_info()
for key, issue in sessions.Session.iter_session_classes_issues():
key_name = "%s %s" % (key[0].name.upper(), key[1])
d[key_name] = issue.split("\n")
return d
def _init(self) -> None:
"""Custom initialization code."""
# Map session handle to session object.
self.sessions = {}
def _register(self, obj: object) -> VISASession:
"""Creates a random but unique session handle for a session object.
Register it in the sessions dictionary and return the value.
"""
session = None
while session is None or session in self.sessions:
session = random.randint(1000000, 9999999)
self.sessions[session] = obj
return session
def open(
self,
session: VISARMSession,
resource_name: str,
access_mode: constants.AccessModes = constants.AccessModes.no_lock,
open_timeout: int = constants.VI_TMO_IMMEDIATE,
) -> Tuple[VISASession, StatusCode]:
"""Opens a session to the specified resource.
Corresponds to viOpen function of the VISA library.
Parameters
----------
session : VISARMSession
Resource Manager session (should always be a session returned from
open_default_resource_manager()).
resource_name : str
Unique symbolic name of a resource.
access_mode : constants.AccessModes, optional
Specifies the mode by which the resource is to be accessed.
open_timeout : int
Specifies the maximum time period (in milliseconds) that this
operation waits before returning an error. constants.VI_TMO_IMMEDIATE
and constants.VI_TMO_INFINITE are used as min and max.
Returns
-------
VISASession
Unique logical identifier reference to a session
StatusCode
Return value of the library call.
"""
try:
open_timeout = int(open_timeout)
except ValueError:
raise ValueError(
"open_timeout (%r) must be an integer (or compatible type)"
% open_timeout
)
try:
parsed = rname.parse_resource_name(resource_name)
except rname.InvalidResourceName:
return (
VISASession(0),
self.handle_return_value(None, StatusCode.error_invalid_resource_name),
)
cls = sessions.Session.get_session_class(
parsed.interface_type_const, parsed.resource_class
)
sess = cls(session, resource_name, parsed, open_timeout)
return self._register(sess), StatusCode.success
def clear(self, session: VISASession) -> StatusCode:
"""Clears a device.
Corresponds to viClear function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
return self.handle_return_value(session, sess.clear())
def flush(
self, session: VISASession, mask: constants.BufferOperation
) -> StatusCode:
"""Flush the specified buffers.
The buffers can be associated with formatted I/O operations and/or
serial communication.
Corresponds to viFlush function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
mask : constants.BufferOperation
Specifies the action to be taken with flushing the buffer.
The values can be combined using the | operator. However multiple
operations on a single buffer cannot be combined.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
return self.handle_return_value(session, sess.flush(mask))
def gpib_command(
self, session: VISASession, command_byte: bytes
) -> Tuple[int, StatusCode]:
"""Write GPIB command bytes on the bus.
Corresponds to viGpibCommand function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
command_byte : bytes
Data to write.
Returns
-------
int
Number of written bytes
StatusCode
Return value of the library call.
"""
try:
written, st = self.sessions[session].gpib_command(command_byte)
return written, self.handle_return_value(session, st)
except KeyError:
return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
def assert_trigger(
self, session: VISASession, protocol: constants.TriggerProtocol
) -> StatusCode:
"""Assert software or hardware trigger.
Corresponds to viAssertTrigger function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
protocol : constants.TriggerProtocol
Trigger protocol to use during assertion.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].assert_trigger(protocol)
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_send_ifc(self, session: VISASession) -> StatusCode:
"""Pulse the interface clear line (IFC) for at least 100 microseconds.
Corresponds to viGpibSendIFC function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].gpib_send_ifc()
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_control_ren(
self, session: VISASession, mode: constants.RENLineOperation
) -> StatusCode:
"""Controls the state of the GPIB Remote Enable (REN) interface line.
Optionally the remote/local state of the device can also be set.
Corresponds to viGpibControlREN function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
mode : constants.RENLineOperation
State of the REN line and optionally the device remote/local state.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].gpib_control_ren(mode)
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_control_atn(
self, session: VISASession, mode: constants.ATNLineOperation
) -> StatusCode:
"""Specifies the state of the ATN line and the local active controller state.
Corresponds to viGpibControlATN function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
mode : constants.ATNLineOperation
State of the ATN line and optionally the local active controller state.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].gpib_control_atn(mode)
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_pass_control(
self, session: VISASession, primary_address: int, secondary_address: int
) -> StatusCode:
"""Tell a GPIB device to become controller in charge (CIC).
Corresponds to viGpibPassControl function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
primary_address : int
Primary address of the GPIB device to which you want to pass control.
secondary_address : int
Secondary address of the targeted GPIB device.
If the targeted device does not have a secondary address, this parameter
should contain the value Constants.VI_NO_SEC_ADDR.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session,
self.sessions[session].gpib_pass_control(
primary_address, secondary_address
),
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def read_stb(self, session: VISASession) -> Tuple[int, StatusCode]:
"""Reads a status byte of the service request.
Corresponds to viReadSTB function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
int
Service request status byte
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
stb, status_code = sess.read_stb()
return stb, self.handle_return_value(session, status_code)
def close(
self, session: Union[VISASession, VISAEventContext, VISARMSession]
) -> StatusCode:
"""Closes the specified session, event, or find list.
Corresponds to viClose function of the VISA library.
Parameters
----------
session : Union[VISASession, VISAEventContext, VISARMSession]
Unique logical identifier to a session, event, resource manager.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
# The RM session directly references the library.
if sess is not self:
return self.handle_return_value(session, sess.close())
else:
return self.handle_return_value(session, StatusCode.success)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def open_default_resource_manager(self) -> Tuple[VISARMSession, StatusCode]:
"""This function returns a session to the Default Resource Manager resource.
Corresponds to viOpenDefaultRM function of the VISA library.
Returns
-------
VISARMSession
Unique logical identifier to a Default Resource Manager session
StatusCode
Return value of the library call.
"""
return (
cast(VISARMSession, self._register(self)),
self.handle_return_value(None, StatusCode.success),
)
def list_resources(
self, session: VISARMSession, query: str = "?*::INSTR"
) -> Tuple[str, ...]:
"""Return a tuple of all connected devices matching query.
Parameters
----------
session : VISARMSession
Unique logical identifier to the resource manager session.
query : str
Regular expression used to match devices.
Returns
-------
Tuple[str, ...]
Resource names of all the connected devices matching the query.
"""
# For each session type, ask for the list of connected resources and
# merge them into a single list.
# HINT: the cast should not be necessary here
resources: List[str] = []
for key, st in sessions.Session.iter_valid_session_classes():
resources += st.list_resources()
return rname.filter(resources, query)
def read(self, session: VISASession, count: int) -> Tuple[bytes, StatusCode]:
"""Reads data from device or interface synchronously.
Corresponds to viRead function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
count : int
Number of bytes to be read.
Returns
-------
bytes
Data read
StatusCode
Return value of the library call.
"""
# from the session handle, dispatch to the read method of the session object.
try:
data, status_code = self.sessions[session].read(count)
except KeyError:
return (
b"",
self.handle_return_value(session, StatusCode.error_invalid_object),
)
return data, self.handle_return_value(session, status_code)
def write(self, session: VISASession, data: bytes) -> Tuple[int, StatusCode]:
"""Write data to device or interface synchronously.
Corresponds to viWrite function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
data : bytes
Data to be written.
Returns
-------
int
Number of bytes actually transferred
StatusCode
Return value of the library call.
"""
# from the session handle, dispatch to the write method of the session object.
try:
written, status_code = self.sessions[session].write(data)
except KeyError:
return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
return written, self.handle_return_value(session, status_code)
def buffer_read(self, session: VISASession, count: int) -> Tuple[bytes, StatusCode]:
"""Reads data through the use of a formatted I/O read buffer.
The data can be read from a device or an interface.
Corresponds to viBufRead function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
count : int
Number of bytes to be read.
Returns
-------
bytes
Data read
StatusCode
Return value of the library call.
"""
return self.read(session, count)
def buffer_write(self, session: VISASession, data: bytes) -> Tuple[int, StatusCode]:
"""Writes data to a formatted I/O write buffer synchronously.
Corresponds to viBufWrite function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
data : bytes
Data to be written.
Returns
-------
int
number of written bytes
StatusCode
return value of the library call.
"""
return self.write(session, data)
def get_attribute(
self,
session: Union[VISASession, VISAEventContext, VISARMSession],
attribute: Union[constants.ResourceAttribute, constants.EventAttribute],
) -> Tuple[Any, StatusCode]:
"""Retrieves the state of an attribute.
Corresponds to viGetAttribute function of the VISA library.
Parameters
----------
session : Union[VISASession, VISAEventContext]
Unique logical identifier to a session, event, or find list.
attribute : Union[constants.ResourceAttribute, constants.EventAttribute]
Resource or event attribute for which the state query is made.
Returns
-------
Any
State of the queried attribute for a specified resource
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return (
None,
self.handle_return_value(session, StatusCode.error_invalid_object),
)
state, status_code = sess.get_attribute(
cast(constants.ResourceAttribute, attribute)
)
return state, self.handle_return_value(session, status_code)
def set_attribute(
self,
session: VISASession,
attribute: constants.ResourceAttribute,
attribute_state: Any,
) -> StatusCode:
"""Set the state of an attribute.
Corresponds to viSetAttribute function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
attribute : constants.ResourceAttribute
Attribute for which the state is to be modified.
attribute_state : Any
The state of the attribute to be set for the specified object.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session,
self.sessions[session].set_attribute(attribute, attribute_state),
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def lock(
self,
session: VISASession,
lock_type: constants.Lock,
timeout: int,
requested_key: Optional[str] = None,
) -> Tuple[str, StatusCode]:
"""Establishes an access mode to the specified resources.
Corresponds to viLock function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
lock_type : constants.Lock
Specifies the type of lock requested.
timeout : int
Absolute time period (in milliseconds) that a resource waits to get
unlocked by the locking session before returning an error.
requested_key : Optional[str], optional
Requested locking key in the case of a shared lock. For an exclusive
lock it should be None.
Returns
-------
str
Key that can then be passed to other sessions to share the lock, or
None for an exclusive lock.
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return (
"",
self.handle_return_value(session, StatusCode.error_invalid_object),
)
key, status_code = sess.lock(lock_type, timeout, requested_key)
return key, self.handle_return_value(session, status_code)
def unlock(self, session: VISASession) -> StatusCode:
"""Relinquish a lock for the specified resource.
Corresponds to viUnlock function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
return self.handle_return_value(session, sess.unlock())
def disable_event(
self,
session: VISASession,
event_type: constants.EventType,
mechanism: constants.EventMechanism,
) -> StatusCode:
"""Disable notification for an event type(s) via the specified mechanism(s).
Corresponds to viDisableEvent function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
event_type : constants.EventType
Event type.
mechanism : constants.EventMechanism
Event handling mechanisms to be disabled.
Returns
-------
StatusCode
Return value of the library call.
"""
pass
def discard_events(
self,
session: VISASession,
event_type: constants.EventType,
mechanism: constants.EventMechanism,
) -> StatusCode:
"""Discard event occurrences for a given type and mechanisms in a session.
Corresponds to viDiscardEvents function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
event_type : constants.EventType
Logical event identifier.
mechanism : constants.EventMechanism
Specifies event handling mechanisms to be discarded.
Returns
-------
StatusCode
Return value of the library call.
"""
pass
| # -*- coding: utf-8 -*-
"""Highlevel wrapper of the VISA Library.
:copyright: 2014-2020 by PyVISA-py Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import random
from collections import OrderedDict
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union, cast
from pyvisa import constants, highlevel, rname
from pyvisa.constants import StatusCode
from pyvisa.typing import VISAEventContext, VISARMSession, VISASession
from pyvisa.util import LibraryPath
from . import sessions
from .common import logger
class PyVisaLibrary(highlevel.VisaLibraryBase):
"""A pure Python backend for PyVISA.
The object is basically a dispatcher with some common functions implemented.
When a new resource object is requested to pyvisa, the library creates a
Session object (that knows how to perform low-level communication operations)
associated with a session handle (a number, usually referred to simply as the session).
A call to a library function is handled by PyVisaLibrary if it involves a
resource agnostic function or dispatched to the correct session object
(obtained from the session id).
Importantly, the user is unaware of this. PyVisaLibrary behaves for
the user just as NIVisaLibrary.
"""
#: Live session object identified by a random session ID
sessions: Dict[int, sessions.Session]
# Try to import packages implementing lower level functionality.
try:
from .serial import SerialSession
logger.debug("SerialSession was correctly imported.")
except Exception as e:
logger.debug("SerialSession was not imported %s." % e)
try:
from .usb import USBRawSession, USBSession
logger.debug("USBSession and USBRawSession were correctly imported.")
except Exception as e:
logger.debug("USBSession and USBRawSession were not imported %s." % e)
try:
from .tcpip import TCPIPInstrSession, TCPIPSocketSession
logger.debug("TCPIPSession was correctly imported.")
except Exception as e:
logger.debug("TCPIPSession was not imported %s." % e)
try:
from .gpib import GPIBSession
logger.debug("GPIBSession was correctly imported.")
except Exception as e:
logger.debug("GPIBSession was not imported %s." % e)
@staticmethod
def get_library_paths() -> Iterable[LibraryPath]:
"""List a dummy library path to allow to create the library."""
return (LibraryPath("py"),)
@staticmethod
def get_debug_info() -> Dict[str, Union[str, List[str], Dict[str, str]]]:
"""Return a list of lines with backend info."""
from . import __version__
d: OrderedDict[str, Union[str, List[str], Dict[str, str]]] = OrderedDict()
d["Version"] = "%s" % __version__
for key, val in sessions.Session.iter_valid_session_classes():
key_name = "%s %s" % (key[0].name.upper(), key[1])
d[key_name] = "Available " + val.get_low_level_info()
for key, issue in sessions.Session.iter_session_classes_issues():
key_name = "%s %s" % (key[0].name.upper(), key[1])
d[key_name] = issue.split("\n")
return d
def _init(self) -> None:
"""Custom initialization code."""
# Map session handle to session object.
self.sessions = {}
def _register(self, obj: object) -> VISASession:
"""Creates a random but unique session handle for a session object.
Register it in the sessions dictionary and return the value.
"""
session = None
while session is None or session in self.sessions:
session = random.randint(1000000, 9999999)
self.sessions[session] = obj
return session
def open(
self,
session: VISARMSession,
resource_name: str,
access_mode: constants.AccessModes = constants.AccessModes.no_lock,
open_timeout: int = constants.VI_TMO_IMMEDIATE,
) -> Tuple[VISASession, StatusCode]:
"""Opens a session to the specified resource.
Corresponds to viOpen function of the VISA library.
Parameters
----------
session : VISARMSession
Resource Manager session (should always be a session returned from
open_default_resource_manager()).
resource_name : str
Unique symbolic name of a resource.
access_mode : constants.AccessModes, optional
Specifies the mode by which the resource is to be accessed.
open_timeout : int
Specifies the maximum time period (in milliseconds) that this
operation waits before returning an error. constants.VI_TMO_IMMEDIATE
and constants.VI_TMO_INFINITE are used as min and max.
Returns
-------
VISASession
Unique logical identifier reference to a session
StatusCode
Return value of the library call.
"""
try:
open_timeout = int(open_timeout)
except ValueError:
raise ValueError(
"open_timeout (%r) must be an integer (or compatible type)"
% open_timeout
)
try:
parsed = rname.parse_resource_name(resource_name)
except rname.InvalidResourceName:
return (
VISASession(0),
self.handle_return_value(None, StatusCode.error_invalid_resource_name),
)
cls = sessions.Session.get_session_class(
parsed.interface_type_const, parsed.resource_class
)
sess = cls(session, resource_name, parsed, open_timeout)
return self._register(sess), StatusCode.success
def clear(self, session: VISASession) -> StatusCode:
"""Clears a device.
Corresponds to viClear function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
return self.handle_return_value(session, sess.clear())
def flush(
self, session: VISASession, mask: constants.BufferOperation
) -> StatusCode:
"""Flush the specified buffers.
The buffers can be associated with formatted I/O operations and/or
serial communication.
Corresponds to viFlush function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
mask : constants.BufferOperation
Specifies the action to be taken with flushing the buffer.
The values can be combined using the | operator. However multiple
operations on a single buffer cannot be combined.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
return self.handle_return_value(session, sess.flush(mask))
def gpib_command(
self, session: VISASession, command_byte: bytes
) -> Tuple[int, StatusCode]:
"""Write GPIB command bytes on the bus.
Corresponds to viGpibCommand function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
command_byte : bytes
Data to write.
Returns
-------
int
Number of written bytes
StatusCode
Return value of the library call.
"""
try:
written, st = self.sessions[session].gpib_command(command_byte)
return written, self.handle_return_value(session, st)
except KeyError:
return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
def assert_trigger(
self, session: VISASession, protocol: constants.TriggerProtocol
) -> StatusCode:
"""Assert software or hardware trigger.
Corresponds to viAssertTrigger function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
protocol : constants.TriggerProtocol
Trigger protocol to use during assertion.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].assert_trigger(protocol)
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_send_ifc(self, session: VISASession) -> StatusCode:
"""Pulse the interface clear line (IFC) for at least 100 microseconds.
Corresponds to viGpibSendIFC function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].gpib_send_ifc()
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_control_ren(
self, session: VISASession, mode: constants.RENLineOperation
) -> StatusCode:
"""Controls the state of the GPIB Remote Enable (REN) interface line.
Optionally the remote/local state of the device can also be set.
Corresponds to viGpibControlREN function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
mode : constants.RENLineOperation
State of the REN line and optionally the device remote/local state.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].gpib_control_ren(mode)
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_control_atn(
self, session: VISASession, mode: constants.ATNLineOperation
) -> StatusCode:
"""Specifies the state of the ATN line and the local active controller state.
Corresponds to viGpibControlATN function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
mode : constants.ATNLineOperation
State of the ATN line and optionally the local active controller state.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session, self.sessions[session].gpib_control_atn(mode)
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def gpib_pass_control(
self, session: VISASession, primary_address: int, secondary_address: int
) -> StatusCode:
"""Tell a GPIB device to become controller in charge (CIC).
Corresponds to viGpibPassControl function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
primary_address : int
Primary address of the GPIB device to which you want to pass control.
secondary_address : int
Secondary address of the targeted GPIB device.
If the targeted device does not have a secondary address, this parameter
should contain the value Constants.VI_NO_SEC_ADDR.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session,
self.sessions[session].gpib_pass_control(
primary_address, secondary_address
),
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def read_stb(self, session: VISASession) -> Tuple[int, StatusCode]:
"""Reads a status byte of the service request.
Corresponds to viReadSTB function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
int
Service request status byte
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
stb, status_code = sess.read_stb()
return stb, self.handle_return_value(session, status_code)
def close(
self, session: Union[VISASession, VISAEventContext, VISARMSession]
) -> StatusCode:
"""Closes the specified session, event, or find list.
Corresponds to viClose function of the VISA library.
Parameters
        ----------
session : Union[VISASession, VISAEventContext, VISARMSession]
Unique logical identifier to a session, event, resource manager.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
# The RM session directly references the library.
if sess is not self:
return self.handle_return_value(session, sess.close())
else:
return self.handle_return_value(session, StatusCode.success)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
def open_default_resource_manager(self) -> Tuple[VISARMSession, StatusCode]:
"""This function returns a session to the Default Resource Manager resource.
Corresponds to viOpenDefaultRM function of the VISA library.
Returns
-------
VISARMSession
Unique logical identifier to a Default Resource Manager session
StatusCode
Return value of the library call.
"""
return (
cast(VISARMSession, self._register(self)),
self.handle_return_value(None, StatusCode.success),
)
def list_resources(
self, session: VISARMSession, query: str = "?*::INSTR"
) -> Tuple[str, ...]:
"""Return a tuple of all connected devices matching query.
Parameters
----------
session : VISARMSession
Unique logical identifier to the resource manager session.
query : str
Regular expression used to match devices.
Returns
-------
Tuple[str, ...]
Resource names of all the connected devices matching the query.
"""
# For each session type, ask for the list of connected resources and
# merge them into a single list.
# HINT: the cast should not be necessary here
resources: List[str] = []
for key, st in sessions.Session.iter_valid_session_classes():
resources += st.list_resources()
return rname.filter(resources, query)
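    # Example (sketch): with the pure Python backend selected ("@py"), the
    # discovery above is what backs the high-level call
    #
    #     import pyvisa
    #     rm = pyvisa.ResourceManager('@py')
    #     rm.list_resources('?*::INSTR')
    #
    # 'rm' is a hypothetical user-side ResourceManager instance, not part of
    # this module.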
def read(self, session: VISASession, count: int) -> Tuple[bytes, StatusCode]:
"""Reads data from device or interface synchronously.
Corresponds to viRead function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
count : int
Number of bytes to be read.
Returns
-------
bytes
            Data read
StatusCode
Return value of the library call.
"""
# from the session handle, dispatch to the read method of the session object.
try:
data, status_code = self.sessions[session].read(count)
except KeyError:
return (
b"",
self.handle_return_value(session, StatusCode.error_invalid_object),
)
return data, self.handle_return_value(session, status_code)
def write(self, session: VISASession, data: bytes) -> Tuple[int, StatusCode]:
"""Write data to device or interface synchronously.
Corresponds to viWrite function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
data : bytes
Data to be written.
Returns
-------
int
Number of bytes actually transferred
StatusCode
Return value of the library call.
"""
# from the session handle, dispatch to the write method of the session object.
try:
written, status_code = self.sessions[session].write(data)
except KeyError:
return 0, self.handle_return_value(session, StatusCode.error_invalid_object)
return written, self.handle_return_value(session, status_code)
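    # Example (sketch, assuming 'lib' is a PyVisaLibrary instance and 'session'
    # is a valid session handle returned by lib.open(...)):
    #
    #     written, status = lib.write(session, b'*IDN?\n')
    #     reply, status = lib.read(session, 256)
    #
    # Both calls simply look up the Session object registered under 'session'
    # and dispatch to its read/write implementation.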
def buffer_read(self, session: VISASession, count: int) -> Tuple[bytes, StatusCode]:
"""Reads data through the use of a formatted I/O read buffer.
The data can be read from a device or an interface.
Corresponds to viBufRead function of the VISA library.
Parameters
----------
        session : VISASession
Unique logical identifier to a session.
count : int
Number of bytes to be read.
Returns
-------
bytes
Data read
StatusCode
Return value of the library call.
"""
return self.read(session, count)
def buffer_write(self, session: VISASession, data: bytes) -> Tuple[int, StatusCode]:
"""Writes data to a formatted I/O write buffer synchronously.
Corresponds to viBufWrite function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
data : bytes
Data to be written.
Returns
-------
int
            Number of written bytes
        StatusCode
            Return value of the library call.
"""
return self.write(session, data)
def get_attribute(
self,
session: Union[VISASession, VISAEventContext, VISARMSession],
attribute: Union[constants.ResourceAttribute, constants.EventAttribute],
) -> Tuple[Any, StatusCode]:
"""Retrieves the state of an attribute.
Corresponds to viGetAttribute function of the VISA library.
Parameters
----------
session : Union[VISASession, VISAEventContext]
Unique logical identifier to a session, event, or find list.
attribute : Union[constants.ResourceAttribute, constants.EventAttribute]
Resource or event attribute for which the state query is made.
Returns
-------
Any
State of the queried attribute for a specified resource
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return (
None,
self.handle_return_value(session, StatusCode.error_invalid_object),
)
state, status_code = sess.get_attribute(
cast(constants.ResourceAttribute, attribute)
)
return state, self.handle_return_value(session, status_code)
def set_attribute(
self,
session: VISASession,
attribute: constants.ResourceAttribute,
attribute_state: Any,
) -> StatusCode:
"""Set the state of an attribute.
Corresponds to viSetAttribute function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
attribute : constants.ResourceAttribute
Attribute for which the state is to be modified.
attribute_state : Any
The state of the attribute to be set for the specified object.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
return self.handle_return_value(
session,
self.sessions[session].set_attribute(attribute, attribute_state),
)
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
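    # Example (sketch): round-tripping the I/O timeout through the generic
    # accessors above. The attribute name follows pyvisa.constants and is an
    # assumption for the example.
    #
    #     value, status = lib.get_attribute(
    #         session, constants.ResourceAttribute.timeout_value)
    #     status = lib.set_attribute(
    #         session, constants.ResourceAttribute.timeout_value, 5000)
    #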
def lock(
self,
session: VISASession,
lock_type: constants.Lock,
timeout: int,
requested_key: Optional[str] = None,
) -> Tuple[str, StatusCode]:
"""Establishes an access mode to the specified resources.
Corresponds to viLock function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
lock_type : constants.Lock
Specifies the type of lock requested.
timeout : int
Absolute time period (in milliseconds) that a resource waits to get
unlocked by the locking session before returning an error.
requested_key : Optional[str], optional
Requested locking key in the case of a shared lock. For an exclusive
lock it should be None.
Returns
-------
str
Key that can then be passed to other sessions to share the lock, or
None for an exclusive lock.
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return (
"",
self.handle_return_value(session, StatusCode.error_invalid_object),
)
key, status_code = sess.lock(lock_type, timeout, requested_key)
return key, self.handle_return_value(session, status_code)
def unlock(self, session: VISASession) -> StatusCode:
"""Relinquish a lock for the specified resource.
Corresponds to viUnlock function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
Returns
-------
StatusCode
Return value of the library call.
"""
try:
sess = self.sessions[session]
except KeyError:
return self.handle_return_value(session, StatusCode.error_invalid_object)
return self.handle_return_value(session, sess.unlock())
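    # Example (sketch): pairing lock()/unlock() for an exclusive lock. The
    # 'lib'/'session' names are assumptions for the example.
    #
    #     key, status = lib.lock(session, constants.Lock.exclusive, 5000)
    #     try:
    #         ...  # perform I/O while holding the lock
    #     finally:
    #         lib.unlock(session)
    #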
def disable_event(
self,
session: VISASession,
event_type: constants.EventType,
mechanism: constants.EventMechanism,
) -> StatusCode:
"""Disable notification for an event type(s) via the specified mechanism(s).
Corresponds to viDisableEvent function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
event_type : constants.EventType
Event type.
mechanism : constants.EventMechanism
Event handling mechanisms to be disabled.
Returns
-------
StatusCode
Return value of the library call.
"""
pass
def discard_events(
self,
session: VISASession,
event_type: constants.EventType,
mechanism: constants.EventMechanism,
) -> StatusCode:
"""Discard event occurrences for a given type and mechanisms in a session.
Corresponds to viDiscardEvents function of the VISA library.
Parameters
----------
session : VISASession
Unique logical identifier to a session.
        event_type : constants.EventType
Logical event identifier.
mechanism : constants.EventMechanism
Specifies event handling mechanisms to be discarded.
Returns
-------
StatusCode
Return value of the library call.
"""
pass
| en | 0.661742 | # -*- coding: utf-8 -*- Highlevel wrapper of the VISA Library. :copyright: 2014-2020 by PyVISA-py Authors, see AUTHORS for more details. :license: MIT, see LICENSE for more details. A pure Python backend for PyVISA. The object is basically a dispatcher with some common functions implemented. When a new resource object is requested to pyvisa, the library creates a Session object (that knows how to perform low-level communication operations) associated with a session handle (a number, usually refered just as session). A call to a library function is handled by PyVisaLibrary if it involves a resource agnostic function or dispatched to the correct session object (obtained from the session id). Importantly, the user is unaware of this. PyVisaLibrary behaves for the user just as NIVisaLibrary. #: Live session object identified by a randon session ID # Try to import packages implementing lower level functionality. List a dummy library path to allow to create the library. Return a list of lines with backend info. Custom initialization code. # Map session handle to session object. Creates a random but unique session handle for a session object. Register it in the sessions dictionary and return the value. Opens a session to the specified resource. Corresponds to viOpen function of the VISA library. Parameters ---------- session : VISARMSession Resource Manager session (should always be a session returned from open_default_resource_manager()). resource_name : str Unique symbolic name of a resource. access_mode : constants.AccessModes, optional Specifies the mode by which the resource is to be accessed. open_timeout : int Specifies the maximum time period (in milliseconds) that this operation waits before returning an error. constants.VI_TMO_IMMEDIATE and constants.VI_TMO_INFINITE are used as min and max. Returns ------- VISASession Unique logical identifier reference to a session StatusCode Return value of the library call. Clears a device. Corresponds to viClear function of the VISA library. Parameters ---------- session : typin.VISASession Unique logical identifier to a session. Returns ------- StatusCode Return value of the library call. Flush the specified buffers. The buffers can be associated with formatted I/O operations and/or serial communication. Corresponds to viFlush function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. mask : constants.BufferOperation Specifies the action to be taken with flushing the buffer. The values can be combined using the | operator. However multiple operations on a single buffer cannot be combined. Returns ------- StatusCode Return value of the library call. Write GPIB command bytes on the bus. Corresponds to viGpibCommand function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. command_byte : bytes Data to write. Returns ------- int Number of written bytes StatusCode Return value of the library call. Assert software or hardware trigger. Corresponds to viAssertTrigger function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. protocol : constants.TriggerProtocol Trigger protocol to use during assertion. Returns ------- StatusCode Return value of the library call. Pulse the interface clear line (IFC) for at least 100 microseconds. Corresponds to viGpibSendIFC function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. 
Returns ------- StatusCode Return value of the library call. Controls the state of the GPIB Remote Enable (REN) interface line. Optionally the remote/local state of the device can also be set. Corresponds to viGpibControlREN function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. mode : constants.RENLineOperation State of the REN line and optionally the device remote/local state. Returns ------- StatusCode Return value of the library call. Specifies the state of the ATN line and the local active controller state. Corresponds to viGpibControlATN function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. mode : constants.ATNLineOperation State of the ATN line and optionally the local active controller state. Returns ------- StatusCode Return value of the library call. Tell a GPIB device to become controller in charge (CIC). Corresponds to viGpibPassControl function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. primary_address : int Primary address of the GPIB device to which you want to pass control. secondary_address : int Secondary address of the targeted GPIB device. If the targeted device does not have a secondary address, this parameter should contain the value Constants.VI_NO_SEC_ADDR. Returns ------- StatusCode Return value of the library call. Reads a status byte of the service request. Corresponds to viReadSTB function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. Returns ------- int Service request status byte StatusCode Return value of the library call. Closes the specified session, event, or find list. Corresponds to viClose function of the VISA library. Parameters --------- session : Union[VISASession, VISAEventContext, VISARMSession] Unique logical identifier to a session, event, resource manager. Returns ------- StatusCode Return value of the library call. # The RM session directly references the library. This function returns a session to the Default Resource Manager resource. Corresponds to viOpenDefaultRM function of the VISA library. Returns ------- VISARMSession Unique logical identifier to a Default Resource Manager session StatusCode Return value of the library call. Return a tuple of all connected devices matching query. Parameters ---------- session : VISARMSession Unique logical identifier to the resource manager session. query : str Regular expression used to match devices. Returns ------- Tuple[str, ...] Resource names of all the connected devices matching the query. # For each session type, ask for the list of connected resources and # merge them into a single list. # HINT: the cast should not be necessary here Reads data from device or interface synchronously. Corresponds to viRead function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. count : int Number of bytes to be read. Returns ------- bytes Date read StatusCode Return value of the library call. # from the session handle, dispatch to the read method of the session object. Write data to device or interface synchronously. Corresponds to viWrite function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. data : bytes Data to be written. Returns ------- int Number of bytes actually transferred StatusCode Return value of the library call. 
# from the session handle, dispatch to the write method of the session object. Reads data through the use of a formatted I/O read buffer. The data can be read from a device or an interface. Corresponds to viBufRead function of the VISA library. Parameters ---------- session : VISASession\ Unique logical identifier to a session. count : int Number of bytes to be read. Returns ------- bytes Data read StatusCode Return value of the library call. Writes data to a formatted I/O write buffer synchronously. Corresponds to viBufWrite function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. data : bytes Data to be written. Returns ------- int number of written bytes StatusCode return value of the library call. Retrieves the state of an attribute. Corresponds to viGetAttribute function of the VISA library. Parameters ---------- session : Union[VISASession, VISAEventContext] Unique logical identifier to a session, event, or find list. attribute : Union[constants.ResourceAttribute, constants.EventAttribute] Resource or event attribute for which the state query is made. Returns ------- Any State of the queried attribute for a specified resource StatusCode Return value of the library call. Set the state of an attribute. Corresponds to viSetAttribute function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. attribute : constants.ResourceAttribute Attribute for which the state is to be modified. attribute_state : Any The state of the attribute to be set for the specified object. Returns ------- StatusCode Return value of the library call. Establishes an access mode to the specified resources. Corresponds to viLock function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. lock_type : constants.Lock Specifies the type of lock requested. timeout : int Absolute time period (in milliseconds) that a resource waits to get unlocked by the locking session before returning an error. requested_key : Optional[str], optional Requested locking key in the case of a shared lock. For an exclusive lock it should be None. Returns ------- str Key that can then be passed to other sessions to share the lock, or None for an exclusive lock. StatusCode Return value of the library call. Relinquish a lock for the specified resource. Corresponds to viUnlock function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. Returns ------- StatusCode Return value of the library call. Disable notification for an event type(s) via the specified mechanism(s). Corresponds to viDisableEvent function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. event_type : constants.EventType Event type. mechanism : constants.EventMechanism Event handling mechanisms to be disabled. Returns ------- StatusCode Return value of the library call. Discard event occurrences for a given type and mechanisms in a session. Corresponds to viDiscardEvents function of the VISA library. Parameters ---------- session : VISASession Unique logical identifier to a session. event_type : constans.EventType Logical event identifier. mechanism : constants.EventMechanism Specifies event handling mechanisms to be discarded. Returns ------- StatusCode Return value of the library call. | 2.094589 | 2 |
detectron/utils/webly_vis.py | sisrfeng/NA-fWebSOD | 23 | 8132 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import numpy as np
import os
import math
from PIL import Image, ImageDraw, ImageFont
from caffe2.python import workspace
from detectron.core.config import cfg
from detectron.core.config import get_output_dir
def vis_training(cur_iter):
prefix = ''
if cfg.WEBLY.MINING:
prefix = 'mining_'
if not (cfg.WSL.DEBUG or
(cfg.WSL.SAMPLE and cur_iter % cfg.WSL.SAMPLE_ITER == 0)):
return
output_dir = get_output_dir(cfg.TRAIN.DATASETS, training=True)
sample_dir = os.path.join(output_dir, 'webly_sample')
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
for gpu_id in range(cfg.NUM_GPUS):
data_ids = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'data_ids'))
ims = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'data'))
labels_oh = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, 'labels_oh'))
im_score = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'cls_prob'))
roi_score = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred'))
# roi_score_softmax = workspace.FetchBlob('gpu_{}/{}'.format(
# gpu_id, prefix + 'rois_pred_softmax'))
rois = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, prefix + 'rois'))
# anchor_argmax = workspace.FetchBlob('gpu_{}/{}'.format(
# gpu_id, 'anchor_argmax'))
preffix = 'iter_' + str(cur_iter) + '_gpu_' + str(gpu_id)
save_im(labels_oh, im_score, ims, cfg.PIXEL_MEANS, preffix, sample_dir)
save_rois(labels_oh, im_score, roi_score, ims, rois, cfg.PIXEL_MEANS,
preffix, '', sample_dir)
# continue
if cfg.WEBLY.ENTROPY:
pass
else:
continue
class_weight = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_class_weight'))
rois_pred_hatE = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_hatE'))
rois_pred_E = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_E'))
y_logN__logy = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_y_logN__logy'))
save_entropy(labels_oh, im_score, class_weight, roi_score, ims, rois,
cfg.PIXEL_MEANS, preffix, '', sample_dir, rois_pred_hatE,
rois_pred_E, y_logN__logy)
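# Minimal sketch of the per-GPU blob access pattern used in vis_training().
# The helper name is hypothetical; it only wraps the workspace.FetchBlob call
# already used above.
def _fetch_gpu_blob(gpu_id, blob_name, prefix=''):
    """Fetch a blob scoped to a given GPU from the Caffe2 workspace."""
    return workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, prefix + blob_name))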
def save_im(labels_oh, im_score, ims, pixel_means, prefix, output_dir):
batch_size, num_classes = im_score.shape
for b in range(batch_size):
for c in range(num_classes):
# if labels_oh[b][c] == 0.0:
# continue
if im_score[b][c] < 0.1:
continue
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '.png')
cv2.imwrite(file_name, im)
def save_rois(labels_oh, im_score, roi_score, ims, rois, pixel_means, prefix,
suffix, output_dir):
num_rois, num_classes = roi_score.shape
batch_size, _, height, weight = ims.shape
has_bg = False
num_rois_this = min(500, num_rois)
for b in range(batch_size):
for c in range(num_classes):
# if labels_oh[b][c] == 0.0:
# continue
if im_score[b][c] < 0.1:
if has_bg:
continue
has_bg = True
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
im_S = im.copy()
im_A = im.copy()
argsort = np.argsort(-np.abs(roi_score[:, c]))
argsort = argsort[:num_rois_this]
argsort = argsort[::-1]
if im_score[b][c] < 0.1:
scale_p = 1.0
else:
scale_p = 1.0 / roi_score[:, c].max()
for n in range(num_rois_this):
roi = rois[argsort[n]]
if roi[0] != b:
continue
if roi_score[argsort[n]][c] * scale_p < 0.4:
thickness = 3
else:
thickness = 6
jet = gray2jet(roi_score[argsort[n]][c] * scale_p)
cv2.rectangle(im_S, (roi[1], roi[2]), (roi[3], roi[4]), jet, thickness)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_' +
suffix + '.png')
cv2.imwrite(file_name, im_S)
continue
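            # NOTE: everything below in this loop is unreachable because of the
            # 'continue' above; it also relies on 'anchor_argmax', whose
            # FetchBlob call is commented out in vis_training().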
num_anchors = anchor_argmax.shape[0]
for n in range(num_rois):
roi = rois[n]
if roi[0] != b:
continue
for a in range(num_anchors):
if anchor_argmax[a][n] == 1.0:
break
jet = gray2jet(1.0 * a / num_anchors)
cv2.rectangle(im_A, (roi[1], roi[2]), (roi[3], roi[4]), jet, 1)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_A_' +
suffix + '.png')
cv2.imwrite(file_name, im_A)
def save_entropy(labels_oh, im_score, class_weight, roi_score, ims, rois,
pixel_means, prefix, suffix, output_dir, rois_pred_hatE,
rois_pred_E, y_logN__logy):
num_rois, num_classes = roi_score.shape
batch_size, _, height, weight = ims.shape
rois_pred_E_sum = np.sum(rois_pred_E, axis=0).reshape(1, -1)
E_sum_norm = np.true_divide(rois_pred_E_sum, y_logN__logy)
E_sum_norm = np.where(E_sum_norm > 1., 1., E_sum_norm)
E_class_weight = 1 - E_sum_norm
for b in range(batch_size):
for c in range(num_classes):
if labels_oh[b][c] == 0.0 and im_score[b][c] < 0.1:
continue
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
im_S = im.copy()
im_A = im.copy()
im_hatE = im.copy()
im_E = im.copy()
_NUM = 10
argsort_roi = np.argsort(roi_score[:, c])[::-1]
argsort_hatE = np.argsort(rois_pred_hatE[:, c])[::-1]
argsort_E = np.argsort(rois_pred_E[:, c])[::-1]
if len(argsort_roi) >= _NUM:
_NUM = 10
else:
_NUM = len(argsort_roi)
argsort_roi = argsort_roi[:_NUM][::-1]
argsort_hatE = argsort_hatE[:_NUM][::-1]
argsort_E = argsort_E[:_NUM][::-1]
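            # Override: visualize hatE/E values on the same top proposals that
            # were selected by roi_score, rather than by their own rankings.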
argsort_hatE = argsort_roi
argsort_E = argsort_roi
scale_p = 1.0 / roi_score[:, c].max()
scale_p = 1.0
for n in range(_NUM):
roi = rois[argsort_roi[n]]
hatE_roi = rois[argsort_hatE[n]]
E_roi = rois[argsort_E[n]]
if roi[0] != b:
continue
# draw roi
jet = gray2jet(roi_score[argsort_roi[n]][c] * scale_p)
bgr = jet
rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_S, (roi[1], roi[2]), (roi[3], roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
text = "{:.4f}".format(roi_score[argsort_roi[n]][c])
im_S = putText_with_TNR(im_S, int(roi[1]), int(roi[2]), 15,
jet, rgb, text)
if hatE_roi[0] != b:
continue
# draw rois_pred_hatE
# jet = gray2jet(rois_pred_hatE[argsort_hatE[n]][c] * scale_p)
# bgr = jet
# rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_hatE, (hatE_roi[1], hatE_roi[2]),
(hatE_roi[3], hatE_roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
# put Text hat_E
text = "{:.4f}".format(rois_pred_hatE[argsort_hatE[n]][c])
im_hatE = putText_with_TNR(im_hatE, int(hatE_roi[1]),
int(hatE_roi[2]), 15, jet, rgb,
text)
if E_roi[0] != b:
continue
# draw rois_pred_E
# jet = gray2jet(rois_pred_E[argsort_E[n]][c] * scale_p)
# bgr = jet
# rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_E, (E_roi[1], E_roi[2]), (E_roi[3], E_roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
# put Text E
text = "{:.4f}".format(rois_pred_E[argsort_E[n]][c])
im_E = putText_with_TNR(im_E, int(E_roi[1]), int(E_roi[2]), 15,
jet, rgb, text)
# write im_score
text = "{:.4f}".format(im_score[b][c])
im_S = putText_with_TNR(im_S, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
# write class_weight
text = "{:.4f}".format(class_weight[b][c])
im_hatE = putText_with_TNR(im_hatE, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
# write class_weight
text = "{:.4f}".format(E_class_weight[b][c])
im_E = putText_with_TNR(im_E, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
file_name_roi = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_roi' +
suffix + '.png')
cv2.imwrite(file_name_roi, im_S)
file_name_hatE = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) +
'_hatE' + suffix + '.png')
cv2.imwrite(file_name_hatE, im_hatE)
file_name_E = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_E' +
suffix + '.png')
cv2.imwrite(file_name_E, im_E)
def dump_proto_files(model, output_dir):
"""Save prototxt descriptions of the training network and parameter
initialization network."""
with open(os.path.join(output_dir, model.net.Proto().name), 'w') as fid:
fid.write(str(model.net.Proto()))
with open(os.path.join(output_dir,
model.param_init_net.Proto().name), 'w') as fid:
fid.write(str(model.param_init_net.Proto()))
def gray2jet(f):
# plot short rainbow RGB
a = f / 0.25 # invert and group
X = math.floor(a) # this is the integer part
Y = math.floor(255 * (a - X)) # fractional part from 0 to 255
Z = math.floor(128 * (a - X)) # fractional part from 0 to 128
if X == 0:
r = 0
g = Y
b = 128 - Z
elif X == 1:
r = Y
g = 255
b = 0
elif X == 2:
r = 255
g = 255 - Z
b = 0
elif X == 3:
r = 255
g = 128 - Z
b = 0
elif X == 4:
r = 255
g = 0
b = 0
# opencv is bgr, not rgb
return (b, g, r)
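# Illustrative sketch (not used by the pipeline): render gray2jet() into a small
# BGR colorbar image so the score-to-color mapping can be inspected visually.
def _make_jet_colorbar(width=256, height=20):
    bar = np.zeros((height, width, 3), dtype=np.uint8)
    for i in range(width):
        bar[:, i, :] = gray2jet(i / float(width - 1))
    return bar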
def putText_with_TNR(img, x, y, size, fontColor, bgColor, string):
thickness = 2
font_scale = 1.1
font = cv2.FONT_HERSHEY_SIMPLEX
s = cv2.getTextSize(string, font, font_scale, thickness)
cv2.rectangle(
img,
(x + thickness, y + thickness),
(x + thickness + s[0][0] + 2, y + thickness + s[0][1] + 2),
# (0, 140, 255),
fontColor,
cv2.FILLED,
lineType=cv2.LINE_AA)
position = (x + thickness + 1, y + thickness + s[0][1] + 1)
cv2.putText(img, string, position, font, font_scale, (255, 255, 255),
thickness, cv2.LINE_AA)
return img
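    # NOTE: the PIL/TrueType code below is unreachable because of the 'return'
    # above; it is kept as an alternative text renderer for reference.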
# from OpenCV to PIL
font = "/home/chenzhiwei/Documents/myFonts/timesnewroman.ttf"
img_PIL = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
font = ImageFont.truetype(font, size)
position = (x + 3, y - 2)
draw = ImageDraw.Draw(img_PIL)
offsetx, offsety = font.getoffset(string)
width, height = font.getsize(string)
draw.rectangle((offsetx + x + 2, offsety + y - 3, offsetx + x + width + 3,
offsety + y + height - 3),
fill=bgColor)
draw.text(position, string, font=font, fill=fontColor)
# back to OpenCV type
img_OpenCV = cv2.cvtColor(np.asarray(img_PIL), cv2.COLOR_RGB2BGR)
return img_OpenCV
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import numpy as np
import os
import math
from PIL import Image, ImageDraw, ImageFont
from caffe2.python import workspace
from detectron.core.config import cfg
from detectron.core.config import get_output_dir
def vis_training(cur_iter):
prefix = ''
if cfg.WEBLY.MINING:
prefix = 'mining_'
if not (cfg.WSL.DEBUG or
(cfg.WSL.SAMPLE and cur_iter % cfg.WSL.SAMPLE_ITER == 0)):
return
output_dir = get_output_dir(cfg.TRAIN.DATASETS, training=True)
sample_dir = os.path.join(output_dir, 'webly_sample')
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
for gpu_id in range(cfg.NUM_GPUS):
data_ids = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'data_ids'))
ims = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'data'))
labels_oh = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, 'labels_oh'))
im_score = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, 'cls_prob'))
roi_score = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred'))
# roi_score_softmax = workspace.FetchBlob('gpu_{}/{}'.format(
# gpu_id, prefix + 'rois_pred_softmax'))
rois = workspace.FetchBlob('gpu_{}/{}'.format(gpu_id, prefix + 'rois'))
# anchor_argmax = workspace.FetchBlob('gpu_{}/{}'.format(
# gpu_id, 'anchor_argmax'))
preffix = 'iter_' + str(cur_iter) + '_gpu_' + str(gpu_id)
save_im(labels_oh, im_score, ims, cfg.PIXEL_MEANS, preffix, sample_dir)
save_rois(labels_oh, im_score, roi_score, ims, rois, cfg.PIXEL_MEANS,
preffix, '', sample_dir)
# continue
if cfg.WEBLY.ENTROPY:
pass
else:
continue
class_weight = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_class_weight'))
rois_pred_hatE = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_hatE'))
rois_pred_E = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_E'))
y_logN__logy = workspace.FetchBlob('gpu_{}/{}'.format(
gpu_id, prefix + 'rois_pred_y_logN__logy'))
save_entropy(labels_oh, im_score, class_weight, roi_score, ims, rois,
cfg.PIXEL_MEANS, preffix, '', sample_dir, rois_pred_hatE,
rois_pred_E, y_logN__logy)
def save_im(labels_oh, im_score, ims, pixel_means, prefix, output_dir):
batch_size, num_classes = im_score.shape
for b in range(batch_size):
for c in range(num_classes):
# if labels_oh[b][c] == 0.0:
# continue
if im_score[b][c] < 0.1:
continue
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '.png')
cv2.imwrite(file_name, im)
def save_rois(labels_oh, im_score, roi_score, ims, rois, pixel_means, prefix,
suffix, output_dir):
num_rois, num_classes = roi_score.shape
batch_size, _, height, weight = ims.shape
has_bg = False
num_rois_this = min(500, num_rois)
for b in range(batch_size):
for c in range(num_classes):
# if labels_oh[b][c] == 0.0:
# continue
if im_score[b][c] < 0.1:
if has_bg:
continue
has_bg = True
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
im_S = im.copy()
im_A = im.copy()
argsort = np.argsort(-np.abs(roi_score[:, c]))
argsort = argsort[:num_rois_this]
argsort = argsort[::-1]
if im_score[b][c] < 0.1:
scale_p = 1.0
else:
scale_p = 1.0 / roi_score[:, c].max()
for n in range(num_rois_this):
roi = rois[argsort[n]]
if roi[0] != b:
continue
if roi_score[argsort[n]][c] * scale_p < 0.4:
thickness = 3
else:
thickness = 6
jet = gray2jet(roi_score[argsort[n]][c] * scale_p)
cv2.rectangle(im_S, (roi[1], roi[2]), (roi[3], roi[4]), jet, thickness)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_' +
suffix + '.png')
cv2.imwrite(file_name, im_S)
continue
num_anchors = anchor_argmax.shape[0]
for n in range(num_rois):
roi = rois[n]
if roi[0] != b:
continue
for a in range(num_anchors):
if anchor_argmax[a][n] == 1.0:
break
jet = gray2jet(1.0 * a / num_anchors)
cv2.rectangle(im_A, (roi[1], roi[2]), (roi[3], roi[4]), jet, 1)
file_name = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_A_' +
suffix + '.png')
cv2.imwrite(file_name, im_A)
def save_entropy(labels_oh, im_score, class_weight, roi_score, ims, rois,
pixel_means, prefix, suffix, output_dir, rois_pred_hatE,
rois_pred_E, y_logN__logy):
num_rois, num_classes = roi_score.shape
batch_size, _, height, weight = ims.shape
rois_pred_E_sum = np.sum(rois_pred_E, axis=0).reshape(1, -1)
E_sum_norm = np.true_divide(rois_pred_E_sum, y_logN__logy)
E_sum_norm = np.where(E_sum_norm > 1., 1., E_sum_norm)
E_class_weight = 1 - E_sum_norm
for b in range(batch_size):
for c in range(num_classes):
if labels_oh[b][c] == 0.0 and im_score[b][c] < 0.1:
continue
im = ims[b, :, :, :].copy()
channel_swap = (1, 2, 0)
im = im.transpose(channel_swap)
im += pixel_means
im = im.astype(np.uint8)
im_S = im.copy()
im_A = im.copy()
im_hatE = im.copy()
im_E = im.copy()
_NUM = 10
argsort_roi = np.argsort(roi_score[:, c])[::-1]
argsort_hatE = np.argsort(rois_pred_hatE[:, c])[::-1]
argsort_E = np.argsort(rois_pred_E[:, c])[::-1]
if len(argsort_roi) >= _NUM:
_NUM = 10
else:
_NUM = len(argsort_roi)
argsort_roi = argsort_roi[:_NUM][::-1]
argsort_hatE = argsort_hatE[:_NUM][::-1]
argsort_E = argsort_E[:_NUM][::-1]
argsort_hatE = argsort_roi
argsort_E = argsort_roi
scale_p = 1.0 / roi_score[:, c].max()
scale_p = 1.0
for n in range(_NUM):
roi = rois[argsort_roi[n]]
hatE_roi = rois[argsort_hatE[n]]
E_roi = rois[argsort_E[n]]
if roi[0] != b:
continue
# draw roi
jet = gray2jet(roi_score[argsort_roi[n]][c] * scale_p)
bgr = jet
rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_S, (roi[1], roi[2]), (roi[3], roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
text = "{:.4f}".format(roi_score[argsort_roi[n]][c])
im_S = putText_with_TNR(im_S, int(roi[1]), int(roi[2]), 15,
jet, rgb, text)
if hatE_roi[0] != b:
continue
# draw rois_pred_hatE
# jet = gray2jet(rois_pred_hatE[argsort_hatE[n]][c] * scale_p)
# bgr = jet
# rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_hatE, (hatE_roi[1], hatE_roi[2]),
(hatE_roi[3], hatE_roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
# put Text hat_E
text = "{:.4f}".format(rois_pred_hatE[argsort_hatE[n]][c])
im_hatE = putText_with_TNR(im_hatE, int(hatE_roi[1]),
int(hatE_roi[2]), 15, jet, rgb,
text)
if E_roi[0] != b:
continue
# draw rois_pred_E
# jet = gray2jet(rois_pred_E[argsort_E[n]][c] * scale_p)
# bgr = jet
# rgb = (jet[2], jet[1], jet[0])
# roi location
cv2.rectangle(im_E, (E_roi[1], E_roi[2]), (E_roi[3], E_roi[4]),
bgr,
2,
lineType=cv2.LINE_AA)
# put Text E
text = "{:.4f}".format(rois_pred_E[argsort_E[n]][c])
im_E = putText_with_TNR(im_E, int(E_roi[1]), int(E_roi[2]), 15,
jet, rgb, text)
# write im_score
text = "{:.4f}".format(im_score[b][c])
im_S = putText_with_TNR(im_S, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
# write class_weight
text = "{:.4f}".format(class_weight[b][c])
im_hatE = putText_with_TNR(im_hatE, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
# write class_weight
text = "{:.4f}".format(E_class_weight[b][c])
im_E = putText_with_TNR(im_E, 0, 0, 20, (0, 140, 255),
(255, 255, 255), text)
file_name_roi = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_roi' +
suffix + '.png')
cv2.imwrite(file_name_roi, im_S)
file_name_hatE = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) +
'_hatE' + suffix + '.png')
cv2.imwrite(file_name_hatE, im_hatE)
file_name_E = os.path.join(
output_dir, prefix + '_b_' + str(b) + '_c_' + str(c) + '_E' +
suffix + '.png')
cv2.imwrite(file_name_E, im_E)
def dump_proto_files(model, output_dir):
"""Save prototxt descriptions of the training network and parameter
initialization network."""
with open(os.path.join(output_dir, model.net.Proto().name), 'w') as fid:
fid.write(str(model.net.Proto()))
with open(os.path.join(output_dir,
model.param_init_net.Proto().name), 'w') as fid:
fid.write(str(model.param_init_net.Proto()))
def gray2jet(f):
# plot short rainbow RGB
a = f / 0.25 # invert and group
X = math.floor(a) # this is the integer part
Y = math.floor(255 * (a - X)) # fractional part from 0 to 255
Z = math.floor(128 * (a - X)) # fractional part from 0 to 128
if X == 0:
r = 0
g = Y
b = 128 - Z
elif X == 1:
r = Y
g = 255
b = 0
elif X == 2:
r = 255
g = 255 - Z
b = 0
elif X == 3:
r = 255
g = 128 - Z
b = 0
elif X == 4:
r = 255
g = 0
b = 0
# opencv is bgr, not rgb
return (b, g, r)
def putText_with_TNR(img, x, y, size, fontColor, bgColor, string):
thickness = 2
font_scale = 1.1
font = cv2.FONT_HERSHEY_SIMPLEX
s = cv2.getTextSize(string, font, font_scale, thickness)
cv2.rectangle(
img,
(x + thickness, y + thickness),
(x + thickness + s[0][0] + 2, y + thickness + s[0][1] + 2),
# (0, 140, 255),
fontColor,
cv2.FILLED,
lineType=cv2.LINE_AA)
position = (x + thickness + 1, y + thickness + s[0][1] + 1)
cv2.putText(img, string, position, font, font_scale, (255, 255, 255),
thickness, cv2.LINE_AA)
return img
# from OpenCV to PIL
font = "/home/chenzhiwei/Documents/myFonts/timesnewroman.ttf"
img_PIL = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
font = ImageFont.truetype(font, size)
position = (x + 3, y - 2)
draw = ImageDraw.Draw(img_PIL)
offsetx, offsety = font.getoffset(string)
width, height = font.getsize(string)
draw.rectangle((offsetx + x + 2, offsety + y - 3, offsetx + x + width + 3,
offsety + y + height - 3),
fill=bgColor)
draw.text(position, string, font=font, fill=fontColor)
# back to OpenCV type
img_OpenCV = cv2.cvtColor(np.asarray(img_PIL), cv2.COLOR_RGB2BGR)
return img_OpenCV
| en | 0.417631 | # roi_score_softmax = workspace.FetchBlob('gpu_{}/{}'.format( # gpu_id, prefix + 'rois_pred_softmax')) # anchor_argmax = workspace.FetchBlob('gpu_{}/{}'.format( # gpu_id, 'anchor_argmax')) # continue # if labels_oh[b][c] == 0.0: # continue # if labels_oh[b][c] == 0.0: # continue # draw roi # roi location # draw rois_pred_hatE # jet = gray2jet(rois_pred_hatE[argsort_hatE[n]][c] * scale_p) # bgr = jet # rgb = (jet[2], jet[1], jet[0]) # roi location # put Text hat_E # draw rois_pred_E # jet = gray2jet(rois_pred_E[argsort_E[n]][c] * scale_p) # bgr = jet # rgb = (jet[2], jet[1], jet[0]) # roi location # put Text E # write im_score # write class_weight # write class_weight Save prototxt descriptions of the training network and parameter initialization network. # plot short rainbow RGB # invert and group # this is the integer part # fractional part from 0 to 255 # fractional part from 0 to 128 # opencv is bgr, not rgb # (0, 140, 255), # from OpenCV to PIL # back to OpenCV type | 2.217182 | 2 |
salt/runner.py | StepOneInc/salt | 1 | 8133 | # -*- coding: utf-8 -*-
'''
Execute salt convenience routines
'''
# Import python libs
from __future__ import print_function
from __future__ import absolute_import
import collections
import logging
import time
import sys
import multiprocessing
# Import salt libs
import salt.exceptions
import salt.loader
import salt.minion
import salt.transport
import salt.utils
import salt.utils.args
import salt.utils.event
from salt.client import mixins
from salt.output import display_output
from salt.utils.error import raise_error
from salt.utils.event import tagify
import salt.ext.six as six
log = logging.getLogger(__name__)
class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object):
'''
The interface used by the :command:`salt-run` CLI tool on the Salt Master
It executes :ref:`runner modules <all-salt.runners>` which run on the Salt
Master.
Importing and using ``RunnerClient`` must be done on the same machine as
the Salt Master and it must be done using the same user that the Salt
Master is running as.
Salt's :conf_master:`external_auth` can be used to authenticate calls. The
eauth user must be authorized to execute runner modules: (``@runner``).
Only the :py:meth:`master_call` below supports eauth.
'''
client = 'runner'
tag_prefix = 'run'
def __init__(self, opts):
self.opts = opts
self.functions = salt.loader.runner(opts) # Must be self.functions for mixin to work correctly :-/
self.returners = salt.loader.returners(opts, self.functions)
self.outputters = salt.loader.outputters(opts)
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
def cmd(self, fun, arg, pub_data=None, kwarg=None):
'''
Execute a runner function
.. code-block:: python
>>> opts = salt.config.master_config('/etc/salt/master')
>>> runner = salt.runner.RunnerClient(opts)
>>> runner.cmd('jobs.list_jobs', [])
{
'20131219215650131543': {
'Arguments': [300],
'Function': 'test.sleep',
'StartTime': '2013, Dec 19 21:56:50.131543',
'Target': '*',
'Target-type': 'glob',
'User': 'saltdev'
},
'20131219215921857715': {
'Arguments': [300],
'Function': 'test.sleep',
'StartTime': '2013, Dec 19 21:59:21.857715',
'Target': '*',
'Target-type': 'glob',
'User': 'saltdev'
},
}
'''
if kwarg is None:
kwarg = {}
if not isinstance(kwarg, dict):
raise salt.exceptions.SaltInvocationError(
'kwarg must be formatted as a dictionary'
)
if pub_data is None:
pub_data = {}
if not isinstance(pub_data, dict):
raise salt.exceptions.SaltInvocationError(
'pub_data must be formatted as a dictionary'
)
arglist = salt.utils.args.parse_input(arg)
def _append_kwarg(arglist, kwarg):
'''
Append the kwarg dict to the arglist
'''
kwarg['__kwarg__'] = True
arglist.append(kwarg)
if kwarg:
try:
if isinstance(arglist[-1], dict) \
and '__kwarg__' in arglist[-1]:
for key, val in six.iteritems(kwarg):
if key in arglist[-1]:
log.warning(
'Overriding keyword argument {0!r}'.format(key)
)
arglist[-1][key] = val
else:
# No kwargs yet present in arglist
_append_kwarg(arglist, kwarg)
except IndexError:
# arglist is empty, just append
_append_kwarg(arglist, kwarg)
self._verify_fun(fun)
args, kwargs = salt.minion.load_args_and_kwargs(
self.functions[fun], arglist, pub_data
)
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
jid = self.returners[fstr]()
log.debug('Runner starting with jid {0}'.format(jid))
self.event.fire_event({'runner_job': fun}, tagify([jid, 'new'], 'job'))
target = RunnerClient._thread_return
data = {'fun': fun, 'jid': jid, 'args': args, 'kwargs': kwargs}
args = (self, self.opts, data)
ret = jid
if self.opts.get('async', False):
process = multiprocessing.Process(
target=target, args=args
)
process.start()
else:
ret = target(*args)
return ret
@classmethod
def _thread_return(cls, instance, opts, data):
'''
The multiprocessing process calls back here
to stream returns
'''
# Runners modules runtime injection:
# - the progress event system with the correct jid
# - Provide JID if the runner wants to access it directly
done = {}
progress = salt.utils.event.get_runner_event(opts, data['jid']).fire_progress
for func_name, func in instance.functions.items():
if func.__module__ in done:
continue
mod = sys.modules[func.__module__]
mod.__jid__ = data['jid']
mod.__progress__ = progress
done[func.__module__] = mod
ret = instance.functions[data['fun']](*data['args'], **data['kwargs'])
# Sleep for just a moment to let any progress events return
time.sleep(0.1)
ret_load = {'return': ret, 'fun': data['fun'], 'fun_args': data['args']}
# Don't use the invoking processes' event socket because it could be closed down by the time we arrive here.
# Create another, for safety's sake.
salt.utils.event.MasterEvent(opts['sock_dir']).fire_event(ret_load, tagify([data['jid'], 'return'], 'runner'))
try:
fstr = '{0}.save_runner_load'.format(opts['master_job_cache'])
instance.returners[fstr](data['jid'], ret_load)
except KeyError:
log.debug(
'The specified returner used for the master job cache '
'"{0}" does not have a save_runner_load function! The results '
'of this runner execution will not be stored.'.format(
opts['master_job_cache']
)
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
if opts.get('async', False):
return data['jid']
else:
return ret
def master_call(self, **kwargs):
'''
Execute a runner function through the master network interface (eauth).
'''
load = kwargs
load['cmd'] = 'runner'
sreq = salt.transport.Channel.factory(self.opts,
crypt='clear',
usage='master_call')
ret = sreq.send(load)
if isinstance(ret, collections.Mapping):
if 'error' in ret:
raise_error(**ret['error'])
return ret
def _reformat_low(self, low):
'''
Format the low data for RunnerClient()'s master_call() function
The master_call function here has a different function signature than
on WheelClient. So extract all the eauth keys and the fun key and
assume everything else is a kwarg to pass along to the runner function
to be called.
'''
auth_creds = dict([(i, low.pop(i)) for i in [
'username', 'password', 'eauth', 'token', 'client',
] if i in low])
reformatted_low = {'fun': low.pop('fun')}
reformatted_low.update(auth_creds)
reformatted_low['kwarg'] = low
return reformatted_low
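    # Example (sketch) of the reshaping performed by _reformat_low():
    #
    #     {'fun': 'jobs.list_jobs', 'eauth': 'pam', 'username': 'saltdev',
    #      'password': '...', 'outputter': 'json'}
    #     # becomes
    #     {'fun': 'jobs.list_jobs', 'eauth': 'pam', 'username': 'saltdev',
    #      'password': '...', 'kwarg': {'outputter': 'json'}}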
def cmd_async(self, low):
'''
Execute a runner function asynchronously; eauth is respected
This function requires that :conf_master:`external_auth` is configured
and the user is authorized to execute runner functions: (``@runner``).
.. code-block:: python
runner.eauth_async({
'fun': 'jobs.list_jobs',
'username': 'saltdev',
'password': '<PASSWORD>',
'eauth': 'pam',
})
'''
reformatted_low = self._reformat_low(low)
return self.master_call(**reformatted_low)
def cmd_sync(self, low, timeout=None):
'''
Execute a runner function synchronously; eauth is respected
This function requires that :conf_master:`external_auth` is configured
and the user is authorized to execute runner functions: (``@runner``).
.. code-block:: python
runner.eauth_sync({
'fun': 'jobs.list_jobs',
'username': 'saltdev',
'password': '<PASSWORD>',
'eauth': 'pam',
})
'''
sevent = salt.utils.event.get_event('master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts)
reformatted_low = self._reformat_low(low)
job = self.master_call(**reformatted_low)
ret_tag = tagify('ret', base=job['tag'])
timelimit = time.time() + (timeout or 300)
while True:
ret = sevent.get_event(full=True)
if ret is None:
if time.time() > timelimit:
raise salt.exceptions.SaltClientTimeout(
"RunnerClient job '{0}' timed out".format(job['jid']),
jid=job['jid'])
else:
continue
if ret['tag'] == ret_tag:
return ret['data']['return']
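# Illustrative sketch only (not part of this module's API): driving RunnerClient
# directly from Python, mirroring the docstring example in RunnerClient.cmd().
# The master config path is an assumption for the example.
def _example_runner_client():
    import salt.config
    opts = salt.config.master_config('/etc/salt/master')
    client = RunnerClient(opts)
    return client.cmd('jobs.list_jobs', [])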
class Runner(RunnerClient):
'''
Execute the salt runner interface
'''
def print_docs(self):
'''
Print out the documentation!
'''
arg = self.opts.get('fun', None)
docs = super(Runner, self).get_docs(arg)
for fun in sorted(docs):
display_output('{0}:'.format(fun), 'text', self.opts)
print(docs[fun])
def run(self):
'''
Execute the runner sequence
'''
ret = {}
if self.opts.get('doc', False):
self.print_docs()
else:
try:
# Run the runner!
jid = super(Runner, self).cmd(
self.opts['fun'], self.opts['arg'], self.opts)
if self.opts.get('async', False):
log.info('Running in async mode. Results of this execution may '
'be collected by attaching to the master event bus or '
                             'by examining the master job cache, if configured.')
sys.exit(0)
rets = self.get_runner_returns(jid)
else:
rets = [jid]
# Gather the returns
for ret in rets:
if not self.opts.get('quiet', False):
if isinstance(ret, dict) and 'outputter' in ret and ret['outputter'] is not None:
print(self.outputters[ret['outputter']](ret['data']))
else:
salt.output.display_output(ret, '', self.opts)
except salt.exceptions.SaltException as exc:
ret = str(exc)
print(ret)
return ret
log.debug('Runner return: {0}'.format(ret))
return ret
def get_runner_returns(self, jid, timeout=None):
'''
Gather the return data from the event system, break hard when timeout
is reached.
'''
if timeout is None:
timeout = self.opts['timeout'] * 2
timeout_at = time.time() + timeout
last_progress_timestamp = time.time()
while True:
raw = self.event.get_event(timeout, full=True)
time.sleep(0.1)
# If we saw no events in the event bus timeout
# OR
# we have reached the total timeout
# AND
# have not seen any progress events for the length of the timeout.
if raw is None and (time.time() > timeout_at and
time.time() - last_progress_timestamp > timeout):
# Timeout reached
break
try:
if not raw['tag'].split('/')[1] == 'runner' and raw['tag'].split('/')[2] == jid:
continue
elif raw['tag'].split('/')[3] == 'progress' and raw['tag'].split('/')[2] == jid:
last_progress_timestamp = time.time()
yield {'data': raw['data']['data'], 'outputter': raw['data']['outputter']}
elif raw['tag'].split('/')[3] == 'return' and raw['tag'].split('/')[2] == jid:
yield raw['data']['return']
break
# Handle a findjob that might have been kicked off under the covers
elif raw['data']['fun'] == 'saltutil.findjob':
timeout_at = timeout_at + 10
continue
except (IndexError, KeyError):
continue
| # -*- coding: utf-8 -*-
'''
Execute salt convenience routines
'''
# Import python libs
from __future__ import print_function
from __future__ import absolute_import
import collections
import logging
import time
import sys
import multiprocessing
# Import salt libs
import salt.exceptions
import salt.loader
import salt.minion
import salt.transport
import salt.utils
import salt.utils.args
import salt.utils.event
from salt.client import mixins
from salt.output import display_output
from salt.utils.error import raise_error
from salt.utils.event import tagify
import salt.ext.six as six
log = logging.getLogger(__name__)
class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object):
'''
The interface used by the :command:`salt-run` CLI tool on the Salt Master
It executes :ref:`runner modules <all-salt.runners>` which run on the Salt
Master.
Importing and using ``RunnerClient`` must be done on the same machine as
the Salt Master and it must be done using the same user that the Salt
Master is running as.
Salt's :conf_master:`external_auth` can be used to authenticate calls. The
eauth user must be authorized to execute runner modules: (``@runner``).
Only the :py:meth:`master_call` below supports eauth.
'''
client = 'runner'
tag_prefix = 'run'
def __init__(self, opts):
self.opts = opts
self.functions = salt.loader.runner(opts) # Must be self.functions for mixin to work correctly :-/
self.returners = salt.loader.returners(opts, self.functions)
self.outputters = salt.loader.outputters(opts)
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
def cmd(self, fun, arg, pub_data=None, kwarg=None):
'''
Execute a runner function
.. code-block:: python
>>> opts = salt.config.master_config('/etc/salt/master')
>>> runner = salt.runner.RunnerClient(opts)
>>> runner.cmd('jobs.list_jobs', [])
{
'20131219215650131543': {
'Arguments': [300],
'Function': 'test.sleep',
'StartTime': '2013, Dec 19 21:56:50.131543',
'Target': '*',
'Target-type': 'glob',
'User': 'saltdev'
},
'20131219215921857715': {
'Arguments': [300],
'Function': 'test.sleep',
'StartTime': '2013, Dec 19 21:59:21.857715',
'Target': '*',
'Target-type': 'glob',
'User': 'saltdev'
},
}
'''
if kwarg is None:
kwarg = {}
if not isinstance(kwarg, dict):
raise salt.exceptions.SaltInvocationError(
'kwarg must be formatted as a dictionary'
)
if pub_data is None:
pub_data = {}
if not isinstance(pub_data, dict):
raise salt.exceptions.SaltInvocationError(
'pub_data must be formatted as a dictionary'
)
arglist = salt.utils.args.parse_input(arg)
def _append_kwarg(arglist, kwarg):
'''
Append the kwarg dict to the arglist
'''
kwarg['__kwarg__'] = True
arglist.append(kwarg)
if kwarg:
try:
if isinstance(arglist[-1], dict) \
and '__kwarg__' in arglist[-1]:
for key, val in six.iteritems(kwarg):
if key in arglist[-1]:
log.warning(
'Overriding keyword argument {0!r}'.format(key)
)
arglist[-1][key] = val
else:
# No kwargs yet present in arglist
_append_kwarg(arglist, kwarg)
except IndexError:
# arglist is empty, just append
_append_kwarg(arglist, kwarg)
self._verify_fun(fun)
args, kwargs = salt.minion.load_args_and_kwargs(
self.functions[fun], arglist, pub_data
)
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
jid = self.returners[fstr]()
log.debug('Runner starting with jid {0}'.format(jid))
self.event.fire_event({'runner_job': fun}, tagify([jid, 'new'], 'job'))
target = RunnerClient._thread_return
data = {'fun': fun, 'jid': jid, 'args': args, 'kwargs': kwargs}
args = (self, self.opts, data)
ret = jid
if self.opts.get('async', False):
process = multiprocessing.Process(
target=target, args=args
)
process.start()
else:
ret = target(*args)
return ret
@classmethod
def _thread_return(cls, instance, opts, data):
'''
The multiprocessing process calls back here
to stream returns
'''
# Runners modules runtime injection:
# - the progress event system with the correct jid
# - Provide JID if the runner wants to access it directly
done = {}
progress = salt.utils.event.get_runner_event(opts, data['jid']).fire_progress
for func_name, func in instance.functions.items():
if func.__module__ in done:
continue
mod = sys.modules[func.__module__]
mod.__jid__ = data['jid']
mod.__progress__ = progress
done[func.__module__] = mod
ret = instance.functions[data['fun']](*data['args'], **data['kwargs'])
# Sleep for just a moment to let any progress events return
time.sleep(0.1)
ret_load = {'return': ret, 'fun': data['fun'], 'fun_args': data['args']}
# Don't use the invoking processes' event socket because it could be closed down by the time we arrive here.
# Create another, for safety's sake.
salt.utils.event.MasterEvent(opts['sock_dir']).fire_event(ret_load, tagify([data['jid'], 'return'], 'runner'))
try:
fstr = '{0}.save_runner_load'.format(opts['master_job_cache'])
instance.returners[fstr](data['jid'], ret_load)
except KeyError:
log.debug(
'The specified returner used for the master job cache '
'"{0}" does not have a save_runner_load function! The results '
'of this runner execution will not be stored.'.format(
opts['master_job_cache']
)
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
if opts.get('async', False):
return data['jid']
else:
return ret
def master_call(self, **kwargs):
'''
Execute a runner function through the master network interface (eauth).
'''
load = kwargs
load['cmd'] = 'runner'
sreq = salt.transport.Channel.factory(self.opts,
crypt='clear',
usage='master_call')
ret = sreq.send(load)
if isinstance(ret, collections.Mapping):
if 'error' in ret:
raise_error(**ret['error'])
return ret
def _reformat_low(self, low):
'''
Format the low data for RunnerClient()'s master_call() function
The master_call function here has a different function signature than
on WheelClient. So extract all the eauth keys and the fun key and
assume everything else is a kwarg to pass along to the runner function
to be called.
'''
auth_creds = dict([(i, low.pop(i)) for i in [
'username', 'password', 'eauth', 'token', 'client',
] if i in low])
reformatted_low = {'fun': low.pop('fun')}
reformatted_low.update(auth_creds)
reformatted_low['kwarg'] = low
return reformatted_low
def cmd_async(self, low):
'''
Execute a runner function asynchronously; eauth is respected
This function requires that :conf_master:`external_auth` is configured
and the user is authorized to execute runner functions: (``@runner``).
.. code-block:: python
runner.eauth_async({
'fun': 'jobs.list_jobs',
'username': 'saltdev',
'password': '<PASSWORD>',
'eauth': 'pam',
})
'''
reformatted_low = self._reformat_low(low)
return self.master_call(**reformatted_low)
def cmd_sync(self, low, timeout=None):
'''
Execute a runner function synchronously; eauth is respected
This function requires that :conf_master:`external_auth` is configured
and the user is authorized to execute runner functions: (``@runner``).
.. code-block:: python
runner.eauth_sync({
'fun': 'jobs.list_jobs',
'username': 'saltdev',
'password': '<PASSWORD>',
'eauth': 'pam',
})
'''
sevent = salt.utils.event.get_event('master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts)
reformatted_low = self._reformat_low(low)
job = self.master_call(**reformatted_low)
ret_tag = tagify('ret', base=job['tag'])
timelimit = time.time() + (timeout or 300)
while True:
ret = sevent.get_event(full=True)
if ret is None:
if time.time() > timelimit:
raise salt.exceptions.SaltClientTimeout(
"RunnerClient job '{0}' timed out".format(job['jid']),
jid=job['jid'])
else:
continue
if ret['tag'] == ret_tag:
return ret['data']['return']
class Runner(RunnerClient):
'''
Execute the salt runner interface
'''
def print_docs(self):
'''
Print out the documentation!
'''
arg = self.opts.get('fun', None)
docs = super(Runner, self).get_docs(arg)
for fun in sorted(docs):
display_output('{0}:'.format(fun), 'text', self.opts)
print(docs[fun])
def run(self):
'''
Execute the runner sequence
'''
ret = {}
if self.opts.get('doc', False):
self.print_docs()
else:
try:
# Run the runner!
jid = super(Runner, self).cmd(
self.opts['fun'], self.opts['arg'], self.opts)
if self.opts.get('async', False):
log.info('Running in async mode. Results of this execution may '
'be collected by attaching to the master event bus or '
                             'by examining the master job cache, if configured.')
sys.exit(0)
rets = self.get_runner_returns(jid)
else:
rets = [jid]
# Gather the returns
for ret in rets:
if not self.opts.get('quiet', False):
if isinstance(ret, dict) and 'outputter' in ret and ret['outputter'] is not None:
print(self.outputters[ret['outputter']](ret['data']))
else:
salt.output.display_output(ret, '', self.opts)
except salt.exceptions.SaltException as exc:
ret = str(exc)
print(ret)
return ret
log.debug('Runner return: {0}'.format(ret))
return ret
def get_runner_returns(self, jid, timeout=None):
'''
Gather the return data from the event system, break hard when timeout
is reached.
'''
if timeout is None:
timeout = self.opts['timeout'] * 2
timeout_at = time.time() + timeout
last_progress_timestamp = time.time()
while True:
raw = self.event.get_event(timeout, full=True)
time.sleep(0.1)
# If we saw no events in the event bus timeout
# OR
# we have reached the total timeout
# AND
# have not seen any progress events for the length of the timeout.
if raw is None and (time.time() > timeout_at and
time.time() - last_progress_timestamp > timeout):
# Timeout reached
break
try:
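                # Event tags are expected to look like 'salt/runner/<jid>/<return|progress>' (cf. the tagify calls above)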
if not raw['tag'].split('/')[1] == 'runner' and raw['tag'].split('/')[2] == jid:
continue
elif raw['tag'].split('/')[3] == 'progress' and raw['tag'].split('/')[2] == jid:
last_progress_timestamp = time.time()
yield {'data': raw['data']['data'], 'outputter': raw['data']['outputter']}
elif raw['tag'].split('/')[3] == 'return' and raw['tag'].split('/')[2] == jid:
yield raw['data']['return']
break
# Handle a findjob that might have been kicked off under the covers
elif raw['data']['fun'] == 'saltutil.findjob':
timeout_at = timeout_at + 10
continue
except (IndexError, KeyError):
continue
| en | 0.757486 | # -*- coding: utf-8 -*- Execute salt convenience routines # Import python libs # Import salt libs The interface used by the :command:`salt-run` CLI tool on the Salt Master It executes :ref:`runner modules <all-salt.runners>` which run on the Salt Master. Importing and using ``RunnerClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. Salt's :conf_master:`external_auth` can be used to authenticate calls. The eauth user must be authorized to execute runner modules: (``@runner``). Only the :py:meth:`master_call` below supports eauth. # Must be self.functions for mixin to work correctly :-/ Execute a runner function .. code-block:: python >>> opts = salt.config.master_config('/etc/salt/master') >>> runner = salt.runner.RunnerClient(opts) >>> runner.cmd('jobs.list_jobs', []) { '20131219215650131543': { 'Arguments': [300], 'Function': 'test.sleep', 'StartTime': '2013, Dec 19 21:56:50.131543', 'Target': '*', 'Target-type': 'glob', 'User': 'saltdev' }, '20131219215921857715': { 'Arguments': [300], 'Function': 'test.sleep', 'StartTime': '2013, Dec 19 21:59:21.857715', 'Target': '*', 'Target-type': 'glob', 'User': 'saltdev' }, } Append the kwarg dict to the arglist # No kwargs yet present in arglist # arglist is empty, just append The multiprocessing process calls back here to stream returns # Runners modules runtime injection: # - the progress event system with the correct jid # - Provide JID if the runner wants to access it directly # Sleep for just a moment to let any progress events return # Don't use the invoking processes' event socket because it could be closed down by the time we arrive here. # Create another, for safety's sake. Execute a runner function through the master network interface (eauth). Format the low data for RunnerClient()'s master_call() function The master_call function here has a different function signature than on WheelClient. So extract all the eauth keys and the fun key and assume everything else is a kwarg to pass along to the runner function to be called. Execute a runner function asynchronously; eauth is respected This function requires that :conf_master:`external_auth` is configured and the user is authorized to execute runner functions: (``@runner``). .. code-block:: python runner.eauth_async({ 'fun': 'jobs.list_jobs', 'username': 'saltdev', 'password': '<PASSWORD>', 'eauth': 'pam', }) Execute a runner function synchronously; eauth is respected This function requires that :conf_master:`external_auth` is configured and the user is authorized to execute runner functions: (``@runner``). .. code-block:: python runner.eauth_sync({ 'fun': 'jobs.list_jobs', 'username': 'saltdev', 'password': '<PASSWORD>', 'eauth': 'pam', }) Execute the salt runner interface Print out the documentation! Execute the runner sequence # Run the runner! # Gather the returns Gather the return data from the event system, break hard when timeout is reached. # If we saw no events in the event bus timeout # OR # we have reached the total timeout # AND # have not seen any progress events for the length of the timeout. # Timeout reached # Handle a findjob that might have been kicked off under the covers | 2.24697 | 2 |
.venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/__pyinstaller/__init__.py | RivtLib/replit01 | 1 | 8134 | # For usage of lark with PyInstaller. See https://pyinstaller-sample-hook.readthedocs.io/en/latest/index.html
import os
def get_hook_dirs():
return [os.path.dirname(__file__)] | # For usage of lark with PyInstaller. See https://pyinstaller-sample-hook.readthedocs.io/en/latest/index.html
import os
def get_hook_dirs():
return [os.path.dirname(__file__)] | en | 0.754897 | # For usage of lark with PyInstaller. See https://pyinstaller-sample-hook.readthedocs.io/en/latest/index.html | 1.603295 | 2 |
pong-pg.py | s-gv/pong-keras | 0 | 8135 | <filename>pong-pg.py
# Copyright (c) 2019 <NAME>. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import numpy as np
import gym
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Lambda, Dense, Conv2D, MaxPool2D, Flatten, BatchNormalization, Dropout
from tensorflow.keras.optimizers import RMSprop, Adam
import tensorflow.keras.backend as K
env = gym.make('PongDeterministic-v4')
UP_ACTION = 2
DOWN_ACTION = 3
ACTIONS = [UP_ACTION, DOWN_ACTION]
# Neural net model takes the state and outputs action and value for that state
model = Sequential([
Dense(512, activation='elu', input_shape=(2*6400,)),
Dense(len(ACTIONS), activation='softmax'),
])
model.compile(optimizer=RMSprop(1e-4), loss='sparse_categorical_crossentropy')
gamma = 0.99
# preprocess frames
def prepro(I):
""" prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector. http://karpathy.github.io/2016/05/31/rl/ """
if I is None: return np.zeros((6400,))
I = I[35:195] # crop
I = I[::2,::2,0] # downsample by factor of 2
I[I == 144] = 0 # erase background (background type 1)
I[I == 109] = 0 # erase background (background type 2)
I[I != 0] = 1 # everything else (paddles, ball) just set to 1
return I.astype(np.float).ravel()
def discount_rewards(r):
""" take 1D float array of rewards and compute discounted reward. http://karpathy.github.io/2016/05/31/rl/ """
discounted_r = np.zeros((len(r),))
running_add = 0
for t in reversed(range(0, len(r))):
if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
def train():
reward_sums = []
for ep in range(2000):
Xs, ys, rewards = [], [], []
prev_obs, obs = None, env.reset()
for t in range(99000):
x = np.hstack([prepro(obs), prepro(prev_obs)])
prev_obs = obs
action_probs = model.predict(x[None, :])
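            # Sample the next action from the policy's softmax output (stochastic policy)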
ya = np.random.choice(len(ACTIONS), p=action_probs[0])
action = ACTIONS[ya]
obs, reward, done, _ = env.step(action)
Xs.append(x)
ys.append(ya)
rewards.append(reward)
#if reward != 0: print(f'Episode {ep} -- step: {t}, ya: {ya}, reward: {reward}')
if done:
Xs = np.array(Xs)
ys = np.array(ys)
discounted_rewards = discount_rewards(rewards)
advantages = (discounted_rewards - discounted_rewards.mean()) / discounted_rewards.std()
print(f'adv: {np.min(advantages):.2f}, {np.max(advantages):.2f}')
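                # Advantage-weighted cross-entropy on the sampled actions gives a REINFORCE-style policy-gradient update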
model.fit(Xs, ys, sample_weight=advantages, epochs=1, batch_size=1024)
reward_sum = sum(rewards)
reward_sums.append(reward_sum)
avg_reward_sum = sum(reward_sums[-50:]) / len(reward_sums[-50:])
print(f'Episode {ep} -- reward_sum: {reward_sum}, avg_reward_sum: {avg_reward_sum}\n')
if ep % 20 == 0:
model.save_weights('params/model3.h5')
break
def test():
global env
env = gym.wrappers.Monitor(env, './tmp', video_callable=lambda ep_id: True, force=True)
model.load_weights('params/model3.h5')
reward_sum = 0
prev_obs, obs = None, env.reset()
for t in range(99000):
x = np.hstack([prepro(obs), prepro(prev_obs)])
prev_obs = obs
action_probs = model.predict(x[None, :])
#ya = np.argmax(action_probs[0])
ya = np.random.choice(len(ACTIONS), p=action_probs[0])
action = ACTIONS[ya]
obs, reward, done, _ = env.step(action)
reward_sum += reward
if reward != 0:
print(f't: {t} -- reward: {reward}')
if done:
print(f't: {t} -- reward_sum: {reward_sum}')
break
def main():
if len(sys.argv) >= 2 and sys.argv[1] == 'test':
test()
else:
train()
if __name__ == '__main__':
main()
| <filename>pong-pg.py
# Copyright (c) 2019 <NAME>. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import numpy as np
import gym
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Lambda, Dense, Conv2D, MaxPool2D, Flatten, BatchNormalization, Dropout
from tensorflow.keras.optimizers import RMSprop, Adam
import tensorflow.keras.backend as K
env = gym.make('PongDeterministic-v4')
UP_ACTION = 2
DOWN_ACTION = 3
ACTIONS = [UP_ACTION, DOWN_ACTION]
# Neural net model takes the state and outputs action and value for that state
model = Sequential([
Dense(512, activation='elu', input_shape=(2*6400,)),
Dense(len(ACTIONS), activation='softmax'),
])
model.compile(optimizer=RMSprop(1e-4), loss='sparse_categorical_crossentropy')
gamma = 0.99
# preprocess frames
def prepro(I):
""" prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector. http://karpathy.github.io/2016/05/31/rl/ """
if I is None: return np.zeros((6400,))
I = I[35:195] # crop
I = I[::2,::2,0] # downsample by factor of 2
I[I == 144] = 0 # erase background (background type 1)
I[I == 109] = 0 # erase background (background type 2)
I[I != 0] = 1 # everything else (paddles, ball) just set to 1
return I.astype(np.float).ravel()
def discount_rewards(r):
""" take 1D float array of rewards and compute discounted reward. http://karpathy.github.io/2016/05/31/rl/ """
discounted_r = np.zeros((len(r),))
running_add = 0
for t in reversed(range(0, len(r))):
if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
def train():
reward_sums = []
for ep in range(2000):
Xs, ys, rewards = [], [], []
prev_obs, obs = None, env.reset()
for t in range(99000):
x = np.hstack([prepro(obs), prepro(prev_obs)])
prev_obs = obs
action_probs = model.predict(x[None, :])
ya = np.random.choice(len(ACTIONS), p=action_probs[0])
action = ACTIONS[ya]
obs, reward, done, _ = env.step(action)
Xs.append(x)
ys.append(ya)
rewards.append(reward)
#if reward != 0: print(f'Episode {ep} -- step: {t}, ya: {ya}, reward: {reward}')
if done:
Xs = np.array(Xs)
ys = np.array(ys)
discounted_rewards = discount_rewards(rewards)
advantages = (discounted_rewards - discounted_rewards.mean()) / discounted_rewards.std()
print(f'adv: {np.min(advantages):.2f}, {np.max(advantages):.2f}')
model.fit(Xs, ys, sample_weight=advantages, epochs=1, batch_size=1024)
reward_sum = sum(rewards)
reward_sums.append(reward_sum)
avg_reward_sum = sum(reward_sums[-50:]) / len(reward_sums[-50:])
print(f'Episode {ep} -- reward_sum: {reward_sum}, avg_reward_sum: {avg_reward_sum}\n')
if ep % 20 == 0:
model.save_weights('params/model3.h5')
break
def test():
global env
env = gym.wrappers.Monitor(env, './tmp', video_callable=lambda ep_id: True, force=True)
model.load_weights('params/model3.h5')
reward_sum = 0
prev_obs, obs = None, env.reset()
for t in range(99000):
x = np.hstack([prepro(obs), prepro(prev_obs)])
prev_obs = obs
action_probs = model.predict(x[None, :])
#ya = np.argmax(action_probs[0])
ya = np.random.choice(len(ACTIONS), p=action_probs[0])
action = ACTIONS[ya]
obs, reward, done, _ = env.step(action)
reward_sum += reward
if reward != 0:
print(f't: {t} -- reward: {reward}')
if done:
print(f't: {t} -- reward_sum: {reward_sum}')
break
def main():
if len(sys.argv) >= 2 and sys.argv[1] == 'test':
test()
else:
train()
if __name__ == '__main__':
main()
| en | 0.806982 | # Copyright (c) 2019 <NAME>. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Neural net model takes the state and outputs action and value for that state # preprocess frames prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector. http://karpathy.github.io/2016/05/31/rl/ # crop # downsample by factor of 2 # erase background (background type 1) # erase background (background type 2) # everything else (paddles, ball) just set to 1 take 1D float array of rewards and compute discounted reward. http://karpathy.github.io/2016/05/31/rl/ # reset the sum, since this was a game boundary (pong specific!) #if reward != 0: print(f'Episode {ep} -- step: {t}, ya: {ya}, reward: {reward}') #ya = np.argmax(action_probs[0]) | 2.413043 | 2 |
dexp/cli/dexp_commands/crop.py | JoOkuma/dexp | 0 | 8136 | import click
from arbol.arbol import aprint, asection
from dexp.cli.defaults import DEFAULT_CLEVEL, DEFAULT_CODEC, DEFAULT_STORE
from dexp.cli.parsing import _get_output_path, _parse_channels, _parse_chunks
from dexp.datasets.open_dataset import glob_datasets
from dexp.datasets.operations.crop import dataset_crop
@click.command()
@click.argument("input_paths", nargs=-1) # , help='input path'
@click.option("--output_path", "-o") # , help='output path'
@click.option("--channels", "-c", default=None, help="List of channels, all channels when ommited.")
@click.option(
"--quantile",
"-q",
default=0.99,
type=float,
help="Quantile parameter for lower bound of brightness for thresholding.",
show_default=True,
)
@click.option(
"--reference-channel",
"-rc",
default=None,
help="Reference channel to estimate cropping. If no provided it picks the first one.",
)
@click.option("--store", "-st", default=DEFAULT_STORE, help="Zarr store: ‘dir’, ‘ndir’, or ‘zip’", show_default=True)
@click.option("--chunks", "-chk", default=None, help="Dataset chunks dimensions, e.g. (1, 126, 512, 512).")
@click.option(
"--codec",
"-z",
default=DEFAULT_CODEC,
    help="Compression codec: ‘zstd’, ‘blosclz’, ‘lz4’, ‘lz4hc’, ‘zlib’ or ‘snappy’.",
show_default=True,
)
@click.option("--clevel", "-l", type=int, default=DEFAULT_CLEVEL, help="Compression level", show_default=True)
@click.option("--overwrite", "-w", is_flag=True, help="Forces overwrite of target", show_default=True)
@click.option(
"--workers",
"-wk",
default=-4,
help="Number of worker threads to spawn. Negative numbers n correspond to: number_of _cores / |n| ",
show_default=True,
) #
@click.option("--check", "-ck", default=True, help="Checking integrity of written file.", show_default=True) #
def crop(
input_paths,
output_path,
channels,
quantile,
reference_channel,
store,
chunks,
codec,
clevel,
overwrite,
workers,
check,
):
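    # Resolve the input datasets, derive the output path, then crop the requested channels around the reference channel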
input_dataset, input_paths = glob_datasets(input_paths)
output_path = _get_output_path(input_paths[0], output_path, "_crop")
channels = _parse_channels(input_dataset, channels)
if reference_channel is None:
reference_channel = input_dataset.channels()[0]
chunks = _parse_chunks(chunks)
with asection(
f"Cropping from: {input_paths} to {output_path} for channels: {channels}, "
f"using channel {reference_channel} as a reference."
):
dataset_crop(
input_dataset,
output_path,
channels=channels,
reference_channel=reference_channel,
quantile=quantile,
store=store,
chunks=chunks,
compression=codec,
compression_level=clevel,
overwrite=overwrite,
workers=workers,
check=check,
)
input_dataset.close()
aprint("Done!")
| import click
from arbol.arbol import aprint, asection
from dexp.cli.defaults import DEFAULT_CLEVEL, DEFAULT_CODEC, DEFAULT_STORE
from dexp.cli.parsing import _get_output_path, _parse_channels, _parse_chunks
from dexp.datasets.open_dataset import glob_datasets
from dexp.datasets.operations.crop import dataset_crop
@click.command()
@click.argument("input_paths", nargs=-1) # , help='input path'
@click.option("--output_path", "-o") # , help='output path'
@click.option("--channels", "-c", default=None, help="List of channels, all channels when ommited.")
@click.option(
"--quantile",
"-q",
default=0.99,
type=float,
help="Quantile parameter for lower bound of brightness for thresholding.",
show_default=True,
)
@click.option(
"--reference-channel",
"-rc",
default=None,
help="Reference channel to estimate cropping. If no provided it picks the first one.",
)
@click.option("--store", "-st", default=DEFAULT_STORE, help="Zarr store: ‘dir’, ‘ndir’, or ‘zip’", show_default=True)
@click.option("--chunks", "-chk", default=None, help="Dataset chunks dimensions, e.g. (1, 126, 512, 512).")
@click.option(
"--codec",
"-z",
default=DEFAULT_CODEC,
    help="Compression codec: ‘zstd’, ‘blosclz’, ‘lz4’, ‘lz4hc’, ‘zlib’ or ‘snappy’.",
show_default=True,
)
@click.option("--clevel", "-l", type=int, default=DEFAULT_CLEVEL, help="Compression level", show_default=True)
@click.option("--overwrite", "-w", is_flag=True, help="Forces overwrite of target", show_default=True)
@click.option(
"--workers",
"-wk",
default=-4,
help="Number of worker threads to spawn. Negative numbers n correspond to: number_of _cores / |n| ",
show_default=True,
) #
@click.option("--check", "-ck", default=True, help="Checking integrity of written file.", show_default=True) #
def crop(
input_paths,
output_path,
channels,
quantile,
reference_channel,
store,
chunks,
codec,
clevel,
overwrite,
workers,
check,
):
input_dataset, input_paths = glob_datasets(input_paths)
output_path = _get_output_path(input_paths[0], output_path, "_crop")
channels = _parse_channels(input_dataset, channels)
if reference_channel is None:
reference_channel = input_dataset.channels()[0]
chunks = _parse_chunks(chunks)
with asection(
f"Cropping from: {input_paths} to {output_path} for channels: {channels}, "
f"using channel {reference_channel} as a reference."
):
dataset_crop(
input_dataset,
output_path,
channels=channels,
reference_channel=reference_channel,
quantile=quantile,
store=store,
chunks=chunks,
compression=codec,
compression_level=clevel,
overwrite=overwrite,
workers=workers,
check=check,
)
input_dataset.close()
aprint("Done!")
| en | 0.313179 | # , help='input path' # , help='output path' # # | 2.001261 | 2 |
morse_DMT/write_dipha_file_3d_revise.py | YinuoJin/DMT_loss | 1 | 8137 | import sys
from matplotlib import image as mpimg
import numpy as np
import os
DIPHA_CONST = 8067171840
DIPHA_IMAGE_TYPE_CONST = 1
DIM = 3
input_dir = os.path.join(os.getcwd(), sys.argv[1])
dipha_output_filename = sys.argv[2]
vert_filename = sys.argv[3]
input_filenames = [name
for name in os.listdir(input_dir)
if (os.path.isfile(input_dir + '/' + name)) and (name != ".DS_Store")]
input_filenames.sort()
image = mpimg.imread(os.path.join(input_dir, input_filenames[0]))
nx, ny = image.shape
del image
nz = len(input_filenames)
print(nx, ny, nz)
#sys.exit()
im_cube = np.zeros([nx, ny, nz])
i = 0
for name in input_filenames:
sys.stdout.flush()
print(i, name)
fileName = input_dir + "/" + name
im_cube[:, :, i] = mpimg.imread(fileName)
i = i + 1
print('writing dipha output...')
with open(dipha_output_filename, 'wb') as output_file:
# this is needed to verify you are giving dipha a dipha file
np.int64(DIPHA_CONST).tofile(output_file)
# this tells dipha that we are giving an image as input
np.int64(DIPHA_IMAGE_TYPE_CONST).tofile(output_file)
# number of points
np.int64(nx * ny * nz).tofile(output_file)
# dimension
np.int64(DIM).tofile(output_file)
# pixels in each dimension
np.int64(nx).tofile(output_file)
np.int64(ny).tofile(output_file)
np.int64(nz).tofile(output_file)
# pixel values
for k in range(nz):
sys.stdout.flush()
print('dipha - working on image', k)
for j in range(ny):
for i in range(nx):
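                # Intensities are negated and scaled to [-255, 0]; presumably so DIPHA runs the filtration on the inverted image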
val = int(-im_cube[i, j, k]*255)
'''
if val != 0 and val != -1:
print('val check:', val)
'''
np.float64(val).tofile(output_file)
output_file.close()
print('writing vert file')
with open(vert_filename, 'w') as vert_file:
for k in range(nz):
sys.stdout.flush()
print('verts - working on image', k)
for j in range(ny):
for i in range(nx):
vert_file.write(str(i) + ' ' + str(j) + ' ' + str(k) + ' ' + str(int(-im_cube[i, j, k] * 255)) + '\n')
vert_file.close()
print(nx, ny, nz)
| import sys
from matplotlib import image as mpimg
import numpy as np
import os
DIPHA_CONST = 8067171840
DIPHA_IMAGE_TYPE_CONST = 1
DIM = 3
input_dir = os.path.join(os.getcwd(), sys.argv[1])
dipha_output_filename = sys.argv[2]
vert_filename = sys.argv[3]
input_filenames = [name
for name in os.listdir(input_dir)
if (os.path.isfile(input_dir + '/' + name)) and (name != ".DS_Store")]
input_filenames.sort()
image = mpimg.imread(os.path.join(input_dir, input_filenames[0]))
nx, ny = image.shape
del image
nz = len(input_filenames)
print(nx, ny, nz)
#sys.exit()
im_cube = np.zeros([nx, ny, nz])
i = 0
for name in input_filenames:
sys.stdout.flush()
print(i, name)
fileName = input_dir + "/" + name
im_cube[:, :, i] = mpimg.imread(fileName)
i = i + 1
print('writing dipha output...')
with open(dipha_output_filename, 'wb') as output_file:
# this is needed to verify you are giving dipha a dipha file
np.int64(DIPHA_CONST).tofile(output_file)
# this tells dipha that we are giving an image as input
np.int64(DIPHA_IMAGE_TYPE_CONST).tofile(output_file)
# number of points
np.int64(nx * ny * nz).tofile(output_file)
# dimension
np.int64(DIM).tofile(output_file)
# pixels in each dimension
np.int64(nx).tofile(output_file)
np.int64(ny).tofile(output_file)
np.int64(nz).tofile(output_file)
# pixel values
for k in range(nz):
sys.stdout.flush()
print('dipha - working on image', k)
for j in range(ny):
for i in range(nx):
val = int(-im_cube[i, j, k]*255)
'''
if val != 0 and val != -1:
print('val check:', val)
'''
np.float64(val).tofile(output_file)
output_file.close()
print('writing vert file')
with open(vert_filename, 'w') as vert_file:
for k in range(nz):
sys.stdout.flush()
print('verts - working on image', k)
for j in range(ny):
for i in range(nx):
vert_file.write(str(i) + ' ' + str(j) + ' ' + str(k) + ' ' + str(int(-im_cube[i, j, k] * 255)) + '\n')
vert_file.close()
print(nx, ny, nz)
| en | 0.803509 | #sys.exit() # this is needed to verify you are giving dipha a dipha file # this tells dipha that we are giving an image as input # number of points # dimension # pixels in each dimension # pixel values if val != 0 and val != -1:
print('val check:', val) | 2.349253 | 2 |
microservices/validate/tools/validates.py | clodonil/pipeline_aws_custom | 0 | 8138 | """
Tools to validate the template file received from SQS
"""
import yaml
class Validate:
def __init__(self):
pass
def check_validate_yml(self, template):
"""
        Validates whether the yml file is valid
"""
if template:
return True
else:
return False
def check_yml_struct(self, template):
"""
        Validates whether the yml structure is valid
"""
if template:
return True
else:
return False
def check_template_exist(self, template):
"""
        Validates whether the template specified in the yml file exists
"""
if template:
return True
else:
return False
def check_callback_protocol_endpoint(self, template):
"""
        Validates whether the protocol and endpoint are valid
"""
return True
def check_template(self, template):
if self.check_validate_yml(template) \
and self.check_yml_struct(template) \
and self.check_template_exist(template) \
and self.check_callback_protocol_endpoint(template):
msg = {"status": True}
return msg
else:
msg = {'status': False, 'message': 'problema no arquivo yml'}
return msg
def change_yml_to_json(content):
try:
template_json = yaml.safe_load(content)
return template_json
except yaml.YAMLError as error:
return {"message": str(error)}
| """
Tools to validate the template file received from SQS
"""
import yaml
class Validate:
def __init__(self):
pass
def check_validate_yml(self, template):
"""
        Validates whether the yml file is valid
"""
if template:
return True
else:
return False
def check_yml_struct(self, template):
"""
        Validates whether the yml structure is valid
"""
if template:
return True
else:
return False
def check_template_exist(self, template):
"""
        Validates whether the template specified in the yml file exists
"""
if template:
return True
else:
return False
def check_callback_protocol_endpoint(self, template):
"""
        Validates whether the protocol and endpoint are valid
"""
return True
def check_template(self, template):
if self.check_validate_yml(template) \
and self.check_yml_struct(template) \
and self.check_template_exist(template) \
and self.check_callback_protocol_endpoint(template):
msg = {"status": True}
return msg
else:
msg = {'status': False, 'message': 'problema no arquivo yml'}
return msg
def change_yml_to_json(content):
try:
template_json = yaml.safe_load(content)
return template_json
except yaml.YAMLError as error:
return {"message": str(error)}
| pt | 0.638276 | Tools para validar o arquivo template recebido do SQS valida se o arquivo yml é valido Valida se a estrutura do yml é valido Valida se o template informado no arquivo yml existe validar se o protocolo e endpoint são validos | 2.877774 | 3 |
MetropolisMCMC.py | unrealTOM/MC | 4 | 8139 | import numpy as np
import matplotlib.pyplot as plt
import math
def normal(mu,sigma,x): #normal distribution
return 1/(math.pi*2)**0.5/sigma*np.exp(-(x-mu)**2/2/sigma**2)
def eval(x):
return normal(-4,1,x) + normal(4,1,x)
#return 0.3*np.exp(-0.2*x**2)+0.7*np.exp(-0.2*(x-10)**2)
def ref(x_star,x): #normal distribution
return normal(x,10,x_star)
N = [100,500,1000,5000]
fig = plt.figure()
for i in range(4):
X = np.array([])
x = 0.1 #initialize x0 to be 0.1
for j in range(N[i]):
u = np.random.rand()
x_star = np.random.normal(x,10)
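        # The Gaussian proposal is symmetric, so the Hastings correction q(x|x*)/q(x*|x) cancels and A = min(1, p(x*)/p(x))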
A = min(1,eval(x_star)/eval(x)) #*q(x,x_star)/p(x)/q(x_star,x))
if u < A:
x = x_star
X=np.hstack((X,x))
ax = fig.add_subplot(2,2,i+1)
ax.hist(X,bins=100,density=True)
x = np.linspace(-10,20,5000)
#ax.plot(x,eval(x)/2.7) #2.7 approximates the normalizing constant
ax.plot(x,eval(x)/2) #2 approximates the normalizing constant
ax.set_ylim(0,0.35)
ax.text(-9,0.25,'I=%d'%N[i])
fig.suptitle('Metropolis_Hastings for MCMC(Normal)')
#fig.suptitle('Metropolis_Hastings for MCMC(Exp.)')
plt.savefig('MetropolisNormal.png',dpi=100)
#plt.savefig('MetropolisExp.png',dpi=100)
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
import math
def normal(mu,sigma,x): #normal distribution
return 1/(math.pi*2)**0.5/sigma*np.exp(-(x-mu)**2/2/sigma**2)
def eval(x):
return normal(-4,1,x) + normal(4,1,x)
#return 0.3*np.exp(-0.2*x**2)+0.7*np.exp(-0.2*(x-10)**2)
def ref(x_star,x): #normal distribution
return normal(x,10,x_star)
N = [100,500,1000,5000]
fig = plt.figure()
for i in range(4):
X = np.array([])
x = 0.1 #initialize x0 to be 0.1
for j in range(N[i]):
u = np.random.rand()
x_star = np.random.normal(x,10)
A = min(1,eval(x_star)/eval(x)) #*q(x,x_star)/p(x)/q(x_star,x))
if u < A:
x = x_star
X=np.hstack((X,x))
ax = fig.add_subplot(2,2,i+1)
ax.hist(X,bins=100,density=True)
x = np.linspace(-10,20,5000)
#ax.plot(x,eval(x)/2.7) #2.7 approximates the normalizing constant
ax.plot(x,eval(x)/2) #2 approximates the normalizing constant
ax.set_ylim(0,0.35)
ax.text(-9,0.25,'I=%d'%N[i])
fig.suptitle('Metropolis_Hastings for MCMC(Normal)')
#fig.suptitle('Metropolis_Hastings for MCMC(Exp.)')
plt.savefig('MetropolisNormal.png',dpi=100)
#plt.savefig('MetropolisExp.png',dpi=100)
plt.show()
| en | 0.376885 | #normal distribution #return 0.3*np.exp(-0.2*x**2)+0.7*np.exp(-0.2*(x-10)**2) #normal distribution #initialize x0 to be 0.1 #*q(x,x_star)/p(x)/q(x_star,x)) #ax.plot(x,eval(x)/2.7) #2.7 approximates the normalizing constant #2 approximates the normalizing constant #fig.suptitle('Metropolis_Hastings for MCMC(Exp.)') #plt.savefig('MetropolisExp.png',dpi=100) | 3.176049 | 3 |
gfwlist/gen.py | lipeijian/shadowsocks-android | 137 | 8140 | <reponame>lipeijian/shadowsocks-android
#!/usr/bin/python
# -*- encoding: utf8 -*-
import itertools
import math
import sys
import IPy
def main():
china_list_set = IPy.IPSet()
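    # One IP/CIDR per line on stdin; IPSet merges overlapping and adjacent ranges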
for line in sys.stdin:
china_list_set.add(IPy.IP(line))
    # Output the results
for ip in china_list_set:
print '<item>' + str(ip) + '</item>'
if __name__ == "__main__":
main()
| #!/usr/bin/python
# -*- encoding: utf8 -*-
import itertools
import math
import sys
import IPy
def main():
china_list_set = IPy.IPSet()
for line in sys.stdin:
china_list_set.add(IPy.IP(line))
    # Output the results
for ip in china_list_set:
print '<item>' + str(ip) + '</item>'
if __name__ == "__main__":
main() | ja | 0.203684 | #!/usr/bin/python # -*- encoding: utf8 -*- # 输出结果 | 3.262187 | 3 |
Specialization/Personal/SortHours.py | lastralab/Statistics | 3 | 8141 | name = "mail.txt"
counts = dict()
handle = open(name)
for line in handle:
line = line.rstrip()
if line == '':
continue
words = line.split()
if words[0] == 'From':
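        # words[5] is expected to be the time field (e.g. '09:14:16'); its first two characters give the hour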
counts[words[5][:2]] = counts.get(words[5][:2], 0) + 1
tlist = list()
for key, value in counts.items():
newtup = (key, value)
tlist.append(newtup)
tlist.sort()
for key, value in tlist:
print key, value
| name = "mail.txt"
counts = dict()
handle = open(name)
for line in handle:
line = line.rstrip()
if line == '':
continue
words = line.split()
if words[0] == 'From':
counts[words[5][:2]] = counts.get(words[5][:2], 0) + 1
tlist = list()
for key, value in counts.items():
newtup = (key, value)
tlist.append(newtup)
tlist.sort()
for key, value in tlist:
print key, value
| none | 1 | 3.238135 | 3 |
|
core/simulators/carla_scenario_simulator.py | RangiLyu/DI-drive | 0 | 8142 | import os
from typing import Any, Dict, List, Optional
import carla
from core.simulators.carla_simulator import CarlaSimulator
from core.simulators.carla_data_provider import CarlaDataProvider
from .srunner.scenarios.route_scenario import RouteScenario, SCENARIO_CLASS_DICT
from .srunner.scenariomanager.scenario_manager import ScenarioManager
class CarlaScenarioSimulator(CarlaSimulator):
"""
    Carla simulator used to run scenarios.
    The simulator loads the configs of the provided scenario, and creates the hero actor, NPC vehicles, walkers and world map
    according to it. The sensors and running status are set up as in the common Carla simulator.
    When created, it will set up the Carla client according to the arguments, set basic simulator configurations used throughout
its lifetime, and set some default running configurations.
If no traffic manager port is provided, it will find random free port in system.
:Arguments:
- cfg (Dict): Config Dict.
- client (carla.Client, optional): Already established Carla client. Defaults to None.
- host (str, optional): TCP host Carla client link to. Defaults to 'localhost'.
- port (int, optional): TCP port Carla client link to. Defaults to 9000.
- tm_port (int, optional): Traffic manager port Carla client link to. Defaults to None.
- timeout (float, optional): Carla client link timeout. Defaults to 10.0.
:Interfaces:
init, get_state, get_sensor_data, get_navigation, get_information, apply_control, run_step, clean_up
:Properties:
- town_name (str): Current town name.
- hero_player (carla.Actor): hero actor in simulation.
- collided (bool): Whether collided in current episode.
- end_distance (float): Distance to target in current frame.
- end_timeout (float): Timeout for entire route provided by planner.
    - total_diatance (float): Distance for entire route provided by planner.
- scenario_manager (Any): Scenario Manager instance used to get running state.
"""
config = dict(
town='Town01',
weather='random',
sync_mode=True,
delta_seconds=0.1,
no_rendering=False,
auto_pilot=False,
n_vehicles=0,
n_pedestrians=0,
disable_two_wheels=False,
col_threshold=400,
resolution=1.0,
waypoint_num=20,
obs=list(),
planner=dict(),
aug=None,
verbose=True,
debug=False,
)
def __init__(
self,
cfg: Dict,
client: Optional[carla.Client] = None,
host: str = 'localhost',
port: int = 9000,
tm_port: int = 9050,
timeout: float = 10.0,
**kwargs
) -> None:
"""
Init Carla scenario simulator.
"""
super().__init__(cfg, client, host, port, tm_port, timeout)
self._resolution = self._cfg.resolution
self._scenario = None
self._start_scenario = False
self._manager = ScenarioManager(self._debug, self._sync_mode, self._client_timeout)
self._criteria_status = dict()
def init(self, config: Any) -> None:
"""
Init simulator episode with provided args.
        This method takes a scenario configuration instance to set up scenarios in the Carla server. The scenario could be
        a single scenario, or a route scenario together with several scenarios triggered while navigating the route. A scenario
        manager is used to manage and check the running status and tick scenarios. A local planner is set to trace the
        route and generate the target waypoint and road options in each tick. It will set up the world, map, vehicles and pedestrians
        according to the provided args and default configs, and reset the running status. If no collision happens when creating
actors, the init will end and return.
:Arguments:
- config (Any): Scenario configuration instance, containing information about the scenarios.
"""
self._scenario_config = config
self.clean_up()
self._set_town(config.town)
self._set_weather(self._weather)
self._blueprints = self._world.get_blueprint_library()
while True:
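            # Keep rebuilding the scenario until all actors spawn cleanly (checked by self._ready() below)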
self.clean_up()
CarlaDataProvider.set_client(self._client)
CarlaDataProvider.set_world(self._world)
CarlaDataProvider.set_traffic_manager_port(self._tm.get_port())
if CarlaDataProvider.get_map().name != config.town and CarlaDataProvider.get_map().name != "OpenDriveMap":
print("WARNING: The CARLA server uses the wrong map: {}".format(CarlaDataProvider.get_map().name))
print("WARNING: This scenario requires to use map: {}".format(config.town))
print("[SIMULATOR] Preparing scenario: " + config.name)
config.n_vehicles = self._n_vehicles
config.disable_two_wheels = self._disable_two_wheels
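            # Route scenarios build the full route and hand it to the local planner; other scenario types spawn their ego vehicles directly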
if "RouteScenario" in config.name:
self._scenario = RouteScenario(
world=self._world, config=config, debug_mode=self._debug, resolution=self._resolution
)
self._hero_actor = self._scenario.ego_vehicles[0]
self._prepare_observations()
self._manager.load_scenario(self._scenario)
self._planner.set_route(CarlaDataProvider.get_hero_vehicle_route(), clean=True)
self._total_distance = self._planner.distance_to_goal
self._end_timeout = self._scenario.route_timeout
else:
# select scenario
if config.type in SCENARIO_CLASS_DICT:
scenario_class = SCENARIO_CLASS_DICT[config.type]
ego_vehicles = []
for vehicle in config.ego_vehicles:
ego_vehicles.append(
CarlaDataProvider.request_new_actor(
vehicle.model,
vehicle.transform,
vehicle.rolename,
True,
color=vehicle.color,
actor_category=vehicle.category
)
)
self._scenario = scenario_class(
world=self._world, ego_vehicles=ego_vehicles, config=config, debug_mode=self._debug
)
else:
raise RuntimeError("Scenario '{}' not support!".format(config.type))
self._hero_actor = self._scenario.ego_vehicles[0]
self._prepare_observations()
self._manager.load_scenario(self._scenario)
self._planner.set_destination(config.route.data[0], config.route.data[1], clean=True)
self._total_distance = self._planner.distance_to_goal
self._spawn_pedestrians()
if self._ready():
if self._debug:
self._count_actors()
break
def run_step(self) -> None:
"""
Run one step simulation.
        This will tick the Carla world and scenarios, and update information for all sensors and measurements.
"""
if not self._start_scenario:
self._manager.start_scenario()
self._start_scenario = True
self._tick += 1
world_snapshot = self._world.get_snapshot()
timestamp = world_snapshot.timestamp
self._timestamp = timestamp.elapsed_seconds
self._manager.tick_scenario(timestamp)
if self._planner is not None:
self._planner.run_step()
self._collided = self._collision_sensor.collided
self._traffic_light_helper.tick()
if self._bev_wrapper is not None:
if CarlaDataProvider._hero_vehicle_route is not None:
self._bev_wrapper.tick()
def get_criteria(self) -> List:
"""
        Get the criteria status list of the scenario in the current frame. Only criteria related to the hero actor are counted.
:Returns:
List: Criteria list of scenario.
"""
criterion_list = self._manager.analyze_tick()
for name, actor_id, result, actual_value, expected_value in criterion_list:
if actor_id == self._hero_actor.id:
self._criteria_status.update({name: [result, actual_value, expected_value]})
return self._criteria_status
def end_scenario(self) -> None:
"""
End current scenario. Must be called before ending an episode.
"""
if self._start_scenario:
self._manager.end_scenario()
self._start_scenario = False
def clean_up(self) -> None:
"""
Destroy all actors and sensors in current world. Clear all messages saved in simulator and data provider,
        and clean up running scenarios. This will NOT destroy the Carla client, so the simulator can use the same Carla
        client to start the next episode.
"""
if self._manager is not None:
self._manager.clean_up()
self._criteria_status.clear()
super().clean_up()
@property
def scenario_manager(self) -> Any:
return self._manager
| import os
from typing import Any, Dict, List, Optional
import carla
from core.simulators.carla_simulator import CarlaSimulator
from core.simulators.carla_data_provider import CarlaDataProvider
from .srunner.scenarios.route_scenario import RouteScenario, SCENARIO_CLASS_DICT
from .srunner.scenariomanager.scenario_manager import ScenarioManager
class CarlaScenarioSimulator(CarlaSimulator):
"""
    Carla simulator used to run scenarios.
    The simulator loads the configs of the provided scenario, and creates the hero actor, NPC vehicles, walkers and world map
    according to it. The sensors and running status are set up as in the common Carla simulator.
    When created, it will set up the Carla client according to the arguments, set basic simulator configurations used throughout
its lifetime, and set some default running configurations.
If no traffic manager port is provided, it will find random free port in system.
:Arguments:
- cfg (Dict): Config Dict.
- client (carla.Client, optional): Already established Carla client. Defaults to None.
- host (str, optional): TCP host Carla client link to. Defaults to 'localhost'.
- port (int, optional): TCP port Carla client link to. Defaults to 9000.
- tm_port (int, optional): Traffic manager port Carla client link to. Defaults to None.
- timeout (float, optional): Carla client link timeout. Defaults to 10.0.
:Interfaces:
init, get_state, get_sensor_data, get_navigation, get_information, apply_control, run_step, clean_up
:Properties:
- town_name (str): Current town name.
- hero_player (carla.Actor): hero actor in simulation.
- collided (bool): Whether collided in current episode.
- end_distance (float): Distance to target in current frame.
- end_timeout (float): Timeout for entire route provided by planner.
    - total_diatance (float): Distance for entire route provided by planner.
- scenario_manager (Any): Scenario Manager instance used to get running state.
"""
config = dict(
town='Town01',
weather='random',
sync_mode=True,
delta_seconds=0.1,
no_rendering=False,
auto_pilot=False,
n_vehicles=0,
n_pedestrians=0,
disable_two_wheels=False,
col_threshold=400,
resolution=1.0,
waypoint_num=20,
obs=list(),
planner=dict(),
aug=None,
verbose=True,
debug=False,
)
def __init__(
self,
cfg: Dict,
client: Optional[carla.Client] = None,
host: str = 'localhost',
port: int = 9000,
tm_port: int = 9050,
timeout: float = 10.0,
**kwargs
) -> None:
"""
Init Carla scenario simulator.
"""
super().__init__(cfg, client, host, port, tm_port, timeout)
self._resolution = self._cfg.resolution
self._scenario = None
self._start_scenario = False
self._manager = ScenarioManager(self._debug, self._sync_mode, self._client_timeout)
self._criteria_status = dict()
def init(self, config: Any) -> None:
"""
Init simulator episode with provided args.
        This method takes a scenario configuration instance to set up scenarios in the Carla server. The scenario could be
        a single scenario, or a route scenario together with several scenarios triggered while navigating the route. A scenario
        manager is used to manage and check the running status and tick scenarios. A local planner is set to trace the
        route and generate the target waypoint and road options in each tick. It will set up the world, map, vehicles and pedestrians
        according to the provided args and default configs, and reset the running status. If no collision happens when creating
actors, the init will end and return.
:Arguments:
- config (Any): Scenario configuration instance, containing information about the scenarios.
"""
self._scenario_config = config
self.clean_up()
self._set_town(config.town)
self._set_weather(self._weather)
self._blueprints = self._world.get_blueprint_library()
while True:
self.clean_up()
CarlaDataProvider.set_client(self._client)
CarlaDataProvider.set_world(self._world)
CarlaDataProvider.set_traffic_manager_port(self._tm.get_port())
if CarlaDataProvider.get_map().name != config.town and CarlaDataProvider.get_map().name != "OpenDriveMap":
print("WARNING: The CARLA server uses the wrong map: {}".format(CarlaDataProvider.get_map().name))
print("WARNING: This scenario requires to use map: {}".format(config.town))
print("[SIMULATOR] Preparing scenario: " + config.name)
config.n_vehicles = self._n_vehicles
config.disable_two_wheels = self._disable_two_wheels
if "RouteScenario" in config.name:
self._scenario = RouteScenario(
world=self._world, config=config, debug_mode=self._debug, resolution=self._resolution
)
self._hero_actor = self._scenario.ego_vehicles[0]
self._prepare_observations()
self._manager.load_scenario(self._scenario)
self._planner.set_route(CarlaDataProvider.get_hero_vehicle_route(), clean=True)
self._total_distance = self._planner.distance_to_goal
self._end_timeout = self._scenario.route_timeout
else:
# select scenario
if config.type in SCENARIO_CLASS_DICT:
scenario_class = SCENARIO_CLASS_DICT[config.type]
ego_vehicles = []
for vehicle in config.ego_vehicles:
ego_vehicles.append(
CarlaDataProvider.request_new_actor(
vehicle.model,
vehicle.transform,
vehicle.rolename,
True,
color=vehicle.color,
actor_category=vehicle.category
)
)
self._scenario = scenario_class(
world=self._world, ego_vehicles=ego_vehicles, config=config, debug_mode=self._debug
)
else:
raise RuntimeError("Scenario '{}' not support!".format(config.type))
self._hero_actor = self._scenario.ego_vehicles[0]
self._prepare_observations()
self._manager.load_scenario(self._scenario)
self._planner.set_destination(config.route.data[0], config.route.data[1], clean=True)
self._total_distance = self._planner.distance_to_goal
self._spawn_pedestrians()
if self._ready():
if self._debug:
self._count_actors()
break
def run_step(self) -> None:
"""
Run one step simulation.
        This will tick the Carla world and scenarios, and update information for all sensors and measurements.
"""
if not self._start_scenario:
self._manager.start_scenario()
self._start_scenario = True
self._tick += 1
world_snapshot = self._world.get_snapshot()
timestamp = world_snapshot.timestamp
self._timestamp = timestamp.elapsed_seconds
self._manager.tick_scenario(timestamp)
if self._planner is not None:
self._planner.run_step()
self._collided = self._collision_sensor.collided
self._traffic_light_helper.tick()
if self._bev_wrapper is not None:
if CarlaDataProvider._hero_vehicle_route is not None:
self._bev_wrapper.tick()
def get_criteria(self) -> List:
"""
        Get the criteria status list of the scenario in the current frame. Only criteria related to the hero actor are counted.
:Returns:
List: Criteria list of scenario.
"""
criterion_list = self._manager.analyze_tick()
for name, actor_id, result, actual_value, expected_value in criterion_list:
if actor_id == self._hero_actor.id:
self._criteria_status.update({name: [result, actual_value, expected_value]})
return self._criteria_status
def end_scenario(self) -> None:
"""
End current scenario. Must be called before ending an episode.
"""
if self._start_scenario:
self._manager.end_scenario()
self._start_scenario = False
def clean_up(self) -> None:
"""
Destroy all actors and sensors in current world. Clear all messages saved in simulator and data provider,
        and clean up running scenarios. This will NOT destroy the Carla client, so the simulator can use the same Carla
        client to start the next episode.
"""
if self._manager is not None:
self._manager.clean_up()
self._criteria_status.clear()
super().clean_up()
@property
def scenario_manager(self) -> Any:
return self._manager
| en | 0.743983 | Carla simualtor used to run scenarios. The simulator loads configs of provided scenario, and create hero actor, npc vehicles, walkers, world map according to it. The sensors and running status are set as common Carla simulator. When created, it will set up Carla client due to arguments, set simulator basic configurations used all around its lifetime, and set some default running configurations. If no traffic manager port is provided, it will find random free port in system. :Arguments: - cfg (Dict): Config Dict. - client (carla.Client, optional): Already established Carla client. Defaults to None. - host (str, optional): TCP host Carla client link to. Defaults to 'localhost'. - port (int, optional): TCP port Carla client link to. Defaults to 9000. - tm_port (int, optional): Traffic manager port Carla client link to. Defaults to None. - timeout (float, optional): Carla client link timeout. Defaults to 10.0. :Interfaces: init, get_state, get_sensor_data, get_navigation, get_information, apply_control, run_step, clean_up :Properties: - town_name (str): Current town name. - hero_player (carla.Actor): hero actor in simulation. - collided (bool): Whether collided in current episode. - end_distance (float): Distance to target in current frame. - end_timeout (float): Timeout for entire route provided by planner. - total_diatance (float): Dictance for entire route provided by planner. - scenario_manager (Any): Scenario Manager instance used to get running state. Init Carla scenario simulator. Init simulator episode with provided args. This method takes an scneario configuration instance to set up scenarios in Carla server. the scenario could be a single scenario, or a route scenario together with several scenarios during navigating the route. A scneario manager is used to manager and check the running status and tick scenarios. A local planner is set to trace the route to generate target waypoint and road options in each tick. It will set world, map, vehicles, pedestrians dut to provided args and default configs, and reset running status. If no collision happens when creating actors, the init will end and return. :Arguments: - config (Any): Scenario configuration instance, containing information about the scenarios. # select scenario Run one step simulation. This will tick Carla world and scenarios, update informations for all sensors and measurement. Get criteria status list of scenario in current frame. Criteria related with hero actor is encounted. :Returns: List: Criteria list of scenario. End current scenario. Must be called before ending an episode. Destroy all actors and sensors in current world. Clear all messages saved in simulator and data provider, and clean up running scenarios. This will NOT destroy theCarla client, so simulator can use same carla client to start next episode. | 2.524806 | 3 |
bin/run.py | Conengmo/python-empty-project | 0 | 8143 | import myproject
myproject.logs(show_level='debug')
myproject.mymod.do_something()
| import myproject
myproject.logs(show_level='debug')
myproject.mymod.do_something()
| none | 1 | 1.369227 | 1 |
|
development/simple_email.py | gerold-penz/python-simplemail | 16 | 8144 | <filename>development/simple_email.py
#!/usr/bin/env python
# coding: utf-8
# BEGIN --- required only for testing, remove in real world code --- BEGIN
import os
import sys
THISDIR = os.path.dirname(os.path.abspath(__file__))
APPDIR = os.path.abspath(os.path.join(THISDIR, os.path.pardir, os.path.pardir))
sys.path.insert(0, APPDIR)
# END --- required only for testing, remove in real world code --- END
import simplemail
simplemail.Email(
smtp_server = "smtp.a1.net:25",
smtp_user = "xxx",
smtp_password = "<PASSWORD>",
use_tls = False,
from_address = "xxx",
to_address = "xxx",
subject = u"Really simple test with umlauts (öäüß)",
message = u"This is the message with umlauts (öäüß)",
).send()
print "Sent"
print
| <filename>development/simple_email.py
#!/usr/bin/env python
# coding: utf-8
# BEGIN --- required only for testing, remove in real world code --- BEGIN
import os
import sys
THISDIR = os.path.dirname(os.path.abspath(__file__))
APPDIR = os.path.abspath(os.path.join(THISDIR, os.path.pardir, os.path.pardir))
sys.path.insert(0, APPDIR)
# END --- required only for testing, remove in real world code --- END
import simplemail
simplemail.Email(
smtp_server = "smtp.a1.net:25",
smtp_user = "xxx",
smtp_password = "<PASSWORD>",
use_tls = False,
from_address = "xxx",
to_address = "xxx",
subject = u"Really simple test with umlauts (öäüß)",
message = u"This is the message with umlauts (öäüß)",
).send()
print "Sent"
print
| en | 0.660359 | #!/usr/bin/env python # coding: utf-8 # BEGIN --- required only for testing, remove in real world code --- BEGIN # END --- required only for testing, remove in real world code --- END | 2.795105 | 3 |
features/hdf_features.py | DerekYJC/bmi_python | 0 | 8145 | <reponame>DerekYJC/bmi_python
'''
HDF-saving features
'''
import time
import tempfile
import random
import traceback
import numpy as np
import fnmatch
import os, sys
import subprocess
from riglib import calibrations, bmi
from riglib.bmi import extractor
from riglib.experiment import traits
import hdfwriter
class SaveHDF(object):
'''
Saves data from registered sources into tables in an HDF file
'''
def init(self):
'''
Secondary init function. See riglib.experiment.Experiment.init()
Prior to starting the task, this 'init' starts an HDFWriter sink.
'''
from riglib import sink
self.sinks = sink.sinks
self.h5file = tempfile.NamedTemporaryFile(suffix=".h5", delete=False)
self.h5file.flush()
self.h5file.close()
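        # Only the temp file's name is needed from here on; the HDF sink reopens it by path (delete=False keeps it on disk)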
self.hdf = sink.sinks.start(self.sink_class, filename=self.h5file.name)
super(SaveHDF, self).init()
@property
def sink_class(self):
'''
Specify the sink class as a function in case future descendant classes want to use a different type of sink
'''
return hdfwriter.HDFWriter
def run(self):
'''
Code to execute immediately prior to the beginning of the task FSM executing, or after the FSM has finished running.
See riglib.experiment.Experiment.run(). This 'run' method stops the HDF sink after the FSM has finished running
'''
try:
super(SaveHDF, self).run()
finally:
self.hdf.stop()
def join(self):
'''
Re-join any spawned process for cleanup
'''
self.hdf.join()
super(SaveHDF, self).join()
def set_state(self, condition, **kwargs):
'''
Save task state transitions to HDF
Parameters
----------
condition: string
Name of new state to transition into. The state name must be a key in the 'status' dictionary attribute of the task
Returns
-------
None
'''
self.hdf.sendMsg(condition)
super(SaveHDF, self).set_state(condition, **kwargs)
def record_annotation(self, msg):
""" Record a user-input annotation """
self.hdf.sendMsg("annotation: " + msg)
super(SaveHDF, self).record_annotation(msg)
print("Saved annotation to HDF: " + msg)
def get_h5_filename(self):
return self.h5file.name
def cleanup(self, database, saveid, **kwargs):
'''
See LogExperiment.cleanup for documentation
'''
super(SaveHDF, self).cleanup(database, saveid, **kwargs)
print("Beginning HDF file cleanup")
print("\tHDF data currently saved to temp file: %s" % self.h5file.name)
try:
print("\tRunning self.cleanup_hdf()")
self.cleanup_hdf()
except:
print("\n\n\n\n\nError cleaning up HDF file!")
import traceback
traceback.print_exc()
# this 'if' is needed because the remote procedure call to save_data doesn't like kwargs
dbname = kwargs['dbname'] if 'dbname' in kwargs else 'default'
if dbname == 'default':
database.save_data(self.h5file.name, "hdf", saveid)
else:
database.save_data(self.h5file.name, "hdf", saveid, dbname=dbname)
| '''
HDF-saving features
'''
import time
import tempfile
import random
import traceback
import numpy as np
import fnmatch
import os, sys
import subprocess
from riglib import calibrations, bmi
from riglib.bmi import extractor
from riglib.experiment import traits
import hdfwriter
class SaveHDF(object):
'''
Saves data from registered sources into tables in an HDF file
'''
def init(self):
'''
Secondary init function. See riglib.experiment.Experiment.init()
Prior to starting the task, this 'init' starts an HDFWriter sink.
'''
from riglib import sink
self.sinks = sink.sinks
self.h5file = tempfile.NamedTemporaryFile(suffix=".h5", delete=False)
self.h5file.flush()
self.h5file.close()
self.hdf = sink.sinks.start(self.sink_class, filename=self.h5file.name)
super(SaveHDF, self).init()
@property
def sink_class(self):
'''
Specify the sink class as a function in case future descendant classes want to use a different type of sink
'''
return hdfwriter.HDFWriter
def run(self):
'''
Code to execute immediately prior to the beginning of the task FSM executing, or after the FSM has finished running.
See riglib.experiment.Experiment.run(). This 'run' method stops the HDF sink after the FSM has finished running
'''
try:
super(SaveHDF, self).run()
finally:
self.hdf.stop()
def join(self):
'''
Re-join any spawned process for cleanup
'''
self.hdf.join()
super(SaveHDF, self).join()
def set_state(self, condition, **kwargs):
'''
Save task state transitions to HDF
Parameters
----------
condition: string
Name of new state to transition into. The state name must be a key in the 'status' dictionary attribute of the task
Returns
-------
None
'''
self.hdf.sendMsg(condition)
super(SaveHDF, self).set_state(condition, **kwargs)
def record_annotation(self, msg):
""" Record a user-input annotation """
self.hdf.sendMsg("annotation: " + msg)
super(SaveHDF, self).record_annotation(msg)
print("Saved annotation to HDF: " + msg)
def get_h5_filename(self):
return self.h5file.name
def cleanup(self, database, saveid, **kwargs):
'''
See LogExperiment.cleanup for documentation
'''
super(SaveHDF, self).cleanup(database, saveid, **kwargs)
print("Beginning HDF file cleanup")
print("\tHDF data currently saved to temp file: %s" % self.h5file.name)
try:
print("\tRunning self.cleanup_hdf()")
self.cleanup_hdf()
except:
print("\n\n\n\n\nError cleaning up HDF file!")
import traceback
traceback.print_exc()
# this 'if' is needed because the remote procedure call to save_data doesn't like kwargs
dbname = kwargs['dbname'] if 'dbname' in kwargs else 'default'
if dbname == 'default':
database.save_data(self.h5file.name, "hdf", saveid)
else:
database.save_data(self.h5file.name, "hdf", saveid, dbname=dbname) | en | 0.788463 | HDF-saving features Saves data from registered sources into tables in an HDF file Secondary init function. See riglib.experiment.Experiment.init() Prior to starting the task, this 'init' starts an HDFWriter sink. Specify the sink class as a function in case future descendant classes want to use a different type of sink Code to execute immediately prior to the beginning of the task FSM executing, or after the FSM has finished running. See riglib.experiment.Experiment.run(). This 'run' method stops the HDF sink after the FSM has finished running Re-join any spawned process for cleanup Save task state transitions to HDF Parameters ---------- condition: string Name of new state to transition into. The state name must be a key in the 'status' dictionary attribute of the task Returns ------- None Record a user-input annotation See LogExperiment.cleanup for documentation # this 'if' is needed because the remote procedure call to save_data doesn't like kwargs | 2.43676 | 2 |
common/irma/common/exceptions.py | vaginessa/irma | 0 | 8146 | <reponame>vaginessa/irma
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
class IrmaDependencyError(Exception):
"""Error caused by a missing dependency."""
pass
class IrmaMachineManagerError(Exception):
"""Error on a machine manager."""
pass
class IrmaMachineError(Exception):
"""Error on a machine."""
pass
class IrmaAdminError(Exception):
"""Error in admin part."""
pass
class IrmaDatabaseError(Exception):
"""Error on a database manager."""
pass
class IrmaCoreError(Exception):
"""Error in core parts (Db, Ftp, Celery..)"""
pass
class IrmaDatabaseResultNotFound(IrmaDatabaseError):
"""A database result was required but none was found."""
pass
class IrmaFileSystemError(IrmaDatabaseError):
"""Nothing corresponding to the request has been found in the database."""
pass
class IrmaConfigurationError(IrmaCoreError):
"""Error wrong configuration."""
pass
class IrmaFtpError(IrmaCoreError):
"""Error on ftp manager."""
pass
class IrmaFTPSError(IrmaFtpError):
"""Error on ftp/tls manager."""
pass
class IrmaSFTPError(IrmaFtpError):
"""Error on sftp manager."""
pass
class IrmaTaskError(IrmaCoreError):
"""Error while processing celery tasks."""
pass
class IrmaLockError(Exception):
"""Error for the locks on db content (already taken)"""
pass
class IrmaLockModeError(Exception):
"""Error for the mode of the locks (doesn't exist)"""
pass
class IrmaValueError(Exception):
"""Error for the parameters passed to the functions"""
pass
| #
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
class IrmaDependencyError(Exception):
"""Error caused by a missing dependency."""
pass
class IrmaMachineManagerError(Exception):
"""Error on a machine manager."""
pass
class IrmaMachineError(Exception):
"""Error on a machine."""
pass
class IrmaAdminError(Exception):
"""Error in admin part."""
pass
class IrmaDatabaseError(Exception):
"""Error on a database manager."""
pass
class IrmaCoreError(Exception):
"""Error in core parts (Db, Ftp, Celery..)"""
pass
class IrmaDatabaseResultNotFound(IrmaDatabaseError):
"""A database result was required but none was found."""
pass
class IrmaFileSystemError(IrmaDatabaseError):
"""Nothing corresponding to the request has been found in the database."""
pass
class IrmaConfigurationError(IrmaCoreError):
"""Error wrong configuration."""
pass
class IrmaFtpError(IrmaCoreError):
"""Error on ftp manager."""
pass
class IrmaFTPSError(IrmaFtpError):
"""Error on ftp/tls manager."""
pass
class IrmaSFTPError(IrmaFtpError):
"""Error on sftp manager."""
pass
class IrmaTaskError(IrmaCoreError):
"""Error while processing celery tasks."""
pass
class IrmaLockError(Exception):
"""Error for the locks on db content (already taken)"""
pass
class IrmaLockModeError(Exception):
"""Error for the mode of the locks (doesn't exist)"""
pass
class IrmaValueError(Exception):
"""Error for the parameters passed to the functions"""
pass | en | 0.860838 | # # Copyright (c) 2013-2018 Quarkslab. # This file is part of IRMA project. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License in the top-level directory # of this distribution and at: # # http://www.apache.org/licenses/LICENSE-2.0 # # No part of the project, including this file, may be copied, # modified, propagated, or distributed except according to the # terms contained in the LICENSE file. Error caused by a missing dependency. Error on a machine manager. Error on a machine. Error in admin part. Error on a database manager. Error in core parts (Db, Ftp, Celery..) A database result was required but none was found. Nothing corresponding to the request has been found in the database. Error wrong configuration. Error on ftp manager. Error on ftp/tls manager. Error on sftp manager. Error while processing celery tasks. Error for the locks on db content (already taken) Error for the mode of the locks (doesn't exist) Error for the parameters passed to the functions | 1.957827 | 2 |
tf_crnn/libs/infer.py | sunmengnan/city_brain | 0 | 8147 | <reponame>sunmengnan/city_brain
import time
import os
import math
import numpy as np
from libs import utils
from libs.img_dataset import ImgDataset
from nets.crnn import CRNN
from nets.cnn.paper_cnn import PaperCNN
import shutil
def calculate_accuracy(predicts, labels):
"""
:param predicts: encoded predict result
:param labels: ground true label
:return: accuracy
"""
assert len(predicts) == len(labels)
correct_count = 0
for i, p_label in enumerate(predicts):
if p_label == labels[i]:
correct_count += 1
acc = correct_count / len(predicts)
return acc, correct_count
def calculate_edit_distance_mean(edit_distences):
"""
    Compute the mean edit distance, excluding entries where edit_distance == 0.
:param edit_distences:
:return:
"""
data = np.array(edit_distences)
data = data[data != 0]
if len(data) == 0:
return 0
return np.mean(data)
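# Example (illustrative values, not from any real test run): zeros are excluded
# before averaging, and an all-zero input yields 0.
#
#   calculate_edit_distance_mean([0, 0, 2, 4])   # -> 3.0
#   calculate_edit_distance_mean([0, 0, 0])      # -> 0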
def validation(sess, feeds, fetches, dataset, converter, result_dir, name,
step=None, print_batch_info=False, copy_failed=False):
"""
Save file name: {acc}_{step}.txt
:param sess: tensorflow session
    :param feeds: dict of input placeholders ('inputs', 'labels', 'sequence_length', 'is_training')
    :param fetches: ops returning (decoded predictions, mean edit distance, per-sample edit distances)
:param result_dir:
:param name: val, test, infer. used to create sub dir in result_dir
:return:
"""
sess.run(dataset.init_op)
img_paths = []
predicts = []
trimed_predicts = []
labels = []
trimed_labels = []
edit_distances = []
total_batch_time = 0
for batch in range(dataset.num_batches):
img_batch, widths, label_batch, batch_labels, batch_img_paths = dataset.get_next_batch(sess)
if len(batch_labels) == 0:
continue
batch_start_time = time.time()
feed = {feeds['inputs']: img_batch,
feeds['labels']: label_batch,
feeds['sequence_length']: PaperCNN.get_sequence_lengths(widths),
feeds['is_training']: False}
try:
batch_predicts, edit_distance, batch_edit_distances = sess.run(fetches, feed)
except Exception:
print(batch_labels)
continue
batch_predicts = [converter.decode(p, CRNN.CTC_INVALID_INDEX) for p in batch_predicts]
trimed_batch_predicts = [utils.remove_all_symbols(txt) for txt in batch_predicts]
trimed_batch_labels = [utils.remove_all_symbols(txt) for txt in batch_labels]
img_paths.extend(batch_img_paths)
predicts.extend(batch_predicts)
labels.extend(batch_labels)
trimed_predicts.extend(trimed_batch_predicts)
trimed_labels.extend(trimed_batch_labels)
edit_distances.extend(batch_edit_distances)
acc, correct_count = calculate_accuracy(batch_predicts, batch_labels)
trimed_acc, trimed_correct_count = calculate_accuracy(trimed_batch_predicts, trimed_batch_labels)
batch_time = time.time() - batch_start_time
total_batch_time += batch_time
if print_batch_info:
print("{:.03f}s [{}/{}] acc: {:.03f}({}/{}), edit_distance: {:.03f}, trim_acc {:.03f}({}/{})"
.format(batch_time, batch, dataset.num_batches,
acc, correct_count, dataset.batch_size,
edit_distance,
trimed_acc, trimed_correct_count, dataset.batch_size))
acc, correct_count = calculate_accuracy(predicts, labels)
trimed_acc, trimed_correct_count = calculate_accuracy(trimed_predicts, trimed_labels)
edit_distance_mean = calculate_edit_distance_mean(edit_distances)
total_edit_distance = sum(edit_distances)
acc_str = "Accuracy: {:.03f} ({}/{}), Trimed Accuracy: {:.03f} ({}/{})" \
"Total edit distance: {:.03f}, " \
"Average edit distance: {:.03f}, Average batch time: {:.03f}" \
.format(acc, correct_count, dataset.size,
trimed_acc, trimed_correct_count, dataset.size,
total_edit_distance, edit_distance_mean, total_batch_time / dataset.num_batches)
print(acc_str)
save_dir = os.path.join(result_dir, name)
utils.check_dir_exist(save_dir)
result_file_path = save_txt_result(save_dir, acc, step, labels, predicts, 'acc',
edit_distances, acc_str)
save_txt_result(save_dir, acc, step, labels, predicts, 'acc', edit_distances,
acc_str, only_failed=True)
save_txt_result(save_dir, trimed_acc, step, trimed_labels, trimed_predicts, 'tacc',
edit_distances)
save_txt_result(save_dir, trimed_acc, step, trimed_labels, trimed_predicts, 'tacc',
edit_distances, only_failed=True)
save_txt_4_analyze(save_dir, labels, predicts, 'acc', step)
save_txt_4_analyze(save_dir, trimed_labels, trimed_predicts, 'tacc', step)
# Copy image not all match to a dir
# TODO: we will only save failed imgs for acc
if copy_failed:
failed_infer_img_dir = result_file_path[:-4] + "_failed"
if os.path.exists(failed_infer_img_dir) and os.path.isdir(failed_infer_img_dir):
shutil.rmtree(failed_infer_img_dir)
utils.check_dir_exist(failed_infer_img_dir)
failed_image_indices = []
for i, val in enumerate(edit_distances):
if val != 0:
failed_image_indices.append(i)
for i in failed_image_indices:
img_path = img_paths[i]
img_name = img_path.split("/")[-1]
dst_path = os.path.join(failed_infer_img_dir, img_name)
shutil.copyfile(img_path, dst_path)
failed_infer_result_file_path = os.path.join(failed_infer_img_dir, "result.txt")
with open(failed_infer_result_file_path, 'w', encoding='utf-8') as f:
for i in failed_image_indices:
p_label = predicts[i]
t_label = labels[i]
f.write("{}\n".format(img_paths[i]))
f.write("input: {:17s} length: {}\n".format(t_label, len(t_label)))
f.write("predict: {:17s} length: {}\n".format(p_label, len(p_label)))
f.write("edit distance: {}\n".format(edit_distances[i]))
f.write('-' * 30 + '\n')
return acc, trimed_acc, edit_distance_mean, total_edit_distance, correct_count, trimed_correct_count
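# Example call (illustrative; the session, graph endpoints and dataset objects
# come from the training/evaluation driver, which is not part of this file):
#
#   acc, tacc, ed_mean, ed_total, n_ok, n_ok_trimmed = validation(
#       sess, feeds, fetches, val_dataset, converter,
#       result_dir='./output', name='val', step=10000)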
def save_txt_4_analyze(save_dir, labels, predicts, acc_type, step):
"""
    Save the test set's ground-truth labels and predictions into a single txt file to make statistics easier.
"""
txt_path = os.path.join(save_dir, '%d_%s_gt_and_pred.txt' % (step, acc_type))
with open(txt_path, 'w', encoding='utf-8') as f:
for i, p_label in enumerate(predicts):
t_label = labels[i]
f.write("{}__$__{}\n".format(t_label, p_label))
def save_txt_result(save_dir, acc, step, labels, predicts, acc_type,
edit_distances=None, acc_str=None, only_failed=False):
"""
:param acc_type: 'acc' or 'tacc'
:return:
"""
failed_suffix = ''
if only_failed:
failed_suffix = 'failed'
if step is not None:
txt_path = os.path.join(save_dir, '%d_%s_%.3f_%s.txt' % (step, acc_type, acc, failed_suffix))
else:
txt_path = os.path.join(save_dir, '%s_%.3f_%s.txt' % (acc_type, acc, failed_suffix))
print("Write result to %s" % txt_path)
with open(txt_path, 'w', encoding='utf-8') as f:
for i, p_label in enumerate(predicts):
t_label = labels[i]
all_match = (t_label == p_label)
if only_failed and all_match:
continue
# f.write("{}\n".format(img_paths[i]))
f.write("input: {:17s} length: {}\n".format(t_label, len(t_label)))
f.write("predict: {:17s} length: {}\n".format(p_label, len(p_label)))
f.write("all match: {}\n".format(1 if all_match else 0))
if edit_distances:
f.write("edit distance: {}\n".format(edit_distances[i]))
f.write('-' * 30 + '\n')
if acc_str:
f.write(acc_str + "\n")
return txt_path
| import time
import os
import math
import numpy as np
from libs import utils
from libs.img_dataset import ImgDataset
from nets.crnn import CRNN
from nets.cnn.paper_cnn import PaperCNN
import shutil
def calculate_accuracy(predicts, labels):
"""
:param predicts: encoded predict result
:param labels: ground true label
:return: accuracy
"""
assert len(predicts) == len(labels)
correct_count = 0
for i, p_label in enumerate(predicts):
if p_label == labels[i]:
correct_count += 1
acc = correct_count / len(predicts)
return acc, correct_count
def calculate_edit_distance_mean(edit_distences):
"""
    Compute the mean edit distance, excluding entries where edit_distance == 0.
:param edit_distences:
:return:
"""
data = np.array(edit_distences)
data = data[data != 0]
if len(data) == 0:
return 0
return np.mean(data)
def validation(sess, feeds, fetches, dataset, converter, result_dir, name,
step=None, print_batch_info=False, copy_failed=False):
"""
Save file name: {acc}_{step}.txt
:param sess: tensorflow session
    :param feeds: dict of input placeholders ('inputs', 'labels', 'sequence_length', 'is_training')
    :param fetches: ops returning (decoded predictions, mean edit distance, per-sample edit distances)
:param result_dir:
:param name: val, test, infer. used to create sub dir in result_dir
:return:
"""
sess.run(dataset.init_op)
img_paths = []
predicts = []
trimed_predicts = []
labels = []
trimed_labels = []
edit_distances = []
total_batch_time = 0
for batch in range(dataset.num_batches):
img_batch, widths, label_batch, batch_labels, batch_img_paths = dataset.get_next_batch(sess)
if len(batch_labels) == 0:
continue
batch_start_time = time.time()
feed = {feeds['inputs']: img_batch,
feeds['labels']: label_batch,
feeds['sequence_length']: PaperCNN.get_sequence_lengths(widths),
feeds['is_training']: False}
try:
batch_predicts, edit_distance, batch_edit_distances = sess.run(fetches, feed)
except Exception:
print(batch_labels)
continue
batch_predicts = [converter.decode(p, CRNN.CTC_INVALID_INDEX) for p in batch_predicts]
trimed_batch_predicts = [utils.remove_all_symbols(txt) for txt in batch_predicts]
trimed_batch_labels = [utils.remove_all_symbols(txt) for txt in batch_labels]
img_paths.extend(batch_img_paths)
predicts.extend(batch_predicts)
labels.extend(batch_labels)
trimed_predicts.extend(trimed_batch_predicts)
trimed_labels.extend(trimed_batch_labels)
edit_distances.extend(batch_edit_distances)
acc, correct_count = calculate_accuracy(batch_predicts, batch_labels)
trimed_acc, trimed_correct_count = calculate_accuracy(trimed_batch_predicts, trimed_batch_labels)
batch_time = time.time() - batch_start_time
total_batch_time += batch_time
if print_batch_info:
print("{:.03f}s [{}/{}] acc: {:.03f}({}/{}), edit_distance: {:.03f}, trim_acc {:.03f}({}/{})"
.format(batch_time, batch, dataset.num_batches,
acc, correct_count, dataset.batch_size,
edit_distance,
trimed_acc, trimed_correct_count, dataset.batch_size))
acc, correct_count = calculate_accuracy(predicts, labels)
trimed_acc, trimed_correct_count = calculate_accuracy(trimed_predicts, trimed_labels)
edit_distance_mean = calculate_edit_distance_mean(edit_distances)
total_edit_distance = sum(edit_distances)
acc_str = "Accuracy: {:.03f} ({}/{}), Trimed Accuracy: {:.03f} ({}/{})" \
"Total edit distance: {:.03f}, " \
"Average edit distance: {:.03f}, Average batch time: {:.03f}" \
.format(acc, correct_count, dataset.size,
trimed_acc, trimed_correct_count, dataset.size,
total_edit_distance, edit_distance_mean, total_batch_time / dataset.num_batches)
print(acc_str)
save_dir = os.path.join(result_dir, name)
utils.check_dir_exist(save_dir)
result_file_path = save_txt_result(save_dir, acc, step, labels, predicts, 'acc',
edit_distances, acc_str)
save_txt_result(save_dir, acc, step, labels, predicts, 'acc', edit_distances,
acc_str, only_failed=True)
save_txt_result(save_dir, trimed_acc, step, trimed_labels, trimed_predicts, 'tacc',
edit_distances)
save_txt_result(save_dir, trimed_acc, step, trimed_labels, trimed_predicts, 'tacc',
edit_distances, only_failed=True)
save_txt_4_analyze(save_dir, labels, predicts, 'acc', step)
save_txt_4_analyze(save_dir, trimed_labels, trimed_predicts, 'tacc', step)
# Copy image not all match to a dir
# TODO: we will only save failed imgs for acc
if copy_failed:
failed_infer_img_dir = result_file_path[:-4] + "_failed"
if os.path.exists(failed_infer_img_dir) and os.path.isdir(failed_infer_img_dir):
shutil.rmtree(failed_infer_img_dir)
utils.check_dir_exist(failed_infer_img_dir)
failed_image_indices = []
for i, val in enumerate(edit_distances):
if val != 0:
failed_image_indices.append(i)
for i in failed_image_indices:
img_path = img_paths[i]
img_name = img_path.split("/")[-1]
dst_path = os.path.join(failed_infer_img_dir, img_name)
shutil.copyfile(img_path, dst_path)
failed_infer_result_file_path = os.path.join(failed_infer_img_dir, "result.txt")
with open(failed_infer_result_file_path, 'w', encoding='utf-8') as f:
for i in failed_image_indices:
p_label = predicts[i]
t_label = labels[i]
f.write("{}\n".format(img_paths[i]))
f.write("input: {:17s} length: {}\n".format(t_label, len(t_label)))
f.write("predict: {:17s} length: {}\n".format(p_label, len(p_label)))
f.write("edit distance: {}\n".format(edit_distances[i]))
f.write('-' * 30 + '\n')
return acc, trimed_acc, edit_distance_mean, total_edit_distance, correct_count, trimed_correct_count
def save_txt_4_analyze(save_dir, labels, predicts, acc_type, step):
"""
    Save the test set's ground-truth labels and predictions into a single txt file to make statistics easier.
"""
txt_path = os.path.join(save_dir, '%d_%s_gt_and_pred.txt' % (step, acc_type))
with open(txt_path, 'w', encoding='utf-8') as f:
for i, p_label in enumerate(predicts):
t_label = labels[i]
f.write("{}__$__{}\n".format(t_label, p_label))
def save_txt_result(save_dir, acc, step, labels, predicts, acc_type,
edit_distances=None, acc_str=None, only_failed=False):
"""
:param acc_type: 'acc' or 'tacc'
:return:
"""
failed_suffix = ''
if only_failed:
failed_suffix = 'failed'
if step is not None:
txt_path = os.path.join(save_dir, '%d_%s_%.3f_%s.txt' % (step, acc_type, acc, failed_suffix))
else:
txt_path = os.path.join(save_dir, '%s_%.3f_%s.txt' % (acc_type, acc, failed_suffix))
print("Write result to %s" % txt_path)
with open(txt_path, 'w', encoding='utf-8') as f:
for i, p_label in enumerate(predicts):
t_label = labels[i]
all_match = (t_label == p_label)
if only_failed and all_match:
continue
# f.write("{}\n".format(img_paths[i]))
f.write("input: {:17s} length: {}\n".format(t_label, len(t_label)))
f.write("predict: {:17s} length: {}\n".format(p_label, len(p_label)))
f.write("all match: {}\n".format(1 if all_match else 0))
if edit_distances:
f.write("edit distance: {}\n".format(edit_distances[i]))
f.write('-' * 30 + '\n')
if acc_str:
f.write(acc_str + "\n")
return txt_path | en | 0.413463 | :param predicts: encoded predict result :param labels: ground true label :return: accuracy 排除了 edit_distance == 0 的值计算编辑距离的均值 :param edit_distences: :return: Save file name: {acc}_{step}.txt :param sess: tensorflow session :param model: crnn network :param result_dir: :param name: val, test, infer. used to create sub dir in result_dir :return: # Copy image not all match to a dir # TODO: we will only save failed imgs for acc 把测试集的真值和预测结果放在保存在同一个 txt 文件中,方便统计 :param acc_type: 'acc' or 'tacc' :return: # f.write("{}\n".format(img_paths[i])) | 2.303129 | 2 |
Day 2/Day_2_Python.py | giTan7/30-Days-Of-Code | 1 | 8148 | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the solve function below.
def solve(meal_cost, tip_percent, tax_percent):
tip = (meal_cost * tip_percent)/100
tax = (meal_cost * tax_percent)/100
print(int(meal_cost + tip + tax + 0.5))
# We add 0.5 because the float should be rounded to the nearest integer
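# Worked example of the rounding above (illustrative values): meal_cost = 12.00,
# tip_percent = 20, tax_percent = 8 gives 12.00 + 2.40 + 0.96 = 15.36, and
# int(15.36 + 0.5) = 15, i.e. the total rounded to the nearest integer.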
if __name__ == '__main__':
meal_cost = float(input())
tip_percent = int(input())
tax_percent = int(input())
solve(meal_cost, tip_percent, tax_percent)
# Time complexity: O(1)
# Space complexity: O(1)
| #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the solve function below.
def solve(meal_cost, tip_percent, tax_percent):
tip = (meal_cost * tip_percent)/100
tax = (meal_cost * tax_percent)/100
print(int(meal_cost + tip + tax + 0.5))
# We add 0.5 because the float should be rounded to the nearest integer
if __name__ == '__main__':
meal_cost = float(input())
tip_percent = int(input())
tax_percent = int(input())
solve(meal_cost, tip_percent, tax_percent)
# Time complexity: O(1)
# Space complexity: O(1)
| en | 0.717038 | #!/bin/python3 # Complete the solve function below. # We add 0.5 because the float should be rounded to the nearest integer # Time complexity: O(1) # Space complexity: O(1) | 3.974538 | 4 |
modules/templates/RLPPTM/tools/mis.py | nursix/rlpptm | 1 | 8149 | # -*- coding: utf-8 -*-
#
# Helper Script for Mass-Invitation of Participant Organisations
#
# RLPPTM Template Version 1.0
#
# Execute in web2py folder after code upgrade like:
# python web2py.py -S eden -M -R applications/eden/modules/templates/RLPPTM/tools/mis.py
#
import os
import sys
from core import s3_format_datetime
from templates.RLPPTM.config import SCHOOLS
from templates.RLPPTM.helpers import InviteUserOrg
# Batch limit (set to False to disable)
BATCH_LIMIT = 250
# Override auth (disables all permission checks)
auth.override = True
# Failed-flag
failed = False
# Info
log = None
def info(msg):
sys.stderr.write("%s" % msg)
if log:
log.write("%s" % msg)
def infoln(msg):
sys.stderr.write("%s\n" % msg)
if log:
log.write("%s\n" % msg)
# Load models for tables
otable = s3db.org_organisation
gtable = s3db.org_group
mtable = s3db.org_group_membership
utable = s3db.auth_user
oltable = s3db.org_organisation_user
pltable = s3db.pr_person_user
ctable = s3db.pr_contact
timestmp = s3_format_datetime(dtfmt="%Y%m%d%H%M%S")
LOGFILE = os.path.join(request.folder, "private", "mis_%s.log" % timestmp)
# -----------------------------------------------------------------------------
# Invite organisations
#
if not failed:
try:
with open(LOGFILE, "w", encoding="utf-8") as logfile:
log = logfile
join = [mtable.on((mtable.organisation_id == otable.id) & \
(mtable.deleted == False)),
gtable.on((gtable.id == mtable.group_id) & \
(gtable.name == SCHOOLS) & \
(gtable.deleted == False)),
]
query = (otable.deleted == False)
organisations = db(query).select(otable.id,
otable.pe_id,
otable.name,
join = join,
orderby = otable.id,
)
total = len(organisations)
infoln("Total: %s Organisations" % total)
infoln("")
skipped = sent = failures = 0
invite_org = InviteUserOrg.invite_account
for organisation in organisations:
info("%s..." % organisation.name)
# Get all accounts that are linked to this org
organisation_id = organisation.id
join = oltable.on((oltable.user_id == utable.id) & \
(oltable.deleted == False))
left = pltable.on((pltable.user_id == utable.id) & \
(pltable.deleted == False))
query = (oltable.organisation_id == organisation_id)
rows = db(query).select(utable.id,
utable.email,
utable.registration_key,
pltable.pe_id,
join = join,
left = left,
)
if rows:
# There are already accounts linked to this organisation
invited, registered = [], []
for row in rows:
username = row.auth_user.email
if row.pr_person_user.pe_id:
registered.append(username)
else:
invited.append(username)
if registered:
infoln("already registered (%s)." % ", ".join(registered))
else:
infoln("already invited (%s)." % ", ".join(invited))
skipped += 1
continue
# Find email address
query = (ctable.pe_id == organisation.pe_id) & \
(ctable.contact_method == "EMAIL") & \
(ctable.deleted == False)
contact = db(query).select(ctable.value,
orderby = ctable.priority,
limitby = (0, 1),
).first()
if contact:
email = contact.value
info("(%s)..." % email)
else:
infoln("no email address.")
skipped += 1
continue
error = invite_org(organisation, email, account=None)
if not error:
sent += 1
infoln("invited.")
db.commit()
else:
failures += 1
infoln("invitation failed (%s)." % error)
if BATCH_LIMIT and sent >= BATCH_LIMIT:
infoln("Batch limit (%s) reached" % BATCH_LIMIT)
skipped = total - (sent + failures)
break
infoln("")
infoln("%s invitations sent" % sent)
infoln("%s invitations failed" % failures)
infoln("%s organisations skipped" % skipped)
log = None
except IOError:
infoln("...failed (could not create logfile)")
failed = True
# -----------------------------------------------------------------------------
# Finishing up
#
if failed:
db.rollback()
infoln("PROCESS FAILED - Action rolled back.")
else:
db.commit()
infoln("PROCESS SUCCESSFUL.")
| # -*- coding: utf-8 -*-
#
# Helper Script for Mass-Invitation of Participant Organisations
#
# RLPPTM Template Version 1.0
#
# Execute in web2py folder after code upgrade like:
# python web2py.py -S eden -M -R applications/eden/modules/templates/RLPPTM/tools/mis.py
#
import os
import sys
from core import s3_format_datetime
from templates.RLPPTM.config import SCHOOLS
from templates.RLPPTM.helpers import InviteUserOrg
# Batch limit (set to False to disable)
BATCH_LIMIT = 250
# Override auth (disables all permission checks)
auth.override = True
# Failed-flag
failed = False
# Info
log = None
def info(msg):
sys.stderr.write("%s" % msg)
if log:
log.write("%s" % msg)
def infoln(msg):
sys.stderr.write("%s\n" % msg)
if log:
log.write("%s\n" % msg)
# Load models for tables
otable = s3db.org_organisation
gtable = s3db.org_group
mtable = s3db.org_group_membership
utable = s3db.auth_user
oltable = s3db.org_organisation_user
pltable = s3db.pr_person_user
ctable = s3db.pr_contact
timestmp = s3_format_datetime(dtfmt="%Y%m%d%H%M%S")
LOGFILE = os.path.join(request.folder, "private", "mis_%s.log" % timestmp)
# -----------------------------------------------------------------------------
# Invite organisations
#
if not failed:
try:
with open(LOGFILE, "w", encoding="utf-8") as logfile:
log = logfile
join = [mtable.on((mtable.organisation_id == otable.id) & \
(mtable.deleted == False)),
gtable.on((gtable.id == mtable.group_id) & \
(gtable.name == SCHOOLS) & \
(gtable.deleted == False)),
]
query = (otable.deleted == False)
organisations = db(query).select(otable.id,
otable.pe_id,
otable.name,
join = join,
orderby = otable.id,
)
total = len(organisations)
infoln("Total: %s Organisations" % total)
infoln("")
skipped = sent = failures = 0
invite_org = InviteUserOrg.invite_account
for organisation in organisations:
info("%s..." % organisation.name)
# Get all accounts that are linked to this org
organisation_id = organisation.id
join = oltable.on((oltable.user_id == utable.id) & \
(oltable.deleted == False))
left = pltable.on((pltable.user_id == utable.id) & \
(pltable.deleted == False))
query = (oltable.organisation_id == organisation_id)
rows = db(query).select(utable.id,
utable.email,
utable.registration_key,
pltable.pe_id,
join = join,
left = left,
)
if rows:
# There are already accounts linked to this organisation
invited, registered = [], []
for row in rows:
username = row.auth_user.email
if row.pr_person_user.pe_id:
registered.append(username)
else:
invited.append(username)
if registered:
infoln("already registered (%s)." % ", ".join(registered))
else:
infoln("already invited (%s)." % ", ".join(invited))
skipped += 1
continue
# Find email address
query = (ctable.pe_id == organisation.pe_id) & \
(ctable.contact_method == "EMAIL") & \
(ctable.deleted == False)
contact = db(query).select(ctable.value,
orderby = ctable.priority,
limitby = (0, 1),
).first()
if contact:
email = contact.value
info("(%s)..." % email)
else:
infoln("no email address.")
skipped += 1
continue
error = invite_org(organisation, email, account=None)
if not error:
sent += 1
infoln("invited.")
db.commit()
else:
failures += 1
infoln("invitation failed (%s)." % error)
if BATCH_LIMIT and sent >= BATCH_LIMIT:
infoln("Batch limit (%s) reached" % BATCH_LIMIT)
skipped = total - (sent + failures)
break
infoln("")
infoln("%s invitations sent" % sent)
infoln("%s invitations failed" % failures)
infoln("%s organisations skipped" % skipped)
log = None
except IOError:
infoln("...failed (could not create logfile)")
failed = True
# -----------------------------------------------------------------------------
# Finishing up
#
if failed:
db.rollback()
infoln("PROCESS FAILED - Action rolled back.")
else:
db.commit()
infoln("PROCESS SUCCESSFUL.")
| en | 0.607011 | # -*- coding: utf-8 -*- # # Helper Script for Mass-Invitation of Participant Organisations # # RLPPTM Template Version 1.0 # # Execute in web2py folder after code upgrade like: # python web2py.py -S eden -M -R applications/eden/modules/templates/RLPPTM/tools/mis.py # # Batch limit (set to False to disable) # Override auth (disables all permission checks) # Failed-flag # Info # Load models for tables # ----------------------------------------------------------------------------- # Invite organisations # # Get all accounts that are linked to this org # There are already accounts linked to this organisation # Find email address # ----------------------------------------------------------------------------- # Finishing up # | 1.844329 | 2 |
data/train/python/22aec8fbe47f7975a1e7f4a0caa5c88c56e4a03e__init__.py | harshp8l/deep-learning-lang-detection | 84 | 8150 | def save_form(form, actor=None):
"""Allows storing a form with a passed actor. Normally, Form.save() does not accept an actor, but if you require
this to be passed (is not handled by middleware), you can use this to replace form.save().
Requires you to use the audit.Model model as the actor is passed to the object's save method.
"""
obj = form.save(commit=False)
obj.save(actor=actor)
form.save_m2m()
return obj
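# Typical call site (illustrative; `MyModelForm` and `request` are placeholders
# from a Django view, not part of this module):
#
#   form = MyModelForm(request.POST)
#   if form.is_valid():
#       obj = save_form(form, actor=request.user)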
#def intermediate_save(instance, actor=None):
# """Allows saving of an instance, without storing the changes, but keeping the history. This allows you to perform
# intermediate saves:
#
# obj.value1 = 1
# intermediate_save(obj)
# obj.value2 = 2
# obj.save()
# <value 1 and value 2 are both stored in the database>
# """
# if hasattr(instance, '_audit_changes'):
# tmp = instance._audit_changes
# if actor:
# instance.save(actor=actor)
# else:
# instance.save()
# instance._audit_changes = tmp
# else:
# if actor:
# instance.save(actor=actor)
# else:
# instance.save()
| def save_form(form, actor=None):
"""Allows storing a form with a passed actor. Normally, Form.save() does not accept an actor, but if you require
this to be passed (is not handled by middleware), you can use this to replace form.save().
Requires you to use the audit.Model model as the actor is passed to the object's save method.
"""
obj = form.save(commit=False)
obj.save(actor=actor)
form.save_m2m()
return obj
#def intermediate_save(instance, actor=None):
# """Allows saving of an instance, without storing the changes, but keeping the history. This allows you to perform
# intermediate saves:
#
# obj.value1 = 1
# intermediate_save(obj)
# obj.value2 = 2
# obj.save()
# <value 1 and value 2 are both stored in the database>
# """
# if hasattr(instance, '_audit_changes'):
# tmp = instance._audit_changes
# if actor:
# instance.save(actor=actor)
# else:
# instance.save()
# instance._audit_changes = tmp
# else:
# if actor:
# instance.save(actor=actor)
# else:
# instance.save()
| en | 0.773747 | Allows storing a form with a passed actor. Normally, Form.save() does not accept an actor, but if you require this to be passed (is not handled by middleware), you can use this to replace form.save(). Requires you to use the audit.Model model as the actor is passed to the object's save method. #def intermediate_save(instance, actor=None): # """Allows saving of an instance, without storing the changes, but keeping the history. This allows you to perform # intermediate saves: # # obj.value1 = 1 # intermediate_save(obj) # obj.value2 = 2 # obj.save() # <value 1 and value 2 are both stored in the database> # """ # if hasattr(instance, '_audit_changes'): # tmp = instance._audit_changes # if actor: # instance.save(actor=actor) # else: # instance.save() # instance._audit_changes = tmp # else: # if actor: # instance.save(actor=actor) # else: # instance.save() | 3.42811 | 3 |
engine/test_sysctl.py | kingsd041/os-tests | 0 | 8151 | # coding = utf-8
# Create date: 2018-11-05
# Author :Hailong
def test_sysctl(ros_kvm_with_paramiko, cloud_config_url):
command = 'sudo cat /proc/sys/kernel/domainname'
feed_back = 'test'
client = ros_kvm_with_paramiko(cloud_config='{url}/test_sysctl.yml'.format(url=cloud_config_url))
stdin, stdout, stderr = client.exec_command(command, timeout=10)
output = stdout.read().decode('utf-8').replace('\n', '')
assert (feed_back == output)
command_b = 'sudo cat /proc/sys/dev/cdrom/debug'
feed_back_b = '1'
stdin, stdout, stderr = client.exec_command(command_b, timeout=10)
output_b = stdout.read().decode('utf-8').replace('\n', '')
client.close()
assert (feed_back_b == output_b)
| # coding = utf-8
# Create date: 2018-11-05
# Author :Hailong
def test_sysctl(ros_kvm_with_paramiko, cloud_config_url):
command = 'sudo cat /proc/sys/kernel/domainname'
feed_back = 'test'
client = ros_kvm_with_paramiko(cloud_config='{url}/test_sysctl.yml'.format(url=cloud_config_url))
stdin, stdout, stderr = client.exec_command(command, timeout=10)
output = stdout.read().decode('utf-8').replace('\n', '')
assert (feed_back == output)
command_b = 'sudo cat /proc/sys/dev/cdrom/debug'
feed_back_b = '1'
stdin, stdout, stderr = client.exec_command(command_b, timeout=10)
output_b = stdout.read().decode('utf-8').replace('\n', '')
client.close()
assert (feed_back_b == output_b)
| en | 0.832735 | # coding = utf-8 # Create date: 2018-11-05 # Author :Hailong | 1.984443 | 2 |
paprika_sync/core/management/commands/import_recipes_from_file.py | grschafer/paprika-sync | 0 | 8152 | import json
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from paprika_sync.core.models import PaprikaAccount
from paprika_sync.core.serializers import RecipeSerializer, CategorySerializer
from paprika_sync.core.utils import log_start_end
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Import all recipes from file to specified PaprikaAccount'
def add_arguments(self, parser):
parser.add_argument(
'file',
help='Path to json file containing list of all recipes',
)
parser.add_argument(
'--categories-file',
help='Path to json file containing list of all categories',
)
parser.add_argument(
'paprika_account_id',
type=int,
help='ID of PaprikaAccount to import recipes to',
)
parser.add_argument(
'-r', '--remove',
action='store_true',
help="Removes all of account's existing recipes before importing",
)
@log_start_end(logger)
def handle(self, *args, **options):
recipes_file = options['file']
categories_file = options['categories_file']
pa_id = options['paprika_account_id']
wipe_account = options['remove']
logger.info('Starting import for PaprikaAccount id %s from %s, wipe_account=%s', pa_id, recipes_file, wipe_account)
pa = PaprikaAccount.objects.get(id=pa_id)
with open(recipes_file, 'rt') as fin:
recipes = json.load(fin)
logger.info('Found %s recipes to import to %s', len(recipes), pa)
categories = []
if categories_file:
with open(categories_file, 'rt') as fin:
categories = json.load(fin)
logger.info('Found %s categories to import to %s', len(categories), pa)
with transaction.atomic():
if wipe_account:
pa.recipes.all().delete()
pa.categories.all().delete()
for category in categories:
category['paprika_account'] = pa.id
cs = CategorySerializer(data=category)
if cs.is_valid():
cs.save()
else:
logger.warning('Failed to import category %s (%s) due to errors: %s', category['uid'], category['name'], cs.errors)
for recipe in recipes:
# Remove categories if we're not bothering to import them
if not categories:
recipe['categories'] = []
recipe['paprika_account'] = pa.id
rs = RecipeSerializer(data=recipe)
if rs.is_valid():
rs.save()
else:
logger.warning('Failed to import recipe %s (%s) due to errors: %s', recipe['uid'], recipe['name'], rs.errors)
# recipe_field_names = set([f.name for f in Recipe._meta.fields])
# Recipe.objects.create(
# paprika_account=pa,
# **{k: v for k, v in recipe.items() if k in recipe_field_names},
# )
logger.info('Finished recipe import successfully')
# transaction.set_rollback(True)
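# Example invocation (illustrative file names and account id):
#
#   python manage.py import_recipes_from_file recipes.json 3 \
#       --categories-file categories.json --remove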
| import json
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from paprika_sync.core.models import PaprikaAccount
from paprika_sync.core.serializers import RecipeSerializer, CategorySerializer
from paprika_sync.core.utils import log_start_end
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Import all recipes from file to specified PaprikaAccount'
def add_arguments(self, parser):
parser.add_argument(
'file',
help='Path to json file containing list of all recipes',
)
parser.add_argument(
'--categories-file',
help='Path to json file containing list of all categories',
)
parser.add_argument(
'paprika_account_id',
type=int,
help='ID of PaprikaAccount to import recipes to',
)
parser.add_argument(
'-r', '--remove',
action='store_true',
help="Removes all of account's existing recipes before importing",
)
@log_start_end(logger)
def handle(self, *args, **options):
recipes_file = options['file']
categories_file = options['categories_file']
pa_id = options['paprika_account_id']
wipe_account = options['remove']
logger.info('Starting import for PaprikaAccount id %s from %s, wipe_account=%s', pa_id, recipes_file, wipe_account)
pa = PaprikaAccount.objects.get(id=pa_id)
with open(recipes_file, 'rt') as fin:
recipes = json.load(fin)
logger.info('Found %s recipes to import to %s', len(recipes), pa)
categories = []
if categories_file:
with open(categories_file, 'rt') as fin:
categories = json.load(fin)
logger.info('Found %s categories to import to %s', len(categories), pa)
with transaction.atomic():
if wipe_account:
pa.recipes.all().delete()
pa.categories.all().delete()
for category in categories:
category['paprika_account'] = pa.id
cs = CategorySerializer(data=category)
if cs.is_valid():
cs.save()
else:
logger.warning('Failed to import category %s (%s) due to errors: %s', category['uid'], category['name'], cs.errors)
for recipe in recipes:
# Remove categories if we're not bothering to import them
if not categories:
recipe['categories'] = []
recipe['paprika_account'] = pa.id
rs = RecipeSerializer(data=recipe)
if rs.is_valid():
rs.save()
else:
logger.warning('Failed to import recipe %s (%s) due to errors: %s', recipe['uid'], recipe['name'], rs.errors)
# recipe_field_names = set([f.name for f in Recipe._meta.fields])
# Recipe.objects.create(
# paprika_account=pa,
# **{k: v for k, v in recipe.items() if k in recipe_field_names},
# )
logger.info('Finished recipe import successfully')
# transaction.set_rollback(True)
| en | 0.497824 | # Remove categories if we're not bothering to import them # recipe_field_names = set([f.name for f in Recipe._meta.fields]) # Recipe.objects.create( # paprika_account=pa, # **{k: v for k, v in recipe.items() if k in recipe_field_names}, # ) # transaction.set_rollback(True) | 2.093487 | 2 |
scripts/update_asp_l1.py | sot/mica | 0 | 8153 | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import mica.archive.asp_l1
mica.archive.asp_l1.main()
| #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import mica.archive.asp_l1
mica.archive.asp_l1.main()
| en | 0.567712 | #!/usr/bin/env python # Licensed under a 3-clause BSD style license - see LICENSE.rst | 0.868234 | 1 |
pair.py | hhgarnes/python-validity | 0 | 8154 | <reponame>hhgarnes/python-validity<gh_stars>0
from time import sleep
from proto9x.usb import usb
from proto9x.tls import tls
from proto9x.flash import read_flash
from proto9x.init_flash import init_flash
from proto9x.upload_fwext import upload_fwext
from proto9x.calibrate import calibrate
from proto9x.init_db import init_db
#usb.trace_enabled=True
#tls.trace_enabled=True
def restart():
print('Sleeping...')
sleep(3)
tls.reset()
usb.open()
usb.send_init()
tls.parseTlsFlash(read_flash(1, 0, 0x1000))
tls.open()
usb.open()
print('Initializing flash...')
init_flash()
restart()
print('Uploading firmware...')
upload_fwext()
restart()
print('Calibrating...')
calibrate()
print('Init database...')
init_db()
print('That\'s it, pairing\'s finished')
| from time import sleep
from proto9x.usb import usb
from proto9x.tls import tls
from proto9x.flash import read_flash
from proto9x.init_flash import init_flash
from proto9x.upload_fwext import upload_fwext
from proto9x.calibrate import calibrate
from proto9x.init_db import init_db
#usb.trace_enabled=True
#tls.trace_enabled=True
def restart():
print('Sleeping...')
sleep(3)
tls.reset()
usb.open()
usb.send_init()
tls.parseTlsFlash(read_flash(1, 0, 0x1000))
tls.open()
usb.open()
print('Initializing flash...')
init_flash()
restart()
print('Uploading firmware...')
upload_fwext()
restart()
print('Calibrating...')
calibrate()
print('Init database...')
init_db()
print('That\'s it, pairing\'s finished') | zh | 0.311698 | #usb.trace_enabled=True #tls.trace_enabled=True | 2.159036 | 2 |
output/models/ms_data/element/elem_q017_xsd/elem_q017.py | tefra/xsdata-w3c-tests | 1 | 8155 | from dataclasses import dataclass, field
@dataclass
class FooTest:
class Meta:
name = "fooTest"
value: str = field(
init=False,
default="Hello"
)
@dataclass
class Root:
class Meta:
name = "root"
foo_test: str = field(
init=False,
default="Hello",
metadata={
"name": "fooTest",
"type": "Element",
"required": True,
}
)
| from dataclasses import dataclass, field
@dataclass
class FooTest:
class Meta:
name = "fooTest"
value: str = field(
init=False,
default="Hello"
)
@dataclass
class Root:
class Meta:
name = "root"
foo_test: str = field(
init=False,
default="Hello",
metadata={
"name": "fooTest",
"type": "Element",
"required": True,
}
)
| none | 1 | 2.930167 | 3 |
|
contrib_src/predict.py | modelhub-ai/mic-dkfz-brats | 1 | 8156 | import json
import os
from collections import OrderedDict
from copy import deepcopy
import SimpleITK as sitk
from batchgenerators.augmentations.utils import resize_segmentation # resize_softmax_output
from skimage.transform import resize
from torch.optim import lr_scheduler
from torch import nn
import numpy as np
import torch
from scipy.ndimage import binary_fill_holes
'''
This code is not intended to be looked at by anyone. It is messy. It is undocumented.
And the entire training pipeline is missing.
'''
max_num_filters_3d = 320
max_num_filters_2d = 480
join = os.path.join
def load_json(file):
with open(file, 'r') as f:
a = json.load(f)
return a
def resize_image(image, old_spacing, new_spacing, order=3, cval=0):
new_shape = (int(np.round(old_spacing[0]/new_spacing[0]*float(image.shape[0]))),
int(np.round(old_spacing[1]/new_spacing[1]*float(image.shape[1]))),
int(np.round(old_spacing[2]/new_spacing[2]*float(image.shape[2]))))
if any([i != j for i, j in zip(image.shape, new_shape)]):
res = resize(image, new_shape, order=order, mode='edge', cval=cval)
else:
res = image
return res
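# Example (illustrative numbers): a (100, 100, 50) volume with 1x1x3 mm spacing
# resampled to 1x1x1 mm spacing becomes (100, 100, 150); if the target shape
# equals the input shape, the input is returned unchanged.
#
#   resize_image(np.zeros((100, 100, 50)), (1, 1, 3), (1, 1, 1)).shape  # -> (100, 100, 150)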
class ConvDropoutNormNonlin(nn.Module):
def __init__(self, input_channels, output_channels,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None):
super(ConvDropoutNormNonlin, self).__init__()
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
self.conv = self.conv_op(input_channels, output_channels, **self.conv_kwargs)
if self.dropout_op is not None and self.dropout_op_kwargs['p'] is not None and self.dropout_op_kwargs[
'p'] > 0:
self.dropout = self.dropout_op(**self.dropout_op_kwargs)
else:
self.dropout = None
self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs)
self.lrelu = nn.LeakyReLU(**self.nonlin_kwargs)
def forward(self, x):
x = self.conv(x)
if self.dropout is not None:
x = self.dropout(x)
return self.lrelu(self.instnorm(x))
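# Example (illustrative): a conv -> dropout -> instance norm -> leaky ReLU block
# mapping 4 input channels to 32 output channels; the default 3x3x3 kernel with
# padding 1 keeps the spatial size unchanged.
#
#   block = ConvDropoutNormNonlin(4, 32, conv_op=nn.Conv3d,
#                                 norm_op=nn.InstanceNorm3d,
#                                 dropout_op=nn.Dropout3d)
#   block(torch.randn(2, 4, 32, 32, 32)).shape   # -> (2, 32, 32, 32, 32)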
def pad_nd_image(image, new_shape=None, mode="edge", kwargs=None, return_slicer=False, shape_must_be_divisible_by=None):
if kwargs is None:
kwargs = {}
if new_shape is not None:
old_shape = np.array(image.shape[-len(new_shape):])
else:
assert shape_must_be_divisible_by is not None
assert isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray))
new_shape = image.shape[-len(shape_must_be_divisible_by):]
old_shape = new_shape
num_axes_nopad = len(image.shape) - len(new_shape)
new_shape = [max(new_shape[i], old_shape[i]) for i in range(len(new_shape))]
if not isinstance(new_shape, np.ndarray):
new_shape = np.array(new_shape)
if shape_must_be_divisible_by is not None:
if not isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray)):
shape_must_be_divisible_by = [shape_must_be_divisible_by] * len(new_shape)
else:
assert len(shape_must_be_divisible_by) == len(new_shape)
for i in range(len(new_shape)):
if new_shape[i] % shape_must_be_divisible_by[i] == 0:
new_shape[i] -= shape_must_be_divisible_by[i]
new_shape = np.array([new_shape[i] + shape_must_be_divisible_by[i] - new_shape[i] % shape_must_be_divisible_by[i] for i in range(len(new_shape))])
difference = new_shape - old_shape
pad_below = difference // 2
pad_above = difference // 2 + difference % 2
pad_list = [[0, 0]]*num_axes_nopad + list([list(i) for i in zip(pad_below, pad_above)])
res = np.pad(image, pad_list, mode, **kwargs)
if not return_slicer:
return res
else:
pad_list = np.array(pad_list)
pad_list[:, 1] = np.array(res.shape) - pad_list[:, 1]
slicer = list(slice(*i) for i in pad_list)
return res, slicer
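# Example (illustrative shapes): pad a (1, 70, 83, 91) array so that every
# spatial axis is divisible by 16; the returned slicer crops back to the
# original shape.
#
#   padded, slicer = pad_nd_image(np.zeros((1, 70, 83, 91)), return_slicer=True,
#                                 shape_must_be_divisible_by=[16, 16, 16])
#   padded.shape                  # -> (1, 80, 96, 96)
#   padded[tuple(slicer)].shape   # -> (1, 70, 83, 91)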
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
def get_device(self):
if next(self.parameters()).device == "cpu":
return "cpu"
else:
return next(self.parameters()).device.index
def set_device(self, device):
if device == "cpu":
self.cpu()
else:
self.cuda(device)
def forward(self, x):
raise NotImplementedError
class SegmentationNetwork(NeuralNetwork):
def __init__(self):
self.input_shape_must_be_divisible_by = None
self.conv_op = None
super(NeuralNetwork, self).__init__()
self.inference_apply_nonlin = lambda x:x
def predict_3D(self, x, do_mirroring, num_repeats=1, use_train_mode=False, batch_size=1, mirror_axes=(2, 3, 4),
tiled=False, tile_in_z=True, step=2, patch_size=None, regions_class_order=None, use_gaussian=False,
pad_border_mode="edge", pad_kwargs=None):
"""
        :param x: input volume of shape (c, x, y, z)
        :param do_mirroring: if True, average predictions over mirrored (flipped) copies of the input
        :param num_repeats: number of forward passes per input; the variance over all passes is returned as an uncertainty map
        :param use_train_mode: True runs the network in train() mode (e.g. to keep dropout active), False uses eval(), None leaves the current mode unchanged
        :param batch_size: how many copies of the input are stacked into a single batch
        :param mirror_axes: axes of the 5D network input along which mirroring is applied
        :param tiled: if True use sliding-window (tiled) prediction, otherwise predict the whole padded volume at once
        :param tile_in_z: for tiled 3D prediction, whether to tile along the z axis as well
        :param step: controls the overlap of neighbouring tiles in tiled prediction
        :param patch_size: patch size for tiled prediction / minimum size the input is padded to
        :param regions_class_order: if not None, outputs are treated as (possibly overlapping) regions and thresholded at 0.5 in this order instead of taking the argmax
        :param use_gaussian: if True, weight tile centres more strongly than tile borders when aggregating tiled predictions
        :return: (predicted segmentation, all stacked predictions, mean softmax prediction, uncertainty map)
"""
current_mode = self.training
if use_train_mode is not None and use_train_mode:
self.train()
elif use_train_mode is not None and not use_train_mode:
self.eval()
else:
pass
assert len(x.shape) == 4, "data must have shape (c,x,y,z)"
if self.conv_op == nn.Conv3d:
if tiled:
res = self._internal_predict_3D_3Dconv_tiled(x, num_repeats, batch_size, tile_in_z, step, do_mirroring,
mirror_axes, patch_size, regions_class_order, use_gaussian,
pad_border_mode, pad_kwargs=pad_kwargs)
else:
res = self._internal_predict_3D_3Dconv(x, do_mirroring, num_repeats, patch_size, batch_size,
mirror_axes, regions_class_order, pad_border_mode, pad_kwargs=pad_kwargs)
elif self.conv_op == nn.Conv2d:
if tiled:
res = self._internal_predict_3D_2Dconv_tiled(x, do_mirroring, num_repeats, batch_size, mirror_axes,
step, patch_size, regions_class_order, use_gaussian,
pad_border_mode, pad_kwargs=pad_kwargs)
else:
res = self._internal_predict_3D_2Dconv(x, do_mirroring, num_repeats, patch_size, batch_size,
mirror_axes, regions_class_order, pad_border_mode, pad_kwargs=pad_kwargs)
else:
raise RuntimeError("Invalid conv op, cannot determine what dimensionality (2d/3d) the network is")
if use_train_mode is not None:
self.train(current_mode)
return res
def _internal_maybe_mirror_and_pred_3D(self, x, num_repeats, mirror_axes, do_mirroring=True):
with torch.no_grad():
a = torch.zeros(x.shape).float()
if self.get_device() == "cpu":
a = a.cpu()
else:
a = a.cuda(self.get_device())
if do_mirroring:
mirror_idx = 8
else:
mirror_idx = 1
all_preds = []
for i in range(num_repeats):
for m in range(mirror_idx):
data_for_net = np.array(x)
do_stuff = False
if m == 0:
do_stuff = True
pass
if m == 1 and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, :, ::-1]
if m == 2 and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, ::-1, :]
if m == 3 and (4 in mirror_axes) and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, ::-1, ::-1]
if m == 4 and (2 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, :, :]
if m == 5 and (2 in mirror_axes) and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, :, ::-1]
if m == 6 and (2 in mirror_axes) and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, ::-1, :]
if m == 7 and (2 in mirror_axes) and (3 in mirror_axes) and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, ::-1, ::-1]
if do_stuff:
_ = a.data.copy_(torch.from_numpy(np.copy(data_for_net)))
p = self.inference_apply_nonlin(self(a))
p = p.data.cpu().numpy()
if m == 0:
pass
if m == 1 and (4 in mirror_axes):
p = p[:, :, :, :, ::-1]
if m == 2 and (3 in mirror_axes):
p = p[:, :, :, ::-1, :]
if m == 3 and (4 in mirror_axes) and (3 in mirror_axes):
p = p[:, :, :, ::-1, ::-1]
if m == 4 and (2 in mirror_axes):
p = p[:, :, ::-1, :, :]
if m == 5 and (2 in mirror_axes) and (4 in mirror_axes):
p = p[:, :, ::-1, :, ::-1]
if m == 6 and (2 in mirror_axes) and (3 in mirror_axes):
p = p[:, :, ::-1, ::-1, :]
if m == 7 and (2 in mirror_axes) and (3 in mirror_axes) and (4 in mirror_axes):
p = p[:, :, ::-1, ::-1, ::-1]
all_preds.append(p)
return np.vstack(all_preds)
def _internal_predict_3D_3Dconv(self, x, do_mirroring, num_repeats, min_size=None, BATCH_SIZE=None,
mirror_axes=(2, 3, 4), regions_class_order=None, pad_border_mode="edge",
pad_kwargs=None):
with torch.no_grad():
x, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True, self.input_shape_must_be_divisible_by)
#x, old_shape = pad_patient_3D_incl_c(x, self.input_shape_must_be_divisible_by, min_size)
new_shp = x.shape
data = np.zeros(tuple([1] + list(new_shp)), dtype=np.float32)
data[0] = x
if BATCH_SIZE is not None:
data = np.vstack([data] * BATCH_SIZE)
stacked = self._internal_maybe_mirror_and_pred_3D(data, num_repeats, mirror_axes, do_mirroring)
slicer = [slice(0, stacked.shape[i]) for i in range(len(stacked.shape) - (len(slicer) - 1))] + slicer[1:]
stacked = stacked[slicer]
uncertainty = stacked.var(0)
bayesian_predictions = stacked
softmax_pred = stacked.mean(0)
if regions_class_order is None:
predicted_segmentation = softmax_pred.argmax(0)
else:
predicted_segmentation_shp = softmax_pred[0].shape
predicted_segmentation = np.zeros(predicted_segmentation_shp)
for i, c in enumerate(regions_class_order):
predicted_segmentation[softmax_pred[i] > 0.5] = c
return predicted_segmentation, bayesian_predictions, softmax_pred, uncertainty
def softmax_helper(x):
rpt = [1 for _ in range(len(x.size()))]
rpt[1] = x.size(1)
x_max = x.max(1, keepdim=True)[0].repeat(*rpt)
e_x = torch.exp(x - x_max)
return e_x / e_x.sum(1, keepdim=True).repeat(*rpt)
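# This is the standard softmax over the channel dimension (dim 1), written with
# the usual max-subtraction trick for numerical stability; e.g. the channel-wise
# sums of softmax_helper(torch.randn(2, 4, 8, 8, 8)) are all approximately 1.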
class InitWeights_He(object):
def __init__(self, neg_slope=1e-2):
self.neg_slope = neg_slope
def __call__(self, module):
if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d):
module.weight = nn.init.kaiming_normal_(module.weight, a=1e-2)
if module.bias is not None:
module.bias = nn.init.constant_(module.bias, 0)
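# Intended use (illustrative): pass an instance to nn.Module.apply so every conv
# and transposed-conv layer gets He (Kaiming) initialised weights and zero bias:
#
#   network.apply(InitWeights_He(1e-2))   # `network` is any nn.Module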
class StackedConvLayers(nn.Module):
def __init__(self, input_feature_channels, output_feature_channels, num_convs,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None):
self.input_channels = input_feature_channels
self.output_channels = output_feature_channels
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
if first_stride is not None:
self.conv_kwargs_first_conv = deepcopy(conv_kwargs)
self.conv_kwargs_first_conv['stride'] = first_stride
else:
self.conv_kwargs_first_conv = conv_kwargs
super(StackedConvLayers, self).__init__()
self.blocks = nn.Sequential(
*([ConvDropoutNormNonlin(input_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs_first_conv,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs)] +
[ConvDropoutNormNonlin(output_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs) for _ in range(num_convs - 1)]))
def forward(self, x):
return self.blocks(x)
def soft_dice(net_output, gt, smooth=1., smooth_in_nom=1.):
axes = tuple(range(2, len(net_output.size())))
intersect = sum_tensor(net_output * gt, axes, keepdim=False)
denom = sum_tensor(net_output + gt, axes, keepdim=False)
result = (- ((2 * intersect + smooth_in_nom) / (denom + smooth))).mean()
return result
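# soft_dice returns the negative of the smoothed soft Dice coefficient
#   (2 * sum(p * g) + smooth_in_nom) / (sum(p) + sum(g) + smooth)
# averaged over batch and classes, with the sums taken over all spatial axes;
# perfect overlap therefore yields a value close to -1.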
def sum_tensor(input, axes, keepdim=False):
axes = np.unique(axes)
if keepdim:
for ax in axes:
input = input.sum(ax, keepdim=True)
else:
for ax in sorted(axes, reverse=True):
input = input.sum(ax)
return input
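# Example (illustrative): summing a (2, 3, 4, 5) tensor over axes (2, 3) gives
# shape (2, 3) with keepdim=False and (2, 3, 1, 1) with keepdim=True.
#
#   sum_tensor(torch.ones(2, 3, 4, 5), (2, 3), keepdim=True).shape  # -> (2, 3, 1, 1)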
class Generic_UNet_Cotraining(SegmentationNetwork):
def __init__(self, input_channels, base_num_features, num_classes, num_conv_per_stage=2, num_downscale=4,
feat_map_mul_on_downscale=2, conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False,
final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None,
upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False):
"""
        Have fun looking at that one. This is my go-to model. I crammed the cotraining code in there somehow, so yeah.
What a mess.
You know what's the best part? No documentation. What a great piece of code.
:param input_channels:
:param base_num_features:
:param num_classes:
:param num_conv_per_stage:
:param num_downscale:
:param feat_map_mul_on_downscale:
:param conv_op:
:param conv_kwargs:
:param norm_op:
:param norm_op_kwargs:
:param dropout_op:
:param dropout_op_kwargs:
:param nonlin:
:param nonlin_kwargs:
:param deep_supervision:
:param dropout_in_localization:
:param final_nonlin:
:param weightInitializer:
:param pool_op_kernel_sizes:
:param upscale_logits:
:param convolutional_pooling:
:param convolutional_upsampling:
"""
super(Generic_UNet_Cotraining, self).__init__()
assert isinstance(num_classes, (list, tuple)), "for cotraining, num_classes must be list or tuple of int"
self.num_classes = num_classes
self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes, 0)
self.pool_op_kernel_sizes = pool_op_kernel_sizes
self.convolutional_upsampling = convolutional_upsampling
self.convolutional_pooling = convolutional_pooling
self.upscale_logits = upscale_logits
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope':1e-2, 'inplace':True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p':0.5, 'inplace':True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps':1e-5, 'affine':True, 'momentum':0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size':3, 'stride':1, 'padding':1, 'dilation':1, 'bias':True}
self.nonlin = nonlin
self.nonlin_kwargs = nonlin_kwargs
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.weightInitializer = weightInitializer
self.conv_op = conv_op
self.norm_op = norm_op
self.dropout_op = dropout_op
if pool_op_kernel_sizes is None:
if conv_op == nn.Conv2d:
pool_op_kernel_sizes = [(2, 2)] * num_downscale
elif conv_op == nn.Conv3d:
pool_op_kernel_sizes = [(2, 2, 2)] * num_downscale
else:
raise ValueError("unknown convolution dimensionality, conv op: %s" % str(conv_op))
self.pool_op_kernel_sizes = pool_op_kernel_sizes
self.final_nonlin = final_nonlin
assert num_conv_per_stage > 1, "this implementation does not support only one conv per stage"
if conv_op == nn.Conv2d:
upsample_mode = 'bilinear'
pool_op = nn.MaxPool2d
transpconv = nn.ConvTranspose2d
elif conv_op == nn.Conv3d:
upsample_mode = 'trilinear'
pool_op = nn.MaxPool3d
transpconv = nn.ConvTranspose3d
else:
raise ValueError("unknown convolution dimensionality, conv op: %s" % str(conv_op))
self.do_ds = deep_supervision
self.conv_blocks_context = []
self.conv_blocks_localization = []
self.td = []
self.tu = []
self.seg_outputs = []
output_features = base_num_features
input_features = input_channels
for d in range(num_downscale):
if d != 0 and self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[d-1]
else:
first_stride = None
self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage,
self.conv_op, self.conv_kwargs, self.norm_op,
self.norm_op_kwargs, self.dropout_op,
self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs,
first_stride))
if not self.convolutional_pooling:
self.td.append(pool_op(pool_op_kernel_sizes[d]))
input_features = output_features
output_features = int(np.round(output_features * feat_map_mul_on_downscale))
if self.conv_op == nn.Conv3d:
output_features = min(output_features, max_num_filters_3d)
else:
output_features = min(output_features, max_num_filters_2d)
if self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[-1]
else:
first_stride = None
if self.convolutional_upsampling:
final_num_features = output_features
else:
final_num_features = self.conv_blocks_context[-1].output_channels
self.conv_blocks_context.append(nn.Sequential(
StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs, first_stride),
StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs)))
if not dropout_in_localization:
old_dropout_p = self.dropout_op_kwargs['p']
self.dropout_op_kwargs['p'] = 0.0
for u in range(num_downscale):
nfeatures_from_down = final_num_features
nfeatures_from_skip = self.conv_blocks_context[-(2 + u)].output_channels # self.conv_blocks_context[-1] is bottleneck, so start with -2
n_features_after_tu_and_concat = nfeatures_from_skip * 2
# the first conv reduces the number of features to match those of skip
# the following convs work on that number of features
# if not convolutional upsampling then the final conv reduces the num of features again
if u != num_downscale-1 and not self.convolutional_upsampling:
final_num_features = self.conv_blocks_context[-(3 + u)].output_channels
else:
final_num_features = nfeatures_from_skip
if not self.convolutional_upsampling:
self.tu.append(nn.Upsample(scale_factor=pool_op_kernel_sizes[-(u+1)], mode=upsample_mode))
else:
self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u+1)], pool_op_kernel_sizes[-(u+1)], bias=False))
self.conv_blocks_localization.append(nn.Sequential(
StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs),
StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs)
))
for ds in range(len(self.conv_blocks_localization)):
self.seg_outputs.append(nn.ModuleList([conv_op(self.conv_blocks_localization[ds][-1].output_channels, i, 1, 1, 0, 1, 1, False) for i in num_classes]))
self.upscale_logits_ops = []
cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]
for usl in range(num_downscale - 1):
if self.upscale_logits:
self.upscale_logits_ops.append(nn.Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl+1]]), mode=upsample_mode))
else:
self.upscale_logits_ops.append(lambda x: x)
if not dropout_in_localization:
self.dropout_op_kwargs['p'] = old_dropout_p
# register all modules properly
self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)
self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)
self.td = nn.ModuleList(self.td)
self.tu = nn.ModuleList(self.tu)
self.seg_outputs = nn.ModuleList(self.seg_outputs)
if self.upscale_logits:
self.upscale_logits_ops = nn.ModuleList(self.upscale_logits_ops) # lambda x:x is not a Module so we need to distinguish here
self.apply(self.weightInitializer)
self.test_return_output = 0
self.inference = False
def train(self, mode=True):
super(Generic_UNet_Cotraining, self).train(mode)
def eval(self):
super(Generic_UNet_Cotraining, self).eval()
def infer(self, infer):
self.train(False)
self.inference = infer
def forward(self, x):
#input_var = x
skips = []
seg_outputs = []
for d in range(len(self.conv_blocks_context) - 1):
x = self.conv_blocks_context[d](x)
skips.append(x)
if not self.convolutional_pooling:
x = self.td[d](x)
x = self.conv_blocks_context[-1](x)
for u in range(len(self.tu)):
x = self.tu[u](x)
x = torch.cat((x, skips[-(u + 1)]), dim=1)
x = self.conv_blocks_localization[u](x)
if not self.inference:
seg_outputs.append([self.final_nonlin(self.seg_outputs[u][i](x[(x.shape[0]//len(self.num_classes) * i): (x.shape[0]//len(self.num_classes) * (i+1))])) for i in range(len(self.num_classes))])
else:
seg_outputs.append(self.final_nonlin(self.seg_outputs[u][self.test_return_output](x)))
if self.do_ds:
return tuple([seg_outputs[-1]] + [i(j) for i, j in zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])])
else:
return seg_outputs[-1]
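def _demo_cotraining_unet_forward():
    # Minimal sketch with an assumed toy configuration (much smaller than the BraTS settings):
    # builds a 3D cotraining U-Net with two heads of 3 classes each and runs one forward pass.
    # In training mode the batch is split evenly across the heads, so a batch of 2 gives each
    # head one sample.
    pool_kernels = [(2, 2, 2)] * 2
    net = Generic_UNet_Cotraining(4, 8, (3, 3), 2, 2, 2,
                                  nn.Conv3d, None, nn.InstanceNorm3d, None,
                                  nn.Dropout3d, {'p': 0, 'inplace': True},
                                  nn.LeakyReLU, None, False, False, lambda x: x,
                                  InitWeights_He(1e-2), pool_kernels, False, False, False)
    with torch.no_grad():
        out = net(torch.randn(2, 4, 16, 16, 16))
    assert len(out) == 2 and out[0].shape == (1, 3, 16, 16, 16)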
class NetworkTrainerBraTS2018Baseline2RegionsCotrainingBraTSDecSDCE(object):
def __init__(self):
self.preprocessed_data_directory = None
# set through arguments from init
self.experiment_name = "baseline_inspired_by_decathlon 2_regions_cotraining brats dec sd ce"
self.experiment_description = "NetworkTrainerBraTS2018Baseline 2_regions_cotraining brats dec sd ce"
self.output_folder = 'model/params'
self.dataset_directory = None
self.device = 0
self.fold = 0
self.preprocessed_data_directory = None
self.gt_niftis_folder = None
# set in self.initialize()
self.network = None
self.num_input_channels = self.num_classes = self.net_pool_per_axis = self.patch_size = self.batch_size = \
self.threeD = self.base_num_features = self.intensity_properties = self.normalization_schemes = None # loaded automatically from plans_file
self.basic_generator_patch_size = self.data_aug_params = self.plans = None
self.was_initialized = False
self.also_val_in_tr_mode = False
self.dataset = None
self.inference_apply_nonlin = nn.Sigmoid()
def initialize(self, training=True):
if not os.path.isdir(self.output_folder):
os.mkdir(self.output_folder)
self.output_folder = os.path.join(self.output_folder, "fold%d" % self.fold)
if not os.path.isdir(self.output_folder):
os.mkdir(self.output_folder)
self.process_plans_file()
if training:
raise NotImplementedError
self.initialize_network_optimizer_and_scheduler()
self.network.inference_apply_nonlin = self.inference_apply_nonlin
self.was_initialized = True
def initialize_network_optimizer_and_scheduler(self):
net_numpool = max(self.net_pool_per_axis)
net_pool_kernel_sizes = []
for s in range(1, net_numpool+1):
this_pool_kernel_sizes = [1, 1, 1]
if self.net_pool_per_axis[0] >= s:
this_pool_kernel_sizes[0] = 2
if self.net_pool_per_axis[1] >= s:
this_pool_kernel_sizes[1] = 2
if len(self.patch_size)>2:
if self.net_pool_per_axis[2] >= s:
this_pool_kernel_sizes[2] = 2
else:
this_pool_kernel_sizes = this_pool_kernel_sizes[:-1]
net_pool_kernel_sizes.append(tuple(this_pool_kernel_sizes))
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.InstanceNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.InstanceNorm2d
conv_kwargs = {'kernel_size':3, 'stride':1, 'padding':1, 'dilation':1, 'bias':True}
norm_op_kwargs = {'eps':1e-5, 'affine':True, 'momentum':0.02, 'track_running_stats':False}
dropout_op_kwargs = {'p':0, 'inplace':True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope':1e-2, 'inplace':True}
self.network = Generic_UNet_Cotraining(self.num_input_channels, self.base_num_features, self.num_classes, 2, net_numpool, 2,
conv_op, conv_kwargs, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, False, False, lambda x:x, InitWeights_He(1e-2),
net_pool_kernel_sizes, True, False, False)
self.optimizer = None
self.lr_scheduler = None
self.network.set_device(self.device)
def process_plans_file(self):
self.batch_size = 2
self.net_pool_per_axis = [4, 4, 4]
self.patch_size = (128, 128, 128)
self.intensity_properties = None
self.normalization_schemes = ["nonCT"] * 4
self.base_num_features = 30
self.num_input_channels = 4
self.do_dummy_2D_aug = False
self.use_mask_for_norm = True
self.only_keep_largest_connected_component = {(0, ): False}
if len(self.patch_size) == 2:
self.threeD = False
elif len(self.patch_size) == 3:
self.threeD = True
else:
raise RuntimeError("invalid patch size in plans file: %s" % str(self.patch_size))
self.regions = ((1, 2, 3, 4), (2, 3, 4), (2,))
self.regions_class_order = (1, 3, 2)
self.batch_size = 2
self.base_num_features = 30
self.num_classes = (3, 3)
def predict_preprocessed_data_return_softmax(self, data, do_mirroring, num_repeats, use_train_mode, batch_size, mirror_axes, tiled, tile_in_z, step, min_size, use_gaussian):
return self.network.predict_3D(data, do_mirroring, num_repeats, use_train_mode, batch_size, mirror_axes, tiled, tile_in_z, step, min_size, use_gaussian=use_gaussian)[2]
def load_best_checkpoint(self, train=True):
self.load_checkpoint(os.path.join(self.output_folder, "model_best.model"), train=train)
def load_checkpoint(self, fname, train=True):
print("loading checkpoint", fname, "train=", train)
if not self.was_initialized:
self.initialize()
saved_model = torch.load(fname)
new_state_dict = OrderedDict()
for k, value in saved_model['state_dict'].items():
key = k
new_state_dict[key] = value
self.network.load_state_dict(new_state_dict)
self.epoch = saved_model['epoch']
if train:
optimizer_state_dict = saved_model['optimizer_state_dict']
if optimizer_state_dict is not None:
self.optimizer.load_state_dict(optimizer_state_dict)
if self.lr_scheduler is not None and not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
self.lr_scheduler.load_state_dict(saved_model['lr_scheduler_state_dict'])
if len(saved_model['plot_stuff']) < 9:
self.all_tr_losses_x, self.all_tr_losses, self.all_tr_eval_metrics, self.all_val_losses_x, \
self.all_val_losses, self.all_val_eval_metrics_dc_per_sample, self.all_val_losses_tr_mode, \
self.all_val_eval_metrics_dc_glob = saved_model['plot_stuff']
self.all_val_eval_metrics_dc_per_sample_std = []
else:
self.all_tr_losses_x, self.all_tr_losses, self.all_tr_eval_metrics, self.all_val_losses_x, \
self.all_val_losses, self.all_val_eval_metrics_dc_per_sample, self.all_val_losses_tr_mode, \
self.all_val_eval_metrics_dc_glob, self.all_val_eval_metrics_dc_per_sample_std = saved_model['plot_stuff']
self.network.set_device(self.device)
def resize_softmax_output(softmax_output, new_shape, order=3):
'''
Resizes softmax output. Resizes each channel in c separately and fuses results back together
    :param softmax_output: softmax prediction of shape (c, x, y, z)
    :param new_shape: target spatial shape (x, y, z)
:param order:
:return:
'''
tpe = softmax_output.dtype
new_shp = [softmax_output.shape[0]] + list(new_shape)
result = np.zeros(new_shp, dtype=softmax_output.dtype)
for i in range(softmax_output.shape[0]):
result[i] = resize(softmax_output[i].astype(float), new_shape, order, "constant", 0, True)
return result.astype(tpe)
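def _demo_resize_softmax_output():
    # Illustrative sketch only: each of the c channels is resized independently, so the channel
    # count and dtype are preserved while the spatial grid changes.
    dummy = np.random.rand(3, 8, 8, 8).astype(np.float32)
    out = resize_softmax_output(dummy, (16, 16, 16), order=1)
    assert out.shape == (3, 16, 16, 16) and out.dtype == dummy.dtype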
def save_segmentation_nifti_softmax(softmax_output, dct, out_fname, order=3, region_class_order=None):
    '''
    Restores a cropped softmax prediction to the original image grid and converts it to a label
    map. Despite the name, nothing is written to out_fname here; the segmentation is returned as
    a uint8 array. The softmax must have the same spacing as the original nifti and may have been
    cropped out of the original image (the brain bounding box is stored in dct).
    :param softmax_output: softmax prediction of shape (c, x, y, z)
    :param dct: properties dict produced by load_and_preprocess
    :param out_fname: unused in this implementation
    :return: segmentation resampled to the original image size
    '''
old_size = dct.get('size_before_cropping')
bbox = dct.get('brain_bbox')
if bbox is not None:
seg_old_size = np.zeros([softmax_output.shape[0]] + list(old_size))
for c in range(3):
bbox[c][1] = np.min((bbox[c][0] + softmax_output.shape[c+1], old_size[c]))
seg_old_size[:, bbox[0][0]:bbox[0][1],
bbox[1][0]:bbox[1][1],
bbox[2][0]:bbox[2][1]] = softmax_output
else:
seg_old_size = softmax_output
segmentation = resize_softmax_output(seg_old_size, np.array(dct['size'])[[2, 1, 0]], order=order)
if region_class_order is None:
segmentation = segmentation.argmax(0)
else:
seg_old_spacing_final = np.zeros(segmentation.shape[1:])
for i, c in enumerate(region_class_order):
seg_old_spacing_final[segmentation[i] > 0.5] = c
segmentation = seg_old_spacing_final
return segmentation.astype(np.uint8)
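def _demo_region_based_labels():
    # Illustrative sketch only: with region_class_order set, each channel is treated as a
    # (possibly overlapping) region probability; channels are thresholded at 0.5 in order, later
    # regions overwriting earlier ones, instead of taking an argmax.
    dummy = np.zeros((3, 8, 8, 8), dtype=np.float32)
    dummy[0, 2:6, 2:6, 2:6] = 0.9
    dummy[1, 3:5, 3:5, 3:5] = 0.9
    dct = {"size_before_cropping": (8, 8, 8), "brain_bbox": None, "size": (8, 8, 8)}
    seg = save_segmentation_nifti_softmax(dummy, dct, out_fname=None, order=1,
                                          region_class_order=(1, 3, 2))
    assert set(np.unique(seg)) <= {0, 1, 3}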
def subfiles(folder, join=True, prefix=None, suffix=None, sort=True):
if join:
l = os.path.join
else:
l = lambda x, y: y
res = [l(folder, i) for i in os.listdir(folder) if os.path.isfile(os.path.join(folder, i))
and (prefix is None or i.startswith(prefix))
and (suffix is None or i.endswith(suffix))]
if sort:
res.sort()
return res
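def _demo_subfiles():
    # Illustrative sketch only: lists regular files with optional prefix/suffix filtering,
    # either as bare names or joined with the folder path.
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        for name in ["a.txt", "b.txt", "c.nii.gz"]:
            open(os.path.join(tmp, name), "w").close()
        assert subfiles(tmp, join=False, suffix=".txt") == ["a.txt", "b.txt"]
        assert len(subfiles(tmp, suffix=".nii.gz")) == 1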
def maybe_mkdir_p(directory):
splits = directory.split("/")[1:]
for i in range(0, len(splits)):
if not os.path.isdir(os.path.join("/", *splits[:i+1])):
os.mkdir(os.path.join("/", *splits[:i+1]))
def convert_labels_back(seg):
new_seg = np.zeros(seg.shape, dtype=seg.dtype)
new_seg[seg == 1] = 2
new_seg[seg == 2] = 4
new_seg[seg == 3] = 1
return new_seg
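def _demo_convert_labels_back():
    # Illustrative sketch only: maps the internal consecutive labels back to the BraTS label
    # convention (1 -> 2, 2 -> 4, 3 -> 1); the semantic meaning of each label is not asserted here.
    seg = np.array([0, 1, 2, 3], dtype=np.uint8)
    assert convert_labels_back(seg).tolist() == [0, 2, 4, 1]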
def preprocess_image(itk_image, is_seg=False, spacing_target=(1, 0.5, 0.5), brain_mask=None, cval=0):
"""
brain mask must be a numpy array that has the same shape as itk_image's pixel array. This function is not ideal but
gets the job done
:param itk_image:
:param is_seg:
:param spacing_target:
:param brain_mask:
:return:
"""
spacing = np.array(itk_image.GetSpacing())[[2, 1, 0]]
image = sitk.GetArrayFromImage(itk_image).astype(float)
if not is_seg:
if brain_mask is None:
brain_mask = (image!=image[0,0,0]).astype(float)
if np.any([[i!=j] for i, j in zip(spacing, spacing_target)]):
image = resize_image(image, spacing, spacing_target, 3, cval).astype(np.float32)
brain_mask = resize_image(brain_mask.astype(float), spacing, spacing_target, order=0).astype(int)
image[brain_mask==0] = 0
#subtract mean, divide by std. use heuristic masking
image[brain_mask!=0] -= image[brain_mask!=0].mean()
image[brain_mask!=0] /= image[brain_mask!=0].std()
else:
new_shape = (int(np.round(spacing[0] / spacing_target[0] * float(image.shape[0]))),
int(np.round(spacing[1] / spacing_target[1] * float(image.shape[1]))),
int(np.round(spacing[2] / spacing_target[2] * float(image.shape[2]))))
image = resize_segmentation(image, new_shape, 1, cval)
return image
def create_brain_masks(data):
"""
    data must be (b, c, x, y, z). The brain mask is a hole-filled binary mask of the voxels where
    the sequences are non-zero (this is a heuristic to recover a brain mask from brain-extracted
    MRI sequences, not an actual brain extraction).
:param data:
:return:
"""
shp = list(data.shape)
brain_mask = np.zeros(shp, dtype=np.float32)
for b in range(data.shape[0]):
for c in range(data.shape[1]):
this_mask = data[b, c] != 0
this_mask = binary_fill_holes(this_mask)
brain_mask[b, c] = this_mask
return brain_mask
def extract_brain_region(image, segmentation, outside_value=0):
brain_voxels = np.where(segmentation != outside_value)
minZidx = int(np.min(brain_voxels[0]))
maxZidx = int(np.max(brain_voxels[0]))
minXidx = int(np.min(brain_voxels[1]))
maxXidx = int(np.max(brain_voxels[1]))
minYidx = int(np.min(brain_voxels[2]))
maxYidx = int(np.max(brain_voxels[2]))
    # crop the image to the bounding box of the brain
resizer = (slice(minZidx, maxZidx), slice(minXidx, maxXidx), slice(minYidx, maxYidx))
return image[resizer], [[minZidx, maxZidx], [minXidx, maxXidx], [minYidx, maxYidx]]
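def _demo_extract_brain_region():
    # Illustrative sketch only: crops a volume to the bounding box of the non-zero mask voxels
    # and returns the bbox so the crop can be undone later. Note that the returned upper bounds
    # are the indices of the last non-zero voxels, which the slices exclude.
    vol = np.zeros((10, 10, 10))
    vol[2:7, 3:8, 4:9] = 1.0
    cropped, bbox = extract_brain_region(vol, vol, outside_value=0)
    assert cropped.shape == (4, 4, 4) and bbox == [[2, 6], [3, 7], [4, 8]]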
def load_and_preprocess(t1_file, t1km_file, t2_file, flair_file, seg_file=None, bet_file=None, encode_bet_mask_in_seg=False, label_conversion_fn=None):
images = {}
# t1
images["T1"] = sitk.ReadImage(t1_file)
# t1km
images["T1KM"] = sitk.ReadImage(t1km_file)
properties_dict = {
"spacing": images["T1"].GetSpacing(),
"direction": images["T1"].GetDirection(),
"size": images["T1"].GetSize(),
"origin": images["T1"].GetOrigin()
}
# t2
images["T2"] = sitk.ReadImage(t2_file)
# flair
images["FLAIR"] = sitk.ReadImage(flair_file)
if seg_file is not None:
images['seg'] = sitk.ReadImage(seg_file)
if bet_file is not None:
images['bet_mask'] = sitk.ReadImage(bet_file)
else:
t1_npy = sitk.GetArrayFromImage(images["T1"])
mask = create_brain_masks(t1_npy[None])[0].astype(int)
mask = sitk.GetImageFromArray(mask)
mask.CopyInformation(images["T1"])
images['bet_mask'] = mask
try:
images["t1km_sub"] = images["T1KM"] - images["T1"]
except RuntimeError:
tmp1 = sitk.GetArrayFromImage(images["T1KM"])
tmp2 = sitk.GetArrayFromImage(images["T1"])
res = tmp1 - tmp2
res_itk = sitk.GetImageFromArray(res)
res_itk.CopyInformation(images["T1"])
images["t1km_sub"] = res_itk
for k in ['T1', 'T1KM', 'T2', 'FLAIR', "t1km_sub"]:
images[k] = sitk.Mask(images[k], images['bet_mask'], 0)
bet_numpy = sitk.GetArrayFromImage(images['bet_mask'])
for k in images.keys():
is_seg = (k == "seg") | (k == "bet_mask")
if is_seg:
cval = -1
else:
cval = 0
images[k] = preprocess_image(images[k], is_seg=is_seg,
spacing_target=(1., 1., 1.), brain_mask=np.copy(bet_numpy), cval=cval)
properties_dict['size_before_cropping'] = images["T1"].shape
mask = np.copy(images['bet_mask'])
for k in images.keys():
images[k], bbox = extract_brain_region(images[k], mask, False)
properties_dict['brain_bbox'] = bbox
if (label_conversion_fn is not None) and ("seg" in images.keys()):
images["seg"] = label_conversion_fn(images["seg"])
use_these = ['T1', 'T1KM', 'T2', 'FLAIR', "t1km_sub", 'seg']
if (not encode_bet_mask_in_seg) or ("seg" not in images.keys()):
use_these.append("bet_mask")
else:
images["seg"][images["bet_mask"] <= 0] = -1
imgs = []
for seq in use_these:
if seq not in images.keys():
imgs.append(np.zeros(images["T1"].shape)[None])
else:
imgs.append(images[seq][None])
all_data = np.vstack(imgs)
return all_data, properties_dict
def segment(t1_file, t1ce_file, t2_file, flair_file, netLoc):
"""
    Segments the passed files by ensembling the softmax predictions of the five folds found under
    netLoc and returns the resulting label map.
"""
trainer = NetworkTrainerBraTS2018Baseline2RegionsCotrainingBraTSDecSDCE()
trainer.initialize(False)
all_data, dct = load_and_preprocess(t1_file, t1ce_file, t2_file, flair_file, None, None,
True, None)
all_softmax = []
for fold in range(5):
trainer.output_folder = join(netLoc, "%d" % fold)
trainer.load_best_checkpoint(False)
trainer.network.infer(True)
trainer.network.test_return_output = 0
softmax = trainer.predict_preprocessed_data_return_softmax(all_data[:4], True, 1, False, 1, (2, 3, 4), False,
None, None, trainer.patch_size, True)
all_softmax.append(softmax[None])
softmax_consolidated = np.vstack(all_softmax).mean(0)
output = save_segmentation_nifti_softmax(softmax_consolidated, dct,
"tumor_isen2018_class.nii.gz", 1,
trainer.regions_class_order)
return output
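def _example_segment_call():
    # Hypothetical usage sketch: the file paths and network location below are placeholders and
    # not files shipped with this code. segment() ensembles the softmax of the five folds found
    # under netLoc and returns the label map resampled to the original T1 grid.
    return segment("patient/t1.nii.gz", "patient/t1ce.nii.gz",
                   "patient/t2.nii.gz", "patient/flair.nii.gz",
                   netLoc="model/params")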
| import json
import os
from collections import OrderedDict
from copy import deepcopy
import SimpleITK as sitk
from batchgenerators.augmentations.utils import resize_segmentation # resize_softmax_output
from skimage.transform import resize
from torch.optim import lr_scheduler
from torch import nn
import numpy as np
import torch
from scipy.ndimage import binary_fill_holes
'''
This code is not intended to be looked at by anyone. It is messy. It is undocumented.
And the entire training pipeline is missing.
'''
max_num_filters_3d = 320
max_num_filters_2d = 480
join = os.path.join
def load_json(file):
with open(file, 'r') as f:
a = json.load(f)
return a
def resize_image(image, old_spacing, new_spacing, order=3, cval=0):
new_shape = (int(np.round(old_spacing[0]/new_spacing[0]*float(image.shape[0]))),
int(np.round(old_spacing[1]/new_spacing[1]*float(image.shape[1]))),
int(np.round(old_spacing[2]/new_spacing[2]*float(image.shape[2]))))
if any([i != j for i, j in zip(image.shape, new_shape)]):
res = resize(image, new_shape, order=order, mode='edge', cval=cval)
else:
res = image
return res
class ConvDropoutNormNonlin(nn.Module):
def __init__(self, input_channels, output_channels,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None):
super(ConvDropoutNormNonlin, self).__init__()
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
self.conv = self.conv_op(input_channels, output_channels, **self.conv_kwargs)
if self.dropout_op is not None and self.dropout_op_kwargs['p'] is not None and self.dropout_op_kwargs[
'p'] > 0:
self.dropout = self.dropout_op(**self.dropout_op_kwargs)
else:
self.dropout = None
self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs)
self.lrelu = nn.LeakyReLU(**self.nonlin_kwargs)
def forward(self, x):
x = self.conv(x)
if self.dropout is not None:
x = self.dropout(x)
return self.lrelu(self.instnorm(x))
def pad_nd_image(image, new_shape=None, mode="edge", kwargs=None, return_slicer=False, shape_must_be_divisible_by=None):
if kwargs is None:
kwargs = {}
if new_shape is not None:
old_shape = np.array(image.shape[-len(new_shape):])
else:
assert shape_must_be_divisible_by is not None
assert isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray))
new_shape = image.shape[-len(shape_must_be_divisible_by):]
old_shape = new_shape
num_axes_nopad = len(image.shape) - len(new_shape)
new_shape = [max(new_shape[i], old_shape[i]) for i in range(len(new_shape))]
if not isinstance(new_shape, np.ndarray):
new_shape = np.array(new_shape)
if shape_must_be_divisible_by is not None:
if not isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray)):
shape_must_be_divisible_by = [shape_must_be_divisible_by] * len(new_shape)
else:
assert len(shape_must_be_divisible_by) == len(new_shape)
for i in range(len(new_shape)):
if new_shape[i] % shape_must_be_divisible_by[i] == 0:
new_shape[i] -= shape_must_be_divisible_by[i]
new_shape = np.array([new_shape[i] + shape_must_be_divisible_by[i] - new_shape[i] % shape_must_be_divisible_by[i] for i in range(len(new_shape))])
difference = new_shape - old_shape
pad_below = difference // 2
pad_above = difference // 2 + difference % 2
pad_list = [[0, 0]]*num_axes_nopad + list([list(i) for i in zip(pad_below, pad_above)])
res = np.pad(image, pad_list, mode, **kwargs)
if not return_slicer:
return res
else:
pad_list = np.array(pad_list)
pad_list[:, 1] = np.array(res.shape) - pad_list[:, 1]
slicer = list(slice(*i) for i in pad_list)
return res, slicer
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
def get_device(self):
if next(self.parameters()).device == "cpu":
return "cpu"
else:
return next(self.parameters()).device.index
def set_device(self, device):
if device == "cpu":
self.cpu()
else:
self.cuda(device)
def forward(self, x):
raise NotImplementedError
class SegmentationNetwork(NeuralNetwork):
def __init__(self):
self.input_shape_must_be_divisible_by = None
self.conv_op = None
super(NeuralNetwork, self).__init__()
self.inference_apply_nonlin = lambda x:x
def predict_3D(self, x, do_mirroring, num_repeats=1, use_train_mode=False, batch_size=1, mirror_axes=(2, 3, 4),
tiled=False, tile_in_z=True, step=2, patch_size=None, regions_class_order=None, use_gaussian=False,
pad_border_mode="edge", pad_kwargs=None):
"""
:param x: (c, x, y , z)
:param do_mirroring:
:param num_repeats:
:param use_train_mode:
:param batch_size:
:param mirror_axes:
:param tiled:
:param tile_in_z:
:param step:
:param patch_size:
:param regions_class_order:
:param use_gaussian:
:return:
"""
current_mode = self.training
if use_train_mode is not None and use_train_mode:
self.train()
elif use_train_mode is not None and not use_train_mode:
self.eval()
else:
pass
assert len(x.shape) == 4, "data must have shape (c,x,y,z)"
if self.conv_op == nn.Conv3d:
if tiled:
res = self._internal_predict_3D_3Dconv_tiled(x, num_repeats, batch_size, tile_in_z, step, do_mirroring,
mirror_axes, patch_size, regions_class_order, use_gaussian,
pad_border_mode, pad_kwargs=pad_kwargs)
else:
res = self._internal_predict_3D_3Dconv(x, do_mirroring, num_repeats, patch_size, batch_size,
mirror_axes, regions_class_order, pad_border_mode, pad_kwargs=pad_kwargs)
elif self.conv_op == nn.Conv2d:
if tiled:
res = self._internal_predict_3D_2Dconv_tiled(x, do_mirroring, num_repeats, batch_size, mirror_axes,
step, patch_size, regions_class_order, use_gaussian,
pad_border_mode, pad_kwargs=pad_kwargs)
else:
res = self._internal_predict_3D_2Dconv(x, do_mirroring, num_repeats, patch_size, batch_size,
mirror_axes, regions_class_order, pad_border_mode, pad_kwargs=pad_kwargs)
else:
raise RuntimeError("Invalid conv op, cannot determine what dimensionality (2d/3d) the network is")
if use_train_mode is not None:
self.train(current_mode)
return res
def _internal_maybe_mirror_and_pred_3D(self, x, num_repeats, mirror_axes, do_mirroring=True):
with torch.no_grad():
a = torch.zeros(x.shape).float()
if self.get_device() == "cpu":
a = a.cpu()
else:
a = a.cuda(self.get_device())
if do_mirroring:
mirror_idx = 8
else:
mirror_idx = 1
all_preds = []
for i in range(num_repeats):
for m in range(mirror_idx):
data_for_net = np.array(x)
do_stuff = False
if m == 0:
do_stuff = True
pass
if m == 1 and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, :, ::-1]
if m == 2 and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, ::-1, :]
if m == 3 and (4 in mirror_axes) and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, :, ::-1, ::-1]
if m == 4 and (2 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, :, :]
if m == 5 and (2 in mirror_axes) and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, :, ::-1]
if m == 6 and (2 in mirror_axes) and (3 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, ::-1, :]
if m == 7 and (2 in mirror_axes) and (3 in mirror_axes) and (4 in mirror_axes):
do_stuff = True
data_for_net = data_for_net[:, :, ::-1, ::-1, ::-1]
if do_stuff:
_ = a.data.copy_(torch.from_numpy(np.copy(data_for_net)))
p = self.inference_apply_nonlin(self(a))
p = p.data.cpu().numpy()
if m == 0:
pass
if m == 1 and (4 in mirror_axes):
p = p[:, :, :, :, ::-1]
if m == 2 and (3 in mirror_axes):
p = p[:, :, :, ::-1, :]
if m == 3 and (4 in mirror_axes) and (3 in mirror_axes):
p = p[:, :, :, ::-1, ::-1]
if m == 4 and (2 in mirror_axes):
p = p[:, :, ::-1, :, :]
if m == 5 and (2 in mirror_axes) and (4 in mirror_axes):
p = p[:, :, ::-1, :, ::-1]
if m == 6 and (2 in mirror_axes) and (3 in mirror_axes):
p = p[:, :, ::-1, ::-1, :]
if m == 7 and (2 in mirror_axes) and (3 in mirror_axes) and (4 in mirror_axes):
p = p[:, :, ::-1, ::-1, ::-1]
all_preds.append(p)
return np.vstack(all_preds)
def _internal_predict_3D_3Dconv(self, x, do_mirroring, num_repeats, min_size=None, BATCH_SIZE=None,
mirror_axes=(2, 3, 4), regions_class_order=None, pad_border_mode="edge",
pad_kwargs=None):
with torch.no_grad():
x, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True, self.input_shape_must_be_divisible_by)
#x, old_shape = pad_patient_3D_incl_c(x, self.input_shape_must_be_divisible_by, min_size)
new_shp = x.shape
data = np.zeros(tuple([1] + list(new_shp)), dtype=np.float32)
data[0] = x
if BATCH_SIZE is not None:
data = np.vstack([data] * BATCH_SIZE)
stacked = self._internal_maybe_mirror_and_pred_3D(data, num_repeats, mirror_axes, do_mirroring)
slicer = [slice(0, stacked.shape[i]) for i in range(len(stacked.shape) - (len(slicer) - 1))] + slicer[1:]
stacked = stacked[slicer]
uncertainty = stacked.var(0)
bayesian_predictions = stacked
softmax_pred = stacked.mean(0)
if regions_class_order is None:
predicted_segmentation = softmax_pred.argmax(0)
else:
predicted_segmentation_shp = softmax_pred[0].shape
predicted_segmentation = np.zeros(predicted_segmentation_shp)
for i, c in enumerate(regions_class_order):
predicted_segmentation[softmax_pred[i] > 0.5] = c
return predicted_segmentation, bayesian_predictions, softmax_pred, uncertainty
def softmax_helper(x):
rpt = [1 for _ in range(len(x.size()))]
rpt[1] = x.size(1)
x_max = x.max(1, keepdim=True)[0].repeat(*rpt)
e_x = torch.exp(x - x_max)
return e_x / e_x.sum(1, keepdim=True).repeat(*rpt)
class InitWeights_He(object):
def __init__(self, neg_slope=1e-2):
self.neg_slope = neg_slope
def __call__(self, module):
if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d):
module.weight = nn.init.kaiming_normal_(module.weight, a=1e-2)
if module.bias is not None:
module.bias = nn.init.constant_(module.bias, 0)
class StackedConvLayers(nn.Module):
def __init__(self, input_feature_channels, output_feature_channels, num_convs,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None):
self.input_channels = input_feature_channels
self.output_channels = output_feature_channels
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
if first_stride is not None:
self.conv_kwargs_first_conv = deepcopy(conv_kwargs)
self.conv_kwargs_first_conv['stride'] = first_stride
else:
self.conv_kwargs_first_conv = conv_kwargs
super(StackedConvLayers, self).__init__()
self.blocks = nn.Sequential(
*([ConvDropoutNormNonlin(input_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs_first_conv,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs)] +
[ConvDropoutNormNonlin(output_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs) for _ in range(num_convs - 1)]))
def forward(self, x):
return self.blocks(x)
def soft_dice(net_output, gt, smooth=1., smooth_in_nom=1.):
axes = tuple(range(2, len(net_output.size())))
intersect = sum_tensor(net_output * gt, axes, keepdim=False)
denom = sum_tensor(net_output + gt, axes, keepdim=False)
result = (- ((2 * intersect + smooth_in_nom) / (denom + smooth))).mean()
return result
def sum_tensor(input, axes, keepdim=False):
axes = np.unique(axes)
if keepdim:
for ax in axes:
input = input.sum(ax, keepdim=True)
else:
for ax in sorted(axes, reverse=True):
input = input.sum(ax)
return input
class Generic_UNet_Cotraining(SegmentationNetwork):
def __init__(self, input_channels, base_num_features, num_classes, num_conv_per_stage=2, num_downscale=4,
feat_map_mul_on_downscale=2, conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False,
final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None,
upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False):
"""
        Generic U-Net with cotraining support: one shared encoder/decoder with a separate
        segmentation head per entry in num_classes. In training mode the batch is split evenly
        across the heads; at inference time only the head selected by test_return_output is used.
:param input_channels:
:param base_num_features:
:param num_classes:
:param num_conv_per_stage:
:param num_downscale:
:param feat_map_mul_on_downscale:
:param conv_op:
:param conv_kwargs:
:param norm_op:
:param norm_op_kwargs:
:param dropout_op:
:param dropout_op_kwargs:
:param nonlin:
:param nonlin_kwargs:
:param deep_supervision:
:param dropout_in_localization:
:param final_nonlin:
:param weightInitializer:
:param pool_op_kernel_sizes:
:param upscale_logits:
:param convolutional_pooling:
:param convolutional_upsampling:
"""
super(Generic_UNet_Cotraining, self).__init__()
assert isinstance(num_classes, (list, tuple)), "for cotraining, num_classes must be list or tuple of int"
self.num_classes = num_classes
self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes, 0)
self.pool_op_kernel_sizes = pool_op_kernel_sizes
self.convolutional_upsampling = convolutional_upsampling
self.convolutional_pooling = convolutional_pooling
self.upscale_logits = upscale_logits
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope':1e-2, 'inplace':True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p':0.5, 'inplace':True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps':1e-5, 'affine':True, 'momentum':0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size':3, 'stride':1, 'padding':1, 'dilation':1, 'bias':True}
self.nonlin = nonlin
self.nonlin_kwargs = nonlin_kwargs
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.weightInitializer = weightInitializer
self.conv_op = conv_op
self.norm_op = norm_op
self.dropout_op = dropout_op
if pool_op_kernel_sizes is None:
if conv_op == nn.Conv2d:
pool_op_kernel_sizes = [(2, 2)] * num_downscale
elif conv_op == nn.Conv3d:
pool_op_kernel_sizes = [(2, 2, 2)] * num_downscale
else:
raise ValueError("unknown convolution dimensionality, conv op: %s" % str(conv_op))
self.pool_op_kernel_sizes = pool_op_kernel_sizes
self.final_nonlin = final_nonlin
assert num_conv_per_stage > 1, "this implementation does not support only one conv per stage"
if conv_op == nn.Conv2d:
upsample_mode = 'bilinear'
pool_op = nn.MaxPool2d
transpconv = nn.ConvTranspose2d
elif conv_op == nn.Conv3d:
upsample_mode = 'trilinear'
pool_op = nn.MaxPool3d
transpconv = nn.ConvTranspose3d
else:
raise ValueError("unknown convolution dimensionality, conv op: %s" % str(conv_op))
self.do_ds = deep_supervision
self.conv_blocks_context = []
self.conv_blocks_localization = []
self.td = []
self.tu = []
self.seg_outputs = []
output_features = base_num_features
input_features = input_channels
for d in range(num_downscale):
if d != 0 and self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[d-1]
else:
first_stride = None
self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage,
self.conv_op, self.conv_kwargs, self.norm_op,
self.norm_op_kwargs, self.dropout_op,
self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs,
first_stride))
if not self.convolutional_pooling:
self.td.append(pool_op(pool_op_kernel_sizes[d]))
input_features = output_features
output_features = int(np.round(output_features * feat_map_mul_on_downscale))
if self.conv_op == nn.Conv3d:
output_features = min(output_features, max_num_filters_3d)
else:
output_features = min(output_features, max_num_filters_2d)
if self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[-1]
else:
first_stride = None
if self.convolutional_upsampling:
final_num_features = output_features
else:
final_num_features = self.conv_blocks_context[-1].output_channels
self.conv_blocks_context.append(nn.Sequential(
StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs, first_stride),
StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs)))
if not dropout_in_localization:
old_dropout_p = self.dropout_op_kwargs['p']
self.dropout_op_kwargs['p'] = 0.0
for u in range(num_downscale):
nfeatures_from_down = final_num_features
nfeatures_from_skip = self.conv_blocks_context[-(2 + u)].output_channels # self.conv_blocks_context[-1] is bottleneck, so start with -2
n_features_after_tu_and_concat = nfeatures_from_skip * 2
# the first conv reduces the number of features to match those of skip
# the following convs work on that number of features
# if not convolutional upsampling then the final conv reduces the num of features again
if u != num_downscale-1 and not self.convolutional_upsampling:
final_num_features = self.conv_blocks_context[-(3 + u)].output_channels
else:
final_num_features = nfeatures_from_skip
if not self.convolutional_upsampling:
self.tu.append(nn.Upsample(scale_factor=pool_op_kernel_sizes[-(u+1)], mode=upsample_mode))
else:
self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u+1)], pool_op_kernel_sizes[-(u+1)], bias=False))
self.conv_blocks_localization.append(nn.Sequential(
StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs),
StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs)
))
for ds in range(len(self.conv_blocks_localization)):
self.seg_outputs.append(nn.ModuleList([conv_op(self.conv_blocks_localization[ds][-1].output_channels, i, 1, 1, 0, 1, 1, False) for i in num_classes]))
self.upscale_logits_ops = []
cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]
for usl in range(num_downscale - 1):
if self.upscale_logits:
self.upscale_logits_ops.append(nn.Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl+1]]), mode=upsample_mode))
else:
self.upscale_logits_ops.append(lambda x: x)
if not dropout_in_localization:
self.dropout_op_kwargs['p'] = old_dropout_p
# register all modules properly
self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)
self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)
self.td = nn.ModuleList(self.td)
self.tu = nn.ModuleList(self.tu)
self.seg_outputs = nn.ModuleList(self.seg_outputs)
if self.upscale_logits:
self.upscale_logits_ops = nn.ModuleList(self.upscale_logits_ops) # lambda x:x is not a Module so we need to distinguish here
self.apply(self.weightInitializer)
self.test_return_output = 0
self.inference = False
def train(self, mode=True):
super(Generic_UNet_Cotraining, self).train(mode)
def eval(self):
super(Generic_UNet_Cotraining, self).eval()
def infer(self, infer):
self.train(False)
self.inference = infer
def forward(self, x):
#input_var = x
skips = []
seg_outputs = []
for d in range(len(self.conv_blocks_context) - 1):
x = self.conv_blocks_context[d](x)
skips.append(x)
if not self.convolutional_pooling:
x = self.td[d](x)
x = self.conv_blocks_context[-1](x)
for u in range(len(self.tu)):
x = self.tu[u](x)
x = torch.cat((x, skips[-(u + 1)]), dim=1)
x = self.conv_blocks_localization[u](x)
if not self.inference:
seg_outputs.append([self.final_nonlin(self.seg_outputs[u][i](x[(x.shape[0]//len(self.num_classes) * i): (x.shape[0]//len(self.num_classes) * (i+1))])) for i in range(len(self.num_classes))])
else:
seg_outputs.append(self.final_nonlin(self.seg_outputs[u][self.test_return_output](x)))
if self.do_ds:
return tuple([seg_outputs[-1]] + [i(j) for i, j in zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])])
else:
return seg_outputs[-1]
class NetworkTrainerBraTS2018Baseline2RegionsCotrainingBraTSDecSDCE(object):
def __init__(self):
self.preprocessed_data_directory = None
# set through arguments from init
self.experiment_name = "baseline_inspired_by_decathlon 2_regions_cotraining brats dec sd ce"
self.experiment_description = "NetworkTrainerBraTS2018Baseline 2_regions_cotraining brats dec sd ce"
self.output_folder = 'model/params'
self.dataset_directory = None
self.device = 0
self.fold = 0
self.preprocessed_data_directory = None
self.gt_niftis_folder = None
# set in self.initialize()
self.network = None
self.num_input_channels = self.num_classes = self.net_pool_per_axis = self.patch_size = self.batch_size = \
self.threeD = self.base_num_features = self.intensity_properties = self.normalization_schemes = None # loaded automatically from plans_file
self.basic_generator_patch_size = self.data_aug_params = self.plans = None
self.was_initialized = False
self.also_val_in_tr_mode = False
self.dataset = None
self.inference_apply_nonlin = nn.Sigmoid()
def initialize(self, training=True):
if not os.path.isdir(self.output_folder):
os.mkdir(self.output_folder)
self.output_folder = os.path.join(self.output_folder, "fold%d" % self.fold)
if not os.path.isdir(self.output_folder):
os.mkdir(self.output_folder)
self.process_plans_file()
if training:
raise NotImplementedError
self.initialize_network_optimizer_and_scheduler()
self.network.inference_apply_nonlin = self.inference_apply_nonlin
self.was_initialized = True
def initialize_network_optimizer_and_scheduler(self):
net_numpool = max(self.net_pool_per_axis)
net_pool_kernel_sizes = []
for s in range(1, net_numpool+1):
this_pool_kernel_sizes = [1, 1, 1]
if self.net_pool_per_axis[0] >= s:
this_pool_kernel_sizes[0] = 2
if self.net_pool_per_axis[1] >= s:
this_pool_kernel_sizes[1] = 2
if len(self.patch_size)>2:
if self.net_pool_per_axis[2] >= s:
this_pool_kernel_sizes[2] = 2
else:
this_pool_kernel_sizes = this_pool_kernel_sizes[:-1]
net_pool_kernel_sizes.append(tuple(this_pool_kernel_sizes))
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.InstanceNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.InstanceNorm2d
conv_kwargs = {'kernel_size':3, 'stride':1, 'padding':1, 'dilation':1, 'bias':True}
norm_op_kwargs = {'eps':1e-5, 'affine':True, 'momentum':0.02, 'track_running_stats':False}
dropout_op_kwargs = {'p':0, 'inplace':True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope':1e-2, 'inplace':True}
self.network = Generic_UNet_Cotraining(self.num_input_channels, self.base_num_features, self.num_classes, 2, net_numpool, 2,
conv_op, conv_kwargs, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, False, False, lambda x:x, InitWeights_He(1e-2),
net_pool_kernel_sizes, True, False, False)
self.optimizer = None
self.lr_scheduler = None
self.network.set_device(self.device)
def process_plans_file(self):
self.batch_size = 2
self.net_pool_per_axis = [4, 4, 4]
self.patch_size = (128, 128, 128)
self.intensity_properties = None
self.normalization_schemes = ["nonCT"] * 4
self.base_num_features = 30
self.num_input_channels = 4
self.do_dummy_2D_aug = False
self.use_mask_for_norm = True
self.only_keep_largest_connected_component = {(0, ): False}
if len(self.patch_size) == 2:
self.threeD = False
elif len(self.patch_size) == 3:
self.threeD = True
else:
raise RuntimeError("invalid patch size in plans file: %s" % str(self.patch_size))
self.regions = ((1, 2, 3, 4), (2, 3, 4), (2,))
self.regions_class_order = (1, 3, 2)
self.batch_size = 2
self.base_num_features = 30
self.num_classes = (3, 3)
def predict_preprocessed_data_return_softmax(self, data, do_mirroring, num_repeats, use_train_mode, batch_size, mirror_axes, tiled, tile_in_z, step, min_size, use_gaussian):
return self.network.predict_3D(data, do_mirroring, num_repeats, use_train_mode, batch_size, mirror_axes, tiled, tile_in_z, step, min_size, use_gaussian=use_gaussian)[2]
def load_best_checkpoint(self, train=True):
self.load_checkpoint(os.path.join(self.output_folder, "model_best.model"), train=train)
def load_checkpoint(self, fname, train=True):
print("loading checkpoint", fname, "train=", train)
if not self.was_initialized:
self.initialize()
saved_model = torch.load(fname)
new_state_dict = OrderedDict()
for k, value in saved_model['state_dict'].items():
key = k
new_state_dict[key] = value
self.network.load_state_dict(new_state_dict)
self.epoch = saved_model['epoch']
if train:
optimizer_state_dict = saved_model['optimizer_state_dict']
if optimizer_state_dict is not None:
self.optimizer.load_state_dict(optimizer_state_dict)
if self.lr_scheduler is not None and not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
self.lr_scheduler.load_state_dict(saved_model['lr_scheduler_state_dict'])
if len(saved_model['plot_stuff']) < 9:
self.all_tr_losses_x, self.all_tr_losses, self.all_tr_eval_metrics, self.all_val_losses_x, \
self.all_val_losses, self.all_val_eval_metrics_dc_per_sample, self.all_val_losses_tr_mode, \
self.all_val_eval_metrics_dc_glob = saved_model['plot_stuff']
self.all_val_eval_metrics_dc_per_sample_std = []
else:
self.all_tr_losses_x, self.all_tr_losses, self.all_tr_eval_metrics, self.all_val_losses_x, \
self.all_val_losses, self.all_val_eval_metrics_dc_per_sample, self.all_val_losses_tr_mode, \
self.all_val_eval_metrics_dc_glob, self.all_val_eval_metrics_dc_per_sample_std = saved_model['plot_stuff']
self.network.set_device(self.device)
def resize_softmax_output(softmax_output, new_shape, order=3):
'''
Resizes softmax output. Resizes each channel in c separately and fuses results back together
    :param softmax_output: softmax prediction of shape (c, x, y, z)
    :param new_shape: target spatial shape (x, y, z)
:param order:
:return:
'''
tpe = softmax_output.dtype
new_shp = [softmax_output.shape[0]] + list(new_shape)
result = np.zeros(new_shp, dtype=softmax_output.dtype)
for i in range(softmax_output.shape[0]):
result[i] = resize(softmax_output[i].astype(float), new_shape, order, "constant", 0, True)
return result.astype(tpe)
def save_segmentation_nifti_softmax(softmax_output, dct, out_fname, order=3, region_class_order=None):
    '''
    Restores a cropped softmax prediction to the original image grid and converts it to a label
    map. Despite the name, nothing is written to out_fname here; the segmentation is returned as
    a uint8 array. The softmax must have the same spacing as the original nifti and may have been
    cropped out of the original image (the brain bounding box is stored in dct).
    :param softmax_output: softmax prediction of shape (c, x, y, z)
    :param dct: properties dict produced by load_and_preprocess
    :param out_fname: unused in this implementation
    :return: segmentation resampled to the original image size
    '''
old_size = dct.get('size_before_cropping')
bbox = dct.get('brain_bbox')
if bbox is not None:
seg_old_size = np.zeros([softmax_output.shape[0]] + list(old_size))
for c in range(3):
bbox[c][1] = np.min((bbox[c][0] + softmax_output.shape[c+1], old_size[c]))
seg_old_size[:, bbox[0][0]:bbox[0][1],
bbox[1][0]:bbox[1][1],
bbox[2][0]:bbox[2][1]] = softmax_output
else:
seg_old_size = softmax_output
segmentation = resize_softmax_output(seg_old_size, np.array(dct['size'])[[2, 1, 0]], order=order)
if region_class_order is None:
segmentation = segmentation.argmax(0)
else:
seg_old_spacing_final = np.zeros(segmentation.shape[1:])
for i, c in enumerate(region_class_order):
seg_old_spacing_final[segmentation[i] > 0.5] = c
segmentation = seg_old_spacing_final
return segmentation.astype(np.uint8)
def subfiles(folder, join=True, prefix=None, suffix=None, sort=True):
if join:
l = os.path.join
else:
l = lambda x, y: y
res = [l(folder, i) for i in os.listdir(folder) if os.path.isfile(os.path.join(folder, i))
and (prefix is None or i.startswith(prefix))
and (suffix is None or i.endswith(suffix))]
if sort:
res.sort()
return res
def maybe_mkdir_p(directory):
splits = directory.split("/")[1:]
for i in range(0, len(splits)):
if not os.path.isdir(os.path.join("/", *splits[:i+1])):
os.mkdir(os.path.join("/", *splits[:i+1]))
def convert_labels_back(seg):
new_seg = np.zeros(seg.shape, dtype=seg.dtype)
new_seg[seg == 1] = 2
new_seg[seg == 2] = 4
new_seg[seg == 3] = 1
return new_seg
def preprocess_image(itk_image, is_seg=False, spacing_target=(1, 0.5, 0.5), brain_mask=None, cval=0):
"""
brain mask must be a numpy array that has the same shape as itk_image's pixel array. This function is not ideal but
gets the job done
:param itk_image:
:param is_seg:
:param spacing_target:
:param brain_mask:
:return:
"""
spacing = np.array(itk_image.GetSpacing())[[2, 1, 0]]
image = sitk.GetArrayFromImage(itk_image).astype(float)
if not is_seg:
if brain_mask is None:
brain_mask = (image!=image[0,0,0]).astype(float)
if np.any([[i!=j] for i, j in zip(spacing, spacing_target)]):
image = resize_image(image, spacing, spacing_target, 3, cval).astype(np.float32)
brain_mask = resize_image(brain_mask.astype(float), spacing, spacing_target, order=0).astype(int)
image[brain_mask==0] = 0
#subtract mean, divide by std. use heuristic masking
image[brain_mask!=0] -= image[brain_mask!=0].mean()
image[brain_mask!=0] /= image[brain_mask!=0].std()
else:
new_shape = (int(np.round(spacing[0] / spacing_target[0] * float(image.shape[0]))),
int(np.round(spacing[1] / spacing_target[1] * float(image.shape[1]))),
int(np.round(spacing[2] / spacing_target[2] * float(image.shape[2]))))
image = resize_segmentation(image, new_shape, 1, cval)
return image
def create_brain_masks(data):
"""
    data must be (b, c, x, y, z). The brain mask is a hole-filled binary mask of the voxels where
    the sequences are non-zero (this is a heuristic to recover a brain mask from brain-extracted
    MRI sequences, not an actual brain extraction).
:param data:
:return:
"""
shp = list(data.shape)
brain_mask = np.zeros(shp, dtype=np.float32)
for b in range(data.shape[0]):
for c in range(data.shape[1]):
this_mask = data[b, c] != 0
this_mask = binary_fill_holes(this_mask)
brain_mask[b, c] = this_mask
return brain_mask
def extract_brain_region(image, segmentation, outside_value=0):
brain_voxels = np.where(segmentation != outside_value)
minZidx = int(np.min(brain_voxels[0]))
maxZidx = int(np.max(brain_voxels[0]))
minXidx = int(np.min(brain_voxels[1]))
maxXidx = int(np.max(brain_voxels[1]))
minYidx = int(np.min(brain_voxels[2]))
maxYidx = int(np.max(brain_voxels[2]))
    # crop the image to the bounding box of the brain
resizer = (slice(minZidx, maxZidx), slice(minXidx, maxXidx), slice(minYidx, maxYidx))
return image[resizer], [[minZidx, maxZidx], [minXidx, maxXidx], [minYidx, maxYidx]]
def load_and_preprocess(t1_file, t1km_file, t2_file, flair_file, seg_file=None, bet_file=None, encode_bet_mask_in_seg=False, label_conversion_fn=None):
images = {}
# t1
images["T1"] = sitk.ReadImage(t1_file)
# t1km
images["T1KM"] = sitk.ReadImage(t1km_file)
properties_dict = {
"spacing": images["T1"].GetSpacing(),
"direction": images["T1"].GetDirection(),
"size": images["T1"].GetSize(),
"origin": images["T1"].GetOrigin()
}
# t2
images["T2"] = sitk.ReadImage(t2_file)
# flair
images["FLAIR"] = sitk.ReadImage(flair_file)
if seg_file is not None:
images['seg'] = sitk.ReadImage(seg_file)
if bet_file is not None:
images['bet_mask'] = sitk.ReadImage(bet_file)
else:
t1_npy = sitk.GetArrayFromImage(images["T1"])
mask = create_brain_masks(t1_npy[None])[0].astype(int)
mask = sitk.GetImageFromArray(mask)
mask.CopyInformation(images["T1"])
images['bet_mask'] = mask
try:
images["t1km_sub"] = images["T1KM"] - images["T1"]
except RuntimeError:
tmp1 = sitk.GetArrayFromImage(images["T1KM"])
tmp2 = sitk.GetArrayFromImage(images["T1"])
res = tmp1 - tmp2
res_itk = sitk.GetImageFromArray(res)
res_itk.CopyInformation(images["T1"])
images["t1km_sub"] = res_itk
for k in ['T1', 'T1KM', 'T2', 'FLAIR', "t1km_sub"]:
images[k] = sitk.Mask(images[k], images['bet_mask'], 0)
bet_numpy = sitk.GetArrayFromImage(images['bet_mask'])
for k in images.keys():
is_seg = (k == "seg") | (k == "bet_mask")
if is_seg:
cval = -1
else:
cval = 0
images[k] = preprocess_image(images[k], is_seg=is_seg,
spacing_target=(1., 1., 1.), brain_mask=np.copy(bet_numpy), cval=cval)
properties_dict['size_before_cropping'] = images["T1"].shape
mask = np.copy(images['bet_mask'])
for k in images.keys():
images[k], bbox = extract_brain_region(images[k], mask, False)
properties_dict['brain_bbox'] = bbox
if (label_conversion_fn is not None) and ("seg" in images.keys()):
images["seg"] = label_conversion_fn(images["seg"])
use_these = ['T1', 'T1KM', 'T2', 'FLAIR', "t1km_sub", 'seg']
if (not encode_bet_mask_in_seg) or ("seg" not in images.keys()):
use_these.append("bet_mask")
else:
images["seg"][images["bet_mask"] <= 0] = -1
imgs = []
for seq in use_these:
if seq not in images.keys():
imgs.append(np.zeros(images["T1"].shape)[None])
else:
imgs.append(images[seq][None])
all_data = np.vstack(imgs)
return all_data, properties_dict
def segment(t1_file, t1ce_file, t2_file, flair_file, netLoc):
"""
    Segments the passed files by ensembling the softmax predictions of the five folds found under
    netLoc and returns the resulting label map.
"""
trainer = NetworkTrainerBraTS2018Baseline2RegionsCotrainingBraTSDecSDCE()
trainer.initialize(False)
all_data, dct = load_and_preprocess(t1_file, t1ce_file, t2_file, flair_file, None, None,
True, None)
all_softmax = []
for fold in range(5):
trainer.output_folder = join(netLoc, "%d" % fold)
trainer.load_best_checkpoint(False)
trainer.network.infer(True)
trainer.network.test_return_output = 0
softmax = trainer.predict_preprocessed_data_return_softmax(all_data[:4], True, 1, False, 1, (2, 3, 4), False,
None, None, trainer.patch_size, True)
all_softmax.append(softmax[None])
softmax_consolidated = np.vstack(all_softmax).mean(0)
output = save_segmentation_nifti_softmax(softmax_consolidated, dct,
"tumor_isen2018_class.nii.gz", 1,
trainer.regions_class_order)
return output
| en | 0.739555 | # resize_softmax_output This code is not intended to be looked at by anyone. It is messy. It is undocumented. And the entire training pipeline is missing. :param x: (c, x, y , z) :param do_mirroring: :param num_repeats: :param use_train_mode: :param batch_size: :param mirror_axes: :param tiled: :param tile_in_z: :param step: :param patch_size: :param regions_class_order: :param use_gaussian: :return: #x, old_shape = pad_patient_3D_incl_c(x, self.input_shape_must_be_divisible_by, min_size) Have fun lookint at that one. This is my go-to model. I crammed the cotraining code in there somehow, so yeah. What a mess. You know what's the best part? No documentation. What a great piece of code. :param input_channels: :param base_num_features: :param num_classes: :param num_conv_per_stage: :param num_downscale: :param feat_map_mul_on_downscale: :param conv_op: :param conv_kwargs: :param norm_op: :param norm_op_kwargs: :param dropout_op: :param dropout_op_kwargs: :param nonlin: :param nonlin_kwargs: :param deep_supervision: :param dropout_in_localization: :param final_nonlin: :param weightInitializer: :param pool_op_kernel_sizes: :param upscale_logits: :param convolutional_pooling: :param convolutional_upsampling: # self.conv_blocks_context[-1] is bottleneck, so start with -2 # the first conv reduces the number of features to match those of skip # the following convs work on that number of features # if not convolutional upsampling then the final conv reduces the num of features again # register all modules properly # lambda x:x is not a Module so we need to distinguish here #input_var = x # set through arguments from init # set in self.initialize() # loaded automatically from plans_file Resizes softmax output. Resizes each channel in c separately and fuses results back together :param softmax_output: c x x x y x z :param new_shape: x x y x z :param order: :return: segmentation must have the same spacing as the original nifti (for now). segmentation may have been cropped out of the original image :param segmentation: :param dct: :param out_fname: :return: brain mask must be a numpy array that has the same shape as itk_image's pixel array. This function is not ideal but gets the job done :param itk_image: :param is_seg: :param spacing_target: :param brain_mask: :return: #subtract mean, divide by std. use heuristic masking data must be (b, c, x, y, z), brain mask is hole filled binary mask where all sequences are 0 (this is a heuristic to recover a brain mask form brain extracted mri sequences, not an actual brain ectraction) :param data: :return: # resize images # t1 # t1km # t2 # flair Segments the passed files | 2.095942 | 2 |
plot/finderror.py | architsakhadeo/Offline-Hyperparameter-Tuning-for-RL | 0 | 8157 | <reponame>architsakhadeo/Offline-Hyperparameter-Tuning-for-RL
import os
basepath = '/home/archit/scratch/cartpoles/data/hyperparam/cartpole/offline_learning/esarsa-adam/'
dirs = os.listdir(basepath)
string = ''
for dir in dirs:
print(dir)
subbasepath = basepath + dir + '/'
subdirs = os.listdir(subbasepath)
for subdir in subdirs:
print(subdir)
subsubbasepath = subbasepath + subdir + '/'
subsubdirs = os.listdir(subsubbasepath)
string += subsubbasepath + '\n'
content = []
for i in range(0,len(subsubdirs)-1):
for j in range(i+1, len(subsubdirs)):
a = os.system('diff ' + subsubbasepath + subsubdirs[i] + '/log_json.txt ' + subsubbasepath + subsubdirs[j] + '/log_json.txt')
content.append([a, subsubdirs[i], subsubdirs[j]])
filteredcontent = [i for i in content if i[0] == 0]
for i in range(len(filteredcontent)):
string += ' and '.join(filteredcontent[i][1:])
if i != len(filteredcontent) - 1:
string += ', '
string += '\n\n'
f = open('offlinelearningerrors.txt','w')
f.write(string)
f.close()
| import os
basepath = '/home/archit/scratch/cartpoles/data/hyperparam/cartpole/offline_learning/esarsa-adam/'
dirs = os.listdir(basepath)
string = ''''''
for dir in dirs:
print(dir)
subbasepath = basepath + dir + '/'
subdirs = os.listdir(subbasepath)
for subdir in subdirs:
print(subdir)
subsubbasepath = subbasepath + subdir + '/'
subsubdirs = os.listdir(subsubbasepath)
string += subsubbasepath + '\n'
content = []
for i in range(0,len(subsubdirs)-1):
for j in range(i+1, len(subsubdirs)):
a = os.system('diff ' + subsubbasepath + subsubdirs[i] + '/log_json.txt ' + subsubbasepath + subsubdirs[j] + '/log_json.txt')
content.append([a, subsubdirs[i], subsubdirs[j]])
filteredcontent = [i for i in content if i[0] == 0]
for i in range(len(filteredcontent)):
string += ' and '.join(filteredcontent[i][1:])
if i != len(filteredcontent) - 1:
string += ', '
string += '\n\n'
f = open('offlinelearningerrors.txt','w')
f.write(string)
f.close() | none | 1 | 2.284702 | 2 |
|
src/pybacked/zip_handler.py | bluePlatinum/pyback | 0 | 8158 | import os
import shutil
import tempfile
import zipfile
def archive_write(archivepath, data, filename, compression, compressionlevel):
"""
Create a file named filename in the archive and write data to it
:param archivepath: The path to the zip-archive
:type archivepath: str
:param data: The data to be written to the file
:type data: str
:param filename: The filename for the newly created file
:type filename: str
:param compression: The desired compression for the zip-archive
:type compression: int
:param compressionlevel: The desired compression level for the zip-archive
:type compressionlevel: int
:return: void
"""
archive = zipfile.ZipFile(archivepath, mode='a',
compression=compression,
compresslevel=compressionlevel)
archive.writestr(filename, data)
archive.close()
def create_archive(archivepath, filedict, compression, compressionlevel):
"""
    Write filedict to the zip-archive's data subdirectory. Will check whether the
    archive at archivepath exists before writing. If the file already exists a
    FileExistsError is raised.
:param archivepath: the path to the file
:param filedict: dictionary containing the filepath, filename key-value
pairs
:param compression: desired compression methods (see zipfile documentation)
:param compressionlevel: compression level (see zipfile documentation)
:return: void
"""
if os.path.isfile(archivepath):
raise FileExistsError("Specified file already exists")
else:
archive = zipfile.ZipFile(archivepath, mode='x',
compression=compression,
compresslevel=compressionlevel)
for filepath, filename in filedict.items():
archive.write(filepath, arcname="data/" + filename)
archive.close()
def extract_archdata(archivepath, filename, destination):
"""
    Extract a file from an archive and write it to the destination. If the
    destination path already exists, extract_archdata will not overwrite it but
    will raise a FileExistsError instead.
:param archivepath: The path to the archive containing the file
:type archivepath: str
:param filename: The archive name of the desired file.
:type filename: str
:param destination: The path at which the extracted file is to be placed.
:type destination: str
:return: void
:rtype: None
"""
# check if destination path already exists
if os.path.exists(destination):
raise FileExistsError("The specified destination is already in use")
archive = zipfile.ZipFile(archivepath, mode='r')
with tempfile.TemporaryDirectory() as tmpdir:
archive.extract(filename, path=tmpdir)
# create directories for the destination
os.makedirs(os.path.dirname(destination), exist_ok=True)
shutil.copy(os.path.abspath(tmpdir + "/" + filename), destination)
def read_bin(archivepath, filelist):
"""
Read a list of files from an archive and return the file data as a
dictionary of filename, data key-value pairs.
:param archivepath: the path to the archive
:param filelist: list of filenames to read
:return: dictionary with filename, data key-value pairs
:rtype: dict
"""
datadict = dict()
if os.path.isfile(archivepath):
archive = zipfile.ZipFile(archivepath, mode='r')
else:
raise FileNotFoundError("Specified file does not exist")
for filename in filelist:
try:
file = archive.open(filename)
datadict[filename] = file.read().decode()
file.close()
except KeyError:
datadict[filename] = None
archive.close()
return datadict
def read_diff_log(archivepath):
"""
Read the diff-log.csv from a given archive file.
:param archivepath: The path to the zip-archive
:type archivepath: str
:return: The diff-log.csv contents in ascii string form.
:rtype: str
"""
arch = zipfile.ZipFile(archivepath, mode='r')
diff_log_file = arch.open("diff-log.csv")
diff_log_bin = diff_log_file.read()
diff_log = diff_log_bin.decode()
diff_log_file.close()
arch.close()
return diff_log
def zip_extract(archivepath, filelist, extractpath):
"""
Extract a list of files to a specific location
:param archivepath: the path to the zip-archive
:param filelist: list of member filenames to extract
:param extractpath: path for the extracted files
:return: void
"""
if os.path.isfile(archivepath):
archive = zipfile.ZipFile(archivepath, mode='r')
else:
raise FileNotFoundError("Specified file does not exist")
archive.extractall(path=extractpath, members=filelist)
archive.close()
| import os
import shutil
import tempfile
import zipfile
def archive_write(archivepath, data, filename, compression, compressionlevel):
"""
Create a file named filename in the archive and write data to it
:param archivepath: The path to the zip-archive
:type archivepath: str
:param data: The data to be written to the file
:type data: str
:param filename: The filename for the newly created file
:type filename: str
:param compression: The desired compression for the zip-archive
:type compression: int
:param compressionlevel: The desired compression level for the zip-archive
:type compressionlevel: int
:return: void
"""
archive = zipfile.ZipFile(archivepath, mode='a',
compression=compression,
compresslevel=compressionlevel)
archive.writestr(filename, data)
archive.close()
def create_archive(archivepath, filedict, compression, compressionlevel):
"""
    Write filedict to the zip-archive's data subdirectory. Will check whether the
    archive at archivepath exists before writing. If the file already exists a
    FileExistsError is raised.
:param archivepath: the path to the file
:param filedict: dictionary containing the filepath, filename key-value
pairs
:param compression: desired compression methods (see zipfile documentation)
:param compressionlevel: compression level (see zipfile documentation)
:return: void
"""
if os.path.isfile(archivepath):
raise FileExistsError("Specified file already exists")
else:
archive = zipfile.ZipFile(archivepath, mode='x',
compression=compression,
compresslevel=compressionlevel)
for filepath, filename in filedict.items():
archive.write(filepath, arcname="data/" + filename)
archive.close()
def extract_archdata(archivepath, filename, destination):
"""
    Extract a file from an archive and write it to the destination. If the
    destination path already exists, extract_archdata will not overwrite it but
    will raise a FileExistsError instead.
:param archivepath: The path to the archive containing the file
:type archivepath: str
:param filename: The archive name of the desired file.
:type filename: str
:param destination: The path at which the extracted file is to be placed.
:type destination: str
:return: void
:rtype: None
"""
# check if destination path already exists
if os.path.exists(destination):
raise FileExistsError("The specified destination is already in use")
archive = zipfile.ZipFile(archivepath, mode='r')
with tempfile.TemporaryDirectory() as tmpdir:
archive.extract(filename, path=tmpdir)
# create directories for the destination
os.makedirs(os.path.dirname(destination), exist_ok=True)
shutil.copy(os.path.abspath(tmpdir + "/" + filename), destination)
def read_bin(archivepath, filelist):
"""
Read a list of files from an archive and return the file data as a
dictionary of filename, data key-value pairs.
:param archivepath: the path to the archive
:param filelist: list of filenames to read
:return: dictionary with filename, data key-value pairs
:rtype: dict
"""
datadict = dict()
if os.path.isfile(archivepath):
archive = zipfile.ZipFile(archivepath, mode='r')
else:
raise FileNotFoundError("Specified file does not exist")
for filename in filelist:
try:
file = archive.open(filename)
datadict[filename] = file.read().decode()
file.close()
except KeyError:
datadict[filename] = None
archive.close()
return datadict
def read_diff_log(archivepath):
"""
Read the diff-log.csv from a given archive file.
:param archivepath: The path to the zip-archive
:type archivepath: str
:return: The diff-log.csv contents in ascii string form.
:rtype: str
"""
arch = zipfile.ZipFile(archivepath, mode='r')
diff_log_file = arch.open("diff-log.csv")
diff_log_bin = diff_log_file.read()
diff_log = diff_log_bin.decode()
diff_log_file.close()
arch.close()
return diff_log
def zip_extract(archivepath, filelist, extractpath):
"""
Extract a list of files to a specific location
:param archivepath: the path to the zip-archive
:param filelist: list of member filenames to extract
:param extractpath: path for the extracted files
:return: void
"""
if os.path.isfile(archivepath):
archive = zipfile.ZipFile(archivepath, mode='r')
else:
raise FileNotFoundError("Specified file does not exist")
archive.extractall(path=extractpath, members=filelist)
archive.close()
| en | 0.736778 | Create a file named filename in the archive and write data to it :param archivepath: The path to the zip-archive :type archivepath: str :param data: The data to be written to the file :type data: str :param filename: The filename for the newly created file :type filename: str :param compression: The desired compression for the zip-archive :type compression: int :param compressionlevel: The desired compression level for the zip-archive :type compressionlevel: int :return: void Write filedict to zip-archive data subdirectory. Will check wether archive at archivepath exists before writing. If file exists will raise a FileExistsError. :param archivepath: the path to the file :param filedict: dictionary containing the filepath, filename key-value pairs :param compression: desired compression methods (see zipfile documentation) :param compressionlevel: compression level (see zipfile documentation) :return: void Extract a file from a archive and write it to the destination. If the destination path already exists extract_archdata will not overwrite but will throw a "FileExists" error. :param archivepath: The path to the archive containing the file :type archivepath: str :param filename: The archive name of the desired file. :type filename: str :param destination: The path at which the extracted file is to be placed. :type destination: str :return: void :rtype: None # check if destination path already exists # create directories for the destination Read a list of files from an archive and return the file data as a dictionary of filename, data key-value pairs. :param archivepath: the path to the archive :param filelist: list of filenames to read :return: dictionary with filename, data key-value pairs :rtype: dict Read the diff-log.csv from a given archive file. :param archivepath: The path to the zip-archive :type archivepath: str :return: The diff-log.csv contents in ascii string form. :rtype: str Extract a list of files to a specific location :param archivepath: the path to the zip-archive :param filelist: list of member filenames to extract :param extractpath: path for the extracted files :return: void | 4.062939 | 4 |
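A minimal usage sketch of the zip_handler API above, round-tripping one data file and a diff-log through an archive. It assumes the module is importable as `pybacked.zip_handler`; the file names and compression settings are arbitrary.

import os
import tempfile
import zipfile
from pybacked import zip_handler

with tempfile.TemporaryDirectory() as workdir:
    # a source file that will be stored under data/ inside the archive
    src = os.path.join(workdir, "notes.txt")
    with open(src, "w") as f:
        f.write("hello archive")

    archive = os.path.join(workdir, "backup.zip")
    zip_handler.create_archive(archive, {src: "notes.txt"},
                               zipfile.ZIP_DEFLATED, 9)
    # append a diff log, then read both entries back out
    zip_handler.archive_write(archive, "state,hash\n", "diff-log.csv",
                              zipfile.ZIP_DEFLATED, 9)
    print(zip_handler.read_diff_log(archive))
    print(zip_handler.read_bin(archive, ["data/notes.txt"]))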
src/query_planner/abstract_scan_plan.py | imvinod/Eva | 1 | 8159 | <filename>src/query_planner/abstract_scan_plan.py
"""Abstract class for all the scan planners
https://www.postgresql.org/docs/9.1/using-explain.html
https://www.postgresql.org/docs/9.5/runtime-config-query.html
"""
from src.query_planner.abstract_plan import AbstractPlan
from typing import List
class AbstractScan(AbstractPlan):
"""Abstract class for all the scan based planners
Arguments:
predicate : Expression
video : video on which the scan will be executed
        column_ids : ids of the columns the scan should project
"""
def __init__(self, predicate: Expression, video: Storage,
column_ids: List[int]):
super(AbstractScan, self).__init__()
self._predicate = predicate
self._column_ids = column_ids
self._video = video
@property
def video(self) -> Storage:
return self._video
@property
def predicate(self) -> Expression:
return self._predicate
@property
def column_ids(self) -> List:
return self._column_ids
| <filename>src/query_planner/abstract_scan_plan.py
"""Abstract class for all the scan planners
https://www.postgresql.org/docs/9.1/using-explain.html
https://www.postgresql.org/docs/9.5/runtime-config-query.html
"""
from src.query_planner.abstract_plan import AbstractPlan
from typing import List
class AbstractScan(AbstractPlan):
"""Abstract class for all the scan based planners
Arguments:
predicate : Expression
video : video on which the scan will be executed
        column_ids : ids of the columns the scan should project
"""
def __init__(self, predicate: Expression, video: Storage,
column_ids: List[int]):
super(AbstractScan, self).__init__()
self._predicate = predicate
self._column_ids = column_ids
self._video = video
@property
def video(self) -> Storage:
return self._video
@property
def predicate(self) -> Expression:
return self._predicate
@property
def column_ids(self) -> List:
return self._column_ids
| en | 0.635108 | Abstract class for all the scan planners https://www.postgresql.org/docs/9.1/using-explain.html https://www.postgresql.org/docs/9.5/runtime-config-query.html Abstract class for all the scan based planners Arguments: predicate : Expression video : video on which the scan will be executed columns_id : | 2.810113 | 3 |
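A hypothetical concrete planner built on the AbstractScan class above, to make the contract clearer. `Expression` and `Storage` appear only as annotations in the original file (their imports are not shown), so this sketch assumes the predicate exposes an `evaluate(frame)` method; none of these names are part of the real project API.

from typing import Iterator, List


class SequentialScanPlan(AbstractScan):
    """Hypothetical plan: scan every frame of the video and keep the ones
    matching the predicate, projecting only the requested columns."""

    def columns_to_fetch(self) -> List[int]:
        # the executor would use these ids to project frame attributes
        return self.column_ids

    def matching_frames(self, frames: Iterator) -> Iterator:
        for frame in frames:
            # predicate is an Expression; assume it evaluates to a boolean
            if self.predicate is None or self.predicate.evaluate(frame):
                yield frame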
tests/tools/test-tcp4-client.py | jimmy-huang/zephyr.js | 0 | 8160 | #!/usr/bin/python
# coding:utf-8
import time
import socket
def main():
print "Socket client creat successful"
host = "192.0.2.1"
port = 9876
bufSize = 1024
addr = (host, port)
Timeout = 300
mySocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mySocket.settimeout(Timeout)
mySocket.connect(addr)
while 1:
try :
Data = mySocket.recv(bufSize)
Data = Data.strip()
print "Got data: ", Data
time.sleep(2)
if Data == "close":
mySocket.close()
print "close socket"
break
else:
mySocket.sendall(Data)
print "Send data: ", Data
except KeyboardInterrupt :
print "exit client"
break
except :
print "time out"
continue
if __name__ == "__main__" :
main()
| #!/usr/bin/python
# coding:utf-8
import time
import socket
def main():
print "Socket client creat successful"
host = "192.0.2.1"
port = 9876
bufSize = 1024
addr = (host, port)
Timeout = 300
mySocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mySocket.settimeout(Timeout)
mySocket.connect(addr)
while 1:
try :
Data = mySocket.recv(bufSize)
Data = Data.strip()
print "Got data: ", Data
time.sleep(2)
if Data == "close":
mySocket.close()
print "close socket"
break
else:
mySocket.sendall(Data)
print "Send data: ", Data
except KeyboardInterrupt :
print "exit client"
break
except :
print "time out"
continue
if __name__ == "__main__" :
main()
| en | 0.714691 | # !usr/bin/python # coding:utf-8 | 3.154387 | 3 |
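For reference, a minimal Python 3 peer for the echo client above: it sends a line, waits for the echo, and finally sends "close". The 192.0.2.1:9876 endpoint and the "close" convention are taken from the test script; in the real Zephyr test setup the device side plays this role, and you would bind to 127.0.0.1 to try it on a development machine.

import socket

HOST, PORT = "192.0.2.1", 9876   # same endpoint as the client; use 127.0.0.1 locally

def serve_once():
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srv.bind((HOST, PORT))
        srv.listen(1)
        conn, _addr = srv.accept()
        with conn:
            for message in (b"hello", b"zephyr", b"close"):
                conn.sendall(message)
                if message == b"close":
                    break                 # the client closes after receiving "close"
                echoed = conn.recv(1024)  # the client echoes everything else back
                print("echoed:", echoed)

if __name__ == "__main__":
    serve_once()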
kinto/__main__.py | s-utsch/kinto | 0 | 8161 | import argparse
import sys
from cliquet.scripts import cliquet
from pyramid.scripts import pserve
from pyramid.paster import bootstrap
def main(args=None):
"""The main routine."""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description="Kinto commands")
subparsers = parser.add_subparsers(title='subcommands',
description='valid subcommands',
help='init/start/migrate')
parser_init = subparsers.add_parser('init')
parser_init.set_defaults(which='init')
parser_init.add_argument('--config_file', required=False,
help='Config file may be passed as argument')
parser_migrate = subparsers.add_parser('migrate')
parser_migrate.set_defaults(which='migrate')
parser_start = subparsers.add_parser('start')
parser_start.set_defaults(which='start')
args = vars(parser.parse_args())
if args['which'] == 'init':
if(args['config_file'] is None):
env = bootstrap('config/kinto.ini')
else:
config_file = format(args['config_file'])
env = bootstrap(config_file)
elif args['which'] == 'migrate':
env = bootstrap('config/kinto.ini')
cliquet.init_schema(env)
elif args['which'] == 'start':
pserve_argv = ['pserve', 'config/kinto.ini', '--reload']
pserve.main(pserve_argv)
if __name__ == "__main__":
main()
| import argparse
import sys
from cliquet.scripts import cliquet
from pyramid.scripts import pserve
from pyramid.paster import bootstrap
def main(args=None):
"""The main routine."""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description="Kinto commands")
subparsers = parser.add_subparsers(title='subcommands',
description='valid subcommands',
help='init/start/migrate')
parser_init = subparsers.add_parser('init')
parser_init.set_defaults(which='init')
parser_init.add_argument('--config_file', required=False,
help='Config file may be passed as argument')
parser_migrate = subparsers.add_parser('migrate')
parser_migrate.set_defaults(which='migrate')
parser_start = subparsers.add_parser('start')
parser_start.set_defaults(which='start')
args = vars(parser.parse_args())
if args['which'] == 'init':
if(args['config_file'] is None):
env = bootstrap('config/kinto.ini')
else:
config_file = format(args['config_file'])
env = bootstrap(config_file)
elif args['which'] == 'migrate':
env = bootstrap('config/kinto.ini')
cliquet.init_schema(env)
elif args['which'] == 'start':
pserve_argv = ['pserve', 'config/kinto.ini', '--reload']
pserve.main(pserve_argv)
if __name__ == "__main__":
main()
| en | 0.838268 | The main routine. | 2.596503 | 3 |
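The dispatch above hinges on `set_defaults(which=...)`: each subparser stamps its own name into the parsed namespace, and `vars(parser.parse_args())['which']` selects the branch. A stripped-down, standalone illustration of the same pattern (subcommand names mirror the Kinto script; everything else is illustrative):

import argparse

def build_parser():
    parser = argparse.ArgumentParser(description="demo commands")
    subparsers = parser.add_subparsers(title="subcommands")
    for name in ("init", "migrate", "start"):
        sub = subparsers.add_parser(name)
        sub.set_defaults(which=name)     # records which subcommand was chosen
    return parser

if __name__ == "__main__":
    args = vars(build_parser().parse_args(["migrate"]))
    print(args["which"])                 # -> "migrate"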
apis/admin.py | JumboCode/GroundWorkSomerville | 0 | 8162 | from django.contrib import admin
from django.contrib.auth.models import User
from .models import Vegetable, Harvest, Transaction, Merchandise, MerchandisePrice
from .models import PurchasedItem, UserProfile, VegetablePrice, StockedVegetable
from .models import MerchandisePhotos
admin.site.register(Vegetable)
admin.site.register(StockedVegetable)
admin.site.register(Harvest)
admin.site.register(VegetablePrice)
admin.site.register(PurchasedItem)
admin.site.register(Transaction)
admin.site.register(UserProfile)
admin.site.register(Merchandise)
admin.site.register(MerchandisePrice)
admin.site.register(MerchandisePhotos)
| from django.contrib import admin
from django.contrib.auth.models import User
from .models import Vegetable, Harvest, Transaction, Merchandise, MerchandisePrice
from .models import PurchasedItem, UserProfile, VegetablePrice, StockedVegetable
from .models import MerchandisePhotos
admin.site.register(Vegetable)
admin.site.register(StockedVegetable)
admin.site.register(Harvest)
admin.site.register(VegetablePrice)
admin.site.register(PurchasedItem)
admin.site.register(Transaction)
admin.site.register(UserProfile)
admin.site.register(Merchandise)
admin.site.register(MerchandisePrice)
admin.site.register(MerchandisePhotos)
| none | 1 | 1.427339 | 1 |
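The registrations above can also be written with Django's `admin.register` decorator, which keeps any future `ModelAdmin` options next to the model. A sketch for two of the models (the others follow the same pattern, and runs only inside the Django project the file belongs to); the `list_display` values are illustrative:

from django.contrib import admin
from .models import Vegetable, VegetablePrice


@admin.register(Vegetable)
class VegetableAdmin(admin.ModelAdmin):
    list_display = ("id",)          # extend with real model fields as needed


@admin.register(VegetablePrice)
class VegetablePriceAdmin(admin.ModelAdmin):
    list_display = ("id",)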
|
tests/unit/media/test_synthesis.py | AnantTiwari-Naman/pyglet | 0 | 8163 | <gh_stars>0
import os
from ctypes import sizeof
from io import BytesIO
import unittest
from pyglet.media.synthesis import *
local_dir = os.path.dirname(__file__)
test_data_path = os.path.abspath(os.path.join(local_dir, '..', '..', 'data'))
del local_dir
def get_test_data_file(*file_parts):
"""Get a file from the test data directory in an OS independent way.
Supply relative file name as you would in os.path.join().
"""
return os.path.join(test_data_path, *file_parts)
class SynthesisSourceTest:
"""Simple test to check if synthesized sources provide data."""
source_class = None
def test_default(self):
source = self.source_class(1.)
self._test_total_duration(source)
if self.source_class is not WhiteNoise:
self._test_generated_bytes(source)
def test_sample_rate_11025(self):
source = self.source_class(1., sample_rate=11025)
self._test_total_duration(source)
if self.source_class is not WhiteNoise:
self._test_generated_bytes(source, sample_rate=11025)
def _test_total_duration(self, source):
total_bytes = source.audio_format.bytes_per_second
self._check_audio_data(source, total_bytes, 1.)
def _check_audio_data(self, source, expected_bytes, expected_duration):
data = source.get_audio_data(expected_bytes + 100)
self.assertIsNotNone(data)
self.assertAlmostEqual(expected_bytes, data.length, delta=20)
self.assertAlmostEqual(expected_duration, data.duration)
self.assertIsNotNone(data.data)
self.assertAlmostEqual(expected_bytes, len(data.data), delta=20)
# Should now be out of data
last_data = source.get_audio_data(100)
self.assertIsNone(last_data)
def test_seek_default(self):
source = self.source_class(1.)
self._test_seek(source)
def _test_seek(self, source):
seek_time = .5
bytes_left = source.audio_format.bytes_per_second * .5
source.seek(seek_time)
self._check_audio_data(source, bytes_left, .5)
def _test_generated_bytes(self, source, sample_rate=44800, sample_size=16):
source_name = self.source_class.__name__.lower()
filename = "synthesis_{0}_{1}_{2}_1ch.wav".format(source_name, sample_size, sample_rate)
with open(get_test_data_file('media', filename), 'rb') as f:
# discard the wave header:
loaded_bytes = f.read()[44:]
source.seek(0)
generated_data = source.get_audio_data(source._max_offset)
bytes_buffer = BytesIO(generated_data.data).getvalue()
# Compare a small chunk, to avoid hanging on mismatch:
assert bytes_buffer[:1000] == loaded_bytes[:1000],\
"Generated bytes do not match sample wave file."
class SilenceTest(SynthesisSourceTest, unittest.TestCase):
source_class = Silence
class WhiteNoiseTest(SynthesisSourceTest, unittest.TestCase):
source_class = WhiteNoise
class SineTest(SynthesisSourceTest, unittest.TestCase):
source_class = Sine
class TriangleTest(SynthesisSourceTest, unittest.TestCase):
source_class = Triangle
class SawtoothTest(SynthesisSourceTest, unittest.TestCase):
source_class = Sawtooth
class SquareTest(SynthesisSourceTest, unittest.TestCase):
source_class = Square
class FMTest(SynthesisSourceTest, unittest.TestCase):
source_class = SimpleFM
| import os
from ctypes import sizeof
from io import BytesIO
import unittest
from pyglet.media.synthesis import *
local_dir = os.path.dirname(__file__)
test_data_path = os.path.abspath(os.path.join(local_dir, '..', '..', 'data'))
del local_dir
def get_test_data_file(*file_parts):
"""Get a file from the test data directory in an OS independent way.
Supply relative file name as you would in os.path.join().
"""
return os.path.join(test_data_path, *file_parts)
class SynthesisSourceTest:
"""Simple test to check if synthesized sources provide data."""
source_class = None
def test_default(self):
source = self.source_class(1.)
self._test_total_duration(source)
if self.source_class is not WhiteNoise:
self._test_generated_bytes(source)
def test_sample_rate_11025(self):
source = self.source_class(1., sample_rate=11025)
self._test_total_duration(source)
if self.source_class is not WhiteNoise:
self._test_generated_bytes(source, sample_rate=11025)
def _test_total_duration(self, source):
total_bytes = source.audio_format.bytes_per_second
self._check_audio_data(source, total_bytes, 1.)
def _check_audio_data(self, source, expected_bytes, expected_duration):
data = source.get_audio_data(expected_bytes + 100)
self.assertIsNotNone(data)
self.assertAlmostEqual(expected_bytes, data.length, delta=20)
self.assertAlmostEqual(expected_duration, data.duration)
self.assertIsNotNone(data.data)
self.assertAlmostEqual(expected_bytes, len(data.data), delta=20)
# Should now be out of data
last_data = source.get_audio_data(100)
self.assertIsNone(last_data)
def test_seek_default(self):
source = self.source_class(1.)
self._test_seek(source)
def _test_seek(self, source):
seek_time = .5
bytes_left = source.audio_format.bytes_per_second * .5
source.seek(seek_time)
self._check_audio_data(source, bytes_left, .5)
def _test_generated_bytes(self, source, sample_rate=44800, sample_size=16):
source_name = self.source_class.__name__.lower()
filename = "synthesis_{0}_{1}_{2}_1ch.wav".format(source_name, sample_size, sample_rate)
with open(get_test_data_file('media', filename), 'rb') as f:
# discard the wave header:
loaded_bytes = f.read()[44:]
source.seek(0)
generated_data = source.get_audio_data(source._max_offset)
bytes_buffer = BytesIO(generated_data.data).getvalue()
# Compare a small chunk, to avoid hanging on mismatch:
assert bytes_buffer[:1000] == loaded_bytes[:1000],\
"Generated bytes do not match sample wave file."
class SilenceTest(SynthesisSourceTest, unittest.TestCase):
source_class = Silence
class WhiteNoiseTest(SynthesisSourceTest, unittest.TestCase):
source_class = WhiteNoise
class SineTest(SynthesisSourceTest, unittest.TestCase):
source_class = Sine
class TriangleTest(SynthesisSourceTest, unittest.TestCase):
source_class = Triangle
class SawtoothTest(SynthesisSourceTest, unittest.TestCase):
source_class = Sawtooth
class SquareTest(SynthesisSourceTest, unittest.TestCase):
source_class = Square
class FMTest(SynthesisSourceTest, unittest.TestCase):
source_class = SimpleFM | en | 0.845843 | Get a file from the test data directory in an OS independent way. Supply relative file name as you would in os.path.join(). Simple test to check if synthesized sources provide data. # Should now be out of data # discard the wave header: # Compare a small chunk, to avoid hanging on mismatch: | 2.607367 | 3 |
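The duration checks in the tests above lean on one piece of arithmetic: for 16-bit mono PCM (the format the sample files' names advertise), `bytes_per_second = sample_rate * 2`, so a 1-second source should yield that many bytes and `delta=20` only absorbs frame alignment. A tiny standalone check of that bookkeeping, independent of pyglet:

def expected_bytes(duration, sample_rate, sample_size_bits=16, channels=1):
    """Byte count a PCM source of the given format should produce."""
    bytes_per_second = sample_rate * (sample_size_bits // 8) * channels
    return int(duration * bytes_per_second)

assert expected_bytes(1.0, 44800) == 89600      # default rate used in the tests
assert expected_bytes(1.0, 11025) == 22050      # the 11025 Hz variant
assert expected_bytes(0.5, 44800) == 44800      # what remains after seek(0.5)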
Ejercicio/Ejercicio7.py | tavo1599/F.P2021 | 1 | 8164 | <filename>Ejercicio/Ejercicio7.py<gh_stars>1-10
# Input data
num=int(input("Ingrese un numero: "))
# Process
if num==10:
    print("Calificacion: A")
elif num==9:
    print("Calificacion: B")
elif num==8:
    print("Calificacion: C")
elif num==7 or num==6:
    print("Calificacion: D")
elif num<=5 and num>=0:
    print("Calificacion: F")
| <filename>Ejercicio/Ejercicio7.py<gh_stars>1-10
# Input data
num=int(input("Ingrese un numero: "))
# Process
if num==10:
    print("Calificacion: A")
elif num==9:
    print("Calificacion: B")
elif num==8:
    print("Calificacion: C")
elif num==7 or num==6:
    print("Calificacion: D")
elif num<=5 and num>=0:
    print("Calificacion: F")
| es | 0.936896 | #Datos de entrada # Proceso | 3.555574 | 4 |
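The grading rule above maps 10→A, 9→B, 8→C, 7 or 6→D, and 0-5→F (note the 7-or-6 case needs `or`, not `and`). The same logic as a reusable function with a quick self-check; the function name is arbitrary:

def calificacion(num):
    if num == 10:
        return "A"
    elif num == 9:
        return "B"
    elif num == 8:
        return "C"
    elif num == 7 or num == 6:
        return "D"
    elif 0 <= num <= 5:
        return "F"
    return None  # outside the 0-10 range

assert [calificacion(n) for n in (10, 9, 8, 7, 6, 5, 0)] == ["A", "B", "C", "D", "D", "F", "F"]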
2015/day-2/part2.py | nairraghav/advent-of-code-2019 | 0 | 8165 | <gh_stars>0
ribbon_needed = 0
with open("input.txt", "r") as puzzle_input:
for line in puzzle_input:
length, width, height = [int(item) for item in line.split("x")]
dimensions = [length, width, height]
smallest_side = min(dimensions)
dimensions.remove(smallest_side)
second_smallest_side = min(dimensions)
ribbon_needed += 2*smallest_side + 2*second_smallest_side + length*width*height
print(ribbon_needed)
| ribbon_needed = 0
with open("input.txt", "r") as puzzle_input:
for line in puzzle_input:
length, width, height = [int(item) for item in line.split("x")]
dimensions = [length, width, height]
smallest_side = min(dimensions)
dimensions.remove(smallest_side)
second_smallest_side = min(dimensions)
ribbon_needed += 2*smallest_side + 2*second_smallest_side + length*width*height
print(ribbon_needed) | none | 1 | 3.689836 | 4 |
|
Algorithm.Python/Alphas/GreenblattMagicFormulaAlgorithm.py | aaronwJordan/Lean | 0 | 8166 | <filename>Algorithm.Python/Alphas/GreenblattMagicFormulaAlgorithm.py<gh_stars>0
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Indicators")
AddReference("QuantConnect.Algorithm.Framework")
from System import *
from QuantConnect import *
from QuantConnect.Orders.Fees import ConstantFeeModel
from QuantConnect.Data.UniverseSelection import *
from QuantConnect.Indicators import *
from Selection.FundamentalUniverseSelectionModel import FundamentalUniverseSelectionModel
from datetime import timedelta, datetime
from math import ceil
from itertools import chain
#
# This alpha picks stocks according to <NAME>'s Magic Formula.
# First, each stock is ranked depending on the relative value of the ratio EV/EBITDA. For example, a stock
# that has the lowest EV/EBITDA ratio in the security universe receives a score of one while a stock that has
# the tenth lowest EV/EBITDA score would be assigned 10 points.
#
# Then, each stock is ranked and given a score for the second valuation ratio, Return on Capital (ROC).
# Similarly, a stock that has the highest ROC value in the universe gets one score point.
# The stocks that receive the lowest combined score are chosen for insights.
#
# Source: <NAME>. (2010) The Little Book That Beats the Market
#
# This alpha is part of the Benchmark Alpha Series created by QuantConnect which are open
# sourced so the community and client funds can see an example of an alpha.
#
class GreenblattMagicFormulaAlgorithm(QCAlgorithmFramework):
''' Alpha Streams: Benchmark Alpha: Pick stocks according to <NAME>'s Magic Formula'''
def Initialize(self):
self.SetStartDate(2018, 1, 1)
self.SetCash(100000)
#Set zero transaction fees
self.SetSecurityInitializer(lambda security: security.SetFeeModel(ConstantFeeModel(0)))
# select stocks using MagicFormulaUniverseSelectionModel
self.SetUniverseSelection(GreenBlattMagicFormulaUniverseSelectionModel())
# Use MagicFormulaAlphaModel to establish insights
self.SetAlpha(RateOfChangeAlphaModel())
# Equally weigh securities in portfolio, based on insights
self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())
## Set Immediate Execution Model
self.SetExecution(ImmediateExecutionModel())
## Set Null Risk Management Model
self.SetRiskManagement(NullRiskManagementModel())
class RateOfChangeAlphaModel(AlphaModel):
'''Uses Rate of Change (ROC) to create magnitude prediction for insights.'''
def __init__(self, *args, **kwargs):
self.lookback = kwargs['lookback'] if 'lookback' in kwargs else 1
self.resolution = kwargs['resolution'] if 'resolution' in kwargs else Resolution.Daily
self.predictionInterval = Time.Multiply(Extensions.ToTimeSpan(self.resolution), self.lookback)
self.symbolDataBySymbol = {}
def Update(self, algorithm, data):
insights = []
for symbol, symbolData in self.symbolDataBySymbol.items():
if symbolData.CanEmit:
insights.append(Insight.Price(symbol, self.predictionInterval, InsightDirection.Up, symbolData.Return, None))
return insights
def OnSecuritiesChanged(self, algorithm, changes):
# clean up data for removed securities
for removed in changes.RemovedSecurities:
symbolData = self.symbolDataBySymbol.pop(removed.Symbol, None)
if symbolData is not None:
symbolData.RemoveConsolidators(algorithm)
# initialize data for added securities
symbols = [ x.Symbol for x in changes.AddedSecurities ]
history = algorithm.History(symbols, self.lookback, self.resolution)
if history.empty: return
tickers = history.index.levels[0]
for ticker in tickers:
symbol = SymbolCache.GetSymbol(ticker)
if symbol not in self.symbolDataBySymbol:
symbolData = SymbolData(symbol, self.lookback)
self.symbolDataBySymbol[symbol] = symbolData
symbolData.RegisterIndicators(algorithm, self.resolution)
symbolData.WarmUpIndicators(history.loc[ticker])
class SymbolData:
'''Contains data specific to a symbol required by this model'''
def __init__(self, symbol, lookback):
self.Symbol = symbol
self.ROC = RateOfChange('{}.ROC({})'.format(symbol, lookback), lookback)
self.Consolidator = None
self.previous = 0
def RegisterIndicators(self, algorithm, resolution):
self.Consolidator = algorithm.ResolveConsolidator(self.Symbol, resolution)
algorithm.RegisterIndicator(self.Symbol, self.ROC, self.Consolidator)
def RemoveConsolidators(self, algorithm):
if self.Consolidator is not None:
algorithm.SubscriptionManager.RemoveConsolidator(self.Symbol, self.Consolidator)
def WarmUpIndicators(self, history):
for tuple in history.itertuples():
self.ROC.Update(tuple.Index, tuple.close)
@property
def Return(self):
return float(self.ROC.Current.Value)
@property
def CanEmit(self):
if self.previous == self.ROC.Samples:
return False
self.previous = self.ROC.Samples
return self.ROC.IsReady
def __str__(self, **kwargs):
return '{}: {:.2%}'.format(self.ROC.Name, (1 + self.Return)**252 - 1)
class GreenBlattMagicFormulaUniverseSelectionModel(FundamentalUniverseSelectionModel):
'''Defines a universe according to <NAME>'s Magic Formula, as a universe selection model for the framework algorithm.
From the universe QC500, stocks are ranked using the valuation ratios, Enterprise Value to EBITDA (EV/EBITDA) and Return on Assets (ROA).
'''
def __init__(self,
filterFineData = True,
universeSettings = None,
securityInitializer = None):
'''Initializes a new default instance of the MagicFormulaUniverseSelectionModel'''
super().__init__(filterFineData, universeSettings, securityInitializer)
# Number of stocks in Coarse Universe
self.NumberOfSymbolsCoarse = 500
# Number of sorted stocks in the fine selection subset using the valuation ratio, EV to EBITDA (EV/EBITDA)
self.NumberOfSymbolsFine = 20
# Final number of stocks in security list, after sorted by the valuation ratio, Return on Assets (ROA)
self.NumberOfSymbolsInPortfolio = 10
self.lastMonth = -1
self.dollarVolumeBySymbol = {}
self.symbols = []
def SelectCoarse(self, algorithm, coarse):
'''Performs coarse selection for constituents.
The stocks must have fundamental data
The stock must have positive previous-day close price
The stock must have positive volume on the previous trading day'''
month = algorithm.Time.month
if month == self.lastMonth:
return self.symbols
self.lastMonth = month
# The stocks must have fundamental data
# The stock must have positive previous-day close price
# The stock must have positive volume on the previous trading day
filtered = [x for x in coarse if x.HasFundamentalData
and x.Volume > 0
and x.Price > 0]
# sort the stocks by dollar volume and take the top 1000
top = sorted(filtered, key=lambda x: x.DollarVolume, reverse=True)[:self.NumberOfSymbolsCoarse]
self.dollarVolumeBySymbol = { i.Symbol: i.DollarVolume for i in top }
self.symbols = list(self.dollarVolumeBySymbol.keys())
return self.symbols
def SelectFine(self, algorithm, fine):
'''QC500: Performs fine selection for the coarse selection constituents
        The company's headquarters must be in the U.S.
The stock must be traded on either the NYSE or NASDAQ
At least half a year since its initial public offering
The stock's market cap must be greater than 500 million
Magic Formula: Rank stocks by Enterprise Value to EBITDA (EV/EBITDA)
Rank subset of previously ranked stocks (EV/EBITDA), using the valuation ratio Return on Assets (ROA)'''
# QC500:
        ## The company's headquarters must be in the U.S.
## The stock must be traded on either the NYSE or NASDAQ
## At least half a year since its initial public offering
## The stock's market cap must be greater than 500 million
filteredFine = [x for x in fine if x.CompanyReference.CountryId == "USA"
and (x.CompanyReference.PrimaryExchangeID == "NYS" or x.CompanyReference.PrimaryExchangeID == "NAS")
and (algorithm.Time - x.SecurityReference.IPODate).days > 180
and x.EarningReports.BasicAverageShares.ThreeMonths * x.EarningReports.BasicEPS.TwelveMonths * x.ValuationRatios.PERatio > 5e8]
count = len(filteredFine)
if count == 0: return []
myDict = dict()
percent = float(self.NumberOfSymbolsFine / count)
# select stocks with top dollar volume in every single sector
for key in ["N", "M", "U", "T", "B", "I"]:
value = [x for x in filteredFine if x.CompanyReference.IndustryTemplateCode == key]
value = sorted(value, key=lambda x: self.dollarVolumeBySymbol[x.Symbol], reverse = True)
myDict[key] = value[:ceil(len(value) * percent)]
# stocks in QC500 universe
topFine = list(chain.from_iterable(myDict.values()))[:self.NumberOfSymbolsCoarse]
# Magic Formula:
## Rank stocks by Enterprise Value to EBITDA (EV/EBITDA)
## Rank subset of previously ranked stocks (EV/EBITDA), using the valuation ratio Return on Assets (ROA)
# sort stocks in the security universe of QC500 based on Enterprise Value to EBITDA valuation ratio
sortedByEVToEBITDA = sorted(topFine, key=lambda x: x.ValuationRatios.EVToEBITDA , reverse=True)
# sort subset of stocks that have been sorted by Enterprise Value to EBITDA, based on the valuation ratio Return on Assets (ROA)
sortedByROA = sorted(sortedByEVToEBITDA[:self.NumberOfSymbolsFine], key=lambda x: x.ValuationRatios.ForwardROA, reverse=False)
        # retrieve the list of securities in the portfolio
top = sortedByROA[:self.NumberOfSymbolsInPortfolio]
self.symbols = [f.Symbol for f in top]
return self.symbols
| <filename>Algorithm.Python/Alphas/GreenblattMagicFormulaAlgorithm.py<gh_stars>0
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Indicators")
AddReference("QuantConnect.Algorithm.Framework")
from System import *
from QuantConnect import *
from QuantConnect.Orders.Fees import ConstantFeeModel
from QuantConnect.Data.UniverseSelection import *
from QuantConnect.Indicators import *
from Selection.FundamentalUniverseSelectionModel import FundamentalUniverseSelectionModel
from datetime import timedelta, datetime
from math import ceil
from itertools import chain
#
# This alpha picks stocks according to <NAME>'s Magic Formula.
# First, each stock is ranked depending on the relative value of the ratio EV/EBITDA. For example, a stock
# that has the lowest EV/EBITDA ratio in the security universe receives a score of one while a stock that has
# the tenth lowest EV/EBITDA score would be assigned 10 points.
#
# Then, each stock is ranked and given a score for the second valuation ratio, Return on Capital (ROC).
# Similarly, a stock that has the highest ROC value in the universe gets one score point.
# The stocks that receive the lowest combined score are chosen for insights.
#
# Source: <NAME>. (2010) The Little Book That Beats the Market
#
# This alpha is part of the Benchmark Alpha Series created by QuantConnect which are open
# sourced so the community and client funds can see an example of an alpha.
#
class GreenblattMagicFormulaAlgorithm(QCAlgorithmFramework):
''' Alpha Streams: Benchmark Alpha: Pick stocks according to <NAME>'s Magic Formula'''
def Initialize(self):
self.SetStartDate(2018, 1, 1)
self.SetCash(100000)
#Set zero transaction fees
self.SetSecurityInitializer(lambda security: security.SetFeeModel(ConstantFeeModel(0)))
# select stocks using MagicFormulaUniverseSelectionModel
self.SetUniverseSelection(GreenBlattMagicFormulaUniverseSelectionModel())
# Use MagicFormulaAlphaModel to establish insights
self.SetAlpha(RateOfChangeAlphaModel())
# Equally weigh securities in portfolio, based on insights
self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())
## Set Immediate Execution Model
self.SetExecution(ImmediateExecutionModel())
## Set Null Risk Management Model
self.SetRiskManagement(NullRiskManagementModel())
class RateOfChangeAlphaModel(AlphaModel):
'''Uses Rate of Change (ROC) to create magnitude prediction for insights.'''
def __init__(self, *args, **kwargs):
self.lookback = kwargs['lookback'] if 'lookback' in kwargs else 1
self.resolution = kwargs['resolution'] if 'resolution' in kwargs else Resolution.Daily
self.predictionInterval = Time.Multiply(Extensions.ToTimeSpan(self.resolution), self.lookback)
self.symbolDataBySymbol = {}
def Update(self, algorithm, data):
insights = []
for symbol, symbolData in self.symbolDataBySymbol.items():
if symbolData.CanEmit:
insights.append(Insight.Price(symbol, self.predictionInterval, InsightDirection.Up, symbolData.Return, None))
return insights
def OnSecuritiesChanged(self, algorithm, changes):
# clean up data for removed securities
for removed in changes.RemovedSecurities:
symbolData = self.symbolDataBySymbol.pop(removed.Symbol, None)
if symbolData is not None:
symbolData.RemoveConsolidators(algorithm)
# initialize data for added securities
symbols = [ x.Symbol for x in changes.AddedSecurities ]
history = algorithm.History(symbols, self.lookback, self.resolution)
if history.empty: return
tickers = history.index.levels[0]
for ticker in tickers:
symbol = SymbolCache.GetSymbol(ticker)
if symbol not in self.symbolDataBySymbol:
symbolData = SymbolData(symbol, self.lookback)
self.symbolDataBySymbol[symbol] = symbolData
symbolData.RegisterIndicators(algorithm, self.resolution)
symbolData.WarmUpIndicators(history.loc[ticker])
class SymbolData:
'''Contains data specific to a symbol required by this model'''
def __init__(self, symbol, lookback):
self.Symbol = symbol
self.ROC = RateOfChange('{}.ROC({})'.format(symbol, lookback), lookback)
self.Consolidator = None
self.previous = 0
def RegisterIndicators(self, algorithm, resolution):
self.Consolidator = algorithm.ResolveConsolidator(self.Symbol, resolution)
algorithm.RegisterIndicator(self.Symbol, self.ROC, self.Consolidator)
def RemoveConsolidators(self, algorithm):
if self.Consolidator is not None:
algorithm.SubscriptionManager.RemoveConsolidator(self.Symbol, self.Consolidator)
def WarmUpIndicators(self, history):
for tuple in history.itertuples():
self.ROC.Update(tuple.Index, tuple.close)
@property
def Return(self):
return float(self.ROC.Current.Value)
@property
def CanEmit(self):
if self.previous == self.ROC.Samples:
return False
self.previous = self.ROC.Samples
return self.ROC.IsReady
def __str__(self, **kwargs):
return '{}: {:.2%}'.format(self.ROC.Name, (1 + self.Return)**252 - 1)
class GreenBlattMagicFormulaUniverseSelectionModel(FundamentalUniverseSelectionModel):
'''Defines a universe according to <NAME>'s Magic Formula, as a universe selection model for the framework algorithm.
From the universe QC500, stocks are ranked using the valuation ratios, Enterprise Value to EBITDA (EV/EBITDA) and Return on Assets (ROA).
'''
def __init__(self,
filterFineData = True,
universeSettings = None,
securityInitializer = None):
'''Initializes a new default instance of the MagicFormulaUniverseSelectionModel'''
super().__init__(filterFineData, universeSettings, securityInitializer)
# Number of stocks in Coarse Universe
self.NumberOfSymbolsCoarse = 500
# Number of sorted stocks in the fine selection subset using the valuation ratio, EV to EBITDA (EV/EBITDA)
self.NumberOfSymbolsFine = 20
# Final number of stocks in security list, after sorted by the valuation ratio, Return on Assets (ROA)
self.NumberOfSymbolsInPortfolio = 10
self.lastMonth = -1
self.dollarVolumeBySymbol = {}
self.symbols = []
def SelectCoarse(self, algorithm, coarse):
'''Performs coarse selection for constituents.
The stocks must have fundamental data
The stock must have positive previous-day close price
The stock must have positive volume on the previous trading day'''
month = algorithm.Time.month
if month == self.lastMonth:
return self.symbols
self.lastMonth = month
# The stocks must have fundamental data
# The stock must have positive previous-day close price
# The stock must have positive volume on the previous trading day
filtered = [x for x in coarse if x.HasFundamentalData
and x.Volume > 0
and x.Price > 0]
# sort the stocks by dollar volume and take the top 1000
top = sorted(filtered, key=lambda x: x.DollarVolume, reverse=True)[:self.NumberOfSymbolsCoarse]
self.dollarVolumeBySymbol = { i.Symbol: i.DollarVolume for i in top }
self.symbols = list(self.dollarVolumeBySymbol.keys())
return self.symbols
def SelectFine(self, algorithm, fine):
'''QC500: Performs fine selection for the coarse selection constituents
        The company's headquarters must be in the U.S.
The stock must be traded on either the NYSE or NASDAQ
At least half a year since its initial public offering
The stock's market cap must be greater than 500 million
Magic Formula: Rank stocks by Enterprise Value to EBITDA (EV/EBITDA)
Rank subset of previously ranked stocks (EV/EBITDA), using the valuation ratio Return on Assets (ROA)'''
# QC500:
        ## The company's headquarters must be in the U.S.
## The stock must be traded on either the NYSE or NASDAQ
## At least half a year since its initial public offering
## The stock's market cap must be greater than 500 million
filteredFine = [x for x in fine if x.CompanyReference.CountryId == "USA"
and (x.CompanyReference.PrimaryExchangeID == "NYS" or x.CompanyReference.PrimaryExchangeID == "NAS")
and (algorithm.Time - x.SecurityReference.IPODate).days > 180
and x.EarningReports.BasicAverageShares.ThreeMonths * x.EarningReports.BasicEPS.TwelveMonths * x.ValuationRatios.PERatio > 5e8]
count = len(filteredFine)
if count == 0: return []
myDict = dict()
percent = float(self.NumberOfSymbolsFine / count)
# select stocks with top dollar volume in every single sector
for key in ["N", "M", "U", "T", "B", "I"]:
value = [x for x in filteredFine if x.CompanyReference.IndustryTemplateCode == key]
value = sorted(value, key=lambda x: self.dollarVolumeBySymbol[x.Symbol], reverse = True)
myDict[key] = value[:ceil(len(value) * percent)]
# stocks in QC500 universe
topFine = list(chain.from_iterable(myDict.values()))[:self.NumberOfSymbolsCoarse]
# Magic Formula:
## Rank stocks by Enterprise Value to EBITDA (EV/EBITDA)
## Rank subset of previously ranked stocks (EV/EBITDA), using the valuation ratio Return on Assets (ROA)
# sort stocks in the security universe of QC500 based on Enterprise Value to EBITDA valuation ratio
sortedByEVToEBITDA = sorted(topFine, key=lambda x: x.ValuationRatios.EVToEBITDA , reverse=True)
# sort subset of stocks that have been sorted by Enterprise Value to EBITDA, based on the valuation ratio Return on Assets (ROA)
sortedByROA = sorted(sortedByEVToEBITDA[:self.NumberOfSymbolsFine], key=lambda x: x.ValuationRatios.ForwardROA, reverse=False)
        # retrieve the list of securities in the portfolio
top = sortedByROA[:self.NumberOfSymbolsInPortfolio]
self.symbols = [f.Symbol for f in top]
return self.symbols
| en | 0.875752 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals. # Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This alpha picks stocks according to <NAME>'s Magic Formula. # First, each stock is ranked depending on the relative value of the ratio EV/EBITDA. For example, a stock # that has the lowest EV/EBITDA ratio in the security universe receives a score of one while a stock that has # the tenth lowest EV/EBITDA score would be assigned 10 points. # # Then, each stock is ranked and given a score for the second valuation ratio, Return on Capital (ROC). # Similarly, a stock that has the highest ROC value in the universe gets one score point. # The stocks that receive the lowest combined score are chosen for insights. # # Source: <NAME>. (2010) The Little Book That Beats the Market # # This alpha is part of the Benchmark Alpha Series created by QuantConnect which are open # sourced so the community and client funds can see an example of an alpha. # Alpha Streams: Benchmark Alpha: Pick stocks according to <NAME>'s Magic Formula #Set zero transaction fees # select stocks using MagicFormulaUniverseSelectionModel # Use MagicFormulaAlphaModel to establish insights # Equally weigh securities in portfolio, based on insights ## Set Immediate Execution Model ## Set Null Risk Management Model Uses Rate of Change (ROC) to create magnitude prediction for insights. # clean up data for removed securities # initialize data for added securities Contains data specific to a symbol required by this model Defines a universe according to <NAME>'s Magic Formula, as a universe selection model for the framework algorithm. From the universe QC500, stocks are ranked using the valuation ratios, Enterprise Value to EBITDA (EV/EBITDA) and Return on Assets (ROA). Initializes a new default instance of the MagicFormulaUniverseSelectionModel # Number of stocks in Coarse Universe # Number of sorted stocks in the fine selection subset using the valuation ratio, EV to EBITDA (EV/EBITDA) # Final number of stocks in security list, after sorted by the valuation ratio, Return on Assets (ROA) Performs coarse selection for constituents. The stocks must have fundamental data The stock must have positive previous-day close price The stock must have positive volume on the previous trading day # The stocks must have fundamental data # The stock must have positive previous-day close price # The stock must have positive volume on the previous trading day # sort the stocks by dollar volume and take the top 1000 QC500: Performs fine selection for the coarse selection constituents The company's headquarter must in the U.S. 
The stock must be traded on either the NYSE or NASDAQ At least half a year since its initial public offering The stock's market cap must be greater than 500 million Magic Formula: Rank stocks by Enterprise Value to EBITDA (EV/EBITDA) Rank subset of previously ranked stocks (EV/EBITDA), using the valuation ratio Return on Assets (ROA) # QC500: ## The company's headquarter must in the U.S. ## The stock must be traded on either the NYSE or NASDAQ ## At least half a year since its initial public offering ## The stock's market cap must be greater than 500 million # select stocks with top dollar volume in every single sector # stocks in QC500 universe # Magic Formula: ## Rank stocks by Enterprise Value to EBITDA (EV/EBITDA) ## Rank subset of previously ranked stocks (EV/EBITDA), using the valuation ratio Return on Assets (ROA) # sort stocks in the security universe of QC500 based on Enterprise Value to EBITDA valuation ratio # sort subset of stocks that have been sorted by Enterprise Value to EBITDA, based on the valuation ratio Return on Assets (ROA) # retrieve list of securites in portfolio | 2.172408 | 2 |
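The scoring described in the header comment of the algorithm above (rank once by EV/EBITDA, once by ROA, keep the lowest combined score) can be illustrated with plain Python. The tickers and ratio values below are purely hypothetical, and this sketch follows the comment's description rather than every detail of the class implementation:

def magic_formula_rank(metrics):
    """metrics: dict of symbol -> (ev_to_ebitda, roa); lower combined rank wins."""
    by_ev = sorted(metrics, key=lambda s: metrics[s][0])                  # cheapest first
    by_roa = sorted(metrics, key=lambda s: metrics[s][1], reverse=True)   # most productive first
    score = {s: by_ev.index(s) + by_roa.index(s) for s in metrics}
    return sorted(metrics, key=lambda s: score[s])

example = {"AAA": (6.0, 0.22), "BBB": (9.5, 0.30), "CCC": (14.0, 0.08)}
print(magic_formula_rank(example))   # -> ['AAA', 'BBB', 'CCC']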
hw2/deeplearning/style_transfer.py | axelbr/berkeley-cs182-deep-neural-networks | 0 | 8167 | <gh_stars>0
import numpy as np
import torch
import torch.nn.functional as F
def content_loss(content_weight, content_current, content_target):
"""
Compute the content loss for style transfer.
Inputs:
- content_weight: Scalar giving the weighting for the content loss.
- content_current: features of the current image; this is a PyTorch Tensor of shape
(1, C_l, H_l, W_l).
- content_target: features of the content image, Tensor with shape (1, C_l, H_l, W_l).
Returns:
- scalar content loss
"""
##############################################################################
# YOUR CODE HERE #
##############################################################################
_, C, H, W = content_current.shape
current_features = content_current.view(C, H*W)
target_features = content_target.view(C, H*W)
loss = content_weight * torch.sum(torch.square(current_features - target_features))
return loss
##############################################################################
# END OF YOUR CODE #
##############################################################################
def gram_matrix(features, normalize=True):
"""
Compute the Gram matrix from features.
Inputs:
- features: PyTorch Variable of shape (N, C, H, W) giving features for
a batch of N images.
- normalize: optional, whether to normalize the Gram matrix
If True, divide the Gram matrix by the number of neurons (H * W * C)
Returns:
- gram: PyTorch Variable of shape (N, C, C) giving the
(optionally normalized) Gram matrices for the N input images.
"""
##############################################################################
# YOUR CODE HERE #
##############################################################################
C, H, W = features.shape[-3], features.shape[-2], features.shape[-1]
reshaped = features.view(-1, C, H*W)
G = reshaped @ reshaped.transpose(dim0=1, dim1=2)
if normalize:
G = G / (H*W*C)
return G
##############################################################################
# END OF YOUR CODE #
##############################################################################
def style_loss(feats, style_layers, style_targets, style_weights):
"""
Computes the style loss at a set of layers.
Inputs:
- feats: list of the features at every layer of the current image, as produced by
the extract_features function.
- style_layers: List of layer indices into feats giving the layers to include in the
style loss.
- style_targets: List of the same length as style_layers, where style_targets[i] is
a PyTorch Variable giving the Gram matrix the source style image computed at
layer style_layers[i].
- style_weights: List of the same length as style_layers, where style_weights[i]
is a scalar giving the weight for the style loss at layer style_layers[i].
Returns:
- style_loss: A PyTorch Variable holding a scalar giving the style loss.
"""
# Hint: you can do this with one for loop over the style layers, and should
# not be very much code (~5 lines). You will need to use your gram_matrix function.
##############################################################################
# YOUR CODE HERE #
##############################################################################
loss = 0
for i, l in enumerate(style_layers):
A, G = style_targets[i], gram_matrix(feats[l])
loss += style_weights[i] * torch.sum(torch.square(G - A))
return loss
##############################################################################
# END OF YOUR CODE #
##############################################################################
def tv_loss(img, tv_weight):
"""
Compute total variation loss.
Inputs:
- img: PyTorch Variable of shape (1, 3, H, W) holding an input image.
- tv_weight: Scalar giving the weight w_t to use for the TV loss.
Returns:
- loss: PyTorch Variable holding a scalar giving the total variation loss
for img weighted by tv_weight.
"""
# Your implementation should be vectorized and not require any loops!
##############################################################################
# YOUR CODE HERE #
##############################################################################
tv = torch.square(img[..., 1:, :-1] - img[..., :-1, :-1]) + torch.square(img[..., :-1, 1:] - img[..., :-1, :-1])
return tv_weight * torch.sum(tv)
##############################################################################
# END OF YOUR CODE #
    ##############################################################################
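# A minimal usage sketch, assuming the imports at the top of this file; every
# tensor below is a random stand-in for what extract_features() would normally
# return, and the loss weights are arbitrary.
if __name__ == "__main__":
    img = torch.rand(1, 3, 64, 64)
    feats = [torch.rand(1, 16, 32, 32) for _ in range(4)]
    style_layers = [1, 3]
    style_targets = [gram_matrix(torch.rand(1, 16, 32, 32)) for _ in style_layers]
    total = (content_loss(1e-3, feats[2], torch.rand(1, 16, 32, 32))
             + style_loss(feats, style_layers, style_targets, [1.0, 1.0])
             + tv_loss(img, 1e-2))
    print(float(total))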
submissions/available/Johnson-CausalTesting/Holmes/fuzzers/Peach/Transformers/Encode/HTMLDecode.py | brittjay0104/rose6icse | 0 | 8168 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import xml.sax.saxutils
from Peach.transformer import Transformer
class HtmlDecode(Transformer):
"""Decode HTML encoded string."""
def realEncode(self, data):
return xml.sax.saxutils.unescape(data)
    # Presumably the inverse direction; a second method named realEncode would
    # have shadowed the first definition above.
    def realDecode(self, data):
        return xml.sax.saxutils.escape(data)
src/archive/greatcircle.py | AuraUAS/aura-core | 8 | 8169 | <gh_stars>1-10
# From: http://williams.best.vwh.net/avform.htm#GCF
import math
EPS = 0.0001
d2r = math.pi / 180.0
r2d = 180.0 / math.pi
rad2nm = (180.0 * 60.0) / math.pi
nm2rad = 1.0 / rad2nm
nm2meter = 1852
meter2nm = 1.0 / nm2meter
# p1 = (lat1(deg), lon1(deg))
# p2 = (lat2(deg), lon2(deg))
def course_and_dist(p1, p2):
    # this formulation uses positive lon = W (opposite of the usual convention),
    # so we invert the longitude
lat1 = p1[0] * d2r
lon1 = -p1[1] * d2r
lat2 = p2[0] * d2r
lon2 = -p2[1] * d2r
dist_rad = 2.0 * math.asin(math.sqrt((math.sin((lat1-lat2)/2.0))**2.0 + math.cos(lat1)*math.cos(lat2)*(math.sin((lon1-lon2)/2.0))**2))
# if starting point is on a pole
if math.cos(lat1) < EPS:
# EPS a small number ~ machine precision
if (lat1 > 0.0):
# starting from N pole
tc1_rad = math.pi
else:
# starting from S pole
tc1_rad = 2.0 * math.pi
# For starting points other than the poles:
if dist_rad < 0.000000001:
# about a cm
tc1_rad = 0.0
else:
num1 = math.sin(lat2) - math.sin(lat1)*math.cos(dist_rad)
den1 = math.sin(dist_rad) * math.cos(lat1)
tmp1 = num1 / den1
if tmp1 < -1.0:
#print "CLIPPING TMP1 to -1.0!"
tmp1 = -1.0
if tmp1 > 1.0:
#print "CLIPPING TMP1 to 1.0!"
tmp1 = 1.0
if math.sin(lon2-lon1) < 0.0:
tc1_rad = math.acos(tmp1)
else:
tc1_rad = 2.0 * math.pi - math.acos(tmp1)
dist_nm = dist_rad * rad2nm
dist_m = dist_nm * nm2meter
tc1_deg = tc1_rad * r2d
return (tc1_deg, dist_m)
def project_course_distance(p1, course_deg, dist_m):
lat1 = p1[0] * d2r
lon1 = -p1[1] * d2r
tc = course_deg * d2r
d = dist_m * meter2nm * nm2rad
lat = math.asin(math.sin(lat1)*math.cos(d)+math.cos(lat1)*math.sin(d)*math.cos(tc))
if math.cos(lat) < EPS:
lon = lon1 # endpoint a pole
else:
lon = math.fmod(lon1-math.asin(math.sin(tc)*math.sin(d)/math.cos(lat))+math.pi, 2*math.pi) - math.pi
    return (lat*r2d, -lon*r2d)
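# A small sanity check, assuming the module is run directly; the two points
# below are arbitrary example coordinates, not data from this project.
if __name__ == "__main__":
    p1 = (44.8848, -93.2223)
    p2 = (41.9742, -87.9073)
    crs_deg, dist_m = course_and_dist(p1, p2)
    print("course %.1f deg, distance %.1f nm" % (crs_deg, dist_m * meter2nm))
    # projecting back out along that course should land close to p2
    print(project_course_distance(p1, crs_deg, dist_m))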
app/__init__.py | JoeCare/flask_geolocation_api | 0 | 8170 | <filename>app/__init__.py
import connexion, os
from connexion.resolver import RestyResolver
from flask import json
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
# Globally accessible libraries
db = SQLAlchemy()
mm = Marshmallow()
def init_app():
"""Initialize the Connexion application."""
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
openapi_path = os.path.join(BASE_DIR, "../")
conn_app = connexion.FlaskApp(
__name__, specification_dir=openapi_path, options={
"swagger_ui": True,
"serve_spec": True
}
)
conn_app.add_api("openapi.yaml", resolver=RestyResolver('run'),
strict_validation=True)
    # Underlying Flask app object provided by the Connexion wrapper
app = conn_app.app
# Load application config
app.config.from_object('config.ProdConfig')
app.json_encoder = json.JSONEncoder
# Initialize Plugins
db.init_app(app)
mm.init_app(app)
with app.app_context():
# Include our Routes/views
import run
# Register Blueprints
# app.register_blueprint(auth.auth_bp)
# app.register_blueprint(admin.admin_bp)
        return app
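# One possible way to serve this factory (a sketch only; the module name
# "wsgi.py" and the port are assumptions, not taken from this repository):
#
#     # wsgi.py
#     from app import init_app
#     application = init_app()
#
#     if __name__ == "__main__":
#         application.run(port=5000)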
RIPv2-Simulation/Router.py | vkmanojk/Networks-VirtualLAN | 0 | 8171 | '''
    Summary: Program that implements a routing daemon based on the
RIP version 2 protocol from RFC2453.
Usage: python3 Router.py <router_config_file>
Configuration File:
The user supplies a router configuration file of the format:
[Settings]
router-id = <router_number>
input-ports = <input> [, <input>, ...]
outputs = <output>-<metric>-<destination_router>
[, <output>-<metric>-<destination_router>, ...]
where,
router_number: ID of router between 1 - 64000.
input: port number between 1024 - 64000.
        output: port number between 1024 - 64000,
not equal to any inputs.
metric: metric of output between 1 - 16.
destination_router: ID of destination router.
Description:
This program implements a basic RIPv2 routing protocol from RFC2453
for routing computations in computer networks. It takes a configuration
file as shown above and sets up a router with a new socket for each
input-port.
The RIPv2 protocol uses a routing table to keep track of all reachable
routers on the network along with their metric/cost and the direct
next hop router ID along the route to that destination router. However,
it can only send messages to the direct neighbours specified in outputs.
The protocol uses the Bellman-Ford distance vector algorithm to compute
the lowest cost route to each router in the network. If the metric is
16 or greater, the router is considered unreachable.
The routing table initially starts with a single route entry (RTE) for
itself with a metric of zero. The routing table is periodically
    transmitted to each of its direct output ports via an unsolicited
response message as defined in RFC2453 section 3.9.2 and 4. This is
performed on a separate thread so it does not interfere with other
    operations.
    The router receives messages from other routers by using the python select()
function which blocks until a message is ready to be read. Once a
message is received the header and contents are validated.
If the message is valid each RTE is processed according to RFC2453
section 3.9.2.
If a new router is found the RTE is added
to the routing table, adding the cost to the metric for the output
the message was received on.
If the RTE already exists, but the metric is smaller, the metric
is updated to the lower metric.
If the lower metric is from a different next hop router, change the
next hop.
If nothing has changed, restart the timeout timer.
If RTE metric >= max metric of 16, mark the entry for
garbage collection and update the metric in the table.
If any change has occurred in the routing table as a result of a
received message, a triggered update (RFC2453 section 3.10.1) is sent
to all outputs with the updated entries. Triggered updates are sent with
a random delay between 1 - 5 seconds to prevent synchronized updates.
Request messages are not implemented in this program.
Timers (all timers are on separate threads) (RFC2453 section 3.8):
Update timer - Periodic unsolicited response message sent to all
outputs. The period is adjusted each time to a random value
between 0.8 * BASE_TIMER and 1.2 * BASE_TIMER to prevent
synchronized updates.
Timeout - used to check the routing table for RTEs which have
have not been updated within the ROUTE_TIMEOUT interval. If
a router has not been heard from within this time, then set the
metric to the max metric of 16 and start the garbage collection
timer.
Garbage timer - used to check the routing table for RTEs set
for garbage collection. If the timeout >= DELETE_TIMEOUT,
mark the RTE for deletion.
Garbage Collection - used to check the routing table for RTEs
marked for deletion, and removes those entries from the table.
'''
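# Example configuration file matching the format described above (the IDs,
# ports and metrics are illustrative values only):
#
#     [Settings]
#     router-id = 1
#     input-ports = 6110, 6201, 7345
#     outputs = 5000-1-2, 5002-5-3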
import configparser
import select
import socket
import sys
import time
import threading
import struct
import datetime
from random import randint, randrange
DEBUG = False
HOST = '127.0.0.1' # localhost
BASE_TIMER = 5
MAX_METRIC = 16
ROUTE_TIMEOUT = BASE_TIMER * 6
DELETE_TIMEOUT = BASE_TIMER * 4
AF_INET = 2
# ===========================================================================
# TRANSITIONS
class Transistion():
'''Class Representing a transition between states.'''
def __init__(self, to_state):
self.to_state = to_state
def execute(self):
'''Run the transition functions'''
pass
# ===========================================================================
# STATES
class State():
'''Class Representing a generic state'''
def __init__(self, fsm):
self.fsm = fsm
def enter(self):
'''Execute functions for entering a state'''
pass
def execute(self):
'''Execute functions while in state'''
pass
def exit(self):
'''Execute functions for leaving a state'''
pass
class StartUp(State):
'''Class Representing the Start up state which reads the configuration file
'''
def __init__(self, fsm):
super(StartUp, self).__init__(fsm)
def execute(self):
'''Execute the configuration functions'''
print_message("Loading Configuration File: '"
+ self.fsm.router.config_file + "'")
config = configparser.ConfigParser()
config.read(self.fsm.router.config_file)
self.get_router_id(config)
self.setup_inputs(config)
self.get_outputs(config)
self.setup_routing_table()
self.fsm.router.print_routing_table()
self.fsm.to_transition("toWaiting")
def exit(self):
'''Print complete message'''
print_message("Router Setup Complete.")
def get_router_id(self, config):
'''Read the router id number from the configuration file'''
if 1 <= int(config['Settings']['router-id']) <= 64000:
self.fsm.router.router_settings['id'] = \
int(config['Settings']['router-id'])
else:
raise Exception('Invalid Router ID Number')
def get_outputs(self, config):
'''Return a dictionary of outputs containing port, cost and destination
router id from the Configuration file'''
outputs = config['Settings']['outputs'].split(', ')
outputs = [i.split('-') for i in outputs]
self.fsm.router.router_settings['outputs'] = {}
existing_ports = []
for output in outputs:
is_valid_port = 1024 <= int(output[0]) <= 64000 and not \
int(output[0]) in existing_ports
is_valid_cost = 1 <= int(output[1]) < 16
is_valid_id = 1 <= int(output[2]) <= 64000
if is_valid_port and is_valid_cost and is_valid_id:
existing_ports.append(int(output[0]))
self.fsm.router.router_settings['outputs'][int(output[2])] = \
{'metric': int(output[1]),
'port': int(output[0])}
else:
raise Exception('Invalid Outputs')
def setup_inputs(self, config):
'''Create input sockets from the inputs specified in the config file'''
# get inputs from configuration file
ports = config['Settings']['input-ports'].split(', ')
inputs = []
for port in ports:
if 1024 <= int(port) <= 64000 and not int(port) in inputs:
inputs.append(int(port))
else:
raise Exception('Invalid Port Number')
self.fsm.router.router_settings['inputs'] = {}
# create socket for each input port
for port in inputs:
try:
self.fsm.router.router_settings['inputs'][port] = \
socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print_message('Socket ' + str(port) + ' Created.')
except socket.error as msg:
print('Failed to create socket. Message: ' + str(msg))
sys.exit()
# bind port to socket
try:
self.fsm.router.router_settings['inputs'][port].bind(
(HOST, port))
print_message('Socket ' + str(port) + ' Bind Complete.')
except socket.error as msg:
print('Failed to create socket. Message ' + str(msg))
sys.exit()
def setup_routing_table(self):
'''Setup routing table with the outputs specified in the config file'''
self.fsm.router.routing_table[self.fsm.router.router_settings['id']] = \
RIPRouteEntry(address=self.fsm.router.router_settings['id'],
nexthop=0,
metric=0,
imported=True)
class Waiting(State):
'''
Class representing the waiting state of the FSM where the router waits
for messages to be received on its input sockets. When a message is
    received, the state changes to the ReadMessage state.
'''
def __init__(self, fsm):
super(Waiting, self).__init__(fsm)
def enter(self):
'''Display State entry message'''
print_message("Entering idle state...")
def execute(self):
'''Waits for input sockets to be readable and then changes the state
to process the received message.'''
readable = select.select(
self.fsm.router.router_settings['inputs'].values(), [], [])
if readable[0]:
self.fsm.router.readable_ports = readable[0]
self.fsm.to_transition("toReadMessage")
def exit(self):
'''Display State exit message'''
print_message("Message Received")
class ReadMessage(State):
'''Class representing the state for reading messages received on the input
sockets'''
def __init__(self, fsm):
super(ReadMessage, self).__init__(fsm)
def enter(self):
print_message("Reading Messages...")
def execute(self):
for port in self.fsm.router.readable_ports:
packet = RIPPacket(port.recvfrom(1024)[0])
self.fsm.router.update_routing_table(packet)
if self.fsm.router.route_change:
self.fsm.router.trigger_update()
self.fsm.router.print_routing_table()
self.fsm.to_transition("toWaiting")
def exit(self):
print_message("Messages Read.")
# ===========================================================================
# FINITE STATE MACHINE
class RouterFSM():
'''Class representing the Router finite state machine'''
def __init__(self, rip_router):
self.router = rip_router
self.states = {}
self.transitions = {}
self.cur_state = None
self.trans = None
def add_transistion(self, trans_name, transition):
'''Add a new transition to the FSM'''
self.transitions[trans_name] = transition
def add_state(self, state_name, state):
'''Add a new state to the FSM'''
self.states[state_name] = state
def set_state(self, state_name):
'''Set the current state of the FSM'''
self.cur_state = self.states[state_name]
def to_transition(self, to_trans):
'''Set the current transition of the FSM'''
self.trans = self.transitions[to_trans]
def execute(self):
'''Run the FSM'''
if self.trans:
self.cur_state.exit()
self.trans.execute()
self.set_state(self.trans.to_state)
self.cur_state.enter()
self.trans = None
self.cur_state.execute()
# ===========================================================================
# IMPLEMENTATION
class RIPPacket:
'''Class representing a RIP packet containing a header and body as defined
in RFC2453 RIPv2 section 4.'''
def __init__(self, data=None, header=None, rtes=None):
if data:
self._init_from_network(data)
elif header and rtes:
self._init_from_host(header, rtes)
else:
raise ValueError
def __repr__(self):
return "RIPPacket: Command {}, Ver. {}, number of RTEs {}.". \
format(self.header.cmd, self.header.ver, len(self.rtes))
def _init_from_network(self, data):
'''Init for RIPPacket if data is from the network'''
# Packet Validation
datalen = len(data)
if datalen < RIPHeader.SIZE:
raise FormatException
malformed_rtes = (datalen - RIPHeader.SIZE) % RIPRouteEntry.SIZE
if malformed_rtes:
raise FormatException
# Convert bytes in packet to header and RTE data
num_rtes = int((datalen - RIPHeader.SIZE) / RIPRouteEntry.SIZE)
self.header = RIPHeader(data[0:RIPHeader.SIZE])
self.rtes = []
rte_start = RIPHeader.SIZE
rte_end = RIPHeader.SIZE + RIPRouteEntry.SIZE
# Loop over data packet to obtain each RTE
for i in range(num_rtes):
self.rtes.append(RIPRouteEntry(rawdata=data[rte_start:rte_end],
src_id=self.header.src))
rte_start += RIPRouteEntry.SIZE
rte_end += RIPRouteEntry.SIZE
def _init_from_host(self, header, rtes):
'''Init for imported data'''
if header.ver != 2:
raise ValueError("Only Version 2 is supported.")
self.header = header
self.rtes = rtes
def serialize(self):
        '''Return the byte string representing this packet for network
transmission'''
packed = self.header.serialize()
for rte in self.rtes:
packed += rte.serialize()
return packed
class RIPHeader:
'''Class representing the header of a RIP packet'''
FORMAT = "!BBH"
SIZE = struct.calcsize(FORMAT)
TYPE_RESPONSE = 2
VERSION = 2
def __init__(self, rawdata=None, router_id=None):
self.packed = None
if rawdata:
self._init_from_network(rawdata)
elif router_id:
self._init_from_host(router_id)
else:
raise ValueError
def __repr__(self):
return "RIP Header (cmd = {}, ver = {}, src = {})".format(self.cmd,
self.ver,
self.src)
def _init_from_network(self, rawdata):
'''init for data from network'''
header = struct.unpack(self.FORMAT, rawdata)
self.cmd = header[0]
self.ver = header[1]
self.src = header[2]
def _init_from_host(self, router_id):
'''Init for data from host'''
self.cmd = self.TYPE_RESPONSE
self.ver = self.VERSION
self.src = router_id
def serialize(self):
        '''Return the byte string representing this header for network
transmission'''
return struct.pack(self.FORMAT, self.cmd, self.ver, self.src)
class RIPRouteEntry:
'''Class representing a single RIP route entry (RTE)'''
FORMAT = "!HHIII"
SIZE = struct.calcsize(FORMAT)
MIN_METRIC = 0
MAX_METRIC = 16
def __init__(self, rawdata=None, src_id=None, address=None,
nexthop=None, metric=None, imported=False):
self.changed = False
self.imported = imported
self.init_timeout()
if rawdata and src_id != None:
self._init_from_network(rawdata, src_id)
elif address and nexthop != None and metric != None:
self._init_from_host(address, nexthop, metric)
else:
raise ValueError
def __repr__(self):
template = "|{:^11}|{:^10}|{:^11}|{:^15}|{:^10}|{:^13}|"
# Check that timeout is set
if self.timeout == None:
return template.format(self.addr, self.metric, self.nexthop,
self.changed, self.garbage,
str(self.timeout))
else:
timeout = (datetime.datetime.now() - self.timeout).total_seconds()
return template.format(self.addr, self.metric, self.nexthop,
self.changed, self.garbage,
round(timeout, 1))
def _init_from_host(self, address, nexthop, metric):
'''Init for data from host'''
self.afi = AF_INET
self.tag = 0 # not used
self.addr = address
self.nexthop = nexthop
self.metric = metric
def _init_from_network(self, rawdata, src_id):
'''Init for data received from network'''
rte = struct.unpack(self.FORMAT, rawdata)
self.afi = rte[0]
self.tag = rte[1]
self.addr = rte[2]
self.set_nexthop(rte[3])
self.metric = rte[4]
if self.nexthop == 0:
self.nexthop = src_id
# Validation
if not self.MIN_METRIC <= self.metric <= self.MAX_METRIC:
raise FormatException
def init_timeout(self):
'''Initialize the timeout property'''
if self.imported:
self.timeout = None
else:
self.timeout = datetime.datetime.now()
self.garbage = False
self.marked_for_delection = False
def __eq__(self, other):
if self.afi == other.afi and \
self.addr == other.addr and \
self.tag == other.tag and \
self.nexthop == other.nexthop and \
self.metric == other.metric:
return True
else:
return False
def set_nexthop(self, nexthop):
'''Set the nexthop property'''
self.nexthop = nexthop
def serialize(self):
'''Pack entries into typical RIPv2 packet format for sending over the
network. '''
return struct.pack(self.FORMAT, self.afi, self.tag, self.addr,
self.nexthop, self.metric)
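# Sketch of how the classes above fit together on the wire (values are
# illustrative only):
#
#     header = RIPHeader(router_id=1)            # 4 bytes, struct format "!BBH"
#     rte = RIPRouteEntry(address=2, nexthop=0, metric=1, imported=True)
#     data = RIPPacket(header=header, rtes=[rte]).serialize()
#     assert len(data) == 4 + 16                 # each RTE packs to 16 bytes ("!HHIII")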
class FormatException(Exception):
'''Class representing the Format Exception'''
def __init__(self, message=""):
self.message = message
class Router:
'''Class representing a single router'''
def __init__(self, config_file):
self.fsm = RouterFSM(self)
self.config_file = config_file
# Dictionary of router settings, including router-id, inputs and
# outputs
self.router_settings = {}
self.readable_ports = []
# Dictionary of routing table
self.routing_table = {}
self.route_change = False
# STATES
self.fsm.add_state("StartUp", StartUp(self.fsm))
self.fsm.add_state("Waiting", Waiting(self.fsm))
self.fsm.add_state("ReadMessage", ReadMessage(self.fsm))
# TRANSITIONS
self.fsm.add_transistion("toWaiting", Transistion("Waiting"))
self.fsm.add_transistion("toReadMessage", Transistion("ReadMessage"))
self.fsm.set_state("StartUp")
def execute(self):
'''Run the router's finite state machine'''
self.fsm.execute()
def update_routing_table(self, packet):
'''Update Routing table if new route info exist'''
for rte in packet.rtes:
# ignore RTEs of self
if rte.addr != self.fsm.router.router_settings['id']:
bestroute = self.routing_table.get(rte.addr)
# set nexthop to source router and calculate metric
rte.set_nexthop(packet.header.src)
rte.metric = min(rte.metric +
self.router_settings['outputs'][
packet.header.src]['metric'],
RIPRouteEntry.MAX_METRIC)
                # Route doesn't yet exist
if not bestroute:
# ignore RTEs with a metric of MAX_METRIC
if rte.metric == RIPRouteEntry.MAX_METRIC:
return
# Add new RTE to routing table
rte.changed = True
self.route_change = True
self.routing_table[rte.addr] = rte
print_message("RTE added for Router: " + str(rte.addr))
return
else:
# Route already exists
if rte.nexthop == bestroute.nexthop:
if bestroute.metric != rte.metric:
if bestroute.metric != RIPRouteEntry.MAX_METRIC \
and rte.metric >= RIPRouteEntry.MAX_METRIC:
# mark for garbage collection
bestroute.metric = RIPRouteEntry.MAX_METRIC
bestroute.garbage = True
bestroute.changed = True
self.route_change = True
else:
self.update_route(bestroute, rte)
# Route still exists with same values
elif not bestroute.garbage:
bestroute.init_timeout()
# Lower metric on existing route
elif rte.metric < bestroute.metric:
self.update_route(bestroute, rte)
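    # Worked example of the relaxation above (numbers are illustrative): if
    # neighbour 2 is reachable over a link of metric 3 and advertises router 5
    # at metric 4, the candidate metric is min(4 + 3, 16) = 7; the route is
    # adopted only if router 5 is unknown or 7 beats the current best metric.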
def update_route(self, bestroute, rte):
'''Update an existing route entry with new route info'''
bestroute.init_timeout()
bestroute.garbage = False
bestroute.changed = True
bestroute.metric = rte.metric
bestroute.nexthop = rte.nexthop
self.route_change = True
print_message("RTE for Router: " + str(rte.addr) +
" updated with metric=" + str(rte.metric) +
", nexthop=" + str(rte.nexthop) + ".")
def print_routing_table(self):
'''Print the routing table to the terminal'''
line = "+-----------+----------+-----------+---------------+----------+-------------+"
print(line)
print(
"| Routing Table (Router "
+ str(self.router_settings['id']) + ") |")
print(line)
print(
"|Router ID | Metric | NextHop | ChangedFlag | Garbage | Timeout(s) |")
print(line)
print(self.routing_table[self.router_settings['id']])
print(
"+===========+==========+===========+===============+==========+=============+")
for entry in self.routing_table:
if entry != self.router_settings['id']:
print(self.routing_table[entry])
print(line)
print('\n')
def trigger_update(self):
'''Send Routing update for only the routes which have changed'''
changed_rtes = []
print_message("Sending Trigger update.")
for rte in self.routing_table.values():
if rte.changed:
changed_rtes.append(rte)
rte.changed = False
self.route_change = False
# send update with random delay between 1 and 5 seconds
delay = randint(1, 5)
        threading.Timer(delay, self.update, [changed_rtes]).start()  # the timer must be started or the update never fires
def update(self, entries):
'''Send a message to all output ports'''
if self.router_settings != {}:
sock = list(self.router_settings['inputs'].values())[1]
local_header = RIPHeader(router_id=self.router_settings['id'])
for output in self.router_settings['outputs']:
# Split horizon
# Remove RTES for which nexthop == output
split_horizon_entries = []
for entry in entries:
if entry.nexthop != output:
split_horizon_entries.append(entry)
else:
# Poison reverse
# Create new entry to get around some funky referencing
# When doing poisoned_entry = entry
poisoned_entry = RIPRouteEntry(rawdata=None,
src_id=None, address=entry.addr,
nexthop=entry.nexthop, metric= RIPRouteEntry.MAX_METRIC,
imported=entry.imported)
split_horizon_entries.append(poisoned_entry)
# comment out to disable split horizon
packet = RIPPacket(
header=local_header, rtes=split_horizon_entries)
# Uncomment to disable split horizon
# packet = RIPPacket(header=local_header, rtes=entries)
sock.sendto(packet.serialize(),
(HOST,
self.router_settings['outputs'][output]["port"]))
print_message("Message Sent To Router: " + str(output))
def check_timeout(self):
'''Check the current timeout value for each RTE in the routing table.
If the time difference with now is greater than ROUTE_TIMEOUT, then
set the metric to 16 and start the garbage collection timer.'''
print_message("Checking timeout...")
if self.routing_table != {}:
for rte in self.routing_table.values():
if rte.timeout != None and \
(datetime.datetime.now() - rte.timeout).total_seconds() \
>= ROUTE_TIMEOUT:
rte.garbage = True
rte.changed = True
self.route_change = True
rte.metric = RIPRouteEntry.MAX_METRIC
rte.timeout = datetime.datetime.now()
self.print_routing_table()
print_message("Router: " + str(rte.addr) + " timed out.")
def garbage_timer(self):
'''Check the status of the garbage property of each RTE. If true, and
the timeout value difference with now is greater than DELETE_TIMEOUT,
mark it for deletion'''
print_message("Checking garbage timeout...")
if self.routing_table != {}:
for rte in self.routing_table.values():
if rte.garbage:
if (datetime.datetime.now() - rte.timeout).total_seconds() \
>= DELETE_TIMEOUT:
rte.marked_for_delection = True
def garbage_collection(self):
'''Check the routing table for RTE's that are marked for deletion and
remove them.'''
print_message("Collecting Garbage...")
if self.routing_table != {}:
delete_routes = []
for rte in self.routing_table.values():
if rte.marked_for_delection:
delete_routes.append(rte.addr)
print_message("Router: " + str(rte.addr) + " has been " +
"removed from the routing table.")
for entry in delete_routes:
del self.routing_table[entry]
self.print_routing_table()
def timer(self, function, param=None):
'''Start a periodic timer which calls a specified function'''
if param != None:
function(list(param.values()))
period = BASE_TIMER * randrange(8, 12, 1) / 10
else:
period = BASE_TIMER
function()
threading.Timer(period, self.timer, [function, param]).start()
def start_timers(self):
'''Start the timers on separate threads'''
self.timer(self.update, param=self.routing_table)
self.timer(self.check_timeout)
self.timer(self.garbage_timer)
self.timer(self.garbage_collection)
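    # With the defaults above (BASE_TIMER = 5): unsolicited updates go out
    # roughly every 4-5.5 s, the other checks run every 5 s, a silent route
    # times out after ROUTE_TIMEOUT (30 s) and is purged about DELETE_TIMEOUT
    # (20 s) after that.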
def main_loop(self):
'''Start the main loop for the program.'''
while True:
self.execute()
# RUN THE PROGRAM
def print_message(message):
'''Print the given message with the current time before it'''
if DEBUG:
print("[" + time.strftime("%H:%M:%S") + "]: " + message)
def main():
'''Main function to run the program.'''
if __name__ == "__main__":
router = Router(str(sys.argv[-1]))
router.start_timers()
router.main_loop()
main()
| '''
Summary: Program that implements a routing deamon based on the
RIP version 2 protocol from RFC2453.
Usage: python3 Router.py <router_config_file>
Configuration File:
The user supplies a router configuration file of the format:
[Settings]
router-id = <router_number>
input-ports = <input> [, <input>, ...]
outputs = <output>-<metric>-<destination_router>
[, <output>-<metric>-<destination_router>, ...]
where,
router_number: ID of router between 1 - 64000.
input: port number between 1024 - 64000.
output: port number between 1024 - 6400,
not equal to any inputs.
metric: metric of output between 1 - 16.
destination_router: ID of destination router.
Description:
This program implements a basic RIPv2 routing protocol from RFC2453
for routing computations in computer networks. It takes a configuration
file as shown above and sets up a router with a new socket for each
input-port.
The RIPv2 protocol uses a routing table to keep track of all reachable
routers on the network along with their metric/cost and the direct
next hop router ID along the route to that destination router. However,
it can only send messages to the direct neighbours specified in outputs.
The protocol uses the Bellman-Ford distance vector algorithm to compute
the lowest cost route to each router in the network. If the metric is
16 or greater, the router is considered unreachable.
The routing table initially starts with a single route entry (RTE) for
itself with a metric of zero. The routing table is periodically
transmitted too each of its direct output ports via an unsolicited
response message as defined in RFC2453 section 3.9.2 and 4. This is
performed on a separate thread so it does not interfere with other
operations
The receives messages from other routers by using the python select()
function which blocks until a message is ready to be read. Once a
message is received the header and contents are validated.
If the message is valid each RTE is processed according to RFC2453
section 3.9.2.
If a new router is found the RTE is added
to the routing table, adding the cost to the metric for the output
the message was received on.
If the RTE already exists, but the metric is smaller, the metric
is updated to the lower metric.
If the lower metric is from a different next hop router, change the
next hop.
If nothing has changed, restart the timeout timer.
If RTE metric >= max metric of 16, mark the entry for
garbage collection and update the metric in the table.
If any change has occurred in the routing table as a result of a
received message, a triggered update (RFC2453 section 3.10.1) is sent
to all outputs with the updated entries. Triggered updates are sent with
a random delay between 1 - 5 seconds to prevent synchronized updates.
Request messages are not implemented in this program.
Timers (all timers are on separate threads) (RFC2453 section 3.8):
Update timer - Periodic unsolicited response message sent to all
outputs. The period is adjusted each time to a random value
between 0.8 * BASE_TIMER and 1.2 * BASE_TIMER to prevent
synchronized updates.
Timeout - used to check the routing table for RTEs which have
have not been updated within the ROUTE_TIMEOUT interval. If
a router has not been heard from within this time, then set the
metric to the max metric of 16 and start the garbage collection
timer.
Garbage timer - used to check the routing table for RTEs set
for garbage collection. If the timeout >= DELETE_TIMEOUT,
mark the RTE for deletion.
Garbage Collection - used to check the routing table for RTEs
marked for deletion, and removes those entries from the table.
'''
import configparser
import select
import socket
import sys
import time
import threading
import struct
import datetime
from random import randint, randrange
DEBUG = False
HOST = '127.0.0.1' # localhost
BASE_TIMER = 5
MAX_METRIC = 16
ROUTE_TIMEOUT = BASE_TIMER * 6
DELETE_TIMEOUT = BASE_TIMER * 4
AF_INET = 2
# ===========================================================================
# TRANSITIONS
class Transistion():
'''Class Representing a transition between states.'''
def __init__(self, to_state):
self.to_state = to_state
def execute(self):
'''Run the transition functions'''
pass
# ===========================================================================
# STATES
class State():
'''Class Representing a generic state'''
def __init__(self, fsm):
self.fsm = fsm
def enter(self):
'''Execute functions for entering a state'''
pass
def execute(self):
'''Execute functions while in state'''
pass
def exit(self):
'''Execute functions for leaving a state'''
pass
class StartUp(State):
'''Class Representing the Start up state which reads the configuration file
'''
def __init__(self, fsm):
super(StartUp, self).__init__(fsm)
def execute(self):
'''Execute the configuration functions'''
print_message("Loading Configuration File: '"
+ self.fsm.router.config_file + "'")
config = configparser.ConfigParser()
config.read(self.fsm.router.config_file)
self.get_router_id(config)
self.setup_inputs(config)
self.get_outputs(config)
self.setup_routing_table()
self.fsm.router.print_routing_table()
self.fsm.to_transition("toWaiting")
def exit(self):
'''Print complete message'''
print_message("Router Setup Complete.")
def get_router_id(self, config):
'''Read the router id number from the configuration file'''
if 1 <= int(config['Settings']['router-id']) <= 64000:
self.fsm.router.router_settings['id'] = \
int(config['Settings']['router-id'])
else:
raise Exception('Invalid Router ID Number')
def get_outputs(self, config):
'''Return a dictionary of outputs containing port, cost and destination
router id from the Configuration file'''
outputs = config['Settings']['outputs'].split(', ')
outputs = [i.split('-') for i in outputs]
self.fsm.router.router_settings['outputs'] = {}
existing_ports = []
for output in outputs:
is_valid_port = 1024 <= int(output[0]) <= 64000 and not \
int(output[0]) in existing_ports
is_valid_cost = 1 <= int(output[1]) < 16
is_valid_id = 1 <= int(output[2]) <= 64000
if is_valid_port and is_valid_cost and is_valid_id:
existing_ports.append(int(output[0]))
self.fsm.router.router_settings['outputs'][int(output[2])] = \
{'metric': int(output[1]),
'port': int(output[0])}
else:
raise Exception('Invalid Outputs')
def setup_inputs(self, config):
'''Create input sockets from the inputs specified in the config file'''
# get inputs from configuration file
ports = config['Settings']['input-ports'].split(', ')
inputs = []
for port in ports:
if 1024 <= int(port) <= 64000 and not int(port) in inputs:
inputs.append(int(port))
else:
raise Exception('Invalid Port Number')
self.fsm.router.router_settings['inputs'] = {}
# create socket for each input port
for port in inputs:
try:
self.fsm.router.router_settings['inputs'][port] = \
socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print_message('Socket ' + str(port) + ' Created.')
except socket.error as msg:
print('Failed to create socket. Message: ' + str(msg))
sys.exit()
# bind port to socket
try:
self.fsm.router.router_settings['inputs'][port].bind(
(HOST, port))
print_message('Socket ' + str(port) + ' Bind Complete.')
except socket.error as msg:
print('Failed to create socket. Message ' + str(msg))
sys.exit()
def setup_routing_table(self):
'''Setup routing table with the outputs specified in the config file'''
self.fsm.router.routing_table[self.fsm.router.router_settings['id']] = \
RIPRouteEntry(address=self.fsm.router.router_settings['id'],
nexthop=0,
metric=0,
imported=True)
class Waiting(State):
'''
Class representing the waiting state of the FSM where the router waits
for messages to be received on its input sockets. When a message is
received the state changes to the ReadMeassage state.
'''
def __init__(self, fsm):
super(Waiting, self).__init__(fsm)
def enter(self):
'''Display State entry message'''
print_message("Entering idle state...")
def execute(self):
'''Waits for input sockets to be readable and then changes the state
to process the received message.'''
readable = select.select(
self.fsm.router.router_settings['inputs'].values(), [], [])
if readable[0]:
self.fsm.router.readable_ports = readable[0]
self.fsm.to_transition("toReadMessage")
def exit(self):
'''Display State exit message'''
print_message("Message Received")
class ReadMessage(State):
'''Class representing the state for reading messages received on the input
sockets'''
def __init__(self, fsm):
super(ReadMessage, self).__init__(fsm)
def enter(self):
print_message("Reading Messages...")
def execute(self):
for port in self.fsm.router.readable_ports:
packet = RIPPacket(port.recvfrom(1024)[0])
self.fsm.router.update_routing_table(packet)
if self.fsm.router.route_change:
self.fsm.router.trigger_update()
self.fsm.router.print_routing_table()
self.fsm.to_transition("toWaiting")
def exit(self):
print_message("Messages Read.")
# ===========================================================================
# FINITE STATE MACHINE
class RouterFSM():
'''Class representing the Router finite state machine'''
def __init__(self, rip_router):
self.router = rip_router
self.states = {}
self.transitions = {}
self.cur_state = None
self.trans = None
def add_transistion(self, trans_name, transition):
'''Add a new transition to the FSM'''
self.transitions[trans_name] = transition
def add_state(self, state_name, state):
'''Add a new state to the FSM'''
self.states[state_name] = state
def set_state(self, state_name):
'''Set the current state of the FSM'''
self.cur_state = self.states[state_name]
def to_transition(self, to_trans):
'''Set the current transition of the FSM'''
self.trans = self.transitions[to_trans]
def execute(self):
'''Run the FSM'''
if self.trans:
self.cur_state.exit()
self.trans.execute()
self.set_state(self.trans.to_state)
self.cur_state.enter()
self.trans = None
self.cur_state.execute()
# ===========================================================================
# IMPLEMENTATION
class RIPPacket:
'''Class representing a RIP packet containing a header and body as defined
in RFC2453 RIPv2 section 4.'''
def __init__(self, data=None, header=None, rtes=None):
if data:
self._init_from_network(data)
elif header and rtes:
self._init_from_host(header, rtes)
else:
raise ValueError
def __repr__(self):
return "RIPPacket: Command {}, Ver. {}, number of RTEs {}.". \
format(self.header.cmd, self.header.ver, len(self.rtes))
def _init_from_network(self, data):
'''Init for RIPPacket if data is from the network'''
# Packet Validation
datalen = len(data)
if datalen < RIPHeader.SIZE:
raise FormatException
malformed_rtes = (datalen - RIPHeader.SIZE) % RIPRouteEntry.SIZE
if malformed_rtes:
raise FormatException
# Convert bytes in packet to header and RTE data
num_rtes = int((datalen - RIPHeader.SIZE) / RIPRouteEntry.SIZE)
self.header = RIPHeader(data[0:RIPHeader.SIZE])
self.rtes = []
rte_start = RIPHeader.SIZE
rte_end = RIPHeader.SIZE + RIPRouteEntry.SIZE
# Loop over data packet to obtain each RTE
for i in range(num_rtes):
self.rtes.append(RIPRouteEntry(rawdata=data[rte_start:rte_end],
src_id=self.header.src))
rte_start += RIPRouteEntry.SIZE
rte_end += RIPRouteEntry.SIZE
def _init_from_host(self, header, rtes):
'''Init for imported data'''
if header.ver != 2:
raise ValueError("Only Version 2 is supported.")
self.header = header
self.rtes = rtes
def serialize(self):
'''Return the byte sting representing this packet for network
transmission'''
packed = self.header.serialize()
for rte in self.rtes:
packed += rte.serialize()
return packed
class RIPHeader:
'''Class representing the header of a RIP packet'''
FORMAT = "!BBH"
SIZE = struct.calcsize(FORMAT)
TYPE_RESPONSE = 2
VERSION = 2
def __init__(self, rawdata=None, router_id=None):
self.packed = None
if rawdata:
self._init_from_network(rawdata)
elif router_id:
self._init_from_host(router_id)
else:
raise ValueError
def __repr__(self):
return "RIP Header (cmd = {}, ver = {}, src = {})".format(self.cmd,
self.ver,
self.src)
def _init_from_network(self, rawdata):
'''init for data from network'''
header = struct.unpack(self.FORMAT, rawdata)
self.cmd = header[0]
self.ver = header[1]
self.src = header[2]
def _init_from_host(self, router_id):
'''Init for data from host'''
self.cmd = self.TYPE_RESPONSE
self.ver = self.VERSION
self.src = router_id
def serialize(self):
'''Return the byte sting representing this header for network
transmission'''
return struct.pack(self.FORMAT, self.cmd, self.ver, self.src)
class RIPRouteEntry:
'''Class representing a single RIP route entry (RTE)'''
FORMAT = "!HHIII"
SIZE = struct.calcsize(FORMAT)
MIN_METRIC = 0
MAX_METRIC = 16
def __init__(self, rawdata=None, src_id=None, address=None,
nexthop=None, metric=None, imported=False):
self.changed = False
self.imported = imported
self.init_timeout()
if rawdata and src_id != None:
self._init_from_network(rawdata, src_id)
elif address and nexthop != None and metric != None:
self._init_from_host(address, nexthop, metric)
else:
raise ValueError
def __repr__(self):
template = "|{:^11}|{:^10}|{:^11}|{:^15}|{:^10}|{:^13}|"
# Check that timeout is set
if self.timeout == None:
return template.format(self.addr, self.metric, self.nexthop,
self.changed, self.garbage,
str(self.timeout))
else:
timeout = (datetime.datetime.now() - self.timeout).total_seconds()
return template.format(self.addr, self.metric, self.nexthop,
self.changed, self.garbage,
round(timeout, 1))
def _init_from_host(self, address, nexthop, metric):
'''Init for data from host'''
self.afi = AF_INET
self.tag = 0 # not used
self.addr = address
self.nexthop = nexthop
self.metric = metric
def _init_from_network(self, rawdata, src_id):
'''Init for data received from network'''
rte = struct.unpack(self.FORMAT, rawdata)
self.afi = rte[0]
self.tag = rte[1]
self.addr = rte[2]
self.set_nexthop(rte[3])
self.metric = rte[4]
if self.nexthop == 0:
self.nexthop = src_id
# Validation
if not self.MIN_METRIC <= self.metric <= self.MAX_METRIC:
raise FormatException
def init_timeout(self):
'''Initialize the timeout property'''
if self.imported:
self.timeout = None
else:
self.timeout = datetime.datetime.now()
self.garbage = False
self.marked_for_delection = False
def __eq__(self, other):
if self.afi == other.afi and \
self.addr == other.addr and \
self.tag == other.tag and \
self.nexthop == other.nexthop and \
self.metric == other.metric:
return True
else:
return False
def set_nexthop(self, nexthop):
'''Set the nexthop property'''
self.nexthop = nexthop
def serialize(self):
'''Pack entries into typical RIPv2 packet format for sending over the
network. '''
return struct.pack(self.FORMAT, self.afi, self.tag, self.addr,
self.nexthop, self.metric)
class FormatException(Exception):
'''Class representing the Format Exception'''
def __init__(self, message=""):
self.message = message
class Router:
'''Class representing a single router'''
def __init__(self, config_file):
self.fsm = RouterFSM(self)
self.config_file = config_file
# Dictionary of router settings, including router-id, inputs and
# outputs
self.router_settings = {}
self.readable_ports = []
# Dictionary of routing table
self.routing_table = {}
self.route_change = False
# STATES
self.fsm.add_state("StartUp", StartUp(self.fsm))
self.fsm.add_state("Waiting", Waiting(self.fsm))
self.fsm.add_state("ReadMessage", ReadMessage(self.fsm))
# TRANSITIONS
self.fsm.add_transistion("toWaiting", Transistion("Waiting"))
self.fsm.add_transistion("toReadMessage", Transistion("ReadMessage"))
self.fsm.set_state("StartUp")
def execute(self):
'''Run the router's finite state machine'''
self.fsm.execute()
def update_routing_table(self, packet):
'''Update Routing table if new route info exist'''
for rte in packet.rtes:
# ignore RTEs of self
if rte.addr != self.fsm.router.router_settings['id']:
bestroute = self.routing_table.get(rte.addr)
# set nexthop to source router and calculate metric
rte.set_nexthop(packet.header.src)
rte.metric = min(rte.metric +
self.router_settings['outputs'][
packet.header.src]['metric'],
RIPRouteEntry.MAX_METRIC)
# Route dosn't yet exist
if not bestroute:
# ignore RTEs with a metric of MAX_METRIC
if rte.metric == RIPRouteEntry.MAX_METRIC:
return
# Add new RTE to routing table
rte.changed = True
self.route_change = True
self.routing_table[rte.addr] = rte
print_message("RTE added for Router: " + str(rte.addr))
return
else:
# Route already exists
if rte.nexthop == bestroute.nexthop:
if bestroute.metric != rte.metric:
if bestroute.metric != RIPRouteEntry.MAX_METRIC \
and rte.metric >= RIPRouteEntry.MAX_METRIC:
# mark for garbage collection
bestroute.metric = RIPRouteEntry.MAX_METRIC
bestroute.garbage = True
bestroute.changed = True
self.route_change = True
else:
self.update_route(bestroute, rte)
# Route still exists with same values
elif not bestroute.garbage:
bestroute.init_timeout()
# Lower metric on existing route
elif rte.metric < bestroute.metric:
self.update_route(bestroute, rte)
def update_route(self, bestroute, rte):
'''Update an existing route entry with new route info'''
bestroute.init_timeout()
bestroute.garbage = False
bestroute.changed = True
bestroute.metric = rte.metric
bestroute.nexthop = rte.nexthop
self.route_change = True
print_message("RTE for Router: " + str(rte.addr) +
" updated with metric=" + str(rte.metric) +
", nexthop=" + str(rte.nexthop) + ".")
def print_routing_table(self):
'''Print the routing table to the terminal'''
line = "+-----------+----------+-----------+---------------+----------+-------------+"
print(line)
print(
"| Routing Table (Router "
+ str(self.router_settings['id']) + ") |")
print(line)
print(
"|Router ID | Metric | NextHop | ChangedFlag | Garbage | Timeout(s) |")
print(line)
print(self.routing_table[self.router_settings['id']])
print(
"+===========+==========+===========+===============+==========+=============+")
for entry in self.routing_table:
if entry != self.router_settings['id']:
print(self.routing_table[entry])
print(line)
print('\n')
def trigger_update(self):
'''Send Routing update for only the routes which have changed'''
changed_rtes = []
print_message("Sending Trigger update.")
for rte in self.routing_table.values():
if rte.changed:
changed_rtes.append(rte)
rte.changed = False
self.route_change = False
# send update with random delay between 1 and 5 seconds
delay = randint(1, 5)
threading.Timer(delay, self.update, [changed_rtes])
def update(self, entries):
'''Send a message to all output ports'''
if self.router_settings != {}:
sock = list(self.router_settings['inputs'].values())[1]
local_header = RIPHeader(router_id=self.router_settings['id'])
for output in self.router_settings['outputs']:
# Split horizon
# Remove RTES for which nexthop == output
split_horizon_entries = []
for entry in entries:
if entry.nexthop != output:
split_horizon_entries.append(entry)
else:
# Poison reverse
# Create new entry to get around some funky referencing
# When doing poisoned_entry = entry
poisoned_entry = RIPRouteEntry(rawdata=None,
src_id=None, address=entry.addr,
nexthop=entry.nexthop, metric= RIPRouteEntry.MAX_METRIC,
imported=entry.imported)
split_horizon_entries.append(poisoned_entry)
# comment out to disable split horizon
packet = RIPPacket(
header=local_header, rtes=split_horizon_entries)
# Uncomment to disable split horizon
# packet = RIPPacket(header=local_header, rtes=entries)
sock.sendto(packet.serialize(),
(HOST,
self.router_settings['outputs'][output]["port"]))
print_message("Message Sent To Router: " + str(output))
def check_timeout(self):
'''Check the current timeout value for each RTE in the routing table.
If the time difference with now is greater than ROUTE_TIMEOUT, then
set the metric to 16 and start the garbage collection timer.'''
print_message("Checking timeout...")
if self.routing_table != {}:
for rte in self.routing_table.values():
if rte.timeout != None and \
(datetime.datetime.now() - rte.timeout).total_seconds() \
>= ROUTE_TIMEOUT:
rte.garbage = True
rte.changed = True
self.route_change = True
rte.metric = RIPRouteEntry.MAX_METRIC
rte.timeout = datetime.datetime.now()
self.print_routing_table()
print_message("Router: " + str(rte.addr) + " timed out.")
def garbage_timer(self):
'''Check the status of the garbage property of each RTE. If true, and
the timeout value difference with now is greater than DELETE_TIMEOUT,
mark it for deletion'''
print_message("Checking garbage timeout...")
if self.routing_table != {}:
for rte in self.routing_table.values():
if rte.garbage:
if (datetime.datetime.now() - rte.timeout).total_seconds() \
>= DELETE_TIMEOUT:
rte.marked_for_delection = True
def garbage_collection(self):
'''Check the routing table for RTE's that are marked for deletion and
remove them.'''
print_message("Collecting Garbage...")
if self.routing_table != {}:
delete_routes = []
for rte in self.routing_table.values():
if rte.marked_for_delection:
delete_routes.append(rte.addr)
print_message("Router: " + str(rte.addr) + " has been " +
"removed from the routing table.")
for entry in delete_routes:
del self.routing_table[entry]
self.print_routing_table()
def timer(self, function, param=None):
'''Start a periodic timer which calls a specified function'''
if param is not None:
function(list(param.values()))
period = BASE_TIMER * randrange(8, 12, 1) / 10
else:
period = BASE_TIMER
function()
threading.Timer(period, self.timer, [function, param]).start()
def start_timers(self):
'''Start the timers on separate threads'''
self.timer(self.update, param=self.routing_table)
self.timer(self.check_timeout)
self.timer(self.garbage_timer)
self.timer(self.garbage_collection)
def main_loop(self):
'''Start the main loop for the program.'''
while True:
self.execute()
# RUN THE PROGRAM
def print_message(message):
'''Print the given message with the current time before it'''
if DEBUG:
print("[" + time.strftime("%H:%M:%S") + "]: " + message)
def main():
    '''Main function to run the program.'''
    router = Router(str(sys.argv[-1]))
    router.start_timers()
    router.main_loop()
if __name__ == "__main__":
    main()
| en | 0.828527 | Summary: Program that implements a routing daemon based on the
RIP version 2 protocol from RFC2453.
Usage: python3 Router.py <router_config_file>
Configuration File:
The user supplies a router configuration file of the format:
[Settings]
router-id = <router_number>
input-ports = <input> [, <input>, ...]
outputs = <output>-<metric>-<destination_router>
[, <output>-<metric>-<destination_router>, ...]
where,
router_number: ID of router between 1 - 64000.
input: port number between 1024 - 64000.
output: port number between 1024 - 64000,
not equal to any inputs.
metric: metric of output between 1 - 16.
destination_router: ID of destination router.
Description:
This program implements a basic RIPv2 routing protocol from RFC2453
for routing computations in computer networks. It takes a configuration
file as shown above and sets up a router with a new socket for each
input-port.
The RIPv2 protocol uses a routing table to keep track of all reachable
routers on the network along with their metric/cost and the direct
next hop router ID along the route to that destination router. However,
it can only send messages to the direct neighbours specified in outputs.
The protocol uses the Bellman-Ford distance vector algorithm to compute
the lowest cost route to each router in the network. If the metric is
16 or greater, the router is considered unreachable.
The routing table initially starts with a single route entry (RTE) for
itself with a metric of zero. The routing table is periodically
transmitted to each of its direct output ports via an unsolicited
response message as defined in RFC2453 section 3.9.2 and 4. This is
performed on a separate thread so it does not interfere with other
operations
The router receives messages from other routers by using the python select()
function which blocks until a message is ready to be read. Once a
message is received the header and contents are validated.
If the message is valid each RTE is processed according to RFC2453
section 3.9.2.
If a new router is found the RTE is added
to the routing table, adding the cost to the metric for the output
the message was received on.
If the RTE already exists, but the metric is smaller, the metric
is updated to the lower metric.
If the lower metric is from a different next hop router, change the
next hop.
If nothing has changed, restart the timeout timer.
If RTE metric >= max metric of 16, mark the entry for
garbage collection and update the metric in the table.
If any change has occurred in the routing table as a result of a
received message, a triggered update (RFC2453 section 3.10.1) is sent
to all outputs with the updated entries. Triggered updates are sent with
a random delay between 1 - 5 seconds to prevent synchronized updates.
Request messages are not implemented in this program.
Timers (all timers are on separate threads) (RFC2453 section 3.8):
Update timer - Periodic unsolicited response message sent to all
outputs. The period is adjusted each time to a random value
between 0.8 * BASE_TIMER and 1.2 * BASE_TIMER to prevent
synchronized updates.
Timeout - used to check the routing table for RTEs which have
have not been updated within the ROUTE_TIMEOUT interval. If
a router has not been heard from within this time, then set the
metric to the max metric of 16 and start the garbage collection
timer.
Garbage timer - used to check the routing table for RTEs set
for garbage collection. If the timeout >= DELETE_TIMEOUT,
mark the RTE for deletion.
Garbage Collection - used to check the routing table for RTEs
marked for deletion, and removes those entries from the table. # localhost # =========================================================================== # TRANSITIONS Class Representing a transition between states. Run the transition functions # =========================================================================== # STATES Class Representing a generic state Execute functions for entering a state Execute functions while in state Execute functions for leaving a state Class Representing the Start up state which reads the configuration file Execute the configuration functions Print complete message Read the router id number from the configuration file Return a dictionary of outputs containing port, cost and destination
router id from the Configuration file Create input sockets from the inputs specified in the config file # get inputs from configuration file # create socket for each input port # bind port to socket Setup routing table with the outputs specified in the config file Class representing the waiting state of the FSM where the router waits
for messages to be received on its input sockets. When a message is
received the state changes to the ReadMeassage state. Display State entry message Waits for input sockets to be readable and then changes the state
to process the received message. Display State exit message Class representing the state for reading messages received on the input
sockets # =========================================================================== # FINITE STATE MACHINE Class representing the Router finite state machine Add a new transition to the FSM Add a new state to the FSM Set the current state of the FSM Set the current transition of the FSM Run the FSM # =========================================================================== # IMPLEMENTATION Class representing a RIP packet containing a header and body as defined
in RFC2453 RIPv2 section 4. Init for RIPPacket if data is from the network # Packet Validation # Convert bytes in packet to header and RTE data # Loop over data packet to obtain each RTE Init for imported data Return the byte sting representing this packet for network
transmission Class representing the header of a RIP packet init for data from network Init for data from host Return the byte sting representing this header for network
transmission Class representing a single RIP route entry (RTE) # Check that timeout is set Init for data from host # not used Init for data received from network # Validation Initialize the timeout property Set the nexthop property Pack entries into typical RIPv2 packet format for sending over the
network. Class representing the Format Exception Class representing a single router # Dictionary of router settings, including router-id, inputs and # outputs # Dictionary of routing table # STATES # TRANSITIONS Run the router's finite state machine Update Routing table if new route info exist # ignore RTEs of self # set nexthop to source router and calculate metric # Route dosn't yet exist # ignore RTEs with a metric of MAX_METRIC # Add new RTE to routing table # Route already exists # mark for garbage collection # Route still exists with same values # Lower metric on existing route Update an existing route entry with new route info Print the routing table to the terminal Send Routing update for only the routes which have changed # send update with random delay between 1 and 5 seconds Send a message to all output ports # Split horizon # Remove RTES for which nexthop == output # Poison reverse # Create new entry to get around some funky referencing # When doing poisoned_entry = entry # comment out to disable split horizon # Uncomment to disable split horizon # packet = RIPPacket(header=local_header, rtes=entries) Check the current timeout value for each RTE in the routing table.
If the time difference with now is greater than ROUTE_TIMEOUT, then
set the metric to 16 and start the garbage collection timer. Check the status of the garbage property of each RTE. If true, and
the timeout value difference with now is greater than DELETE_TIMEOUT,
mark it for deletion Check the routing table for RTE's that are marked for deletion and
remove them. Start a periodic timer which calls a specified function Start the timers on separate threads Start the main loop for the program. # RUN THE PROGRAM Print the given message with the current time before it Main function to run the program. | 2.951302 | 3 |
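A minimal stand-alone sketch of the distance-vector metric update described above, assuming a neighbour advertises a route at advertised_metric over a link of cost link_cost; the function and variable names are illustrative and not taken from the file itself:
# Illustrative sketch of the RIP metric update (RFC2453 section 3.9.2); names are assumptions.
MAX_METRIC = 16
def updated_metric(advertised_metric, link_cost):
    # Add the cost of the incoming link, but never exceed the "infinity" metric of 16.
    return min(advertised_metric + link_cost, MAX_METRIC)
# Example: a neighbour at link cost 1 advertising a router with metric 3 yields a stored metric of 4;
# an advertised metric of 16 stays at 16, i.e. the destination remains unreachable.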
atlaselectrophysiology/extract_files.py | alowet/iblapps | 0 | 8172 | <filename>atlaselectrophysiology/extract_files.py
from ibllib.io import spikeglx
import numpy as np
import ibllib.dsp as dsp
from scipy import signal
from ibllib.misc import print_progress
from pathlib import Path
import alf.io as aio
import logging
import ibllib.ephys.ephysqc as ephysqc
from phylib.io import alf
_logger = logging.getLogger('ibllib')
RMS_WIN_LENGTH_SECS = 3
WELCH_WIN_LENGTH_SAMPLES = 1024
def rmsmap(fbin, spectra=True):
"""
Computes RMS map in time domain and spectra for each channel of Neuropixel probe
:param fbin: binary file in spike glx format (will look for attached metadata)
:type fbin: str or pathlib.Path
:param spectra: whether to compute the power spectrum (only need for lfp data)
:type: bool
:return: a dictionary with amplitudes in channeltime space, channelfrequency space, time
and frequency scales
"""
sglx = fbin if isinstance(fbin, spikeglx.Reader) else spikeglx.Reader(fbin)  # accept an already-open Reader or a file path
rms_win_length_samples = 2 ** np.ceil(np.log2(sglx.fs * RMS_WIN_LENGTH_SECS))
# the window generator will generate window indices
wingen = dsp.WindowGenerator(ns=sglx.ns, nswin=rms_win_length_samples, overlap=0)
# pre-allocate output dictionary of numpy arrays
win = {'TRMS': np.zeros((wingen.nwin, sglx.nc)),
'nsamples': np.zeros((wingen.nwin,)),
'fscale': dsp.fscale(WELCH_WIN_LENGTH_SAMPLES, 1 / sglx.fs, one_sided=True),
'tscale': wingen.tscale(fs=sglx.fs)}
win['spectral_density'] = np.zeros((len(win['fscale']), sglx.nc))
# loop through the whole session
for first, last in wingen.firstlast:
D = sglx.read_samples(first_sample=first, last_sample=last)[0].transpose()
# remove low frequency noise below 1 Hz
D = dsp.hp(D, 1 / sglx.fs, [0, 1])
iw = wingen.iw
win['TRMS'][iw, :] = dsp.rms(D)
win['nsamples'][iw] = D.shape[1]
if spectra:
# the last window may be smaller than what is needed for welch
if last - first < WELCH_WIN_LENGTH_SAMPLES:
continue
# compute a smoothed spectrum using welch method
_, w = signal.welch(D, fs=sglx.fs, window='hanning', nperseg=WELCH_WIN_LENGTH_SAMPLES,
detrend='constant', return_onesided=True, scaling='density',
axis=-1)
win['spectral_density'] += w.T
# print at least every 20 windows
if (iw % min(20, max(int(np.floor(wingen.nwin / 75)), 1))) == 0:
print_progress(iw, wingen.nwin)
return win
def extract_rmsmap(fbin, out_folder=None, spectra=True):
"""
Wrapper for rmsmap that outputs _ibl_ephysRmsMap and _ibl_ephysSpectra ALF files
:param fbin: binary file in spike glx format (will look for attached metadata)
:param out_folder: folder in which to store output ALF files. Default uses the folder in which
the `fbin` file lives.
:param spectra: whether to compute the power spectrum (only need for lfp data)
:type: bool
:return: None
"""
_logger.info(f"Computing QC for {fbin}")
sglx = spikeglx.Reader(fbin)
# check if output ALF files exist already:
if out_folder is None:
out_folder = Path(fbin).parent
else:
out_folder = Path(out_folder)
alf_object_time = f'_iblqc_ephysTimeRms{sglx.type.upper()}'
alf_object_freq = f'_iblqc_ephysSpectralDensity{sglx.type.upper()}'
# crunch numbers
rms = rmsmap(fbin, spectra=spectra)
# output ALF files, single precision with the optional label as suffix before extension
if not out_folder.exists():
out_folder.mkdir()
tdict = {'rms': rms['TRMS'].astype(np.single), 'timestamps': rms['tscale'].astype(np.single)}
aio.save_object_npy(out_folder, object=alf_object_time, dico=tdict)
if spectra:
fdict = {'power': rms['spectral_density'].astype(np.single),
'freqs': rms['fscale'].astype(np.single)}
aio.save_object_npy(out_folder, object=alf_object_freq, dico=fdict)
def _sample2v(ap_file):
"""
Convert raw ephys data to Volts
"""
md = spikeglx.read_meta_data(ap_file.with_suffix('.meta'))
s2v = spikeglx._conversion_sample2v_from_meta(md)
return s2v['ap'][0]
def ks2_to_alf(ks_path, bin_path, out_path, bin_file=None, ampfactor=1, label=None, force=True):
"""
Convert Kilosort 2 output to ALF dataset for single probe data
:param ks_path:
:param bin_path: path of raw data
:param out_path:
:return:
"""
m = ephysqc.phy_model_from_ks2_path(ks2_path=ks_path, bin_path=bin_path, bin_file=bin_file)
ephysqc.spike_sorting_metrics_ks2(ks_path, m, save=True)
ac = alf.EphysAlfCreator(m)
ac.convert(out_path, label=label, force=force, ampfactor=ampfactor)
def extract_data(ks_path, ephys_path, out_path):
efiles = spikeglx.glob_ephys_files(ephys_path)
for efile in efiles:
if efile.get('ap') and efile.ap.exists():
ks2_to_alf(ks_path, ephys_path, out_path, bin_file=efile.ap,
ampfactor=_sample2v(efile.ap), label=None, force=True)
extract_rmsmap(efile.ap, out_folder=out_path, spectra=False)
if efile.get('lf') and efile.lf.exists():
extract_rmsmap(efile.lf, out_folder=out_path)
# if __name__ == '__main__':
#
# ephys_path = Path('C:/Users/Mayo/Downloads/raw_ephys_data')
# ks_path = Path('C:/Users/Mayo/Downloads/KS2')
# out_path = Path('C:/Users/Mayo/Downloads/alf')
# extract_data(ks_path, ephys_path, out_path)
| <filename>atlaselectrophysiology/extract_files.py
from ibllib.io import spikeglx
import numpy as np
import ibllib.dsp as dsp
from scipy import signal
from ibllib.misc import print_progress
from pathlib import Path
import alf.io as aio
import logging
import ibllib.ephys.ephysqc as ephysqc
from phylib.io import alf
_logger = logging.getLogger('ibllib')
RMS_WIN_LENGTH_SECS = 3
WELCH_WIN_LENGTH_SAMPLES = 1024
def rmsmap(fbin, spectra=True):
"""
Computes RMS map in time domain and spectra for each channel of Neuropixel probe
:param fbin: binary file in spike glx format (will look for attached metadata)
:type fbin: str or pathlib.Path
:param spectra: whether to compute the power spectrum (only need for lfp data)
:type: bool
:return: a dictionary with amplitudes in channeltime space, channelfrequency space, time
and frequency scales
"""
sglx = fbin if isinstance(fbin, spikeglx.Reader) else spikeglx.Reader(fbin)  # accept an already-open Reader or a file path
rms_win_length_samples = 2 ** np.ceil(np.log2(sglx.fs * RMS_WIN_LENGTH_SECS))
# the window generator will generate window indices
wingen = dsp.WindowGenerator(ns=sglx.ns, nswin=rms_win_length_samples, overlap=0)
# pre-allocate output dictionary of numpy arrays
win = {'TRMS': np.zeros((wingen.nwin, sglx.nc)),
'nsamples': np.zeros((wingen.nwin,)),
'fscale': dsp.fscale(WELCH_WIN_LENGTH_SAMPLES, 1 / sglx.fs, one_sided=True),
'tscale': wingen.tscale(fs=sglx.fs)}
win['spectral_density'] = np.zeros((len(win['fscale']), sglx.nc))
# loop through the whole session
for first, last in wingen.firstlast:
D = sglx.read_samples(first_sample=first, last_sample=last)[0].transpose()
# remove low frequency noise below 1 Hz
D = dsp.hp(D, 1 / sglx.fs, [0, 1])
iw = wingen.iw
win['TRMS'][iw, :] = dsp.rms(D)
win['nsamples'][iw] = D.shape[1]
if spectra:
# the last window may be smaller than what is needed for welch
if last - first < WELCH_WIN_LENGTH_SAMPLES:
continue
# compute a smoothed spectrum using welch method
_, w = signal.welch(D, fs=sglx.fs, window='hanning', nperseg=WELCH_WIN_LENGTH_SAMPLES,
detrend='constant', return_onesided=True, scaling='density',
axis=-1)
win['spectral_density'] += w.T
# print at least every 20 windows
if (iw % min(20, max(int(np.floor(wingen.nwin / 75)), 1))) == 0:
print_progress(iw, wingen.nwin)
return win
def extract_rmsmap(fbin, out_folder=None, spectra=True):
"""
Wrapper for rmsmap that outputs _ibl_ephysRmsMap and _ibl_ephysSpectra ALF files
:param fbin: binary file in spike glx format (will look for attached metadata)
:param out_folder: folder in which to store output ALF files. Default uses the folder in which
the `fbin` file lives.
:param spectra: whether to compute the power spectrum (only need for lfp data)
:type: bool
:return: None
"""
_logger.info(f"Computing QC for {fbin}")
sglx = spikeglx.Reader(fbin)
# check if output ALF files exist already:
if out_folder is None:
out_folder = Path(fbin).parent
else:
out_folder = Path(out_folder)
alf_object_time = f'_iblqc_ephysTimeRms{sglx.type.upper()}'
alf_object_freq = f'_iblqc_ephysSpectralDensity{sglx.type.upper()}'
# crunch numbers
rms = rmsmap(fbin, spectra=spectra)
# output ALF files, single precision with the optional label as suffix before extension
if not out_folder.exists():
out_folder.mkdir()
tdict = {'rms': rms['TRMS'].astype(np.single), 'timestamps': rms['tscale'].astype(np.single)}
aio.save_object_npy(out_folder, object=alf_object_time, dico=tdict)
if spectra:
fdict = {'power': rms['spectral_density'].astype(np.single),
'freqs': rms['fscale'].astype(np.single)}
aio.save_object_npy(out_folder, object=alf_object_freq, dico=fdict)
def _sample2v(ap_file):
"""
Convert raw ephys data to Volts
"""
md = spikeglx.read_meta_data(ap_file.with_suffix('.meta'))
s2v = spikeglx._conversion_sample2v_from_meta(md)
return s2v['ap'][0]
def ks2_to_alf(ks_path, bin_path, out_path, bin_file=None, ampfactor=1, label=None, force=True):
"""
Convert Kilosort 2 output to ALF dataset for single probe data
:param ks_path:
:param bin_path: path of raw data
:param out_path:
:return:
"""
m = ephysqc.phy_model_from_ks2_path(ks2_path=ks_path, bin_path=bin_path, bin_file=bin_file)
ephysqc.spike_sorting_metrics_ks2(ks_path, m, save=True)
ac = alf.EphysAlfCreator(m)
ac.convert(out_path, label=label, force=force, ampfactor=ampfactor)
def extract_data(ks_path, ephys_path, out_path):
efiles = spikeglx.glob_ephys_files(ephys_path)
for efile in efiles:
if efile.get('ap') and efile.ap.exists():
ks2_to_alf(ks_path, ephys_path, out_path, bin_file=efile.ap,
ampfactor=_sample2v(efile.ap), label=None, force=True)
extract_rmsmap(efile.ap, out_folder=out_path, spectra=False)
if efile.get('lf') and efile.lf.exists():
extract_rmsmap(efile.lf, out_folder=out_path)
# if __name__ == '__main__':
#
# ephys_path = Path('C:/Users/Mayo/Downloads/raw_ephys_data')
# ks_path = Path('C:/Users/Mayo/Downloads/KS2')
# out_path = Path('C:/Users/Mayo/Downloads/alf')
# extract_data(ks_path, ephys_path, out_path)
| en | 0.670358 | Computes RMS map in time domain and spectra for each channel of Neuropixel probe
:param fbin: binary file in spike glx format (will look for attached metadata)
:type fbin: str or pathlib.Path
:param spectra: whether to compute the power spectrum (only need for lfp data)
:type: bool
:return: a dictionary with amplitudes in channeltime space, channelfrequency space, time
and frequency scales # the window generator will generates window indices # pre-allocate output dictionary of numpy arrays # loop through the whole session # remove low frequency noise below 1 Hz # the last window may be smaller than what is needed for welch # compute a smoothed spectrum using welch method # print at least every 20 windows Wrapper for rmsmap that outputs _ibl_ephysRmsMap and _ibl_ephysSpectra ALF files
:param fbin: binary file in spike glx format (will look for attached metadata)
:param out_folder: folder in which to store output ALF files. Default uses the folder in which
the `fbin` file lives.
:param spectra: whether to compute the power spectrum (only need for lfp data)
:type: bool
:return: None # check if output ALF files exist already: # crunch numbers # output ALF files, single precision with the optional label as suffix before extension Convert raw ephys data to Volts Convert Kilosort 2 output to ALF dataset for single probe data
:param ks_path:
:param bin_path: path of raw data
:param out_path:
:return: # if __name__ == '__main__': # # ephys_path = Path('C:/Users/Mayo/Downloads/raw_ephys_data') # ks_path = Path('C:/Users/Mayo/Downloads/KS2') # out_path = Path('C:/Users/Mayo/Downloads/alf') # extract_data(ks_path, ephys_path, out_path) | 2.353299 | 2 |
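A hedged usage sketch of the entry point defined in this row, mirroring the commented-out __main__ block above; the probe paths below are placeholders, not paths from the repository, and the import assumes the module is importable under its repository path:
# Hypothetical invocation of extract_data(); all paths are assumed examples.
from pathlib import Path
from atlaselectrophysiology.extract_files import extract_data
ks_path = Path('/data/probe00/ks2')      # Kilosort 2 output folder (assumed)
ephys_path = Path('/data/probe00/raw')   # folder holding the spikeglx ap/lf binaries (assumed)
out_path = Path('/data/probe00/alf')     # destination for the ALF datasets and QC files (assumed)
extract_data(ks_path, ephys_path, out_path)  # converts KS2 output to ALF and writes RMS/spectral QC files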
site_settings/models.py | shervinbdndev/Django-Shop | 13 | 8173 | from django.db import models
class SiteSettings(models.Model):
site_name = models.CharField(max_length=200 , verbose_name='Site Name')
site_url = models.CharField(max_length=200 , verbose_name='Site URL')
site_address = models.CharField(max_length=300 , verbose_name='Site Address')
site_phone = models.CharField(max_length=100 , null=True , blank=True , verbose_name='Site Phone')
site_fax = models.CharField(max_length=200 , null=True , blank=True , verbose_name='Site Fax')
site_email = models.EmailField(max_length=200 , null=True , blank=True , verbose_name='Site Email')
about_us_text = models.TextField(verbose_name='About Us Text')
site_copy_right = models.TextField(verbose_name='Copyright Text')
site_logo = models.ImageField(upload_to='images/site-setting/' , verbose_name='Site Logo')
is_main_setting = models.BooleanField(verbose_name='Site Main Settings')
def __str__(self) -> str:
super(SiteSettings , self).__str__()
return self.site_name
class Meta:
verbose_name = 'Site Setting'
verbose_name_plural = 'Site Settings'
class FooterLinkBox(models.Model):
title = models.CharField(max_length=200 , verbose_name='Title')
def __str__(self) -> str:
super(FooterLinkBox , self).__str__()
return self.title
class Meta:
verbose_name = 'Footer Link Setting'
verbose_name_plural = 'Footer Link Settings'
class FooterLink(models.Model):
title = models.CharField(max_length=200 , verbose_name='Title')
url = models.URLField(max_length=500 , verbose_name='Links')
footer_link_box = models.ForeignKey(to=FooterLinkBox , verbose_name='Category' , on_delete=models.CASCADE)
def __str__(self) -> str:
super(FooterLink , self).__str__()
return self.title
class Meta:
verbose_name = 'Footer Link'
verbose_name_plural = 'Footer Links'
class Slider(models.Model):
title = models.CharField(max_length=200 , verbose_name='Title')
description = models.TextField(verbose_name='Slider Description')
url_title = models.CharField(max_length=200 , verbose_name='URL Title')
url = models.URLField(max_length=200 , verbose_name='URL Address')
image = models.ImageField(upload_to='images/sliders' , verbose_name='Slider Image')
is_active = models.BooleanField(default=False , verbose_name='Active / Inactive')
def __str__(self) -> str:
super(Slider , self).__str__()
return self.title
class Meta:
verbose_name = 'Slider'
verbose_name_plural = 'Sliders' | from django.db import models
class SiteSettings(models.Model):
site_name = models.CharField(max_length=200 , verbose_name='Site Name')
site_url = models.CharField(max_length=200 , verbose_name='Site URL')
site_address = models.CharField(max_length=300 , verbose_name='Site Address')
site_phone = models.CharField(max_length=100 , null=True , blank=True , verbose_name='Site Phone')
site_fax = models.CharField(max_length=200 , null=True , blank=True , verbose_name='Site Fax')
site_email = models.EmailField(max_length=200 , null=True , blank=True , verbose_name='Site Email')
about_us_text = models.TextField(verbose_name='About Us Text')
site_copy_right = models.TextField(verbose_name='Copyright Text')
site_logo = models.ImageField(upload_to='images/site-setting/' , verbose_name='Site Logo')
is_main_setting = models.BooleanField(verbose_name='Site Main Settings')
def __str__(self) -> str:
super(SiteSettings , self).__str__()
return self.site_name
class Meta:
verbose_name = 'Site Setting'
verbose_name_plural = 'Site Settings'
class FooterLinkBox(models.Model):
title = models.CharField(max_length=200 , verbose_name='Title')
def __str__(self) -> str:
super(FooterLinkBox , self).__str__()
return self.title
class Meta:
verbose_name = 'Footer Link Setting'
verbose_name_plural = 'Footer Link Settings'
class FooterLink(models.Model):
title = models.CharField(max_length=200 , verbose_name='Title')
url = models.URLField(max_length=500 , verbose_name='Links')
footer_link_box = models.ForeignKey(to=FooterLinkBox , verbose_name='Category' , on_delete=models.CASCADE)
def __str__(self) -> str:
super(FooterLink , self).__str__()
return self.title
class Meta:
verbose_name = 'Footer Link'
verbose_name_plural = 'Footer Links'
class Slider(models.Model):
title = models.CharField(max_length=200 , verbose_name='Title')
description = models.TextField(verbose_name='Slider Description')
url_title = models.CharField(max_length=200 , verbose_name='URL Title')
url = models.URLField(max_length=200 , verbose_name='URL Address')
image = models.ImageField(upload_to='images/sliders' , verbose_name='Slider Image')
is_active = models.BooleanField(default=False , verbose_name='Active / Inactive')
def __str__(self) -> str:
super(Slider , self).__str__()
return self.title
class Meta:
verbose_name = 'Slider'
verbose_name_plural = 'Sliders' | none | 1 | 2.016893 | 2 |
|
examples/basics/visuals/line_prototype.py | 3DAlgoLab/vispy | 2,617 | 8174 | # -*- coding: utf-8 -*-
# vispy: gallery 10
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import sys
import numpy as np
from vispy import app, gloo, visuals
from vispy.visuals.filters import Clipper, ColorFilter
from vispy.visuals.shaders import MultiProgram
from vispy.visuals.collections import PointCollection
from vispy.visuals.transforms import STTransform
from vispy.scene import SceneCanvas
from vispy.scene.visuals import create_visual_node
class LineVisual(visuals.Visual):
"""Example of a very simple GL-line visual.
This shows the minimal set of methods that need to be reimplemented to
make a new visual class.
"""
def __init__(self, pos=None, color=(1, 1, 1, 1)):
vcode = """
attribute vec2 a_pos;
void main() {
gl_Position = $transform(vec4(a_pos, 0., 1.));
gl_PointSize = 10.;
}
"""
fcode = """
void main() {
gl_FragColor = $color;
}
"""
visuals.Visual.__init__(self, vcode=vcode, fcode=fcode)
self.pos_buf = gloo.VertexBuffer()
# The Visual superclass contains a MultiProgram, which is an object
# that behaves like a normal shader program (you can assign shader
# code, upload values, set template variables, etc.) but internally
# manages multiple ModularProgram instances, one per view.
# The MultiProgram is accessed via the `shared_program` property, so
# the following modifications to the program will be applied to all
# views:
self.shared_program['a_pos'] = self.pos_buf
self.shared_program.frag['color'] = color
self._need_upload = False
# Visual keeps track of draw mode, index buffer, and GL state. These
# are shared between all views.
self._draw_mode = 'line_strip'
self.set_gl_state('translucent', depth_test=False)
if pos is not None:
self.set_data(pos)
def set_data(self, pos):
self._pos = pos
self._need_upload = True
def _prepare_transforms(self, view=None):
view.view_program.vert['transform'] = view.transforms.get_transform()
def _prepare_draw(self, view=None):
"""This method is called immediately before each draw.
The *view* argument indicates which view is about to be drawn.
"""
if self._need_upload:
# Note that pos_buf is shared between all views, so we have no need
# to use the *view* argument in this example. This will be true
# for most visuals.
self.pos_buf.set_data(self._pos)
self._need_upload = False
class PointVisual(LineVisual):
"""Another simple visual class.
Due to the simplicity of these example classes, it was only necessary to
subclass from LineVisual and set the draw mode to 'points'. A more
fully-featured PointVisual class might not follow this approach.
"""
def __init__(self, pos=None, color=(1, 1, 1, 1)):
LineVisual.__init__(self, pos, color)
self._draw_mode = 'points'
class PlotLineVisual(visuals.CompoundVisual):
"""An example compound visual that draws lines and points.
To the user, the compound visual behaves exactly like a normal visual--it
has a transform system, draw() and bounds() methods, etc. Internally, the
compound visual automatically manages proxying these transforms and methods
to its sub-visuals.
"""
def __init__(self, pos=None, line_color=(1, 1, 1, 1),
point_color=(1, 1, 1, 1)):
self._line = LineVisual(pos, color=line_color)
self._point = PointVisual(pos, color=point_color)
visuals.CompoundVisual.__init__(self, [self._line, self._point])
class PointCollectionVisual(visuals.Visual):
"""Thin wrapper around a point collection.
Note: This is currently broken!
"""
def __init__(self):
prog = MultiProgram(vcode='', fcode='')
self.points = PointCollection("agg", color="shared", program=prog)
visuals.Visual.__init__(self, program=prog)
def _prepare_draw(self, view):
if self.points._need_update:
self.points._update()
self._draw_mode = self.points._mode
self._index_buffer = self.points._indices_buffer
def append(self, *args, **kwargs):
self.points.append(*args, **kwargs)
def _prepare_transforms(self, view=None):
pass
@property
def color(self):
return self.points['color']
@color.setter
def color(self, c):
self.points['color'] = c
class PanZoomTransform(STTransform):
def __init__(self, canvas=None, aspect=None, **kwargs):
self._aspect = aspect
self.attach(canvas)
STTransform.__init__(self, **kwargs)
def attach(self, canvas):
""" Attach this tranform to a canvas """
self._canvas = canvas
canvas.events.mouse_wheel.connect(self.on_mouse_wheel)
canvas.events.mouse_move.connect(self.on_mouse_move)
def on_mouse_move(self, event):
if event.is_dragging:
dxy = event.pos - event.last_event.pos
button = event.press_event.button
if button == 1:
self.move(dxy)
elif button == 2:
center = event.press_event.pos
if self._aspect is None:
self.zoom(np.exp(dxy * (0.01, -0.01)), center)
else:
s = dxy[1] * -0.01
self.zoom(np.exp(np.array([s, s])), center)
def on_mouse_wheel(self, event):
self.zoom(np.exp(event.delta * (0.01, -0.01)), event.pos)
canvas = app.Canvas(keys='interactive', size=(900, 600), show=True,
title="Visual Canvas")
pos = np.random.normal(size=(1000, 2), loc=0, scale=50).astype('float32')
pos[0] = [0, 0]
# Make a line visual
line = LineVisual(pos=pos)
line.transforms.canvas = canvas
line.transform = STTransform(scale=(2, 1), translate=(20, 20))
panzoom = PanZoomTransform(canvas)
line.transforms.scene_transform = panzoom
panzoom.changed.connect(lambda ev: canvas.update())
# Attach color filter to all views (current and future) of the visual
line.attach(ColorFilter((1, 1, 0.5, 0.7)))
# Attach a clipper just to this view. The Clipper filter requires a
# transform that maps from the framebuffer coordinate system to the
# clipping coordinates.
tr = line.transforms.get_transform('framebuffer', 'canvas')
line.attach(Clipper((20, 20, 260, 260), transform=tr), view=line)
# Make a view of the line that will draw its shadow
shadow = line.view()
shadow.transforms.canvas = canvas
shadow.transform = STTransform(scale=(2, 1), translate=(25, 25))
shadow.transforms.scene_transform = panzoom
shadow.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow)
tr = shadow.transforms.get_transform('framebuffer', 'canvas')
shadow.attach(Clipper((20, 20, 260, 260), transform=tr), view=shadow)
# And make a second view of the line with different clipping bounds
view = line.view()
view.transforms.canvas = canvas
view.transform = STTransform(scale=(2, 0.5), translate=(450, 150))
tr = view.transforms.get_transform('framebuffer', 'canvas')
view.attach(Clipper((320, 20, 260, 260), transform=tr), view=view)
# Make a compound visual
plot = PlotLineVisual(pos, (0.5, 1, 0.5, 0.2), (0.5, 1, 1, 0.3))
plot.transforms.canvas = canvas
plot.transform = STTransform(translate=(80, 450), scale=(1.5, 1))
tr = plot.transforms.get_transform('framebuffer', 'canvas')
plot.attach(Clipper((20, 320, 260, 260), transform=tr), view=plot)
# And make a view on the compound
view2 = plot.view()
view2.transforms.canvas = canvas
view2.transform = STTransform(scale=(1.5, 1), translate=(450, 400))
tr = view2.transforms.get_transform('framebuffer', 'canvas')
view2.attach(Clipper((320, 320, 260, 260), transform=tr), view=view2)
# And a shadow for the view
shadow2 = plot.view()
shadow2.transforms.canvas = canvas
shadow2.transform = STTransform(scale=(1.5, 1), translate=(455, 405))
shadow2.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow2)
tr = shadow2.transforms.get_transform('framebuffer', 'canvas')
shadow2.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2)
# Example of a collection visual
collection = PointCollectionVisual()
collection.transforms.canvas = canvas
collection.transform = STTransform(translate=(750, 150))
collection.append(np.random.normal(loc=0, scale=20, size=(10000, 3)),
itemsize=5000)
collection.color = (1, 0.5, 0.5, 1), (0.5, 0.5, 1, 1)
shadow3 = collection.view()
shadow3.transforms.canvas = canvas
shadow3.transform = STTransform(scale=(1, 1), translate=(752, 152))
shadow3.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow3)
# tr = shadow3.transforms.get_transform('framebuffer', 'canvas')
# shadow3.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2)
order = [shadow, line, view, plot, shadow2, view2, shadow3, collection]
@canvas.connect
def on_draw(event):
canvas.context.clear((0.3, 0.3, 0.3, 1.0))
for v in order:
v.draw()
def on_resize(event):
# Set canvas viewport and reconfigure visual transforms to match.
vp = (0, 0, canvas.physical_size[0], canvas.physical_size[1])
canvas.context.set_viewport(*vp)
for v in order:
v.transforms.configure(canvas=canvas, viewport=vp)
canvas.events.resize.connect(on_resize)
on_resize(None)
Line = create_visual_node(LineVisual)
canvas2 = SceneCanvas(keys='interactive', title='Scene Canvas', show=True)
v = canvas2.central_widget.add_view(margin=10)
v.border_color = (1, 1, 1, 1)
v.bgcolor = (0.3, 0.3, 0.3, 1)
v.camera = 'panzoom'
line2 = Line(pos, parent=v.scene)
def mouse(ev):
print(ev)
v.events.mouse_press.connect(mouse)
if __name__ == '__main__':
if sys.flags.interactive != 1:
app.run()
| # -*- coding: utf-8 -*-
# vispy: gallery 10
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import sys
import numpy as np
from vispy import app, gloo, visuals
from vispy.visuals.filters import Clipper, ColorFilter
from vispy.visuals.shaders import MultiProgram
from vispy.visuals.collections import PointCollection
from vispy.visuals.transforms import STTransform
from vispy.scene import SceneCanvas
from vispy.scene.visuals import create_visual_node
class LineVisual(visuals.Visual):
"""Example of a very simple GL-line visual.
This shows the minimal set of methods that need to be reimplemented to
make a new visual class.
"""
def __init__(self, pos=None, color=(1, 1, 1, 1)):
vcode = """
attribute vec2 a_pos;
void main() {
gl_Position = $transform(vec4(a_pos, 0., 1.));
gl_PointSize = 10.;
}
"""
fcode = """
void main() {
gl_FragColor = $color;
}
"""
visuals.Visual.__init__(self, vcode=vcode, fcode=fcode)
self.pos_buf = gloo.VertexBuffer()
# The Visual superclass contains a MultiProgram, which is an object
# that behaves like a normal shader program (you can assign shader
# code, upload values, set template variables, etc.) but internally
# manages multiple ModularProgram instances, one per view.
# The MultiProgram is accessed via the `shared_program` property, so
# the following modifications to the program will be applied to all
# views:
self.shared_program['a_pos'] = self.pos_buf
self.shared_program.frag['color'] = color
self._need_upload = False
# Visual keeps track of draw mode, index buffer, and GL state. These
# are shared between all views.
self._draw_mode = 'line_strip'
self.set_gl_state('translucent', depth_test=False)
if pos is not None:
self.set_data(pos)
def set_data(self, pos):
self._pos = pos
self._need_upload = True
def _prepare_transforms(self, view=None):
view.view_program.vert['transform'] = view.transforms.get_transform()
def _prepare_draw(self, view=None):
"""This method is called immediately before each draw.
The *view* argument indicates which view is about to be drawn.
"""
if self._need_upload:
# Note that pos_buf is shared between all views, so we have no need
# to use the *view* argument in this example. This will be true
# for most visuals.
self.pos_buf.set_data(self._pos)
self._need_upload = False
class PointVisual(LineVisual):
"""Another simple visual class.
Due to the simplicity of these example classes, it was only necessary to
subclass from LineVisual and set the draw mode to 'points'. A more
fully-featured PointVisual class might not follow this approach.
"""
def __init__(self, pos=None, color=(1, 1, 1, 1)):
LineVisual.__init__(self, pos, color)
self._draw_mode = 'points'
class PlotLineVisual(visuals.CompoundVisual):
"""An example compound visual that draws lines and points.
To the user, the compound visual behaves exactly like a normal visual--it
has a transform system, draw() and bounds() methods, etc. Internally, the
compound visual automatically manages proxying these transforms and methods
to its sub-visuals.
"""
def __init__(self, pos=None, line_color=(1, 1, 1, 1),
point_color=(1, 1, 1, 1)):
self._line = LineVisual(pos, color=line_color)
self._point = PointVisual(pos, color=point_color)
visuals.CompoundVisual.__init__(self, [self._line, self._point])
class PointCollectionVisual(visuals.Visual):
"""Thin wrapper around a point collection.
Note: This is currently broken!
"""
def __init__(self):
prog = MultiProgram(vcode='', fcode='')
self.points = PointCollection("agg", color="shared", program=prog)
visuals.Visual.__init__(self, program=prog)
def _prepare_draw(self, view):
if self.points._need_update:
self.points._update()
self._draw_mode = self.points._mode
self._index_buffer = self.points._indices_buffer
def append(self, *args, **kwargs):
self.points.append(*args, **kwargs)
def _prepare_transforms(self, view=None):
pass
@property
def color(self):
return self.points['color']
@color.setter
def color(self, c):
self.points['color'] = c
class PanZoomTransform(STTransform):
def __init__(self, canvas=None, aspect=None, **kwargs):
self._aspect = aspect
self.attach(canvas)
STTransform.__init__(self, **kwargs)
def attach(self, canvas):
""" Attach this tranform to a canvas """
self._canvas = canvas
canvas.events.mouse_wheel.connect(self.on_mouse_wheel)
canvas.events.mouse_move.connect(self.on_mouse_move)
def on_mouse_move(self, event):
if event.is_dragging:
dxy = event.pos - event.last_event.pos
button = event.press_event.button
if button == 1:
self.move(dxy)
elif button == 2:
center = event.press_event.pos
if self._aspect is None:
self.zoom(np.exp(dxy * (0.01, -0.01)), center)
else:
s = dxy[1] * -0.01
self.zoom(np.exp(np.array([s, s])), center)
def on_mouse_wheel(self, event):
self.zoom(np.exp(event.delta * (0.01, -0.01)), event.pos)
canvas = app.Canvas(keys='interactive', size=(900, 600), show=True,
title="Visual Canvas")
pos = np.random.normal(size=(1000, 2), loc=0, scale=50).astype('float32')
pos[0] = [0, 0]
# Make a line visual
line = LineVisual(pos=pos)
line.transforms.canvas = canvas
line.transform = STTransform(scale=(2, 1), translate=(20, 20))
panzoom = PanZoomTransform(canvas)
line.transforms.scene_transform = panzoom
panzoom.changed.connect(lambda ev: canvas.update())
# Attach color filter to all views (current and future) of the visual
line.attach(ColorFilter((1, 1, 0.5, 0.7)))
# Attach a clipper just to this view. The Clipper filter requires a
# transform that maps from the framebuffer coordinate system to the
# clipping coordinates.
tr = line.transforms.get_transform('framebuffer', 'canvas')
line.attach(Clipper((20, 20, 260, 260), transform=tr), view=line)
# Make a view of the line that will draw its shadow
shadow = line.view()
shadow.transforms.canvas = canvas
shadow.transform = STTransform(scale=(2, 1), translate=(25, 25))
shadow.transforms.scene_transform = panzoom
shadow.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow)
tr = shadow.transforms.get_transform('framebuffer', 'canvas')
shadow.attach(Clipper((20, 20, 260, 260), transform=tr), view=shadow)
# And make a second view of the line with different clipping bounds
view = line.view()
view.transforms.canvas = canvas
view.transform = STTransform(scale=(2, 0.5), translate=(450, 150))
tr = view.transforms.get_transform('framebuffer', 'canvas')
view.attach(Clipper((320, 20, 260, 260), transform=tr), view=view)
# Make a compound visual
plot = PlotLineVisual(pos, (0.5, 1, 0.5, 0.2), (0.5, 1, 1, 0.3))
plot.transforms.canvas = canvas
plot.transform = STTransform(translate=(80, 450), scale=(1.5, 1))
tr = plot.transforms.get_transform('framebuffer', 'canvas')
plot.attach(Clipper((20, 320, 260, 260), transform=tr), view=plot)
# And make a view on the compound
view2 = plot.view()
view2.transforms.canvas = canvas
view2.transform = STTransform(scale=(1.5, 1), translate=(450, 400))
tr = view2.transforms.get_transform('framebuffer', 'canvas')
view2.attach(Clipper((320, 320, 260, 260), transform=tr), view=view2)
# And a shadow for the view
shadow2 = plot.view()
shadow2.transforms.canvas = canvas
shadow2.transform = STTransform(scale=(1.5, 1), translate=(455, 405))
shadow2.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow2)
tr = shadow2.transforms.get_transform('framebuffer', 'canvas')
shadow2.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2)
# Example of a collection visual
collection = PointCollectionVisual()
collection.transforms.canvas = canvas
collection.transform = STTransform(translate=(750, 150))
collection.append(np.random.normal(loc=0, scale=20, size=(10000, 3)),
itemsize=5000)
collection.color = (1, 0.5, 0.5, 1), (0.5, 0.5, 1, 1)
shadow3 = collection.view()
shadow3.transforms.canvas = canvas
shadow3.transform = STTransform(scale=(1, 1), translate=(752, 152))
shadow3.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow3)
# tr = shadow3.transforms.get_transform('framebuffer', 'canvas')
# shadow3.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2)
order = [shadow, line, view, plot, shadow2, view2, shadow3, collection]
@canvas.connect
def on_draw(event):
canvas.context.clear((0.3, 0.3, 0.3, 1.0))
for v in order:
v.draw()
def on_resize(event):
# Set canvas viewport and reconfigure visual transforms to match.
vp = (0, 0, canvas.physical_size[0], canvas.physical_size[1])
canvas.context.set_viewport(*vp)
for v in order:
v.transforms.configure(canvas=canvas, viewport=vp)
canvas.events.resize.connect(on_resize)
on_resize(None)
Line = create_visual_node(LineVisual)
canvas2 = SceneCanvas(keys='interactive', title='Scene Canvas', show=True)
v = canvas2.central_widget.add_view(margin=10)
v.border_color = (1, 1, 1, 1)
v.bgcolor = (0.3, 0.3, 0.3, 1)
v.camera = 'panzoom'
line2 = Line(pos, parent=v.scene)
def mouse(ev):
print(ev)
v.events.mouse_press.connect(mouse)
if __name__ == '__main__':
if sys.flags.interactive != 1:
app.run()
| en | 0.831172 | # -*- coding: utf-8 -*- # vispy: gallery 10 # Copyright (c) Vispy Development Team. All Rights Reserved. # Distributed under the (new) BSD License. See LICENSE.txt for more info. Example of a very simple GL-line visual. This shows the minimal set of methods that need to be reimplemented to make a new visual class. attribute vec2 a_pos; void main() { gl_Position = $transform(vec4(a_pos, 0., 1.)); gl_PointSize = 10.; } void main() { gl_FragColor = $color; } # The Visual superclass contains a MultiProgram, which is an object # that behaves like a normal shader program (you can assign shader # code, upload values, set template variables, etc.) but internally # manages multiple ModularProgram instances, one per view. # The MultiProgram is accessed via the `shared_program` property, so # the following modifications to the program will be applied to all # views: # Visual keeps track of draw mode, index buffer, and GL state. These # are shared between all views. This method is called immediately before each draw. The *view* argument indicates which view is about to be drawn. # Note that pos_buf is shared between all views, so we have no need # to use the *view* argument in this example. This will be true # for most visuals. Another simple visual class. Due to the simplicity of these example classes, it was only necessary to subclass from LineVisual and set the draw mode to 'points'. A more fully-featured PointVisual class might not follow this approach. An example compound visual that draws lines and points. To the user, the compound visual behaves exactly like a normal visual--it has a transform system, draw() and bounds() methods, etc. Internally, the compound visual automatically manages proxying these transforms and methods to its sub-visuals. Thin wrapper around a point collection. Note: This is currently broken! Attach this tranform to a canvas # Make a line visual # Attach color filter to all views (current and future) of the visual # Attach a clipper just to this view. The Clipper filter requires a # transform that maps from the framebuffer coordinate system to the # clipping coordinates. # Make a view of the line that will draw its shadow # And make a second view of the line with different clipping bounds # Make a compound visual # And make a view on the compound # And a shadow for the view # Example of a collection visual # tr = shadow3.transforms.get_transform('framebuffer', 'canvas') # shadow3.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2) # Set canvas viewport and reconfigure visual transforms to match. | 2.312623 | 2 |
h1st/tests/core/test_schemas_inferrer.py | Mou-Ikkai/h1st | 2 | 8175 | from unittest import TestCase
from datetime import datetime
import pyarrow as pa
import numpy as np
import pandas as pd
from h1st.schema import SchemaInferrer
class SchemaInferrerTestCase(TestCase):
def test_infer_python(self):
inferrer = SchemaInferrer()
self.assertEqual(inferrer.infer_schema(1), pa.int64())
self.assertEqual(inferrer.infer_schema(1.1), pa.float64())
self.assertEqual(inferrer.infer_schema({
'test1': 1,
'test2': "hello",
'test3': b"hello",
'today': datetime.now(),
}), {
'type': dict,
'fields': {
'test1': pa.int64(),
'test2': pa.string(),
'test3': pa.binary(),
'today': pa.date64(),
}
})
self.assertEqual(inferrer.infer_schema((
1, 2, 3
)), pa.list_(pa.int64()))
self.assertEqual(inferrer.infer_schema((
1.2, 1.3, 1.4
)), pa.list_(pa.float64()))
table = pa.Table.from_arrays(
[pa.array([1, 2, 3]), pa.array(["a", "b", "c"])],
['c1', 'c2']
)
self.assertEqual(inferrer.infer_schema(table), table.schema)
def test_infer_numpy(self):
inferrer = SchemaInferrer()
self.assertEqual(inferrer.infer_schema(np.random.random((100, 28, 28))), {
'type': np.ndarray,
'item': pa.float64(),
'shape': (None, 28, 28)
})
self.assertEqual(inferrer.infer_schema(np.array(["1", "2", "3"])), {
'type': np.ndarray,
'item': pa.string()
})
def test_infer_dataframe(self):
inferrer = SchemaInferrer()
df = pd.DataFrame({
'f1': [1, 2, 3],
'f2': ['a', 'b', 'c'],
'f3': [0.1, 0.2, 0.9]
})
self.assertEqual(inferrer.infer_schema(df), {
'type': pd.DataFrame,
'fields': {
'f1': pa.int64(),
'f2': pa.string(),
'f3': pa.float64()
}
})
df = pd.DataFrame({
'Timestamp': [1.1, 2.2, 3.1],
'CarSpeed': [0.1, 0.2, 0.9],
'Gx': [0.1, 0.2, 0.9],
'Gy': [0.1, 0.2, 0.9],
'Label': ['1', '0', '1']
})
self.assertEqual(inferrer.infer_schema(df), {
'type': pd.DataFrame,
'fields': {
'Timestamp': pa.float64(),
'CarSpeed': pa.float64(),
'Gx': pa.float64(),
'Gy': pa.float64(),
'Label': pa.string(),
}
})
self.assertEqual(inferrer.infer_schema(pd.Series([1, 2, 3])), {
'type': pd.Series,
'item': pa.int64()
})
def test_infer_dict(self):
inferrer = SchemaInferrer()
self.assertEqual(inferrer.infer_schema({
'test': 123,
}), {
'type': dict,
'fields': {
'test': pa.int64(),
}
})
self.assertEqual(inferrer.infer_schema({
'test': 123,
'indices': [1, 2, 3]
}), {
'type': dict,
'fields': {
'test': pa.int64(),
'indices': pa.list_(pa.int64())
}
})
self.assertEqual(inferrer.infer_schema({
'results': pd.DataFrame({
'CarSpeed': [0, 1, 2],
'Label': ['a', 'b', 'c']
})
}), {
'type': dict,
'fields': {
'results': {
'type': pd.DataFrame,
'fields': {
'CarSpeed': pa.int64(),
'Label': pa.string(),
}
}
}
})
def test_infer_list(self):
inferrer = SchemaInferrer()
self.assertEqual(inferrer.infer_schema([
{'test': 123},
{'test': 345},
]), {
'type': list,
'item': {
'type': dict,
'fields': {
'test': pa.int64()
}
}
})
| from unittest import TestCase
from datetime import datetime
import pyarrow as pa
import numpy as np
import pandas as pd
from h1st.schema import SchemaInferrer
class SchemaInferrerTestCase(TestCase):
def test_infer_python(self):
inferrer = SchemaInferrer()
self.assertEqual(inferrer.infer_schema(1), pa.int64())
self.assertEqual(inferrer.infer_schema(1.1), pa.float64())
self.assertEqual(inferrer.infer_schema({
'test1': 1,
'test2': "hello",
'test3': b"hello",
'today': datetime.now(),
}), {
'type': dict,
'fields': {
'test1': pa.int64(),
'test2': pa.string(),
'test3': pa.binary(),
'today': pa.date64(),
}
})
self.assertEqual(inferrer.infer_schema((
1, 2, 3
)), pa.list_(pa.int64()))
self.assertEqual(inferrer.infer_schema((
1.2, 1.3, 1.4
)), pa.list_(pa.float64()))
table = pa.Table.from_arrays(
[pa.array([1, 2, 3]), pa.array(["a", "b", "c"])],
['c1', 'c2']
)
self.assertEqual(inferrer.infer_schema(table), table.schema)
def test_infer_numpy(self):
inferrer = SchemaInferrer()
self.assertEqual(inferrer.infer_schema(np.random.random((100, 28, 28))), {
'type': np.ndarray,
'item': pa.float64(),
'shape': (None, 28, 28)
})
self.assertEqual(inferrer.infer_schema(np.array(["1", "2", "3"])), {
'type': np.ndarray,
'item': pa.string()
})
def test_infer_dataframe(self):
inferrer = SchemaInferrer()
df = pd.DataFrame({
'f1': [1, 2, 3],
'f2': ['a', 'b', 'c'],
'f3': [0.1, 0.2, 0.9]
})
self.assertEqual(inferrer.infer_schema(df), {
'type': pd.DataFrame,
'fields': {
'f1': pa.int64(),
'f2': pa.string(),
'f3': pa.float64()
}
})
df = pd.DataFrame({
'Timestamp': [1.1, 2.2, 3.1],
'CarSpeed': [0.1, 0.2, 0.9],
'Gx': [0.1, 0.2, 0.9],
'Gy': [0.1, 0.2, 0.9],
'Label': ['1', '0', '1']
})
self.assertEqual(inferrer.infer_schema(df), {
'type': pd.DataFrame,
'fields': {
'Timestamp': pa.float64(),
'CarSpeed': pa.float64(),
'Gx': pa.float64(),
'Gy': pa.float64(),
'Label': pa.string(),
}
})
self.assertEqual(inferrer.infer_schema(pd.Series([1, 2, 3])), {
'type': pd.Series,
'item': pa.int64()
})
def test_infer_dict(self):
inferrer = SchemaInferrer()
self.assertEqual(inferrer.infer_schema({
'test': 123,
}), {
'type': dict,
'fields': {
'test': pa.int64(),
}
})
self.assertEqual(inferrer.infer_schema({
'test': 123,
'indices': [1, 2, 3]
}), {
'type': dict,
'fields': {
'test': pa.int64(),
'indices': pa.list_(pa.int64())
}
})
self.assertEqual(inferrer.infer_schema({
'results': pd.DataFrame({
'CarSpeed': [0, 1, 2],
'Label': ['a', 'b', 'c']
})
}), {
'type': dict,
'fields': {
'results': {
'type': pd.DataFrame,
'fields': {
'CarSpeed': pa.int64(),
'Label': pa.string(),
}
}
}
})
def test_infer_list(self):
inferrer = SchemaInferrer()
self.assertEqual(inferrer.infer_schema([
{'test': 123},
{'test': 345},
]), {
'type': list,
'item': {
'type': dict,
'fields': {
'test': pa.int64()
}
}
})
| none | 1 | 2.654358 | 3 |
|
c_core_librairies/exercise_a.py | nicolasessisbreton/pyzehe | 1 | 8176 | """
# refactoring
Refactoring is the key to successful projects.
Refactor:
1) annuity_factor such that:
conversion to integer is handled,
no extra printing
2) policy_book into a class such that:
a function generates the book and the premium
stats and visualization functions are available
3) book_report such that:
it uses all the previous improvements
""" | """
# refactoring
Refactoring is the key to successful projects.
Refactor:
1) annuity_factor such that:
conversion to integer is handled,
no extra printing
2) policy_book into a class such that:
a function generates the book and the premium
stats and visualization functions are available
3) book_report such that:
it uses all the previous improvements
""" | en | 0.875689 | # refactoring Refactoring is the key to successful projects. Refactor: 1) annuity_factor such that: conversion to integer is handled, no extra printing 2) policy_book into a class such that: a function generates the book and the premium stats and visualization functions are available 3) book_report such that: it uses all the previous improvements | 2.19889 | 2 |
util/util.py | harshitAgr/vess2ret | 111 | 8177 | """Auxiliary methods."""
import os
import json
from errno import EEXIST
import numpy as np
import seaborn as sns
import cPickle as pickle
import matplotlib.pyplot as plt
sns.set()
DEFAULT_LOG_DIR = 'log'
ATOB_WEIGHTS_FILE = 'atob_weights.h5'
D_WEIGHTS_FILE = 'd_weights.h5'
class MyDict(dict):
"""
Dictionary that allows to access elements with dot notation.
ex:
>> d = MyDict({'key': 'val'})
>> d.key
'val'
>> d.key2 = 'val2'
>> d
{'key2': 'val2', 'key': 'val'}
"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
def convert_to_rgb(img, is_binary=False):
"""Given an image, make sure it has 3 channels and that it is between 0 and 1."""
if len(img.shape) != 3:
raise Exception("""Image must have 3 dimensions (channels x height x width). """
"""Given {0}""".format(len(img.shape)))
img_ch, _, _ = img.shape
if img_ch != 3 and img_ch != 1:
raise Exception("""Unsupported number of channels. """
"""Must be 1 or 3, given {0}.""".format(img_ch))
imgp = img
if img_ch == 1:
imgp = np.repeat(img, 3, axis=0)
if not is_binary:
imgp = imgp * 127.5 + 127.5
imgp /= 255.
return np.clip(imgp.transpose((1, 2, 0)), 0, 1)
def compose_imgs(a, b, is_a_binary=True, is_b_binary=False):
"""Place a and b side by side to be plotted."""
ap = convert_to_rgb(a, is_binary=is_a_binary)
bp = convert_to_rgb(b, is_binary=is_b_binary)
if ap.shape != bp.shape:
raise Exception("""A and B must have the same size. """
"""{0} != {1}""".format(ap.shape, bp.shape))
# ap.shape and bp.shape must have the same size here
h, w, ch = ap.shape
composed = np.zeros((h, 2*w, ch))
composed[:, :w, :] = ap
composed[:, w:, :] = bp
return composed
def get_log_dir(log_dir, expt_name):
"""Compose the log_dir with the experiment name."""
if log_dir is None:
raise Exception('log_dir can not be None.')
if expt_name is not None:
return os.path.join(log_dir, expt_name)
return log_dir
def mkdir(mypath):
"""Create a directory if it does not exist."""
try:
os.makedirs(mypath)
except OSError as exc:
if exc.errno == EEXIST and os.path.isdir(mypath):
pass
else:
raise
def create_expt_dir(params):
"""Create the experiment directory and return it."""
expt_dir = get_log_dir(params.log_dir, params.expt_name)
# Create directories if they do not exist
mkdir(params.log_dir)
mkdir(expt_dir)
# Save the parameters
json.dump(params, open(os.path.join(expt_dir, 'params.json'), 'wb'),
indent=4, sort_keys=True)
return expt_dir
def plot_loss(loss, label, filename, log_dir):
"""Plot a loss function and save it in a file."""
plt.figure(figsize=(5, 4))
plt.plot(loss, label=label)
plt.legend()
plt.savefig(os.path.join(log_dir, filename))
plt.clf()
def log(losses, atob, it_val, N=4, log_dir=DEFAULT_LOG_DIR, expt_name=None,
is_a_binary=True, is_b_binary=False):
"""Log losses and atob results."""
log_dir = get_log_dir(log_dir, expt_name)
# Save the losses for further inspection
pickle.dump(losses, open(os.path.join(log_dir, 'losses.pkl'), 'wb'))
###########################################################################
# PLOT THE LOSSES #
###########################################################################
plot_loss(losses['d'], 'discriminator', 'd_loss.png', log_dir)
plot_loss(losses['d_val'], 'discriminator validation', 'd_val_loss.png', log_dir)
plot_loss(losses['p2p'], 'Pix2Pix', 'p2p_loss.png', log_dir)
plot_loss(losses['p2p_val'], 'Pix2Pix validation', 'p2p_val_loss.png', log_dir)
###########################################################################
# PLOT THE A->B RESULTS #
###########################################################################
plt.figure(figsize=(10, 6))
for i in range(N*N):
a, _ = next(it_val)
bp = atob.predict(a)
img = compose_imgs(a[0], bp[0], is_a_binary=is_a_binary, is_b_binary=is_b_binary)
plt.subplot(N, N, i+1)
plt.imshow(img)
plt.axis('off')
plt.savefig(os.path.join(log_dir, 'atob.png'))
plt.clf()
# Make sure all the figures are closed.
plt.close('all')
def save_weights(models, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Save the weights of the models into a file."""
log_dir = get_log_dir(log_dir, expt_name)
models.atob.save_weights(os.path.join(log_dir, ATOB_WEIGHTS_FILE), overwrite=True)
models.d.save_weights(os.path.join(log_dir, D_WEIGHTS_FILE), overwrite=True)
def load_weights(atob, d, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the weights into the corresponding models."""
log_dir = get_log_dir(log_dir, expt_name)
atob.load_weights(os.path.join(log_dir, ATOB_WEIGHTS_FILE))
d.load_weights(os.path.join(log_dir, D_WEIGHTS_FILE))
def load_weights_of(m, weights_file, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the weights of the model m."""
log_dir = get_log_dir(log_dir, expt_name)
m.load_weights(os.path.join(log_dir, weights_file))
def load_losses(log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the losses of the given experiment."""
log_dir = get_log_dir(log_dir, expt_name)
losses = pickle.load(open(os.path.join(log_dir, 'losses.pkl'), 'rb'))
return losses
def load_params(params):
"""
Load the parameters of an experiment and return them.
The params passed as argument will be merged with the new params dict.
If there is a conflict with a key, the params passed as argument prevails.
"""
expt_dir = get_log_dir(params.log_dir, params.expt_name)
expt_params = json.load(open(os.path.join(expt_dir, 'params.json'), 'rb'))
# Update the loaded parameters with the current parameters. This will
# override conflicting keys as expected.
expt_params.update(params)
return expt_params
| """Auxiliary methods."""
import os
import json
from errno import EEXIST
import numpy as np
import seaborn as sns
import cPickle as pickle
import matplotlib.pyplot as plt
sns.set()
DEFAULT_LOG_DIR = 'log'
ATOB_WEIGHTS_FILE = 'atob_weights.h5'
D_WEIGHTS_FILE = 'd_weights.h5'
class MyDict(dict):
"""
Dictionary that allows to access elements with dot notation.
ex:
>> d = MyDict({'key': 'val'})
>> d.key
'val'
>> d.key2 = 'val2'
>> d
{'key2': 'val2', 'key': 'val'}
"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
def convert_to_rgb(img, is_binary=False):
"""Given an image, make sure it has 3 channels and that it is between 0 and 1."""
if len(img.shape) != 3:
raise Exception("""Image must have 3 dimensions (channels x height x width). """
"""Given {0}""".format(len(img.shape)))
img_ch, _, _ = img.shape
if img_ch != 3 and img_ch != 1:
raise Exception("""Unsupported number of channels. """
"""Must be 1 or 3, given {0}.""".format(img_ch))
imgp = img
if img_ch == 1:
imgp = np.repeat(img, 3, axis=0)
if not is_binary:
imgp = imgp * 127.5 + 127.5
imgp /= 255.
return np.clip(imgp.transpose((1, 2, 0)), 0, 1)
def compose_imgs(a, b, is_a_binary=True, is_b_binary=False):
"""Place a and b side by side to be plotted."""
ap = convert_to_rgb(a, is_binary=is_a_binary)
bp = convert_to_rgb(b, is_binary=is_b_binary)
if ap.shape != bp.shape:
raise Exception("""A and B must have the same size. """
"""{0} != {1}""".format(ap.shape, bp.shape))
# ap.shape and bp.shape must have the same size here
h, w, ch = ap.shape
composed = np.zeros((h, 2*w, ch))
composed[:, :w, :] = ap
composed[:, w:, :] = bp
return composed
def get_log_dir(log_dir, expt_name):
"""Compose the log_dir with the experiment name."""
if log_dir is None:
raise Exception('log_dir can not be None.')
if expt_name is not None:
return os.path.join(log_dir, expt_name)
return log_dir
def mkdir(mypath):
"""Create a directory if it does not exist."""
try:
os.makedirs(mypath)
except OSError as exc:
if exc.errno == EEXIST and os.path.isdir(mypath):
pass
else:
raise
def create_expt_dir(params):
"""Create the experiment directory and return it."""
expt_dir = get_log_dir(params.log_dir, params.expt_name)
# Create directories if they do not exist
mkdir(params.log_dir)
mkdir(expt_dir)
# Save the parameters
json.dump(params, open(os.path.join(expt_dir, 'params.json'), 'wb'),
indent=4, sort_keys=True)
return expt_dir
def plot_loss(loss, label, filename, log_dir):
"""Plot a loss function and save it in a file."""
plt.figure(figsize=(5, 4))
plt.plot(loss, label=label)
plt.legend()
plt.savefig(os.path.join(log_dir, filename))
plt.clf()
def log(losses, atob, it_val, N=4, log_dir=DEFAULT_LOG_DIR, expt_name=None,
is_a_binary=True, is_b_binary=False):
"""Log losses and atob results."""
log_dir = get_log_dir(log_dir, expt_name)
# Save the losses for further inspection
pickle.dump(losses, open(os.path.join(log_dir, 'losses.pkl'), 'wb'))
###########################################################################
# PLOT THE LOSSES #
###########################################################################
plot_loss(losses['d'], 'discriminator', 'd_loss.png', log_dir)
plot_loss(losses['d_val'], 'discriminator validation', 'd_val_loss.png', log_dir)
plot_loss(losses['p2p'], 'Pix2Pix', 'p2p_loss.png', log_dir)
plot_loss(losses['p2p_val'], 'Pix2Pix validation', 'p2p_val_loss.png', log_dir)
###########################################################################
# PLOT THE A->B RESULTS #
###########################################################################
plt.figure(figsize=(10, 6))
for i in range(N*N):
a, _ = next(it_val)
bp = atob.predict(a)
img = compose_imgs(a[0], bp[0], is_a_binary=is_a_binary, is_b_binary=is_b_binary)
plt.subplot(N, N, i+1)
plt.imshow(img)
plt.axis('off')
plt.savefig(os.path.join(log_dir, 'atob.png'))
plt.clf()
# Make sure all the figures are closed.
plt.close('all')
def save_weights(models, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Save the weights of the models into a file."""
log_dir = get_log_dir(log_dir, expt_name)
models.atob.save_weights(os.path.join(log_dir, ATOB_WEIGHTS_FILE), overwrite=True)
models.d.save_weights(os.path.join(log_dir, D_WEIGHTS_FILE), overwrite=True)
def load_weights(atob, d, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the weights into the corresponding models."""
log_dir = get_log_dir(log_dir, expt_name)
atob.load_weights(os.path.join(log_dir, ATOB_WEIGHTS_FILE))
d.load_weights(os.path.join(log_dir, D_WEIGHTS_FILE))
def load_weights_of(m, weights_file, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the weights of the model m."""
log_dir = get_log_dir(log_dir, expt_name)
m.load_weights(os.path.join(log_dir, weights_file))
def load_losses(log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the losses of the given experiment."""
log_dir = get_log_dir(log_dir, expt_name)
losses = pickle.load(open(os.path.join(log_dir, 'losses.pkl'), 'rb'))
return losses
def load_params(params):
"""
Load the parameters of an experiment and return them.
The params passed as argument will be merged with the new params dict.
If there is a conflict with a key, the params passed as argument prevails.
"""
expt_dir = get_log_dir(params.log_dir, params.expt_name)
expt_params = json.load(open(os.path.join(expt_dir, 'params.json'), 'rb'))
# Update the loaded parameters with the current parameters. This will
# override conflicting keys as expected.
expt_params.update(params)
return expt_params
| en | 0.683507 | Auxiliary methods. Dictionary that allows to access elements with dot notation. ex: >> d = MyDict({'key': 'val'}) >> d.key 'val' >> d.key2 = 'val2' >> d {'key2': 'val2', 'key': 'val'} Given an image, make sure it has 3 channels and that it is between 0 and 1. Image must have 3 dimensions (channels x height x width). Given {0} Unsupported number of channels. Must be 1 or 3, given {0}. Place a and b side by side to be plotted. A and B must have the same size. {0} != {1} # ap.shape and bp.shape must have the same size here Compose the log_dir with the experiment name. Create a directory if it does not exist. Create the experiment directory and return it. # Create directories if they do not exist # Save the parameters Plot a loss function and save it in a file. Log losses and atob results. # Save the losses for further inspection ########################################################################### # PLOT THE LOSSES # ########################################################################### ########################################################################### # PLOT THE A->B RESULTS # ########################################################################### # Make sure all the figures are closed. Save the weights of the models into a file. Load the weights into the corresponding models. Load the weights of the model m. Load the losses of the given experiment. Load the parameters of an experiment and return them. The params passed as argument will be merged with the new params dict. If there is a conflict with a key, the params passed as argument prevails. # Update the loaded parameters with the current parameters. This will # override conflicting keys as expected. | 2.693789 | 3 |
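A minimal usage sketch for the experiment helpers above, assuming Python 2 (matching the cPickle import) and that this file is importable as a module named util; the module name and parameter values are illustrative assumptions, not part of the original file.
from util import MyDict, create_expt_dir, load_params
params = MyDict({'log_dir': 'log', 'expt_name': 'facades_run1'})  # dot-access dict
expt_dir = create_expt_dir(params)    # creates log/facades_run1 and writes params.json
params = MyDict(load_params(params))  # reload saved params; current values take precedence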
services/apiRequests.py | CakeCrusher/voon-video_processing | 0 | 8178 | from github import Github
def parseGithubURL(url):
splitURL = url.split('/')
owner = splitURL[3]
repo = splitURL[4]
return {
"owner": owner,
"repo": repo
}
def fetchRepoFiles(owner, repo):
files = []
g = Github('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')
repo = g.get_repo(f'{owner}/{repo}')
contents = repo.get_contents('')
while contents:
file_content = contents.pop(0)
if file_content.type == 'dir':
contents.extend(repo.get_contents(file_content.path))
else:
files.append(file_content.path)
return files
# parsedUrl = parseGithubURL('https://github.com/CakeCrusher/restock_emailer')
# filePaths = fetchRepoFiles(parsedUrl['owner'], parsedUrl['repo'])
# files = [path.split('/')[-1] for path in filePaths]
# print(files)
| from github import Github
def parseGithubURL(url):
splitURL = url.split('/')
owner = splitURL[3]
repo = splitURL[4]
return {
"owner": owner,
"repo": repo
}
def fetchRepoFiles(owner, repo):
files = []
g = Github('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')
repo = g.get_repo(f'{owner}/{repo}')
contents = repo.get_contents('')
while contents:
file_content = contents.pop(0)
if file_content.type == 'dir':
contents.extend(repo.get_contents(file_content.path))
else:
files.append(file_content.path)
return files
# parsedUrl = parseGithubURL('https://github.com/CakeCrusher/restock_emailer')
# filePaths = fetchRepoFiles(parsedUrl['owner'], parsedUrl['repo'])
# files = [path.split('/')[-1] for path in filePaths]
# print(files)
| en | 0.315714 | # parsedUrl = parseGithubURL('https://github.com/CakeCrusher/restock_emailer') # filePaths = fetchRepoFiles(parsedUrl['owner'], parsedUrl['repo']) # files = [path.split('/')[-1] for path in filePaths] # print(files) | 2.843209 | 3 |
utils/tricks.py | HouchangX-AI/Dialog-Solution | 3 | 8179 | <gh_stars>1-10
#-*- coding: utf-8 -*-
import codecs
import random
from utils.global_names import GlobalNames, get_file_path
def modify_tokens(tokens):
new_tokens = []
pos = 0
len_ = len(tokens)
while pos < len_:
if tokens[pos] == "[":
if pos+2 < len_ and tokens[pos+2] == "]":
token = "".join(tokens[pos:pos+3])
new_tokens.append(token)
pos += 3
elif pos+3 < len_ and tokens[pos+3] == "]":
if tokens[pos+2].isdigit():
tokens[pos+2] = "_digit_"
token = "".join(tokens[pos:pos+4])
new_tokens.append(token)
pos += 4
else:
pos += 1
else:
new_tokens.append(tokens[pos])
pos += 1
return new_tokens
def length_weight(corpus, orders, length_limit=6):
for idx, _ in enumerate(orders):
if len(corpus[idx]) > length_limit:
return idx
return 0
| #-*- coding: utf-8 -*-
import codecs
import random
from utils.global_names import GlobalNames, get_file_path
def modify_tokens(tokens):
new_tokens = []
pos = 0
len_ = len(tokens)
while pos < len_:
if tokens[pos] == "[":
if pos+2 < len_ and tokens[pos+2] == "]":
token = "".join(tokens[pos:pos+3])
new_tokens.append(token)
pos += 3
elif pos+3 < len_ and tokens[pos+3] == "]":
if tokens[pos+2].isdigit():
tokens[pos+2] = "_digit_"
token = "".join(tokens[pos:pos+4])
new_tokens.append(token)
pos += 4
else:
pos += 1
else:
new_tokens.append(tokens[pos])
pos += 1
return new_tokens
def length_weight(corpus, orders, length_limit=6):
for idx, _ in enumerate(orders):
if len(corpus[idx]) > length_limit:
return idx
return 0 | en | 0.636498 | #-*- coding: utf-8 -*- | 2.727939 | 3 |
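A small worked example of modify_tokens, inferred from the code above: bracketed slot tokens are merged back into single tokens, and a digit inside a bracket is replaced by '_digit_'. The token list below is an illustrative assumption.
tokens = ['I', 'want', '[', 'price', '12', ']', 'and', '[', 'color', ']']
print(modify_tokens(tokens))
# -> ['I', 'want', '[price_digit_]', 'and', '[color]']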
test/functional/test_device.py | Jagadambass/Graph-Neural-Networks | 0 | 8180 | <gh_stars>0
from graphgallery.functional import device
import tensorflow as tf
import torch
def test_device():
# how about other backend?
# tf
assert isinstance(device("cpu", "tf"), str)
assert device() == 'cpu'
assert device("cpu", "tf") == 'CPU'
assert device("cpu", "tf") == 'cpu'
assert device("device/cpu", "tf") == 'cpu'
try:
assert device("gpu", "tf") == 'GPU'
assert device("cuda", "tf") == 'GPU'
except RuntimeError:
pass
    # use distinct names so the imported device() helper is not shadowed
    tf_device = tf.device("cpu")
    assert device(tf_device, "tf") == tf_device._device_name
    # ?? torch
    torch_device = device("cpu", "torch")
    assert isinstance(torch_device, torch.device) and 'cpu' in str(torch_device)
    torch_device = device(backend="torch")
    assert isinstance(torch_device, torch.device) and 'cpu' in str(torch_device)
try:
assert 'cuda' in str(device("gpu", "torch"))
assert 'cuda' in str(device("cuda", "torch"))
except RuntimeError:
pass
    cpu_device = torch.device("cpu")
    assert device(cpu_device, "torch") == cpu_device
if __name__ == "__main__":
test_device()
| from graphgallery.functional import device
import tensorflow as tf
import torch
def test_device():
# how about other backend?
# tf
assert isinstance(device("cpu", "tf"), str)
assert device() == 'cpu'
assert device("cpu", "tf") == 'CPU'
assert device("cpu", "tf") == 'cpu'
assert device("device/cpu", "tf") == 'cpu'
try:
assert device("gpu", "tf") == 'GPU'
assert device("cuda", "tf") == 'GPU'
except RuntimeError:
pass
    # use distinct names so the imported device() helper is not shadowed
    tf_device = tf.device("cpu")
    assert device(tf_device, "tf") == tf_device._device_name
    # ?? torch
    torch_device = device("cpu", "torch")
    assert isinstance(torch_device, torch.device) and 'cpu' in str(torch_device)
    torch_device = device(backend="torch")
    assert isinstance(torch_device, torch.device) and 'cpu' in str(torch_device)
try:
assert 'cuda' in str(device("gpu", "torch"))
assert 'cuda' in str(device("cuda", "torch"))
except RuntimeError:
pass
    cpu_device = torch.device("cpu")
    assert device(cpu_device, "torch") == cpu_device
if __name__ == "__main__":
test_device() | en | 0.708209 | # how about other backend? # tf # ?? torch | 2.669219 | 3 |
py_hanabi/card.py | krinj/hanabi-simulator | 1 | 8181 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
A card (duh).
"""
import random
import uuid
from enum import Enum
from typing import List
from py_hanabi.settings import CARD_DECK_DISTRIBUTION
__author__ = "<NAME>"
__email__ = "<EMAIL>"
class Color(Enum):
RED = 1
BLUE = 2
GREEN = 3
YELLOW = 4
WHITE = 5
class Card:
def __init__(self, number: int, color: Color):
self._number: int = number
self._color: Color = color
self._id: str = uuid.uuid4().hex
self._hint_number_counter: int = 0
self._hint_color_counter: int = 0
# self._index_hinted: List[int] = []
# self._lone_hinted: List[bool] = []
# According to hints, these are the ones we know it is NOT.
self.not_color: List[Color] = []
self.not_number: List[int] = []
def __repr__(self):
hint_str = ""
if self.hint_received_color:
hint_str += "C"
if self.hint_received_number:
hint_str += "N"
return f"[{self.color} {self.number} {hint_str}]"
def __eq__(self, other: 'Card'):
return self.color == other.color and self.number == other.number
def receive_hint_number(self, number: int):
if number == self.number:
self._hint_number_counter += 1
else:
self.not_number.append(number)
def receive_hint_color(self, color: Color):
if color == self.color:
self._hint_color_counter += 1
else:
self.not_color.append(color)
def remove_hint_number(self, number: int):
if number == self.number:
self._hint_number_counter -= 1
else:
self.not_number.pop()
def remove_hint_color(self, color: Color):
if color == self.color:
self._hint_color_counter -= 1
else:
self.not_color.pop()
@property
def label(self):
return f"{self.number} of {self.get_color_label(self.color)}"
@property
def id(self) -> str:
return self._id
@property
def key(self) -> tuple:
return self.get_key(self.color, self.number)
@staticmethod
def get_key(c: Color, n: int) -> tuple:
return c, n
@property
def number(self) -> int:
return self._number
@property
def color(self) -> Color:
return self._color
@property
def observed_color(self) -> Color:
return None if not self.hint_received_color else self._color
@property
def observed_number(self) -> int:
return None if not self.hint_received_number else self._number
@property
def hint_received_number(self) -> bool:
return self._hint_number_counter > 0
@property
def hint_received_color(self) -> bool:
return self._hint_color_counter > 0
@staticmethod
def generate_deck() -> List['Card']:
""" Generate the starting deck for the game. """
deck: List[Card] = []
for color in Color:
for i in CARD_DECK_DISTRIBUTION:
card = Card(i, color)
deck.append(card)
random.shuffle(deck)
return deck
@staticmethod
def get_color_label(color: Color) -> str:
color_labels = {
Color.BLUE: "Blue",
Color.RED: "Red",
Color.YELLOW: "Yellow",
Color.GREEN: "Green",
Color.WHITE: "White",
}
return color_labels[color]
| # -*- coding: utf-8 -*-
"""
A card (duh).
"""
import random
import uuid
from enum import Enum
from typing import List
from py_hanabi.settings import CARD_DECK_DISTRIBUTION
__author__ = "<NAME>"
__email__ = "<EMAIL>"
class Color(Enum):
RED = 1
BLUE = 2
GREEN = 3
YELLOW = 4
WHITE = 5
class Card:
def __init__(self, number: int, color: Color):
self._number: int = number
self._color: Color = color
self._id: str = uuid.uuid4().hex
self._hint_number_counter: int = 0
self._hint_color_counter: int = 0
# self._index_hinted: List[int] = []
# self._lone_hinted: List[bool] = []
# According to hints, these are the ones we know it is NOT.
self.not_color: List[Color] = []
self.not_number: List[int] = []
def __repr__(self):
hint_str = ""
if self.hint_received_color:
hint_str += "C"
if self.hint_received_number:
hint_str += "N"
return f"[{self.color} {self.number} {hint_str}]"
def __eq__(self, other: 'Card'):
return self.color == other.color and self.number == other.number
def receive_hint_number(self, number: int):
if number == self.number:
self._hint_number_counter += 1
else:
self.not_number.append(number)
def receive_hint_color(self, color: Color):
if color == self.color:
self._hint_color_counter += 1
else:
self.not_color.append(color)
def remove_hint_number(self, number: int):
if number == self.number:
self._hint_number_counter -= 1
else:
self.not_number.pop()
def remove_hint_color(self, color: Color):
if color == self.color:
self._hint_color_counter -= 1
else:
self.not_color.pop()
@property
def label(self):
return f"{self.number} of {self.get_color_label(self.color)}"
@property
def id(self) -> str:
return self._id
@property
def key(self) -> tuple:
return self.get_key(self.color, self.number)
@staticmethod
def get_key(c: Color, n: int) -> tuple:
return c, n
@property
def number(self) -> int:
return self._number
@property
def color(self) -> Color:
return self._color
@property
def observed_color(self) -> Color:
return None if not self.hint_received_color else self._color
@property
def observed_number(self) -> int:
return None if not self.hint_received_number else self._number
@property
def hint_received_number(self) -> bool:
return self._hint_number_counter > 0
@property
def hint_received_color(self) -> bool:
return self._hint_color_counter > 0
@staticmethod
def generate_deck() -> List['Card']:
""" Generate the starting deck for the game. """
deck: List[Card] = []
for color in Color:
for i in CARD_DECK_DISTRIBUTION:
card = Card(i, color)
deck.append(card)
random.shuffle(deck)
return deck
@staticmethod
def get_color_label(color: Color) -> str:
color_labels = {
Color.BLUE: "Blue",
Color.RED: "Red",
Color.YELLOW: "Yellow",
Color.GREEN: "Green",
Color.WHITE: "White",
}
return color_labels[color] | en | 0.89508 | # -*- coding: utf-8 -*- A card (duh). # self._index_hinted: List[int] = [] # self._lone_hinted: List[bool] = [] # According to hints, these are the ones we know it is NOT. Generate the starting deck for the game. | 3.185484 | 3 |
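A brief sketch of how the Card API above could be exercised, assuming the py_hanabi package (including settings.CARD_DECK_DISTRIBUTION) is importable; the printed values depend on the shuffled deck.
from py_hanabi.card import Card, Color
deck = Card.generate_deck()         # full shuffled deck
card = deck[0]
card.receive_hint_color(Color.RED)  # a matching hint increments the counter,
card.receive_hint_number(1)         # a non-matching one is recorded in not_color/not_number
print(card, card.hint_received_color, card.not_number)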
facetools/test/testcases.py | bigsassy/django-facetools | 2 | 8182 | import types
import django.test.testcases
from django.conf import settings
from facetools.models import TestUser
from facetools.common import _create_signed_request
from facetools.test import TestUserNotLoaded
from facetools.signals import sync_facebook_test_user, setup_facebook_test_client
from facetools.common import _get_facetools_test_fixture_name
class FacebookTestCaseMixin(object):
"""
TestCase which makes it possible to test views when the FacebookMiddleware
and SyncFacebookUser middlewares are activated. Must use the Client
attached to this object (i.e. self.client).
"""
facebook_test_user = None
def set_client_signed_request(self, facebook_id, access_token):
"""
Allow code to configure the test client so it has a signed request
of the specified test user for each request
"""
setup_facebook_test_client.send(sender=None, client=self.client, signed_request=_create_signed_request(
settings.FACEBOOK_APPLICATION_SECRET_KEY, facebook_id, oauth_token=access_token))
def _pre_setup(self):
if self.facebook_test_user:
if type(self.facebook_test_user) not in [str, unicode]:
raise Exception("facebook_test_user variable must be a string (found a %s)" % type(self.facebook_test_user))
app_name = get_app_name_from_test_case(type(self).__module__)
facetools_fixture_name = _get_facetools_test_fixture_name(app_name)
if not hasattr(self, 'fixtures'):
self.fixtures = []
if facetools_fixture_name not in self.fixtures:
self.fixtures.append(facetools_fixture_name)
super(FacebookTestCaseMixin, self)._pre_setup()
# Make sure anybody that needs to sync their models loaded from fixtures
# has a chance to do so now that the refreshed user test data is available.
try:
for test_user in TestUser.objects.all():
sync_facebook_test_user.send(sender=None, test_user=test_user)
self.test_user = TestUser.objects.get(name=self.facebook_test_user)
self.set_client_signed_request(self.test_user.facebook_id, self.test_user.access_token)
except TestUser.DoesNotExist:
raise TestUserNotLoaded("Test user %s hasn't been loaded via the %s fixture (did you run sync_facebook_test_users?)" %
(self.facebook_test_user, facetools_fixture_name))
else:
super(FacebookTestCaseMixin, self)._pre_setup()
def get_app_name_from_test_case(module_path_string):
"""
    Gets the Django app from the __module__ attribute of a TestCase in a Django app.
    module_path_string should look something like this: 'facetools_tests.tests.test_test_module'
"""
packages = module_path_string.split(".")
try:
tests_location = packages.index("tests")
except ValueError:
raise ValueError("Couldn't find tests module in %s (are you running this test from tests.py or a tests package in your Django app?)" % module_path_string)
if tests_location == 0:
raise ValueError("Facetools doesn't support Django app's with a name of 'tests', or it failed to find the Django app name out of %s" % module_path_string)
app_name = packages[tests_location - 1]
if app_name not in settings.INSTALLED_APPS:
raise ValueError("Facetools didn't find %s among INSTALLED_APPS. (app name pulled from %s)" % (app_name, module_path_string))
return app_name
# -----------------------------------------------------------------------------
# Test Cases
# -----------------------------------------------------------------------------
class FacebookTransactionTestCase(FacebookTestCaseMixin, django.test.testcases.TransactionTestCase):
def _pre_setup(self):
super(FacebookTransactionTestCase, self)._pre_setup()
class FacebookTestCase(FacebookTestCaseMixin, django.test.testcases.TestCase):
def _pre_setup(self):
super(FacebookTestCase, self)._pre_setup()
if 'LiveServerTestCase' in dir(django.test.testcases):
class FacebookLiveServerTestCase(FacebookTestCaseMixin, django.test.testcases.LiveServerTestCase):
def _pre_setup(self):
super(FacebookLiveServerTestCase, self)._pre_setup()
| import types
import django.test.testcases
from django.conf import settings
from facetools.models import TestUser
from facetools.common import _create_signed_request
from facetools.test import TestUserNotLoaded
from facetools.signals import sync_facebook_test_user, setup_facebook_test_client
from facetools.common import _get_facetools_test_fixture_name
class FacebookTestCaseMixin(object):
"""
TestCase which makes it possible to test views when the FacebookMiddleware
and SyncFacebookUser middlewares are activated. Must use the Client
attached to this object (i.e. self.client).
"""
facebook_test_user = None
def set_client_signed_request(self, facebook_id, access_token):
"""
Allow code to configure the test client so it has a signed request
of the specified test user for each request
"""
setup_facebook_test_client.send(sender=None, client=self.client, signed_request=_create_signed_request(
settings.FACEBOOK_APPLICATION_SECRET_KEY, facebook_id, oauth_token=access_token))
def _pre_setup(self):
if self.facebook_test_user:
if type(self.facebook_test_user) not in [str, unicode]:
raise Exception("facebook_test_user variable must be a string (found a %s)" % type(self.facebook_test_user))
app_name = get_app_name_from_test_case(type(self).__module__)
facetools_fixture_name = _get_facetools_test_fixture_name(app_name)
if not hasattr(self, 'fixtures'):
self.fixtures = []
if facetools_fixture_name not in self.fixtures:
self.fixtures.append(facetools_fixture_name)
super(FacebookTestCaseMixin, self)._pre_setup()
# Make sure anybody that needs to sync their models loaded from fixtures
# has a chance to do so now that the refreshed user test data is available.
try:
for test_user in TestUser.objects.all():
sync_facebook_test_user.send(sender=None, test_user=test_user)
self.test_user = TestUser.objects.get(name=self.facebook_test_user)
self.set_client_signed_request(self.test_user.facebook_id, self.test_user.access_token)
except TestUser.DoesNotExist:
raise TestUserNotLoaded("Test user %s hasn't been loaded via the %s fixture (did you run sync_facebook_test_users?)" %
(self.facebook_test_user, facetools_fixture_name))
else:
super(FacebookTestCaseMixin, self)._pre_setup()
def get_app_name_from_test_case(module_path_string):
"""
    Gets the Django app from the __module__ attribute of a TestCase in a Django app.
    module_path_string should look something like this: 'facetools_tests.tests.test_test_module'
"""
packages = module_path_string.split(".")
try:
tests_location = packages.index("tests")
except ValueError:
raise ValueError("Couldn't find tests module in %s (are you running this test from tests.py or a tests package in your Django app?)" % module_path_string)
if tests_location == 0:
raise ValueError("Facetools doesn't support Django app's with a name of 'tests', or it failed to find the Django app name out of %s" % module_path_string)
app_name = packages[tests_location - 1]
if app_name not in settings.INSTALLED_APPS:
raise ValueError("Facetools didn't find %s among INSTALLED_APPS. (app name pulled from %s)" % (app_name, module_path_string))
return app_name
# -----------------------------------------------------------------------------
# Test Cases
# -----------------------------------------------------------------------------
class FacebookTransactionTestCase(FacebookTestCaseMixin, django.test.testcases.TransactionTestCase):
def _pre_setup(self):
super(FacebookTransactionTestCase, self)._pre_setup()
class FacebookTestCase(FacebookTestCaseMixin, django.test.testcases.TestCase):
def _pre_setup(self):
super(FacebookTestCase, self)._pre_setup()
if 'LiveServerTestCase' in dir(django.test.testcases):
class FacebookLiveServerTestCase(FacebookTestCaseMixin, django.test.testcases.LiveServerTestCase):
def _pre_setup(self):
super(FacebookLiveServerTestCase, self)._pre_setup()
| en | 0.746631 | TestCase which makes it possible to test views when the FacebookMiddleware and SyncFacebookUser middlewares are activated. Must use the Client attached to this object (i.e. self.client). Allow code to configure the test client so it has a signed request of the specified test user for each request # Make sure anybody that needs to sync their models loaded from fixtures # has a chance to do so now that the refreshed user test data is available. Gets thet Django app from the __class__ attribute of a TestCase in a Django app. class_string should look something like this: 'facetools_tests.tests.test_test_module' # ----------------------------------------------------------------------------- # Test Cases # ----------------------------------------------------------------------------- | 2.056466 | 2 |
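A sketch of how the mixin above is meant to be used, based on its docstring and _pre_setup: subclass FacebookTestCase, name a Facebook test user that sync_facebook_test_users has loaded, and make requests through self.client. The class name, test-user name, URL and import path are assumptions.
from facetools.test.testcases import FacebookTestCase
class CanvasPageTest(FacebookTestCase):
    facebook_test_user = 'Test User Alpha'  # must match a synced TestUser fixture
    def test_canvas_loads(self):
        response = self.client.get('/canvas/')  # the mixin attaches the signed request
        self.assertEqual(response.status_code, 200)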
setup.py | d2gex/distpickymodel | 0 | 8183 | import setuptools
import distpickymodel
def get_long_desc():
with open("README.rst", "r") as fh:
return fh.read()
setuptools.setup(
name="distpickymodel",
version=distpickymodel.__version__,
author="<NAME>",
author_email="<EMAIL>",
description="A shared Mongoengine-based model library",
long_description=get_long_desc(),
url="https://github.com/d2gex/distpickymodel",
# Exclude 'tests' and 'docs'
packages=['distpickymodel'],
python_requires='>=3.6',
install_requires=['pymongo>=3.7.2', 'mongoengine>=0.17.0', 'six'],
tests_require=['pytest>=4.4.0', 'PyYAML>=5.1'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| import setuptools
import distpickymodel
def get_long_desc():
with open("README.rst", "r") as fh:
return fh.read()
setuptools.setup(
name="distpickymodel",
version=distpickymodel.__version__,
author="<NAME>",
author_email="<EMAIL>",
description="A shared Mongoengine-based model library",
long_description=get_long_desc(),
url="https://github.com/d2gex/distpickymodel",
# Exclude 'tests' and 'docs'
packages=['distpickymodel'],
python_requires='>=3.6',
install_requires=['pymongo>=3.7.2', 'mongoengine>=0.17.0', 'six'],
tests_require=['pytest>=4.4.0', 'PyYAML>=5.1'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| en | 0.872563 | # Exclude 'tests' and 'docs' | 1.777753 | 2 |
credentials_test.py | tinatasha/passwordgenerator | 0 | 8184 | <reponame>tinatasha/passwordgenerator
import unittest
from password import Credentials
class TestCredentials(unittest.TestCase):
"""
Class to test behaviour of the credentials class
"""
def setUp(self):
"""
Setup method that defines instructions
"""
self.new_credentials = Credentials("Github","Tina","blackfaffp1")
def tearDown(self):
"""
Method that cleans up after each test
"""
Credentials.credentials_list = []
def test_init(self):
"""
Test for correct initialization
"""
self.assertEqual(self.new_credentials.account_name,"Github")
        self.assertEqual(self.new_credentials.username,"Tina")
self.assertEqual(self.new_credentials.password,"<PASSWORD>")
def test_save_credentials(self):
"""
Test to check whether app saves account credentials
"""
self.new_credentials.save_credentials()
self.assertEqual(len(Credentials.credentials_list),1)
def test_save_multiple_credentials(self):
"""
Test for saving multiple credentials
"""
self.new_credentials.save_credentials()
test_credentials = Credentials("AllFootball","Kibet","messithegoat")
test_credentials.save_credentials()
self.assertEqual(len(Credentials.credentials_list),2)
def test_view_credentials(self):
"""
Test to view an account credential
"""
self.assertEqual(Credentials.display_credentials(),Credentials.credentials_list)
def test_delete_credentials(self):
"""
Test to delete account credentials
"""
self.new_credentials.save_credentials()
test_credentials = Credentials("i","love","cats")
test_credentials.save_credentials()
self.new_credentials.delete_credentials()
self.assertEqual(len(Credentials.credentials_list),1)
if __name__ == '__main__':
unittest.main() | import unittest
from password import Credentials
class TestCredentials(unittest.TestCase):
"""
Class to test behaviour of the credentials class
"""
def setUp(self):
"""
Setup method that defines instructions
"""
self.new_credentials = Credentials("Github","Tina","blackfaffp1")
def tearDown(self):
"""
Method that cleans up after each test
"""
Credentials.credentials_list = []
def test_init(self):
"""
Test for correct initialization
"""
self.assertEqual(self.new_credentials.account_name,"Github")
        self.assertEqual(self.new_credentials.username,"Tina")
self.assertEqual(self.new_credentials.password,"<PASSWORD>")
def test_save_credentials(self):
"""
Test to check whether app saves account credentials
"""
self.new_credentials.save_credentials()
self.assertEqual(len(Credentials.credentials_list),1)
def test_save_multiple_credentials(self):
"""
Test for saving multiple credentials
"""
self.new_credentials.save_credentials()
test_credentials = Credentials("AllFootball","Kibet","messithegoat")
test_credentials.save_credentials()
self.assertEqual(len(Credentials.credentials_list),2)
def test_view_credentials(self):
"""
Test to view an account credential
"""
self.assertEqual(Credentials.display_credentials(),Credentials.credentials_list)
def test_delete_credentials(self):
"""
Test to delete account credentials
"""
self.new_credentials.save_credentials()
test_credentials = Credentials("i","love","cats")
test_credentials.save_credentials()
self.new_credentials.delete_credentials()
self.assertEqual(len(Credentials.credentials_list),1)
if __name__ == '__main__':
unittest.main() | en | 0.862567 | Class to test behaviour of the credentials class Setup method that defines instructions Method that cleans up after each test Test for correct initialization Test to check whether app saves account credentials Test for saving multiple credentials Test to view an account credential Test to delete account credentials | 3.734887 | 4 |
homework_08/calc_fitness.py | ufpa-organization-repositories/evolutionary-computing | 0 | 8185 | <gh_stars>0
def calc_fitness(pop):
from to_decimal import to_decimal
from math import sin, sqrt
for index, elem in enumerate(pop):
        # only assign a fitness to chromosomes that do not already have one
# print(elem[0], elem[1])
x = to_decimal(elem[0])
y = to_decimal(elem[1])
# x = elem[0]
# y = elem[1]
f6 = 0.5 - ((sin(sqrt(x**2 + y**2)))**2 - 0.5) / (1 + 0.001 * (x**2 + y**2))**2
pop[index] = [f6, elem]
return 0
# populacao = [[0,0],[-3,1]]
# calc_fitness(pop=populacao)
# print(populacao)
| def calc_fitness(pop):
from to_decimal import to_decimal
from math import sin, sqrt
for index, elem in enumerate(pop):
        # only assign a fitness to chromosomes that do not already have one
# print(elem[0], elem[1])
x = to_decimal(elem[0])
y = to_decimal(elem[1])
# x = elem[0]
# y = elem[1]
f6 = 0.5 - ((sin(sqrt(x**2 + y**2)))**2 - 0.5) / (1 + 0.001 * (x**2 + y**2))**2
pop[index] = [f6, elem]
return 0
# populacao = [[0,0],[-3,1]]
# calc_fitness(pop=populacao)
# print(populacao) | pt | 0.74066 | # só atribui a fitness a cromossomos que ainda não possuem fitness # print(elem[0], elem[1]) # x = elem[0] # y = elem[1] # populacao = [[0,0],[-3,1]] # calc_fitness(pop=populacao) # print(populacao) | 3.519131 | 4 |
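A worked evaluation of the fitness expression above, which appears to be the Schaffer F6 function written in maximization form: at x = y = 0 the sine term vanishes and the value is 0.5 - (0 - 0.5)/1 = 1.0, the best possible fitness.
from math import sin, sqrt
x = y = 0.0
f6 = 0.5 - ((sin(sqrt(x**2 + y**2)))**2 - 0.5) / (1 + 0.001 * (x**2 + y**2))**2
print(f6)  # 1.0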
pichetprofile/__init__.py | jamenor/pichetprofile | 2 | 8186 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from oopschool.school import Student,Tesla,SpecialStudent,Teacher
from oopschool.newschool import Test | # -*- coding: utf-8 -*-
from oopschool.school import Student,Tesla,SpecialStudent,Teacher
from oopschool.newschool import Test | en | 0.769321 | # -*- coding: utf-8 -*- | 1.004334 | 1 |
leetcode/group2/461.py | HPluseven/playground | 1 | 8187 | <filename>leetcode/group2/461.py
class Solution:
def hammingDistance(self, x: int, y: int) -> int:
xor = x ^ y
distance = 0
while xor:
if xor & 1:
distance += 1
xor = xor >> 1
return distance
class Solution:
def hammingDistance(self, x: int, y: int) -> int:
xor = x ^ y
distance = 0
while xor:
distance += 1
xor = xor & (xor-1)
return distance
| <filename>leetcode/group2/461.py
class Solution:
def hammingDistance(self, x: int, y: int) -> int:
xor = x ^ y
distance = 0
while xor:
if xor & 1:
distance += 1
xor = xor >> 1
return distance
class Solution:
def hammingDistance(self, x: int, y: int) -> int:
xor = x ^ y
distance = 0
while xor:
distance += 1
xor = xor & (xor-1)
return distance
| none | 1 | 3.887426 | 4 |
|
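A worked trace of the second solution above (Brian Kernighan's trick): each xor & (xor - 1) clears the lowest set bit, so the loop runs once per differing bit.
# x = 1 (0b001), y = 4 (0b100)  ->  xor = 0b101
# iteration 1: 0b101 & 0b100 = 0b100
# iteration 2: 0b100 & 0b011 = 0b000  ->  distance = 2
print(Solution().hammingDistance(1, 4))  # 2, assuming the Solution class above is in scope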
rdl/data_sources/DataSourceFactory.py | pageuppeople-opensource/relational-data-loader | 2 | 8188 | <reponame>pageuppeople-opensource/relational-data-loader
import logging
from rdl.data_sources.MsSqlDataSource import MsSqlDataSource
from rdl.data_sources.AWSLambdaDataSource import AWSLambdaDataSource
class DataSourceFactory(object):
def __init__(self, logger=None):
self.logger = logger or logging.getLogger(__name__)
self.sources = [MsSqlDataSource, AWSLambdaDataSource]
def create_source(self, connection_string):
for source in self.sources:
if source.can_handle_connection_string(connection_string):
self.logger.info(
f"Found handler '{source}' for given connection string."
)
return source(connection_string)
raise RuntimeError(
"There are no data sources that can handle this connection string"
)
def is_prefix_supported(self, connection_string):
for source in self.sources:
if source.can_handle_connection_string(connection_string):
return True
return False
def get_supported_source_prefixes(self):
return list(
map(lambda source: source.get_connection_string_prefix(), self.sources)
)
| import logging
from rdl.data_sources.MsSqlDataSource import MsSqlDataSource
from rdl.data_sources.AWSLambdaDataSource import AWSLambdaDataSource
class DataSourceFactory(object):
def __init__(self, logger=None):
self.logger = logger or logging.getLogger(__name__)
self.sources = [MsSqlDataSource, AWSLambdaDataSource]
def create_source(self, connection_string):
for source in self.sources:
if source.can_handle_connection_string(connection_string):
self.logger.info(
f"Found handler '{source}' for given connection string."
)
return source(connection_string)
raise RuntimeError(
"There are no data sources that can handle this connection string"
)
def is_prefix_supported(self, connection_string):
for source in self.sources:
if source.can_handle_connection_string(connection_string):
return True
return False
def get_supported_source_prefixes(self):
return list(
map(lambda source: source.get_connection_string_prefix(), self.sources)
) | none | 1 | 2.325039 | 2 |
|
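A usage sketch for the factory above; the connection string is a placeholder, since the prefixes each source accepts are defined in MsSqlDataSource and AWSLambdaDataSource rather than in this file.
factory = DataSourceFactory()
print(factory.get_supported_source_prefixes())  # prefixes advertised by the two sources
conn_str = '<connection string with a supported prefix>'
if factory.is_prefix_supported(conn_str):
    source = factory.create_source(conn_str)    # raises RuntimeError if nothing matches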
ch05/ch05-02-timeseries.py | alexmalins/kagglebook | 13 | 8189 | # ---------------------------------
# Prepare the data etc.
# ----------------------------------
import numpy as np
import pandas as pd
# train_x is the training data, train_y is the target values, and test_x is the test data
# stored in pandas DataFrames and Series (numpy arrays also used)
train = pd.read_csv('../input/sample-data/train_preprocessed.csv')
train_x = train.drop(['target'], axis=1)
train_y = train['target']
test_x = pd.read_csv('../input/sample-data/test_preprocessed.csv')
# As time-series data assume a period variable is set that changes with time
train_x['period'] = np.arange(0, len(train_x)) // (len(train_x) // 4)
train_x['period'] = np.clip(train_x['period'], 0, 3)
test_x['period'] = 4
# -----------------------------------
# Hold-out method for time-series data
# -----------------------------------
# Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data)
# Here for within the training data period 3 is used for validation and periods 0 to 2 are used for training
is_tr = train_x['period'] < 3
is_va = train_x['period'] == 3
tr_x, va_x = train_x[is_tr], train_x[is_va]
tr_y, va_y = train_y[is_tr], train_y[is_va]
# -----------------------------------
# Cross validation for time-series data (use method that follows time)
# -----------------------------------
# Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data)
# Periods 1, 2 and 3 are each used for cross-validation, and the preceding periods are used for training
va_period_list = [1, 2, 3]
for va_period in va_period_list:
is_tr = train_x['period'] < va_period
is_va = train_x['period'] == va_period
tr_x, va_x = train_x[is_tr], train_x[is_va]
tr_y, va_y = train_y[is_tr], train_y[is_va]
# (For reference) Using TimeSeriesSplit() function is difficult as only the order of the data can be used
from sklearn.model_selection import TimeSeriesSplit
tss = TimeSeriesSplit(n_splits=4)
for tr_idx, va_idx in tss.split(train_x):
tr_x, va_x = train_x.iloc[tr_idx], train_x.iloc[va_idx]
tr_y, va_y = train_y.iloc[tr_idx], train_y.iloc[va_idx]
# -----------------------------------
# Cross validation for time-series data (method to simply partition by time)
# -----------------------------------
# Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data)
# Periods 1, 2 and 3 are each used for cross-validation, and the preceding periods are used for training
va_period_list = [0, 1, 2, 3]
for va_period in va_period_list:
is_tr = train_x['period'] != va_period
is_va = train_x['period'] == va_period
tr_x, va_x = train_x[is_tr], train_x[is_va]
tr_y, va_y = train_y[is_tr], train_y[is_va]
| # ---------------------------------
# Prepare the data etc.
# ----------------------------------
import numpy as np
import pandas as pd
# train_x is the training data, train_y is the target values, and test_x is the test data
# stored in pandas DataFrames and Series (numpy arrays also used)
train = pd.read_csv('../input/sample-data/train_preprocessed.csv')
train_x = train.drop(['target'], axis=1)
train_y = train['target']
test_x = pd.read_csv('../input/sample-data/test_preprocessed.csv')
# As time-series data assume a period variable is set that changes with time
train_x['period'] = np.arange(0, len(train_x)) // (len(train_x) // 4)
train_x['period'] = np.clip(train_x['period'], 0, 3)
test_x['period'] = 4
# -----------------------------------
# Hold-out method for time-series data
# -----------------------------------
# Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data)
# Here for within the training data period 3 is used for validation and periods 0 to 2 are used for training
is_tr = train_x['period'] < 3
is_va = train_x['period'] == 3
tr_x, va_x = train_x[is_tr], train_x[is_va]
tr_y, va_y = train_y[is_tr], train_y[is_va]
# -----------------------------------
# Cross validation for time-series data (use method that follows time)
# -----------------------------------
# Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data)
# Periods 1, 2 and 3 are each used for cross-validation, and the preceding periods are used for training
va_period_list = [1, 2, 3]
for va_period in va_period_list:
is_tr = train_x['period'] < va_period
is_va = train_x['period'] == va_period
tr_x, va_x = train_x[is_tr], train_x[is_va]
tr_y, va_y = train_y[is_tr], train_y[is_va]
# (For reference) Using TimeSeriesSplit() function is difficult as only the order of the data can be used
from sklearn.model_selection import TimeSeriesSplit
tss = TimeSeriesSplit(n_splits=4)
for tr_idx, va_idx in tss.split(train_x):
tr_x, va_x = train_x.iloc[tr_idx], train_x.iloc[va_idx]
tr_y, va_y = train_y.iloc[tr_idx], train_y.iloc[va_idx]
# -----------------------------------
# Cross validation for time-series data (method to simply partition by time)
# -----------------------------------
# Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data)
# Periods 1, 2 and 3 are each used for cross-validation, and the preceding periods are used for training
va_period_list = [0, 1, 2, 3]
for va_period in va_period_list:
is_tr = train_x['period'] != va_period
is_va = train_x['period'] == va_period
tr_x, va_x = train_x[is_tr], train_x[is_va]
tr_y, va_y = train_y[is_tr], train_y[is_va]
| en | 0.815375 | # --------------------------------- # Prepare the data etc. # ---------------------------------- # train_x is the training data, train_y is the target values, and test_x is the test data # stored in pandas DataFrames and Series (numpy arrays also used) # As time-series data assume a period variable is set that changes with time # ----------------------------------- # Hold-out method for time-series data # ----------------------------------- # Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data) # Here for within the training data period 3 is used for validation and periods 0 to 2 are used for training # ----------------------------------- # Cross validation for time-series data (use method that follows time) # ----------------------------------- # Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data) # Periods 1, 2 and 3 are each used for cross-validation, and the preceding periods are used for training # (For reference) Using TimeSeriesSplit() function is difficult as only the order of the data can be used # ----------------------------------- # Cross validation for time-series data (method to simply partition by time) # ----------------------------------- # Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data) # Periods 1, 2 and 3 are each used for cross-validation, and the preceding periods are used for training | 3.229084 | 3 |
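To make the three validation schemes above concrete, these are the folds they produce over the four training periods (period 4 is always the test set):
# Hold-out:           train = periods 0-2, validation = period 3
# CV following time:  (train 0 -> val 1), (train 0-1 -> val 2), (train 0-2 -> val 3)
# Simple period CV:   validation = period 0, 1, 2, 3 in turn; train = the other three periods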
server/WitClient.py | owo/jitalk | 1 | 8190 | <reponame>owo/jitalk
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import wit
import json
class WitClient(object):
"""docstring for WitClient"""
_access_token = '<KEY>'
def __init__(self):
wit.init()
def text_query(self, text):
res = json.loads(wit.text_query(text, WitClient._access_token))
return res["outcomes"]
def close_connection(self):
wit.close()
if __name__ == "__main__":
print "You ran the Wit client, nothing will happen. Exiting..." | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import wit
import json
class WitClient(object):
"""docstring for WitClient"""
_access_token = '<KEY>'
def __init__(self):
wit.init()
def text_query(self, text):
res = json.loads(wit.text_query(text, WitClient._access_token))
return res["outcomes"]
def close_connection(self):
wit.close()
if __name__ == "__main__":
print "You ran the Wit client, nothing will happen. Exiting..." | en | 0.48183 | #!/usr/bin/env python # -*- coding: utf-8 -*- docstring for WitClient | 2.708996 | 3 |
HackerRank/Python/Easy/E0036.py | Mohammed-Shoaib/HackerRank-Problems | 54 | 8191 | <filename>HackerRank/Python/Easy/E0036.py
# Problem Statement: https://www.hackerrank.com/challenges/itertools-combinations-with-replacement/problem
from itertools import combinations_with_replacement
S, k = input().split()
for comb in combinations_with_replacement(sorted(S), int(k)):
print(''.join(comb)) | <filename>HackerRank/Python/Easy/E0036.py
# Problem Statement: https://www.hackerrank.com/challenges/itertools-combinations-with-replacement/problem
from itertools import combinations_with_replacement
S, k = input().split()
for comb in combinations_with_replacement(sorted(S), int(k)):
print(''.join(comb)) | en | 0.7307 | # Problem Statement: https://www.hackerrank.com/challenges/itertools-combinations-with-replacement/problem | 3.496933 | 3 |
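A worked example of the snippet above: for stdin 'HACK 2' the sorted characters are A C H K, and combinations_with_replacement of size 2 yields each pair once, in lexicographic order.
from itertools import combinations_with_replacement
print([''.join(c) for c in combinations_with_replacement(sorted("HACK"), 2)])
# ['AA', 'AC', 'AH', 'AK', 'CC', 'CH', 'CK', 'HH', 'HK', 'KK']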
visual_genome/models.py | hayyubi/visual-genome-driver | 0 | 8192 | <reponame>hayyubi/visual-genome-driver
"""
Visual Genome Python API wrapper, models
"""
class Image:
"""
Image.
ID int
url hyperlink string
width int
height int
"""
def __init__(self, id, url, width, height, coco_id, flickr_id):
self.id = id
self.url = url
self.width = width
self.height = height
self.coco_id = coco_id
self.flickr_id = flickr_id
def __str__(self):
return 'id: %d, coco_id: %d, flickr_id: %d, width: %d, url: %s' \
% (self.id, -1
if self.coco_id is None
else self.coco_id, -1
if self.flickr_id is None
else self.flickr_id, self.width, self.url)
def __repr__(self):
return str(self)
class Region:
"""
Region.
image int
phrase string
x int
y int
width int
height int
"""
def __init__(self, id, image, phrase, x, y, width, height):
self.id = id
self.image = image
self.phrase = phrase
self.x = x
self.y = y
self.width = width
self.height = height
def __str__(self):
stat_str = 'id: {0}, x: {1}, y: {2}, width: {3},' \
'height: {4}, phrase: {5}, image: {6}'
return stat_str.format(self.id, self.x, self.y,
self.width, self.height, self.phrase,
self.image.id)
def __repr__(self):
return str(self)
class Graph:
"""
Graphs contain objects, relationships and attributes
image Image
bboxes Object array
relationships Relationship array
attributes Attribute array
"""
def __init__(self, image, objects, relationships, attributes):
self.image = image
self.objects = objects
self.relationships = relationships
self.attributes = attributes
class Object:
"""
Objects.
id int
x int
y int
width int
height int
names string array
synsets Synset array
"""
def __init__(self, id, x, y, width, height, names, synsets):
self.id = id
self.x = x
self.y = y
self.width = width
self.height = height
self.names = names[0]
self.synsets = synsets
self.bbox = [x, y, width, height]
def __str__(self):
name = self.names[0] if len(self.names) != 0 else 'None'
return '%s' % (name)
def __repr__(self):
return str(self)
class Relationship:
"""
Relationships. Ex, 'man - jumping over - fire hydrant'.
subject int
predicate string
object int
rel_canon Synset
"""
def __init__(self, id, subject, predicate, object, synset):
self.id = id
self.subject = subject
self.predicate = predicate
self.object = object
self.synset = synset
def __str__(self):
return "{0}: {1} {2} {3}".format(self.id, self.subject,
self.predicate, self.object)
def __repr__(self):
return str(self)
class Attribute:
"""
Attributes. Ex, 'man - old'.
subject Object
attribute string
synset Synset
"""
def __init__(self, id, subject, attribute, synset):
self.id = id
self.subject = subject
self.attribute = attribute
self.synset = synset
def __str__(self):
return "%d: %s is %s" % (self.id, self.subject, self.attribute)
def __repr__(self):
return str(self)
class QA:
"""
Question Answer Pairs.
ID int
image int
question string
answer string
q_objects QAObject array
a_objects QAObject array
"""
def __init__(self, id, image, question, answer,
question_objects, answer_objects):
self.id = id
self.image = image
self.question = question
self.answer = answer
self.q_objects = question_objects
self.a_objects = answer_objects
def __str__(self):
return 'id: %d, image: %d, question: %s, answer: %s' \
% (self.id, self.image.id, self.question, self.answer)
def __repr__(self):
return str(self)
class QAObject:
"""
Question Answer Objects are localized in the image and refer to a part
of the question text or the answer text.
start_idx int
end_idx int
name string
synset_name string
synset_definition string
"""
def __init__(self, start_idx, end_idx, name, synset):
self.start_idx = start_idx
self.end_idx = end_idx
self.name = name
self.synset = synset
def __repr__(self):
return str(self)
class Synset:
"""
Wordnet Synsets.
name string
definition string
"""
def __init__(self, name, definition):
self.name = name
self.definition = definition
def __str__(self):
return '{} - {}'.format(self.name, self.definition)
def __repr__(self):
return str(self)
| """
Visual Genome Python API wrapper, models
"""
class Image:
"""
Image.
ID int
url hyperlink string
width int
height int
"""
def __init__(self, id, url, width, height, coco_id, flickr_id):
self.id = id
self.url = url
self.width = width
self.height = height
self.coco_id = coco_id
self.flickr_id = flickr_id
def __str__(self):
return 'id: %d, coco_id: %d, flickr_id: %d, width: %d, url: %s' \
% (self.id, -1
if self.coco_id is None
else self.coco_id, -1
if self.flickr_id is None
else self.flickr_id, self.width, self.url)
def __repr__(self):
return str(self)
class Region:
"""
Region.
image int
phrase string
x int
y int
width int
height int
"""
def __init__(self, id, image, phrase, x, y, width, height):
self.id = id
self.image = image
self.phrase = phrase
self.x = x
self.y = y
self.width = width
self.height = height
def __str__(self):
stat_str = 'id: {0}, x: {1}, y: {2}, width: {3},' \
'height: {4}, phrase: {5}, image: {6}'
return stat_str.format(self.id, self.x, self.y,
self.width, self.height, self.phrase,
self.image.id)
def __repr__(self):
return str(self)
class Graph:
"""
Graphs contain objects, relationships and attributes
image Image
bboxes Object array
relationships Relationship array
attributes Attribute array
"""
def __init__(self, image, objects, relationships, attributes):
self.image = image
self.objects = objects
self.relationships = relationships
self.attributes = attributes
class Object:
"""
Objects.
id int
x int
y int
width int
height int
names string array
synsets Synset array
"""
def __init__(self, id, x, y, width, height, names, synsets):
self.id = id
self.x = x
self.y = y
self.width = width
self.height = height
self.names = names[0]
self.synsets = synsets
self.bbox = [x, y, width, height]
def __str__(self):
name = self.names[0] if len(self.names) != 0 else 'None'
return '%s' % (name)
def __repr__(self):
return str(self)
class Relationship:
"""
Relationships. Ex, 'man - jumping over - fire hydrant'.
subject int
predicate string
object int
rel_canon Synset
"""
def __init__(self, id, subject, predicate, object, synset):
self.id = id
self.subject = subject
self.predicate = predicate
self.object = object
self.synset = synset
def __str__(self):
return "{0}: {1} {2} {3}".format(self.id, self.subject,
self.predicate, self.object)
def __repr__(self):
return str(self)
class Attribute:
"""
Attributes. Ex, 'man - old'.
subject Object
attribute string
synset Synset
"""
def __init__(self, id, subject, attribute, synset):
self.id = id
self.subject = subject
self.attribute = attribute
self.synset = synset
def __str__(self):
return "%d: %s is %s" % (self.id, self.subject, self.attribute)
def __repr__(self):
return str(self)
class QA:
"""
Question Answer Pairs.
ID int
image int
question string
answer string
q_objects QAObject array
a_objects QAObject array
"""
def __init__(self, id, image, question, answer,
question_objects, answer_objects):
self.id = id
self.image = image
self.question = question
self.answer = answer
self.q_objects = question_objects
self.a_objects = answer_objects
def __str__(self):
return 'id: %d, image: %d, question: %s, answer: %s' \
% (self.id, self.image.id, self.question, self.answer)
def __repr__(self):
return str(self)
class QAObject:
"""
Question Answer Objects are localized in the image and refer to a part
of the question text or the answer text.
start_idx int
end_idx int
name string
synset_name string
synset_definition string
"""
def __init__(self, start_idx, end_idx, name, synset):
self.start_idx = start_idx
self.end_idx = end_idx
self.name = name
self.synset = synset
def __repr__(self):
return str(self)
class Synset:
"""
Wordnet Synsets.
name string
definition string
"""
def __init__(self, name, definition):
self.name = name
self.definition = definition
def __str__(self):
return '{} - {}'.format(self.name, self.definition)
def __repr__(self):
return str(self) | en | 0.482386 | Visual Genome Python API wrapper, models Image. ID int url hyperlink string width int height int Region. image int phrase string x int y int width int height int Graphs contain objects, relationships and attributes image Image bboxes Object array relationships Relationship array attributes Attribute array Objects. id int x int y int width int height int names string array synsets Synset array Relationships. Ex, 'man - jumping over - fire hydrant'. subject int predicate string object int rel_canon Synset Attributes. Ex, 'man - old'. subject Object attribute string synset Synset Question Answer Pairs. ID int image int question string answer string q_objects QAObject array a_objects QAObject array Question Answer Objects are localized in the image and refer to a part of the question text or the answer text. start_idx int end_idx int name string synset_name string synset_definition string Wordnet Synsets. name string definition string | 2.742899 | 3 |
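A small construction sketch for the wrapper classes above; the import path is taken from the file location and the IDs, URL and phrase are made-up values.
from visual_genome.models import Image, Region
img = Image(1, 'https://example.org/vg/1.jpg', 800, 600, None, None)
region = Region(7, img, 'a man jumping over a fire hydrant', 10, 20, 150, 90)
print(img)     # id: 1, coco_id: -1, flickr_id: -1, width: 800, url: https://example.org/vg/1.jpg
print(region)  # id: 7, x: 10, y: 20, width: 150, height: 90, phrase: ..., image: 1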
python-scripts/plot_delay.py | GayashanNA/my-scripts | 0 | 8193 | <filename>python-scripts/plot_delay.py<gh_stars>0
import csv
import matplotlib.pyplot as plt
import time
PLOT_PER_WINDOW = False
WINDOW_LENGTH = 60000
BINS = 1000
delay_store = {}
perwindow_delay_store = {}
plotting_delay_store = {}
filename = "output-large.csv"
# filename = "output.csv"
# filename = "output-medium.csv"
# filename = "output-small.csv"
# filename = "output-tiny.csv"
with open(filename, "rU") as dataFile:
csvreader = csv.reader(dataFile)
for row in csvreader:
if len(row) > 2 and str(row[0]).isdigit():
delay_store[long(row[1])] = long(row[2])
window_begin = min(delay_store.keys())
window_end = max(delay_store.keys())
if PLOT_PER_WINDOW:
window_end = window_begin + WINDOW_LENGTH
# find the time delays that are within the window of choice
for (tapp, delay) in delay_store.iteritems():
if window_begin <= tapp <= window_end:
perwindow_delay_store[tapp] = delay
plotting_delay_store = perwindow_delay_store
else:
plotting_delay_store = delay_store
# the histogram of the data
n, bins, patches = plt.hist(plotting_delay_store.values(), BINS, histtype='stepfilled',
normed=True, cumulative=False, facecolor='blue', alpha=0.9)
# plt.axhline(y=0.95, color='red', label='0.95')
max_delay = max(plotting_delay_store.values())
min_delay = min(plotting_delay_store.values())
count = len(plotting_delay_store.values())
# format epoch time to date time to be shown in the plot figure
window_begin_in_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(window_begin / 1000))
window_end_in_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(window_end / 1000))
title = "Window begin: %s\n" % window_begin_in_datetime
title += "Window end: %s\n" % window_end_in_datetime
# title += "Window length: %dms\n" % WINDOW_LENGTH
title += "Window length: ~%dmins\n" % ((window_end - window_begin)/60000)
title += "Maximum delay: %dms\n" % max_delay
title += "Minimum delay: %dms\n" % min_delay
title += "Count: %d" % count
# start plotting
plt.xlabel('Delay (ms)')
plt.ylabel('Probability')
plt.grid(True)
plt.legend()
plt.suptitle(title)
plt.subplots_adjust(top=0.8)
plt.show()
| <filename>python-scripts/plot_delay.py<gh_stars>0
import csv
import matplotlib.pyplot as plt
import time
PLOT_PER_WINDOW = False
WINDOW_LENGTH = 60000
BINS = 1000
delay_store = {}
perwindow_delay_store = {}
plotting_delay_store = {}
filename = "output-large.csv"
# filename = "output.csv"
# filename = "output-medium.csv"
# filename = "output-small.csv"
# filename = "output-tiny.csv"
with open(filename, "rU") as dataFile:
csvreader = csv.reader(dataFile)
for row in csvreader:
if len(row) > 2 and str(row[0]).isdigit():
delay_store[long(row[1])] = long(row[2])
window_begin = min(delay_store.keys())
window_end = max(delay_store.keys())
if PLOT_PER_WINDOW:
window_end = window_begin + WINDOW_LENGTH
# find the time delays that are within the window of choice
for (tapp, delay) in delay_store.iteritems():
if window_begin <= tapp <= window_end:
perwindow_delay_store[tapp] = delay
plotting_delay_store = perwindow_delay_store
else:
plotting_delay_store = delay_store
# the histogram of the data
n, bins, patches = plt.hist(plotting_delay_store.values(), BINS, histtype='stepfilled',
normed=True, cumulative=False, facecolor='blue', alpha=0.9)
# plt.axhline(y=0.95, color='red', label='0.95')
max_delay = max(plotting_delay_store.values())
min_delay = min(plotting_delay_store.values())
count = len(plotting_delay_store.values())
# format epoch time to date time to be shown in the plot figure
window_begin_in_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(window_begin / 1000))
window_end_in_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(window_end / 1000))
title = "Window begin: %s\n" % window_begin_in_datetime
title += "Window end: %s\n" % window_end_in_datetime
# title += "Window length: %dms\n" % WINDOW_LENGTH
title += "Window length: ~%dmins\n" % ((window_end - window_begin)/60000)
title += "Maximum delay: %dms\n" % max_delay
title += "Minimum delay: %dms\n" % min_delay
title += "Count: %d" % count
# start plotting
plt.xlabel('Delay (ms)')
plt.ylabel('Probability')
plt.grid(True)
plt.legend()
plt.suptitle(title)
plt.subplots_adjust(top=0.8)
plt.show()
| en | 0.675943 | # filename = "output.csv" # filename = "output-medium.csv" # filename = "output-small.csv" # filename = "output-tiny.csv" # find the time delays that are within the window of choice # the histogram of the data # plt.axhline(y=0.95, color='red', label='0.95') # format epoch time to date time to be shown in the plot figure # title += "Window length: %dms\n" % WINDOW_LENGTH # start plotting | 2.952642 | 3 |
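A sketch of a compatible input file for the plot_delay.py record above. The script only assumes row[0] is numeric, row[1] is an arrival timestamp in epoch milliseconds and row[2] is a delay in milliseconds; the column names below are illustrative guesses, not part of the record:
import csv, random, time

now_ms = int(time.time() * 1000)
with open('output-tiny.csv', 'w') as f:
    writer = csv.writer(f)
    writer.writerow(['id', 'arrival_ms', 'delay_ms'])   # header row is skipped by the isdigit() check
    for i in range(1000):
        writer.writerow([i, now_ms + i * 10, random.randint(5, 250)])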
python_scripts/BUSCO_phylogenetics/rename_all_fa_seq.py | peterthorpe5/Methods_M.cerasi_R.padi_genome_assembly | 4 | 8194 | <filename>python_scripts/BUSCO_phylogenetics/rename_all_fa_seq.py
#!/usr/bin/env python
# author: <NAME> September 2015. The James Hutton Insitute, Dundee, UK.
# title rename single copy busco genes
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
import os
from sys import stdin,argv
import sys
from optparse import OptionParser
########################################################################
# functions
def parse_busco_file(busco):
"""this is a function to open busco full ouput
and get a list of duplicated genes. This list is required
so we can ignore these genes later. Takes file,
return list"""
duplicated_list = []
with open(busco) as handle:
for line in handle:
if not line.strip():
continue # if the last line is blank
if line.startswith("#"):
continue
if not line:
print ("your file is empty")
return False
line_info = line.rstrip().split("\t")
# first element
Busco_name = line_info[0]
# second element
status = line_info[1]
if status == "Duplicated" or status == "Fragmented":
duplicated_list.append(Busco_name)
return duplicated_list
def reformat_as_fasta(filename,prefix,outfile):
"this function re-write a file as a fasta file"
f= open(outfile, 'w')
fas = open(filename, "r")
for line in fas:
if not line.strip():
continue # if the last line is blank
if line.startswith("#"):
continue
if not line:
return False
if not line.startswith(">"):
seq = line
title = ">" + prefix + "_" + filename.replace("BUSCOa", "").split(".fas")[0]
data = "%s\n%s\n" %(title, seq)
f.write(data)
f.close()
if "-v" in sys.argv or "--version" in sys.argv:
print "v0.0.1"
sys.exit(0)
usage = """Use as follows:
converts
$ python renaem....py -p Mce -b full_table_BUSCO_output
script to walk through all files in a folder and rename the seq id
to start with Prefix.
Used for Busco output.
give it the busco full ouput table. The script will only return
complete single copy gene. Duplicate gene will be ignored.
"""
parser = OptionParser(usage=usage)
parser.add_option("-p", "--prefix", dest="prefix",
default=None,
help="Output filename",
metavar="FILE")
parser.add_option("-b", "--busco", dest="busco",
default=None,
help="full_table_*_BUSCO output from BUSCO",
metavar="FILE")
(options, args) = parser.parse_args()
prefix = options.prefix
busco = options.busco
# Run as script
if __name__ == '__main__':
#call function to get a list of dupicated gene.
#these genes will be ignored
duplicated_list = parse_busco_file(busco)
#iterate through the dir
for filename in os.listdir("."):
count = 1
if not filename.endswith(".fas"):
continue
#filter out the ones we dont want
if filename.split(".fa")[0] in duplicated_list:
continue
out_file = "../"+prefix+filename
out_file = out_file.replace("BUSCOa", "")
#out_file = "../"+filename
try:
#print filename
reformat_as_fasta(filename, prefix, out_file)
except:
ValueError
continue
| <filename>python_scripts/BUSCO_phylogenetics/rename_all_fa_seq.py
#!/usr/bin/env python
# author: <NAME> September 2015. The James Hutton Insitute, Dundee, UK.
# title rename single copy busco genes
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
import os
from sys import stdin,argv
import sys
from optparse import OptionParser
########################################################################
# functions
def parse_busco_file(busco):
"""this is a function to open busco full ouput
and get a list of duplicated genes. This list is required
so we can ignore these genes later. Takes file,
return list"""
duplicated_list = []
with open(busco) as handle:
for line in handle:
if not line.strip():
continue # if the last line is blank
if line.startswith("#"):
continue
if not line:
print ("your file is empty")
return False
line_info = line.rstrip().split("\t")
# first element
Busco_name = line_info[0]
# second element
status = line_info[1]
if status == "Duplicated" or status == "Fragmented":
duplicated_list.append(Busco_name)
return duplicated_list
def reformat_as_fasta(filename,prefix,outfile):
"this function re-write a file as a fasta file"
f= open(outfile, 'w')
fas = open(filename, "r")
for line in fas:
if not line.strip():
continue # if the last line is blank
if line.startswith("#"):
continue
if not line:
return False
if not line.startswith(">"):
seq = line
title = ">" + prefix + "_" + filename.replace("BUSCOa", "").split(".fas")[0]
data = "%s\n%s\n" %(title, seq)
f.write(data)
f.close()
if "-v" in sys.argv or "--version" in sys.argv:
print "v0.0.1"
sys.exit(0)
usage = """Use as follows:
converts
$ python renaem....py -p Mce -b full_table_BUSCO_output
script to walk through all files in a folder and rename the seq id
to start with Prefix.
Used for Busco output.
give it the busco full ouput table. The script will only return
complete single copy gene. Duplicate gene will be ignored.
"""
parser = OptionParser(usage=usage)
parser.add_option("-p", "--prefix", dest="prefix",
default=None,
help="Output filename",
metavar="FILE")
parser.add_option("-b", "--busco", dest="busco",
default=None,
help="full_table_*_BUSCO output from BUSCO",
metavar="FILE")
(options, args) = parser.parse_args()
prefix = options.prefix
busco = options.busco
# Run as script
if __name__ == '__main__':
#call function to get a list of dupicated gene.
#these genes will be ignored
duplicated_list = parse_busco_file(busco)
#iterate through the dir
for filename in os.listdir("."):
count = 1
if not filename.endswith(".fas"):
continue
#filter out the ones we dont want
if filename.split(".fa")[0] in duplicated_list:
continue
out_file = "../"+prefix+filename
out_file = out_file.replace("BUSCOa", "")
#out_file = "../"+filename
try:
#print filename
reformat_as_fasta(filename, prefix, out_file)
except:
ValueError
continue
| en | 0.668722 | #!/usr/bin/env python # author: <NAME> September 2015. The James Hutton Insitute, Dundee, UK. # title rename single copy busco genes ######################################################################## # functions this is a function to open busco full ouput and get a list of duplicated genes. This list is required so we can ignore these genes later. Takes file, return list # if the last line is blank # first element # second element # if the last line is blank Use as follows: converts $ python renaem....py -p Mce -b full_table_BUSCO_output script to walk through all files in a folder and rename the seq id to start with Prefix. Used for Busco output. give it the busco full ouput table. The script will only return complete single copy gene. Duplicate gene will be ignored. # Run as script #call function to get a list of dupicated gene. #these genes will be ignored #iterate through the dir #filter out the ones we dont want #out_file = "../"+filename #print filename | 3.236518 | 3 |
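A self-contained demo of parse_busco_file() from the record above, run on a fabricated three-row BUSCO full table. The columns beyond the id and status fields are invented for illustration; only the first two columns are read:
rows = [
    "# BUSCO full table header lines are skipped",
    "EOG0001\tComplete\tcontig1\t10\t500",
    "EOG0002\tDuplicated\tcontig2\t20\t600",
    "EOG0003\tFragmented\tcontig3\t30\t700",
]
with open("full_table_demo.tsv", "w") as handle:
    handle.write("\n".join(rows) + "\n")
print(parse_busco_file("full_table_demo.tsv"))   # expected: ['EOG0002', 'EOG0003']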
video/rest/compositionhooks/delete-hook/delete-hook.6.x.py | afeld/api-snippets | 3 | 8195 | # Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
api_key_sid = 'SKXXXX'
api_key_secret = 'your_api_key_secret'
client = Client(api_key_sid, api_key_secret)
did_delete = client.video\
.compositionHooks('HKXXXX')\
.delete()
if(did_delete):
print('Composition removed')
| # Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
api_key_sid = 'SKXXXX'
api_key_secret = 'your_api_key_secret'
client = Client(api_key_sid, api_key_secret)
did_delete = client.video\
.compositionHooks('HKXXXX')\
.delete()
if(did_delete):
print('Composition removed')
| en | 0.74306 | # Download the Python helper library from twilio.com/docs/python/install # Your Account Sid and Auth Token from twilio.com/console | 2.212094 | 2 |
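A variation on the Twilio snippet above: the same delete call, but with the API key SID and secret read from environment variables instead of hard-coded placeholders. The environment variable names are arbitrary choices, not Twilio conventions:
import os
from twilio.rest import Client

client = Client(os.environ['TWILIO_API_KEY_SID'], os.environ['TWILIO_API_KEY_SECRET'])
did_delete = client.video.compositionHooks('HKXXXX').delete()
print('Composition removed' if did_delete else 'Nothing was removed')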
global_info.py | AkagiYui/AzurLaneTool | 0 | 8196 | from time import sleep
debug_mode = False
time_to_exit = False
exiting = False
exit_code = 0
def get_debug_mode():
return debug_mode
def trigger_exit(_exit_code):
global time_to_exit, exit_code
exit_code = _exit_code
time_to_exit = True
sleep(0.1)
| from time import sleep
debug_mode = False
time_to_exit = False
exiting = False
exit_code = 0
def get_debug_mode():
return debug_mode
def trigger_exit(_exit_code):
global time_to_exit, exit_code
exit_code = _exit_code
time_to_exit = True
sleep(0.1)
| none | 1 | 2.201809 | 2 |
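A sketch of how another module in the same project might consume global_info from the record above; the consuming module and its loop body are hypothetical:
import sys
import global_info

def worker_loop():
    while not global_info.time_to_exit:
        pass                      # placeholder for real per-iteration work
    sys.exit(global_info.exit_code)

# e.g. from a UI callback or signal handler:
# global_info.trigger_exit(0)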
|
advesarial_text/data/data_utils_test.py | slowy07/tensorflow-model-research | 0 | 8197 | from __future__ import absolute_import

from __future__ import division
from __future__ import print_function
import tensorflow as tf
from data import data_utils
data = data_utils
class SequenceWrapperTest(tf.test.TestCase):
def testDefaultTimesteps(self):
seq = data.SequenceWrapper()
t1 = seq.add_timestep()
_ = seq.add_timestep()
self.assertEqual(len(seq), 2)
self.assertEqual(t1.weight, 0.0)
self.assertEqual(t1.label, 0)
self.assertEqual(t1.token, 0)
def testSettersAndGetters(self):
ts = data.SequenceWrapper().add_timestep()
ts.set_token(3)
ts.set_label(4)
ts.set_weight(2.0)
self.assertEqual(ts.token, 3)
self.assertEqual(ts.label, 4)
self.assertEqual(ts.weight, 2.0)
def testTimestepIteration(self):
seq = data.SequenceWrapper()
seq.add_timestep().set_token(0)
seq.add_timestep().set_token(1)
seq.add_timestep().set_token(2)
for i, ts in enumerate(seq):
self.assertEqual(ts.token, i)
def testFillsSequenceExampleCorrectly(self):
seq = data.SequenceWrapper()
seq.add_timestep().set_token(1).set_label(2).set_weight(3.0)
seq.add_timestep().set_token(10).set_label(20).set_weight(30.0)
seq_ex = seq.seq
fl = seq_ex.feature_lists.feature_list
fl_token = fl[data.SequenceWrapper.F_TOKEN_ID].feature
fl_label = fl[data.SequenceWrapper.F_LABEL].feature
fl_weight = fl[data.SequenceWrapper.F_WEIGHT].feature
_ = [self.assertEqual(len(f), 2) for f in [fl_token, fl_label, fl_weight]]
self.assertAllEqual([f.int64_list.value[0] for f in fl_token], [1, 10])
self.assertAllEqual([f.int64_list.value[0] for f in fl_label], [2, 20])
self.assertAllEqual([f.float_list.value[0] for f in fl_weight], [3.0, 30.0])
class DataUtilsTest(tf.test.TestCase):
def testSplitByPunct(self):
output = data.split_by_punct(
"hello! world, i've been\nwaiting\tfor\ryou for.a long time"
)
expected = [
"hello",
"world",
"i",
"ve",
"been",
"waiting",
"for",
"you",
"for",
"a",
"long",
"time",
]
self.assertListEqual(output, expected)
def _buildDummySequence(self):
seq = data.SequenceWrapper()
for i in range(10):
seq.add_timestep().set_token(i)
return seq
def testBuildLMSeq(self):
seq = self._buildDummySequence()
lm_seq = data.build_lm_sequence(seq)
for i, ts in enumerate(lm_seq):
# For end of sequence, the token and label should be same, and weight
# should be 0.0.
if i == len(lm_seq) - 1:
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, i)
self.assertEqual(ts.weight, 0.0)
else:
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, i + 1)
self.assertEqual(ts.weight, 1.0)
def testBuildSAESeq(self):
seq = self._buildDummySequence()
sa_seq = data.build_seq_ae_sequence(seq)
self.assertEqual(len(sa_seq), len(seq) * 2 - 1)
# Tokens should be sequence twice, minus the EOS token at the end
for i, ts in enumerate(sa_seq):
self.assertEqual(ts.token, seq[i % 10].token)
# Weights should be len-1 0.0's and len 1.0's.
for i in range(len(seq) - 1):
self.assertEqual(sa_seq[i].weight, 0.0)
for i in range(len(seq) - 1, len(sa_seq)):
self.assertEqual(sa_seq[i].weight, 1.0)
# Labels should be len-1 0's, and then the sequence
for i in range(len(seq) - 1):
self.assertEqual(sa_seq[i].label, 0)
for i in range(len(seq) - 1, len(sa_seq)):
self.assertEqual(sa_seq[i].label, seq[i - (len(seq) - 1)].token)
def testBuildLabelSeq(self):
seq = self._buildDummySequence()
eos_id = len(seq) - 1
label_seq = data.build_labeled_sequence(seq, True)
for i, ts in enumerate(label_seq[:-1]):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = label_seq[-1]
self.assertEqual(final_timestep.token, eos_id)
self.assertEqual(final_timestep.label, 1)
self.assertEqual(final_timestep.weight, 1.0)
def testBuildBidirLabelSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
bidir_seq = data.build_bidirectional_seq(seq, reverse_seq)
label_seq = data.build_labeled_sequence(bidir_seq, True)
for (i, ts), j in zip(enumerate(label_seq[:-1]), reversed(range(len(seq) - 1))):
self.assertAllEqual(ts.tokens, [i, j])
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = label_seq[-1]
eos_id = len(seq) - 1
self.assertAllEqual(final_timestep.tokens, [eos_id, eos_id])
self.assertEqual(final_timestep.label, 1)
self.assertEqual(final_timestep.weight, 1.0)
def testReverseSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
for i, ts in enumerate(reversed(reverse_seq[:-1])):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = reverse_seq[-1]
eos_id = len(seq) - 1
self.assertEqual(final_timestep.token, eos_id)
self.assertEqual(final_timestep.label, 0)
self.assertEqual(final_timestep.weight, 0.0)
def testBidirSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
bidir_seq = data.build_bidirectional_seq(seq, reverse_seq)
for (i, ts), j in zip(enumerate(bidir_seq[:-1]), reversed(range(len(seq) - 1))):
self.assertAllEqual(ts.tokens, [i, j])
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = bidir_seq[-1]
eos_id = len(seq) - 1
self.assertAllEqual(final_timestep.tokens, [eos_id, eos_id])
self.assertEqual(final_timestep.label, 0)
self.assertEqual(final_timestep.weight, 0.0)
def testLabelGain(self):
seq = self._buildDummySequence()
label_seq = data.build_labeled_sequence(seq, True, label_gain=True)
for i, ts in enumerate(label_seq):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 1)
self.assertNear(ts.weight, float(i) / (len(seq) - 1), 1e-3)
if __name__ == "__main__":
tf.test.main()
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from data import data_utils
data = data_utils
class SequenceWrapperTest(tf.test.TestCase):
def testDefaultTimesteps(self):
seq = data.SequenceWrapper()
t1 = seq.add_timestep()
_ = seq.add_timestep()
self.assertEqual(len(seq), 2)
self.assertEqual(t1.weight, 0.0)
self.assertEqual(t1.label, 0)
self.assertEqual(t1.token, 0)
def testSettersAndGetters(self):
ts = data.SequenceWrapper().add_timestep()
ts.set_token(3)
ts.set_label(4)
ts.set_weight(2.0)
self.assertEqual(ts.token, 3)
self.assertEqual(ts.label, 4)
self.assertEqual(ts.weight, 2.0)
def testTimestepIteration(self):
seq = data.SequenceWrapper()
seq.add_timestep().set_token(0)
seq.add_timestep().set_token(1)
seq.add_timestep().set_token(2)
for i, ts in enumerate(seq):
self.assertEqual(ts.token, i)
def testFillsSequenceExampleCorrectly(self):
seq = data.SequenceWrapper()
seq.add_timestep().set_token(1).set_label(2).set_weight(3.0)
seq.add_timestep().set_token(10).set_label(20).set_weight(30.0)
seq_ex = seq.seq
fl = seq_ex.feature_lists.feature_list
fl_token = fl[data.SequenceWrapper.F_TOKEN_ID].feature
fl_label = fl[data.SequenceWrapper.F_LABEL].feature
fl_weight = fl[data.SequenceWrapper.F_WEIGHT].feature
_ = [self.assertEqual(len(f), 2) for f in [fl_token, fl_label, fl_weight]]
self.assertAllEqual([f.int64_list.value[0] for f in fl_token], [1, 10])
self.assertAllEqual([f.int64_list.value[0] for f in fl_label], [2, 20])
self.assertAllEqual([f.float_list.value[0] for f in fl_weight], [3.0, 30.0])
class DataUtilsTest(tf.test.TestCase):
def testSplitByPunct(self):
output = data.split_by_punct(
"hello! world, i've been\nwaiting\tfor\ryou for.a long time"
)
expected = [
"hello",
"world",
"i",
"ve",
"been",
"waiting",
"for",
"you",
"for",
"a",
"long",
"time",
]
self.assertListEqual(output, expected)
def _buildDummySequence(self):
seq = data.SequenceWrapper()
for i in range(10):
seq.add_timestep().set_token(i)
return seq
def testBuildLMSeq(self):
seq = self._buildDummySequence()
lm_seq = data.build_lm_sequence(seq)
for i, ts in enumerate(lm_seq):
# For end of sequence, the token and label should be same, and weight
# should be 0.0.
if i == len(lm_seq) - 1:
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, i)
self.assertEqual(ts.weight, 0.0)
else:
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, i + 1)
self.assertEqual(ts.weight, 1.0)
def testBuildSAESeq(self):
seq = self._buildDummySequence()
sa_seq = data.build_seq_ae_sequence(seq)
self.assertEqual(len(sa_seq), len(seq) * 2 - 1)
# Tokens should be sequence twice, minus the EOS token at the end
for i, ts in enumerate(sa_seq):
self.assertEqual(ts.token, seq[i % 10].token)
# Weights should be len-1 0.0's and len 1.0's.
for i in range(len(seq) - 1):
self.assertEqual(sa_seq[i].weight, 0.0)
for i in range(len(seq) - 1, len(sa_seq)):
self.assertEqual(sa_seq[i].weight, 1.0)
# Labels should be len-1 0's, and then the sequence
for i in range(len(seq) - 1):
self.assertEqual(sa_seq[i].label, 0)
for i in range(len(seq) - 1, len(sa_seq)):
self.assertEqual(sa_seq[i].label, seq[i - (len(seq) - 1)].token)
def testBuildLabelSeq(self):
seq = self._buildDummySequence()
eos_id = len(seq) - 1
label_seq = data.build_labeled_sequence(seq, True)
for i, ts in enumerate(label_seq[:-1]):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = label_seq[-1]
self.assertEqual(final_timestep.token, eos_id)
self.assertEqual(final_timestep.label, 1)
self.assertEqual(final_timestep.weight, 1.0)
def testBuildBidirLabelSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
bidir_seq = data.build_bidirectional_seq(seq, reverse_seq)
label_seq = data.build_labeled_sequence(bidir_seq, True)
for (i, ts), j in zip(enumerate(label_seq[:-1]), reversed(range(len(seq) - 1))):
self.assertAllEqual(ts.tokens, [i, j])
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = label_seq[-1]
eos_id = len(seq) - 1
self.assertAllEqual(final_timestep.tokens, [eos_id, eos_id])
self.assertEqual(final_timestep.label, 1)
self.assertEqual(final_timestep.weight, 1.0)
def testReverseSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
for i, ts in enumerate(reversed(reverse_seq[:-1])):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = reverse_seq[-1]
eos_id = len(seq) - 1
self.assertEqual(final_timestep.token, eos_id)
self.assertEqual(final_timestep.label, 0)
self.assertEqual(final_timestep.weight, 0.0)
def testBidirSeq(self):
seq = self._buildDummySequence()
reverse_seq = data.build_reverse_sequence(seq)
bidir_seq = data.build_bidirectional_seq(seq, reverse_seq)
for (i, ts), j in zip(enumerate(bidir_seq[:-1]), reversed(range(len(seq) - 1))):
self.assertAllEqual(ts.tokens, [i, j])
self.assertEqual(ts.label, 0)
self.assertEqual(ts.weight, 0.0)
final_timestep = bidir_seq[-1]
eos_id = len(seq) - 1
self.assertAllEqual(final_timestep.tokens, [eos_id, eos_id])
self.assertEqual(final_timestep.label, 0)
self.assertEqual(final_timestep.weight, 0.0)
def testLabelGain(self):
seq = self._buildDummySequence()
label_seq = data.build_labeled_sequence(seq, True, label_gain=True)
for i, ts in enumerate(label_seq):
self.assertEqual(ts.token, i)
self.assertEqual(ts.label, 1)
self.assertNear(ts.weight, float(i) / (len(seq) - 1), 1e-3)
if __name__ == "__main__":
tf.test.main()
| en | 0.821909 | # For end of sequence, the token and label should be same, and weight # should be 0.0. # Tokens should be sequence twice, minus the EOS token at the end # Weights should be len-1 0.0's and len 1.0's. # Labels should be len-1 0's, and then the sequence | 2.313809 | 2 |
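A short sketch of the language-model pairing that testBuildLMSeq in the record above asserts, using only the SequenceWrapper/build_lm_sequence API exercised by the tests (it requires the same data package on the import path):
seq = data.SequenceWrapper()
for i in range(4):
    seq.add_timestep().set_token(i)

for ts in data.build_lm_sequence(seq):
    print(ts.token, ts.label, ts.weight)
# per the assertions in testBuildLMSeq:
# 0 1 1.0
# 1 2 1.0
# 2 3 1.0
# 3 3 0.0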
headlesspreview/apps.py | arush15june/wagtail-torchbox | 0 | 8198 | from django.apps import AppConfig
class HeadlesspreviewConfig(AppConfig):
name = 'headlesspreview'
| from django.apps import AppConfig
class HeadlesspreviewConfig(AppConfig):
name = 'headlesspreview'
| none | 1 | 1.118269 | 1 |
|
LipSDP/solve_sdp.py | revbucket/LipSDP | 1 | 8199 | <reponame>revbucket/LipSDP
import argparse
import numpy as np
import matlab.engine
from scipy.io import savemat
import os
from time import time
def main(args):
start_time = time()
    eng = matlab.engine.start_matlab()
    # file_dir is used by the addpath calls below but was never defined in this
    # snippet; assume it is the directory containing this script.
    file_dir = os.path.dirname(os.path.abspath(__file__))
eng.addpath(os.path.join(file_dir, 'matlab_engine'))
eng.addpath(os.path.join(file_dir, r'matlab_engine/weight_utils'))
eng.addpath(os.path.join(file_dir, r'matlab_engine/error_messages'))
eng.addpath(os.path.join(file_dir, r'examples/saved_weights'))
network = {
'alpha': matlab.double([args.alpha]),
'beta': matlab.double([args.beta]),
'weight_path': args.weight_path,
}
lip_params = {
'formulation': args.form,
'split': matlab.logical([args.split]),
'parallel': matlab.logical([args.parallel]),
'verbose': matlab.logical([args.verbose]),
'split_size': matlab.double([args.split_size]),
'num_neurons': matlab.double([args.num_neurons]),
'num_workers': matlab.double([args.num_workers]),
'num_dec_vars': matlab.double([args.num_decision_vars])
}
L = eng.solve_LipSDP(network, lip_params, nargout=1)
if lip_params['verbose']:
print(f'LipSDP-{args.form.capitalize()} gives a Lipschitz constant of %.03f' % L)
print('Total time %.03f' % (time() - start_time))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--form',
default='neuron',
const='neuron',
nargs='?',
choices=('neuron', 'network', 'layer', 'network-rand', 'network-dec-vars'),
help='LipSDP formulation to use')
parser.add_argument('-v', '--verbose',
action='store_true',
help='prints CVX output from solve if supplied')
parser.add_argument('--alpha',
type=float,
default=0,
nargs=1,
help='lower bound for slope restriction bound')
parser.add_argument('--beta',
type=float,
default=1,
nargs=1,
help='lower bound for slope restriction bound')
parser.add_argument('--num-neurons',
type=int,
default=100,
nargs=1,
help='number of neurons to couple for LipSDP-Network-rand formulation')
parser.add_argument('--split',
action='store_true',
help='splits network into subnetworks for more efficient solving if supplied')
parser.add_argument('--parallel',
action='store_true',
help='parallelizes solving for split formulations if supplied')
parser.add_argument('--split-size',
type=int,
default=2,
nargs=1,
help='number of layers in each subnetwork for splitting formulations')
parser.add_argument('--num-workers',
type=int,
default=0,
nargs=1,
help='number of workers for parallelization of splitting formulations')
parser.add_argument('--num-decision-vars',
type=int,
default=10,
nargs=1,
help='specify number of decision variables to be used for LipSDP')
parser.add_argument('--weight-path',
type=str,
required=True,
nargs=1,
help='path of weights corresponding to trained neural network model')
args = parser.parse_args()
if args.parallel is True and args.num_workers[0] < 1:
raise ValueError('When you use --parallel, --num-workers must be an integer >= 1.')
if args.split is True and args.split_size[0] < 1:
raise ValueError('When you use --split, --split-size must be an integer >= 1.')
main(args)
| import argparse
import numpy as np
import matlab.engine
from scipy.io import savemat
import os
from time import time
def main(args):
start_time = time()
    eng = matlab.engine.start_matlab()
    # file_dir is used by the addpath calls below but was never defined in this
    # snippet; assume it is the directory containing this script.
    file_dir = os.path.dirname(os.path.abspath(__file__))
eng.addpath(os.path.join(file_dir, 'matlab_engine'))
eng.addpath(os.path.join(file_dir, r'matlab_engine/weight_utils'))
eng.addpath(os.path.join(file_dir, r'matlab_engine/error_messages'))
eng.addpath(os.path.join(file_dir, r'examples/saved_weights'))
network = {
'alpha': matlab.double([args.alpha]),
'beta': matlab.double([args.beta]),
'weight_path': args.weight_path,
}
lip_params = {
'formulation': args.form,
'split': matlab.logical([args.split]),
'parallel': matlab.logical([args.parallel]),
'verbose': matlab.logical([args.verbose]),
'split_size': matlab.double([args.split_size]),
'num_neurons': matlab.double([args.num_neurons]),
'num_workers': matlab.double([args.num_workers]),
'num_dec_vars': matlab.double([args.num_decision_vars])
}
L = eng.solve_LipSDP(network, lip_params, nargout=1)
if lip_params['verbose']:
print(f'LipSDP-{args.form.capitalize()} gives a Lipschitz constant of %.03f' % L)
print('Total time %.03f' % (time() - start_time))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--form',
default='neuron',
const='neuron',
nargs='?',
choices=('neuron', 'network', 'layer', 'network-rand', 'network-dec-vars'),
help='LipSDP formulation to use')
parser.add_argument('-v', '--verbose',
action='store_true',
help='prints CVX output from solve if supplied')
parser.add_argument('--alpha',
type=float,
default=0,
nargs=1,
help='lower bound for slope restriction bound')
parser.add_argument('--beta',
type=float,
default=1,
nargs=1,
help='lower bound for slope restriction bound')
parser.add_argument('--num-neurons',
type=int,
default=100,
nargs=1,
help='number of neurons to couple for LipSDP-Network-rand formulation')
parser.add_argument('--split',
action='store_true',
help='splits network into subnetworks for more efficient solving if supplied')
parser.add_argument('--parallel',
action='store_true',
help='parallelizes solving for split formulations if supplied')
parser.add_argument('--split-size',
type=int,
default=2,
nargs=1,
help='number of layers in each subnetwork for splitting formulations')
parser.add_argument('--num-workers',
type=int,
default=0,
nargs=1,
help='number of workers for parallelization of splitting formulations')
parser.add_argument('--num-decision-vars',
type=int,
default=10,
nargs=1,
help='specify number of decision variables to be used for LipSDP')
parser.add_argument('--weight-path',
type=str,
required=True,
nargs=1,
help='path of weights corresponding to trained neural network model')
args = parser.parse_args()
if args.parallel is True and args.num_workers[0] < 1:
raise ValueError('When you use --parallel, --num-workers must be an integer >= 1.')
if args.split is True and args.split_size[0] < 1:
raise ValueError('When you use --split, --split-size must be an integer >= 1.')
main(args) | none | 1 | 2.161298 | 2 |
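A sketch of preparing a weight file and invoking the LipSDP script above. The .mat layout (a cell-style array stored under the key 'weights') and the output path are assumptions modelled on the examples/saved_weights directory the script adds to the MATLAB path, not something this record guarantees:
import numpy as np
from scipy.io import savemat

rng = np.random.default_rng(0)
layers = [rng.standard_normal((64, 784)), rng.standard_normal((10, 64))]
savemat('examples/saved_weights/random_weights.mat',
        {'weights': np.array(layers, dtype=object)})   # stored as a MATLAB cell array

# then, from the shell:
#   python solve_sdp.py --form neuron --verbose \
#       --weight-path examples/saved_weights/random_weights.mat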