# app/display_modules/hmp/tests/test_module.py (MetaGenScope/metagenscope-server)
"""Test suite for HMP model."""
from mongoengine import ValidationError
from app.analysis_results.analysis_result_models import AnalysisResultWrapper
from app.display_modules.display_module_base_test import BaseDisplayModuleTest
from app.display_modules.hmp import HMPDisplayModule
from app.samples.sample_models import Sample
from app.display_modules.hmp.models import HMPResult
from app.display_modules.hmp.constants import MODULE_NAME
from app.display_modules.hmp.tests.factory import HMPFactory
from app.tool_results.hmp_sites.tests.factory import create_hmp_sites
from .factory import fake_categories, fake_sites
class TestHMPResult(BaseDisplayModuleTest):
"""Test suite for HMP model."""
def test_get_hmp(self):
"""Ensure getting a single HMP behaves correctly."""
hmp = HMPFactory()
self.generic_getter_test(hmp, MODULE_NAME,
verify_fields=['categories', 'sites', 'data'])
def test_add_hmp(self):
"""Ensure HMP model is created correctly."""
hmp = HMPFactory()
self.generic_adder_test(hmp, MODULE_NAME)
def test_add_missing_category(self):
"""Ensure saving model fails if category is missing from `data`."""
hmp = HMPResult(categories=fake_categories(),
sites=fake_sites(),
data={})
wrapper = AnalysisResultWrapper(data=hmp)
self.assertRaises(ValidationError, wrapper.save)
def test_run_hmp_sample_group(self): # pylint: disable=invalid-name
"""Ensure hmp run_sample_group produces correct results."""
def create_sample(i):
"""Create unique sample for index i."""
data = create_hmp_sites()
return Sample(name=f'Sample{i}',
metadata={'foobar': f'baz{i}'},
hmp_site_dists=data).save()
self.generic_run_group_test(create_sample,
HMPDisplayModule)
| """Test suite for HMP model."""
from mongoengine import ValidationError
from app.analysis_results.analysis_result_models import AnalysisResultWrapper
from app.display_modules.display_module_base_test import BaseDisplayModuleTest
from app.display_modules.hmp import HMPDisplayModule
from app.samples.sample_models import Sample
from app.display_modules.hmp.models import HMPResult
from app.display_modules.hmp.constants import MODULE_NAME
from app.display_modules.hmp.tests.factory import HMPFactory
from app.tool_results.hmp_sites.tests.factory import create_hmp_sites
from .factory import (
HMPFactory,
fake_categories,
fake_sites
)
class TestHMPResult(BaseDisplayModuleTest):
"""Test suite for HMP model."""
def test_get_hmp(self):
"""Ensure getting a single HMP behaves correctly."""
hmp = HMPFactory()
self.generic_getter_test(hmp, MODULE_NAME,
verify_fields=['categories', 'sites', 'data'])
def test_add_hmp(self):
"""Ensure HMP model is created correctly."""
hmp = HMPFactory()
self.generic_adder_test(hmp, MODULE_NAME)
def test_add_missing_category(self):
"""Ensure saving model fails if category is missing from `data`."""
hmp = HMPResult(categories=fake_categories(),
sites=fake_sites(),
data={})
wrapper = AnalysisResultWrapper(data=hmp)
self.assertRaises(ValidationError, wrapper.save)
def test_run_hmp_sample_group(self): # pylint: disable=invalid-name
"""Ensure hmp run_sample_group produces correct results."""
def create_sample(i):
"""Create unique sample for index i."""
data = create_hmp_sites()
return Sample(name=f'Sample{i}',
metadata={'foobar': f'baz{i}'},
hmp_site_dists=data).save()
self.generic_run_group_test(create_sample,
HMPDisplayModule)
| en | 0.733959 | Test suite for HMP model. Test suite for HMP model. Ensure getting a single HMP behaves correctly. Ensure HMP model is created correctly. Ensure saving model fails if category is missing from `data`. # pylint: disable=invalid-name Ensure hmp run_sample_group produces correct results. Create unique sample for index i. | 2.461471 | 2 |

# src/test/testcases/testMemPBA.py (open-power/sbe)
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# $Source: src/test/testcases/testMemPBA.py $
#
# OpenPOWER sbe Project
#
# Contributors Listed Below - COPYRIGHT 2016,2019
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
from __future__ import print_function
import os
import sys
sys.path.append("targets/p9_nimbus/sbeTest" )
sys.path.append("targets/p9_axone/sbeTest" )
import testUtil
import testMemUtil
err = False
# MAIN Test Run Starts Here...
#-------------------------------------------------
def main( ):
# First Case without Fast Mode without LCO
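# Note: the third argument to testMemUtil.putmem/getmem selects the access mode.
# Judging from the case comments in this test, 0x02 appears to be the base PBA
# access, 0x22 the same access with fast mode enabled, and 0x62 with both fast
# mode and LCO enabled; this reading is inferred from the test itself rather
# than from the testMemUtil API.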
testUtil.runCycles( 10000000 )
# Put mem PBA
data = os.urandom(128*2)
data = [ord(c) for c in data]
testMemUtil.putmem(0x08000000, data, 0x02)
# Get mem PBA - WO FMODE, WO LCO
readData = testMemUtil.getmem(0x08000000, 128*2, 0x02)
if(data == readData):
print ("Success - Write-Read PBA - WO FMODE, WO LCO")
else:
print(data)
print(readData)
raise Exception('data mismatch')
# Partial Write test
readData = testMemUtil.getmem(0x08000000, 128*3, 0x02)
data = os.urandom(128)
data = [ord(c) for c in data]
testMemUtil.putmem(0x08000000+128, data, 0x02)
readBackData = testMemUtil.getmem(0x08000000, 128*3, 0x02)
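# Build the expected image for the partial write: the untouched first 128-byte
# block, the freshly written middle block, then the untouched remainder.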
sandwichData = readData[:128]+data+readData[len(data)+128:]
if(sandwichData == readBackData):
print ("Success - Write_Part-Read PBA - WO FMODE, WO LCO")
else:
print(readData)
print(data)
print(readBackData)
print(sandwichData)
raise Exception('data mismatch')
# Second Case with Fast Mode without LCO
testUtil.runCycles( 10000000 )
# Put mem PBA - W FMODE, WO LCO
data = os.urandom(128*2)
data = [ord(c) for c in data]
testMemUtil.putmem(0x08000000, data, 0x22)
# Get mem PBA
readData = testMemUtil.getmem(0x08000000, 128*2, 0x22)
if(data == readData):
print ("Success - Write-Read PBA - W FMODE, WO LCO")
else:
print(data)
print(readData)
raise Exception('data mismatch')
# Partial Write test
readData = testMemUtil.getmem(0x08000000, 128*3, 0x22)
data = os.urandom(128)
data = [ord(c) for c in data]
testMemUtil.putmem(0x08000000+128, data, 0x22)
readBackData = testMemUtil.getmem(0x08000000, 128*3, 0x22)
sandwichData = readData[:128]+data+readData[len(data)+128:]
if(sandwichData == readBackData):
print ("Success - Write_Part-Read PBA - W FMODE, WO LCO")
else:
print(readData)
print(data)
print(readBackData)
print(sandwichData)
raise Exception('data mismatch')
# Third Case with Fast Mode with LCO
testUtil.runCycles( 10000000 )
# Put mem PBA - W FMODE, W LCO
data = os.urandom(128*2)
data = [ord(c) for c in data]
testMemUtil.putmem(0x08000000, data, 0x62)
# Get mem PBA
readData = testMemUtil.getmem(0x08000000, 128*2, 0x62)
if(data == readData):
print ("Success - Write-Read PBA - W FMODE, W LCO")
else:
print(data)
print(readData)
raise Exception('data mismatch')
# Partial Write test
readData = testMemUtil.getmem(0x08000000, 128*3, 0x62)
data = os.urandom(128)
data = [ord(c) for c in data]
testMemUtil.putmem(0x08000000+128, data, 0x62)
readBackData = testMemUtil.getmem(0x08000000, 128*3, 0x62)
sandwichData = readData[:128]+data+readData[len(data)+128:]
if(sandwichData == readBackData):
print ("Success - Write_Part-Read PBA - W FMODE, W LCO")
else:
print(readData)
print(data)
print(readBackData)
print(sandwichData)
raise Exception('data mismatch')
#-------------------------------------------------
# Calling all test code
#-------------------------------------------------
if testUtil.getMachineName() == "axone":
try:
main()
except:
print ( "\nTest Suite completed with error(s)" )
testUtil.collectFFDC()
raise
print ( "\nTest Suite completed with no errors" )
else:
main()
if err:
print ("\nTest Suite completed with error(s)")
#sys.exit(1)
else:
print ("\nTest Suite completed with no errors")
#sys.exit(0);

# source/code/testing/kms.py (eshack94/aws-ops-automator)
######################################################################################################################
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Amazon Software License (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://aws.amazon.com/asl/ #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import boto3
import services.kms_service
class Kms(object):
def __init__(self, region=None, session=None):
self.region = region if region is not None else boto3.Session().region_name
self.session = session if session is not None else boto3.Session(region_name=self.region)
self.kms_client = self.session.client("kms", region_name=self.region)
self.kms_service = services.create_service("kms", session=self.session)
def get_kms_key(self, keyid):
try:
key = self.kms_service.get(services.kms_service.KEY,
region=self.region,
KeyId=keyid)
return key
except Exception as ex:
if getattr(ex, "response", {}).get("Error", {}).get("Code") == "NotFoundException":
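# Key id was not found; if it is neither an ARN nor already an alias name,
# retry the lookup with the "alias/" prefix.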
if not keyid.startswith("arn") and not keyid.startswith("alias/"):
return self.get_kms_key("alias/" + keyid)
return None
else:
raise ex
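# Minimal usage sketch (kept as comments; the region name and key alias below
# are hypothetical placeholders, not values from this repository):
#
#   kms = Kms(region="us-east-1")
#   key = kms.get_kms_key("my-test-key")  # retries as "alias/my-test-key" if not found
#   if key is None:
#       print("KMS key not found")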

# tensorflow/python/ops/init_ops_v2.py (computationalartist/tensorflow)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initializers for TF 2."""
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops.init_ops import _compute_fans
from tensorflow.python.util.tf_export import tf_export
_PARTITION_SHAPE = "partition_shape"
_PARTITION_OFFSET = "partition_offset"
class Initializer(object):
"""Initializer base class: all initializers inherit from this class.
Initializers should implement a `__call__` method with the following
signature:
```python
def __call__(self, shape, dtype=None, **kwargs):
# returns a tensor of shape `shape` and dtype `dtype`
# containing values drawn from a distribution of your choice.
```
"""
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. If not provided will return tensor
of `tf.float32`.
**kwargs: Additional keyword arguments. Accepted values:
`partition_shape` and `partition_offset`. Used when creating a single
partition in a partitioned variable. `partition_shape` is the shape of
the partition (i.e. the shape of the returned tensor) and
`partition_offset` is a tuple of `int` specifying the offset of this
partition w.r.t each axis. For example, a tensor of shape `(30, 100)`
can be partitioned into two partitions: `p0` of shape `(10, 100)` and
`p1` of shape `(20, 100)`; if the initializer is called with
`partition_shape=(20, 100)` and `partition_offset=(10, 0)`, it should
return the value for `p1`.
"""
raise NotImplementedError
def get_config(self):
"""Returns the configuration of the initializer as a JSON-serializable dict.
Returns:
A JSON-serializable Python dict.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary.
Example:
```python
initializer = RandomUniform(-1, 1)
config = initializer.get_config()
initializer = RandomUniform.from_config(config)
```
Args:
config: A Python dictionary.
It will typically be the output of `get_config`.
Returns:
An Initializer instance.
"""
config.pop("dtype", None)
return cls(**config)
def _validate_kwargs(self, kwargs, support_partition=True):
for kwarg in kwargs:
if kwarg not in [_PARTITION_SHAPE, _PARTITION_OFFSET]:
raise TypeError(
"Keyword argument should be one of "
f"{list([_PARTITION_SHAPE, _PARTITION_OFFSET])}. Received: {kwarg}")
elif not support_partition:
raise ValueError(
f"{self.__class__.__name__} initializer doesn't support "
"partition-related arguments")
@tf_export("zeros_initializer", v1=[])
class Zeros(Initializer):
"""Initializer that generates tensors initialized to 0.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.zeros_initializer())
>>> v1
<tf.Variable ... shape=(3,) ... numpy=array([0., 0., 0.], dtype=float32)>
>>> v2
<tf.Variable ... shape=(3, 3) ... numpy=
array([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]], dtype=float32)>
>>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))
(<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ...
"""
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not numeric or boolean.
"""
self._validate_kwargs(kwargs)
dtype = dtypes.as_dtype(dtype)
if not dtype.is_numpy_compatible or dtype == dtypes.string:
raise ValueError("Argument `dtype` expected to be numeric or boolean. "
f"Received {dtype}.")
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return array_ops.zeros(shape, dtype)
@tf_export("ones_initializer", v1=[])
class Ones(Initializer):
"""Initializer that generates tensors initialized to 1.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.ones_initializer())
>>> v1
<tf.Variable ... shape=(3,) ... numpy=array([1., 1., 1.], dtype=float32)>
>>> v2
<tf.Variable ... shape=(3, 3) ... numpy=
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype=float32)>
>>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))
(<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ...
"""
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not numeric or boolean.
"""
self._validate_kwargs(kwargs)
dtype = dtypes.as_dtype(dtype)
if not dtype.is_numpy_compatible or dtype == dtypes.string:
raise ValueError("Argument `dtype` expected to be numeric or boolean. "
f"Received {dtype}.")
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return array_ops.ones(shape, dtype)
@tf_export("constant_initializer", v1=[])
class Constant(Initializer):
"""Initializer that generates tensors with constant values.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
`tf.constant_initializer` returns an object which when called returns a tensor
populated with the `value` specified in the constructor. This `value` must be
convertible to the requested `dtype`.
The argument `value` can be a scalar constant value, or a list of
values. Scalars broadcast to whichever shape is requested from the
initializer.
If `value` is a list, then the length of the list must be equal to the number
of elements implied by the desired shape of the tensor. If the total number of
elements in `value` is not equal to the number of elements required by the
tensor shape, the initializer will raise a `TypeError`.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.constant_initializer(2.))
>>> v1
<tf.Variable ... shape=(3,) ... numpy=array([2., 2., 2.], dtype=float32)>
>>> v2
<tf.Variable ... shape=(3, 3) ... numpy=
array([[2., 2., 2.],
[2., 2., 2.],
[2., 2., 2.]], dtype=float32)>
>>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))
(<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ...
>>> value = [0, 1, 2, 3, 4, 5, 6, 7]
>>> init = tf.constant_initializer(value)
>>> # Fitting shape
>>> tf.Variable(init(shape=[2, 4], dtype=tf.float32))
<tf.Variable ...
array([[0., 1., 2., 3.],
[4., 5., 6., 7.]], dtype=float32)>
>>> # Larger shape
>>> tf.Variable(init(shape=[3, 4], dtype=tf.float32))
Traceback (most recent call last):
...
TypeError: ...value has 8 elements, shape is (3, 4) with 12 elements...
>>> # Smaller shape
>>> tf.Variable(init(shape=[2, 3], dtype=tf.float32))
Traceback (most recent call last):
...
TypeError: ...value has 8 elements, shape is (2, 3) with 6 elements...
Args:
value: A Python scalar, list or tuple of values, or a N-dimensional numpy
array. All elements of the initialized variable will be set to the
corresponding value in the `value` argument.
Raises:
TypeError: If the input `value` is not one of the expected types.
"""
def __init__(self, value=0):
if not (np.isscalar(value) or isinstance(value, (list, tuple, np.ndarray))):
raise TypeError(
f"Invalid type for initial value: {type(value).__name__}. Expected "
"Python scalar, list or tuple of values, or numpy.ndarray.")
self.value = value
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. If not provided the dtype of the
tensor created will be the type of the initial value.
**kwargs: Additional keyword arguments.
Raises:
TypeError: If the initializer cannot create a tensor of the requested
dtype.
"""
self._validate_kwargs(kwargs, support_partition=False)
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
return constant_op.constant(self.value, dtype=dtype, shape=shape)
def get_config(self):
return {"value": self.value}
@tf_export("random_uniform_initializer", v1=[])
class RandomUniform(Initializer):
"""Initializer that generates tensors with a uniform distribution.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.ones_initializer())
>>> v1
<tf.Variable ... shape=(3,) ... numpy=array([1., 1., 1.], dtype=float32)>
>>> v2
<tf.Variable ... shape=(3, 3) ... numpy=
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype=float32)>
>>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))
(<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ...
Args:
minval: A python scalar or a scalar tensor. Lower bound of the range of
random values to generate (inclusive).
maxval: A python scalar or a scalar tensor. Upper bound of the range of
random values to generate (exclusive).
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
"""
def __init__(self, minval=-0.05, maxval=0.05, seed=None):
self.minval = minval
self.maxval = maxval
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point and integer
types are supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not numeric.
"""
self._validate_kwargs(kwargs)
dtype = dtypes.as_dtype(dtype)
if not dtype.is_floating and not dtype.is_integer:
raise ValueError("Argument `dtype` expected to be numeric or boolean. "
f"Received {dtype}.")
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return self._random_generator.random_uniform(shape, self.minval,
self.maxval, dtype)
def get_config(self):
return {
"minval": self.minval,
"maxval": self.maxval,
"seed": self.seed
}
@tf_export("random_normal_initializer", v1=[])
class RandomNormal(Initializer):
"""Initializer that generates tensors with a normal distribution.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3,
... tf.random_normal_initializer(mean=1., stddev=2.))
>>> v1
<tf.Variable ... shape=(3,) ... numpy=array([...], dtype=float32)>
>>> v2
<tf.Variable ... shape=(3, 3) ... numpy=
...
>>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))
(<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ...
Args:
mean: a python scalar or a scalar tensor. Mean of the random values to
generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the random
values to generate.
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not floating point
"""
self._validate_kwargs(kwargs)
dtype = _assert_float_dtype(dtype)
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return self._random_generator.random_normal(shape, self.mean, self.stddev,
dtype)
def get_config(self):
return {
"mean": self.mean,
"stddev": self.stddev,
"seed": self.seed
}
class TruncatedNormal(Initializer):
"""Initializer that generates a truncated normal distribution.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
These values are similar to values from a `tf.initializers.RandomNormal`
except that values more than two standard deviations from the mean are
discarded and re-drawn. This is the recommended initializer for neural network
weights and filters.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(
... 3, tf.initializers.TruncatedNormal(mean=1., stddev=2.))
>>> v1
<tf.Variable ... shape=(3,) ... numpy=array([...], dtype=float32)>
>>> v2
<tf.Variable ... shape=(3, 3) ... numpy=
...
>>> make_variables(4, tf.initializers.RandomUniform(minval=-1., maxval=1.))
(<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ...
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not floating point
"""
self._validate_kwargs(kwargs)
dtype = _assert_float_dtype(dtype)
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return self._random_generator.truncated_normal(shape, self.mean,
self.stddev, dtype)
def get_config(self):
return {
"mean": self.mean,
"stddev": self.stddev,
"seed": self.seed
}
class VarianceScaling(Initializer):
"""Initializer capable of adapting its scale to the shape of weights tensors.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
With `distribution="truncated_normal" or "untruncated_normal"`, samples are
drawn from a truncated/untruncated normal distribution with a mean of zero and
a standard deviation (after truncation, if used) `stddev = sqrt(scale / n)`
where n is:
- number of input units in the weight tensor, if mode = "fan_in"
- number of output units, if mode = "fan_out"
- average of the numbers of input and output units, if mode = "fan_avg"
With `distribution="uniform"`, samples are drawn from a uniform distribution
within [-limit, limit], with `limit = sqrt(3 * scale / n)`.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.initializers.VarianceScaling(scale=1.))
>>> v1
<tf.Variable ... shape=(3,) ... numpy=array([...], dtype=float32)>
>>> v2
<tf.Variable ... shape=(3, 3) ... numpy=
...
>>> make_variables(4, tf.initializers.VarianceScaling(distribution='uniform'))
(<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ...
Args:
scale: Scaling factor (positive float).
mode: One of "fan_in", "fan_out", "fan_avg".
distribution: Random distribution to use. One of "truncated_normal",
"untruncated_normal" and "uniform".
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
Raises:
ValueError: In case of an invalid value for the "scale", mode" or
"distribution" arguments.
"""
def __init__(self,
scale=1.0,
mode="fan_in",
distribution="truncated_normal",
seed=None):
if scale <= 0.:
raise ValueError("Argument `scale` must be a positive float. Received: "
f"{scale}")
if mode not in {"fan_in", "fan_out", "fan_avg"}:
raise ValueError("Argument `mode` should be one of ('fan_in', 'fan_out', "
f"'fan_avg'). Received: {mode}")
distribution = distribution.lower()
# Compatibility with keras-team/keras.
if distribution == "normal":
distribution = "truncated_normal"
if distribution not in {"uniform", "truncated_normal",
"untruncated_normal"}:
raise ValueError("Argument `distribution` should be one of ('uniform', "
"'truncated_normal', 'untruncated_normal'). Received: "
f"{distribution}")
self.scale = scale
self.mode = mode
self.distribution = distribution
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not floating point
"""
self._validate_kwargs(kwargs)
dtype = _assert_float_dtype(dtype)
scale = self.scale
fan_in, fan_out = _compute_fans(shape)
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
if self.mode == "fan_in":
scale /= max(1., fan_in)
elif self.mode == "fan_out":
scale /= max(1., fan_out)
else:
scale /= max(1., (fan_in + fan_out) / 2.)
if self.distribution == "truncated_normal":
# constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
stddev = math.sqrt(scale) / .87962566103423978
return self._random_generator.truncated_normal(shape, 0.0, stddev, dtype)
elif self.distribution == "untruncated_normal":
stddev = math.sqrt(scale)
return self._random_generator.random_normal(shape, 0.0, stddev, dtype)
else:
limit = math.sqrt(3.0 * scale)
return self._random_generator.random_uniform(shape, -limit, limit, dtype)
def get_config(self):
return {
"scale": self.scale,
"mode": self.mode,
"distribution": self.distribution,
"seed": self.seed
}
class Orthogonal(Initializer):
"""Initializer that generates an orthogonal matrix.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
If the shape of the tensor to initialize is two-dimensional, it is initialized
with an orthogonal matrix obtained from the QR decomposition of a matrix of
random numbers drawn from a normal distribution.
If the matrix has fewer rows than columns then the output will have orthogonal
rows. Otherwise, the output will have orthogonal columns.
If the shape of the tensor to initialize is more than two-dimensional,
a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`
is initialized, where `n` is the length of the shape vector.
The matrix is subsequently reshaped to give a tensor of the desired shape.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.initializers.Orthogonal())
>>> v1
<tf.Variable ... shape=(3, 3) ...
>>> v2
<tf.Variable ... shape=(3, 3, 3) ...
>>> make_variables(4, tf.initializers.Orthogonal(gain=0.5))
(<tf.Variable ... shape=(4, 4) dtype=float32...
<tf.Variable ... shape=(4, 4, 4) dtype=float32...
Args:
gain: multiplicative factor to apply to the orthogonal matrix
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
References:
[Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)
([pdf](https://arxiv.org/pdf/1312.6120.pdf))
"""
def __init__(self, gain=1.0, seed=None):
self.gain = gain
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not floating point or the input shape is not
valid.
"""
self._validate_kwargs(kwargs, support_partition=False)
dtype = _assert_float_dtype(dtype)
# Check the shape
if len(shape) < 2:
raise ValueError("The tensor to initialize, specified by argument `shape`"
" must be at least two-dimensional. Received shape="
f"{shape}")
# Flatten the input shape with the last dimension remaining
# its original shape so it works for conv2d
num_rows = 1
for dim in shape[:-1]:
num_rows *= dim
num_cols = shape[-1]
flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))
# Generate a random matrix
a = self._random_generator.random_normal(flat_shape, dtype=dtype)
# Compute the qr factorization
q, r = gen_linalg_ops.qr(a, full_matrices=False)
# Make Q uniform (Haar-distributed): fix the signs using the diagonal of R
# so the QR factorization is unique.
d = array_ops.diag_part(r)
q *= math_ops.sign(d)
if num_rows < num_cols:
q = array_ops.matrix_transpose(q)
return self.gain * array_ops.reshape(q, shape)
def get_config(self):
return {"gain": self.gain, "seed": self.seed}
class Identity(Initializer):
"""Initializer that generates the identity matrix.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Only usable for generating 2D matrices.
Examples:
>>> def make_variable(k, initializer):
... return tf.Variable(initializer(shape=[k, k], dtype=tf.float32))
>>> make_variable(2, tf.initializers.Identity())
<tf.Variable ... shape=(2, 2) dtype=float32, numpy=
array([[1., 0.],
[0., 1.]], dtype=float32)>
>>> make_variable(3, tf.initializers.Identity(gain=0.5))
<tf.Variable ... shape=(3, 3) dtype=float32, numpy=
array([[0.5, 0. , 0. ],
[0. , 0.5, 0. ],
[0. , 0. , 0.5]], dtype=float32)>
Args:
gain: Multiplicative factor to apply to the identity matrix.
"""
def __init__(self, gain=1.0):
self.gain = gain
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not floating point
ValueError: If the requested shape does not have exactly two axes.
"""
self._validate_kwargs(kwargs, support_partition=False)
dtype = _assert_float_dtype(dtype)
if len(shape) != 2:
raise ValueError("The tensor to initialize, specified by argument `shape`"
" must be at least two-dimensional. Received shape="
f"{shape}")
initializer = linalg_ops_impl.eye(*shape, dtype=dtype)
return self.gain * initializer
def get_config(self):
return {"gain": self.gain}
class GlorotUniform(VarianceScaling):
"""The Glorot uniform initializer, also called Xavier uniform initializer.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Draws samples from a uniform distribution within [-limit, limit] where `limit`
is `sqrt(6 / (fan_in + fan_out))` where `fan_in` is the number of input units
in the weight tensor and `fan_out` is the number of output units in the weight
tensor.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.initializers.GlorotUniform())
>>> v1
<tf.Variable ... shape=(3, 3) ...
>>> v2
<tf.Variable ... shape=(3, 3, 3) ...
>>> make_variables(4, tf.initializers.RandomNormal())
(<tf.Variable ... shape=(4, 4) dtype=float32...
<tf.Variable ... shape=(4, 4, 4) dtype=float32...
Args:
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
References:
[Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
"""
def __init__(self, seed=None):
super(GlorotUniform, self).__init__(
scale=1.0,
mode="fan_avg",
distribution="uniform",
seed=seed)
def get_config(self):
return {"seed": self.seed}
class GlorotNormal(VarianceScaling):
"""The Glorot normal initializer, also called Xavier normal initializer.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Draws samples from a truncated normal distribution centered on 0 with `stddev
= sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number of input units in
the weight tensor and `fan_out` is the number of output units in the weight
tensor.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.initializers.GlorotNormal())
>>> v1
<tf.Variable ... shape=(3, 3) ...
>>> v2
<tf.Variable ... shape=(3, 3, 3) ...
>>> make_variables(4, tf.initializers.RandomNormal())
(<tf.Variable ... shape=(4, 4) dtype=float32...
<tf.Variable ... shape=(4, 4, 4) dtype=float32...
Args:
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
References:
[Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
"""
def __init__(self, seed=None):
super(GlorotNormal, self).__init__(
scale=1.0,
mode="fan_avg",
distribution="truncated_normal",
seed=seed)
def get_config(self):
return {"seed": self.seed}
# Aliases.
# pylint: disable=invalid-name
zeros_initializer = Zeros
ones_initializer = Ones
constant_initializer = Constant
random_uniform_initializer = RandomUniform
random_normal_initializer = RandomNormal
truncated_normal_initializer = TruncatedNormal
variance_scaling_initializer = VarianceScaling
glorot_uniform_initializer = GlorotUniform
glorot_normal_initializer = GlorotNormal
orthogonal_initializer = Orthogonal
identity_initializer = Identity
# pylint: enable=invalid-name
def lecun_normal(seed=None):
"""LeCun normal initializer.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Draws samples from a truncated normal distribution centered on 0 with `stddev
= sqrt(1 / fan_in)` where `fan_in` is the number of input units in the weight
tensor.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.initializers.lecun_normal())
>>> v1
<tf.Variable ... shape=(3, 3) ...
>>> v2
<tf.Variable ... shape=(3, 3, 3) ...
>>> make_variables(4, tf.initializers.RandomNormal())
(<tf.Variable ... shape=(4, 4) dtype=float32...
<tf.Variable ... shape=(4, 4, 4) dtype=float32...
Args:
seed: A Python integer. Used to seed the random generator.
Returns:
A callable Initializer with `shape` and `dtype` arguments which generates a
tensor.
References:
- Self-Normalizing Neural Networks,
[Klambauer et al., 2017]
(https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)
([pdf]
(https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
- Efficient Backprop,
[Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
return VarianceScaling(
scale=1., mode="fan_in", distribution="truncated_normal", seed=seed)
def lecun_uniform(seed=None):
"""LeCun uniform initializer.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Draws samples from a uniform distribution within [-limit, limit] where `limit`
is `sqrt(3 / fan_in)` where `fan_in` is the number of input units in the
weight tensor.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.initializers.lecun_uniform())
>>> v1
<tf.Variable ... shape=(3, 3) ...
>>> v2
<tf.Variable ... shape=(3, 3, 3) ...
>>> make_variables(4, tf.initializers.RandomNormal())
(<tf.Variable ... shape=(4, 4) dtype=float32...
<tf.Variable ... shape=(4, 4, 4) dtype=float32...
Args:
seed: A Python integer. Used to seed the random generator.
Returns:
A callable Initializer with `shape` and `dtype` arguments which generates a
tensor.
References:
- Self-Normalizing Neural Networks,
[Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) # pylint: disable=line-too-long
([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
- Efficient Backprop,
[Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
return VarianceScaling(
scale=1., mode="fan_in", distribution="uniform", seed=seed)
def he_normal(seed=None):
"""He normal initializer.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
It draws samples from a truncated normal distribution centered on 0 with
`stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the
weight tensor.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.initializers.he_normal())
>>> v1
<tf.Variable ... shape=(3, 3) ...
>>> v2
<tf.Variable ... shape=(3, 3, 3) ...
>>> make_variables(4, tf.initializers.RandomNormal())
(<tf.Variable ... shape=(4, 4) dtype=float32...
<tf.Variable ... shape=(4, 4, 4) dtype=float32...
Args:
seed: A Python integer. Used to seed the random generator.
Returns:
A callable Initializer with `shape` and `dtype` arguments which generates a
tensor.
References:
[He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long
([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
"""
return VarianceScaling(
scale=2., mode="fan_in", distribution="truncated_normal", seed=seed)
def he_uniform(seed=None):
"""He uniform variance scaling initializer.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Draws samples from a uniform distribution within [-limit, limit] where `limit`
is `sqrt(6 / fan_in)` where `fan_in` is the number of input units in the
weight tensor.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.initializers.he_uniform())
>>> v1
<tf.Variable ... shape=(3, 3) ...
>>> v2
<tf.Variable ... shape=(3, 3, 3) ...
>>> make_variables(4, tf.initializers.RandomNormal())
(<tf.Variable ... shape=(4, 4) dtype=float32...
<tf.Variable ... shape=(4, 4, 4) dtype=float32...
Args:
seed: A Python integer. Used to seed the random generator.
Returns:
A callable Initializer with `shape` and `dtype` arguments which generates a
tensor.
References:
[He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long
([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
"""
return VarianceScaling(
scale=2., mode="fan_in", distribution="uniform", seed=seed)
# Utility functions.
def _assert_float_dtype(dtype):
"""Validate and return floating point type based on `dtype`.
`dtype` must be a floating point type.
Args:
dtype: The data type to validate.
Returns:
Validated type.
Raises:
ValueError: if `dtype` is not a floating point type.
"""
dtype = dtypes.as_dtype(dtype)
if not dtype.is_floating:
raise ValueError("Argument `dtype` is expected to be floating point. "
f"Received: {dtype}.")
return dtype
class _RandomGenerator(object):
"""Random generator that selects appropriate random ops."""
def __init__(self, seed=None):
super(_RandomGenerator, self).__init__()
if seed is not None:
# Stateless random ops requires 2-int seed.
self.seed = [seed, 0]
else:
self.seed = None
def random_normal(self, shape, mean=0.0, stddev=1, dtype=dtypes.float32):
"""A deterministic random normal if seed is passed."""
if self.seed:
op = stateless_random_ops.stateless_random_normal
else:
op = random_ops.random_normal
return op(
shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)
def random_uniform(self, shape, minval, maxval, dtype):
"""A deterministic random uniform if seed is passed."""
if self.seed:
op = stateless_random_ops.stateless_random_uniform
else:
op = random_ops.random_uniform
return op(
shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed)
def truncated_normal(self, shape, mean, stddev, dtype):
"""A deterministic truncated normal if seed is passed."""
if self.seed:
op = stateless_random_ops.stateless_truncated_normal
else:
op = random_ops.truncated_normal
return op(
shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)
# Compatibility aliases
# pylint: disable=invalid-name
zero = zeros = Zeros
one = ones = Ones
constant = Constant
uniform = random_uniform = RandomUniform
normal = random_normal = RandomNormal
truncated_normal = TruncatedNormal
identity = Identity
orthogonal = Orthogonal
glorot_normal = GlorotNormal
glorot_uniform = GlorotUniform
| # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initializers for TF 2."""
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops.init_ops import _compute_fans
from tensorflow.python.util.tf_export import tf_export
_PARTITION_SHAPE = "partition_shape"
_PARTITION_OFFSET = "partition_offset"
class Initializer(object):
"""Initializer base class: all initializers inherit from this class.
Initializers should implement a `__call__` method with the following
signature:
```python
def __call__(self, shape, dtype=None, **kwargs):
# returns a tensor of shape `shape` and dtype `dtype`
# containing values drawn from a distribution of your choice.
```
"""
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. If not provided will return tensor
of `tf.float32`.
**kwargs: Additional keyword arguments. Accepted values:
`partition_shape` and `partition_offset`. Used when creating a single
partition in a partitioned variable. `partition_shape` is the shape of
the partition (i.e. the shape of the returned tensor) and
`partition_offset` is a tuple of `int` specifying the offset of this
partition w.r.t each axis. For example, a tensor of shape `(30, 100)`
can be partitioned into two partitions: `p0` of shape `(10, 100)` and
`p1` of shape `(20, 100)`; if the initializer is called with
`partition_shape=(20, 100)` and `partition_offset=(10, 0)`, it should
return the value for `p1`.
"""
raise NotImplementedError
def get_config(self):
"""Returns the configuration of the initializer as a JSON-serializable dict.
Returns:
A JSON-serializable Python dict.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary.
Example:
```python
initializer = RandomUniform(-1, 1)
config = initializer.get_config()
initializer = RandomUniform.from_config(config)
```
Args:
config: A Python dictionary.
It will typically be the output of `get_config`.
Returns:
An Initializer instance.
"""
config.pop("dtype", None)
return cls(**config)
def _validate_kwargs(self, kwargs, support_partition=True):
for kwarg in kwargs:
if kwarg not in [_PARTITION_SHAPE, _PARTITION_OFFSET]:
raise TypeError(
"Keyword argument should be one of "
f"{list([_PARTITION_SHAPE, _PARTITION_OFFSET])}. Received: {kwarg}")
elif not support_partition:
raise ValueError(
f"{self.__class__.__name__} initializer doesn't support "
"partition-related arguments")
@tf_export("zeros_initializer", v1=[])
class Zeros(Initializer):
"""Initializer that generates tensors initialized to 0.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.zeros_initializer())
>>> v1
<tf.Variable ... shape=(3,) ... numpy=array([0., 0., 0.], dtype=float32)>
>>> v2
<tf.Variable ... shape=(3, 3) ... numpy=
array([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]], dtype=float32)>
>>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))
(<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ...
"""
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
supported.
**kwargs: Additional keyword arguments.
Raises:
      ValueError: If the dtype is not numeric or boolean.
"""
self._validate_kwargs(kwargs)
dtype = dtypes.as_dtype(dtype)
if not dtype.is_numpy_compatible or dtype == dtypes.string:
raise ValueError("Argument `dtype` expected to be numeric or boolean. "
f"Received {dtype}.")
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return array_ops.zeros(shape, dtype)
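# Illustrative sketch (not part of the original TensorFlow source): shows the
# `partition_shape`/`partition_offset` contract described in the base
# `Initializer.__call__` docstring, using the Zeros initializer defined above.
# The helper name is hypothetical and the call assumes eager execution.
def _example_partitioned_call():
  # Requests only partition `p1` (shape (20, 100)) of a (30, 100) variable,
  # mirroring the example in the base-class docstring; the offset is validated
  # but not needed to materialize a constant fill.
  init = Zeros()
  return init(shape=(30, 100), dtype=dtypes.float32,
              partition_shape=(20, 100), partition_offset=(10, 0))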
@tf_export("ones_initializer", v1=[])
class Ones(Initializer):
"""Initializer that generates tensors initialized to 1.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.ones_initializer())
>>> v1
<tf.Variable ... shape=(3,) ... numpy=array([1., 1., 1.], dtype=float32)>
>>> v2
<tf.Variable ... shape=(3, 3) ... numpy=
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype=float32)>
>>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))
(<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ...
"""
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
supported.
**kwargs: Additional keyword arguments.
Raises:
      ValueError: If the dtype is not numeric or boolean.
"""
self._validate_kwargs(kwargs)
dtype = dtypes.as_dtype(dtype)
if not dtype.is_numpy_compatible or dtype == dtypes.string:
raise ValueError("Argument `dtype` expected to be numeric or boolean. "
f"Received {dtype}.")
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return array_ops.ones(shape, dtype)
@tf_export("constant_initializer", v1=[])
class Constant(Initializer):
"""Initializer that generates tensors with constant values.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
`tf.constant_initializer` returns an object which when called returns a tensor
populated with the `value` specified in the constructor. This `value` must be
convertible to the requested `dtype`.
The argument `value` can be a scalar constant value, or a list of
values. Scalars broadcast to whichever shape is requested from the
initializer.
If `value` is a list, then the length of the list must be equal to the number
of elements implied by the desired shape of the tensor. If the total number of
elements in `value` is not equal to the number of elements required by the
tensor shape, the initializer will raise a `TypeError`.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.constant_initializer(2.))
>>> v1
<tf.Variable ... shape=(3,) ... numpy=array([2., 2., 2.], dtype=float32)>
>>> v2
<tf.Variable ... shape=(3, 3) ... numpy=
array([[2., 2., 2.],
[2., 2., 2.],
[2., 2., 2.]], dtype=float32)>
>>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))
(<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ...
>>> value = [0, 1, 2, 3, 4, 5, 6, 7]
>>> init = tf.constant_initializer(value)
>>> # Fitting shape
>>> tf.Variable(init(shape=[2, 4], dtype=tf.float32))
<tf.Variable ...
array([[0., 1., 2., 3.],
[4., 5., 6., 7.]], dtype=float32)>
>>> # Larger shape
>>> tf.Variable(init(shape=[3, 4], dtype=tf.float32))
Traceback (most recent call last):
...
TypeError: ...value has 8 elements, shape is (3, 4) with 12 elements...
>>> # Smaller shape
>>> tf.Variable(init(shape=[2, 3], dtype=tf.float32))
Traceback (most recent call last):
...
TypeError: ...value has 8 elements, shape is (2, 3) with 6 elements...
Args:
    value: A Python scalar, list or tuple of values, or an N-dimensional numpy
array. All elements of the initialized variable will be set to the
corresponding value in the `value` argument.
Raises:
TypeError: If the input `value` is not one of the expected types.
"""
def __init__(self, value=0):
if not (np.isscalar(value) or isinstance(value, (list, tuple, np.ndarray))):
raise TypeError(
f"Invalid type for initial value: {type(value).__name__}. Expected "
"Python scalar, list or tuple of values, or numpy.ndarray.")
self.value = value
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. If not provided the dtype of the
        tensor created will be the type of the initial value.
**kwargs: Additional keyword arguments.
Raises:
TypeError: If the initializer cannot create a tensor of the requested
dtype.
"""
self._validate_kwargs(kwargs, support_partition=False)
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
return constant_op.constant(self.value, dtype=dtype, shape=shape)
def get_config(self):
return {"value": self.value}
@tf_export("random_uniform_initializer", v1=[])
class RandomUniform(Initializer):
"""Initializer that generates tensors with a uniform distribution.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
  >>> v1, v2 = make_variables(
  ...     3, tf.random_uniform_initializer(minval=-1., maxval=1.))
  >>> v1
  <tf.Variable ... shape=(3,) ... numpy=array([...], dtype=float32)>
  >>> v2
  <tf.Variable ... shape=(3, 3) ... numpy=
  ...
>>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))
(<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ...
Args:
minval: A python scalar or a scalar tensor. Lower bound of the range of
random values to generate (inclusive).
maxval: A python scalar or a scalar tensor. Upper bound of the range of
random values to generate (exclusive).
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
"""
def __init__(self, minval=-0.05, maxval=0.05, seed=None):
self.minval = minval
self.maxval = maxval
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point and integer
types are supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not numeric.
"""
self._validate_kwargs(kwargs)
dtype = dtypes.as_dtype(dtype)
if not dtype.is_floating and not dtype.is_integer:
      raise ValueError("Argument `dtype` expected to be an integer or floating "
                       f"point type. Received {dtype}.")
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return self._random_generator.random_uniform(shape, self.minval,
self.maxval, dtype)
def get_config(self):
return {
"minval": self.minval,
"maxval": self.maxval,
"seed": self.seed
}
@tf_export("random_normal_initializer", v1=[])
class RandomNormal(Initializer):
"""Initializer that generates tensors with a normal distribution.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3,
... tf.random_normal_initializer(mean=1., stddev=2.))
>>> v1
<tf.Variable ... shape=(3,) ... numpy=array([...], dtype=float32)>
>>> v2
<tf.Variable ... shape=(3, 3) ... numpy=
...
>>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))
(<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ...
Args:
mean: a python scalar or a scalar tensor. Mean of the random values to
generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the random
values to generate.
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not floating point
"""
self._validate_kwargs(kwargs)
dtype = _assert_float_dtype(dtype)
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return self._random_generator.random_normal(shape, self.mean, self.stddev,
dtype)
def get_config(self):
return {
"mean": self.mean,
"stddev": self.stddev,
"seed": self.seed
}
class TruncatedNormal(Initializer):
"""Initializer that generates a truncated normal distribution.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
These values are similar to values from a `tf.initializers.RandomNormal`
except that values more than two standard deviations from the mean are
discarded and re-drawn. This is the recommended initializer for neural network
weights and filters.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(
... 3, tf.initializers.TruncatedNormal(mean=1., stddev=2.))
>>> v1
<tf.Variable ... shape=(3,) ... numpy=array([...], dtype=float32)>
>>> v2
<tf.Variable ... shape=(3, 3) ... numpy=
...
>>> make_variables(4, tf.initializers.RandomUniform(minval=-1., maxval=1.))
(<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ...
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not floating point
"""
self._validate_kwargs(kwargs)
dtype = _assert_float_dtype(dtype)
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return self._random_generator.truncated_normal(shape, self.mean,
self.stddev, dtype)
def get_config(self):
return {
"mean": self.mean,
"stddev": self.stddev,
"seed": self.seed
}
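# Illustrative sketch (not part of the original TensorFlow source): empirically
# checks the claim above that truncated-normal samples are re-drawn until they
# fall within two standard deviations of the mean. The helper name is
# hypothetical and the check assumes eager execution.
def _example_truncated_normal_bounds():
  init = TruncatedNormal(mean=0.0, stddev=2.0, seed=7)
  samples = init(shape=(10000,), dtype=dtypes.float32)
  # Every draw should lie inside [mean - 2 * stddev, mean + 2 * stddev] = [-4, 4].
  return math_ops.reduce_max(math_ops.abs(samples))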
class VarianceScaling(Initializer):
"""Initializer capable of adapting its scale to the shape of weights tensors.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
With `distribution="truncated_normal" or "untruncated_normal"`, samples are
drawn from a truncated/untruncated normal distribution with a mean of zero and
a standard deviation (after truncation, if used) `stddev = sqrt(scale / n)`
where n is:
- number of input units in the weight tensor, if mode = "fan_in"
- number of output units, if mode = "fan_out"
- average of the numbers of input and output units, if mode = "fan_avg"
With `distribution="uniform"`, samples are drawn from a uniform distribution
within [-limit, limit], with `limit = sqrt(3 * scale / n)`.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.initializers.VarianceScaling(scale=1.))
>>> v1
<tf.Variable ... shape=(3,) ... numpy=array([...], dtype=float32)>
>>> v2
<tf.Variable ... shape=(3, 3) ... numpy=
...
>>> make_variables(4, tf.initializers.VarianceScaling(distribution='uniform'))
(<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ...
Args:
scale: Scaling factor (positive float).
mode: One of "fan_in", "fan_out", "fan_avg".
distribution: Random distribution to use. One of "truncated_normal",
"untruncated_normal" and "uniform".
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
Raises:
    ValueError: In case of an invalid value for the "scale", "mode" or
"distribution" arguments.
"""
def __init__(self,
scale=1.0,
mode="fan_in",
distribution="truncated_normal",
seed=None):
if scale <= 0.:
raise ValueError("Argument `scale` must be a positive float. Received: "
f"{scale}")
if mode not in {"fan_in", "fan_out", "fan_avg"}:
raise ValueError("Argument `mode` should be one of ('fan_in', 'fan_out', "
f"'fan_avg'). Received: {mode}")
distribution = distribution.lower()
# Compatibility with keras-team/keras.
if distribution == "normal":
distribution = "truncated_normal"
if distribution not in {"uniform", "truncated_normal",
"untruncated_normal"}:
raise ValueError("Argument `distribution` should be one of ('uniform', "
"'truncated_normal', 'untruncated_normal'). Received: "
f"{distribution}")
self.scale = scale
self.mode = mode
self.distribution = distribution
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not floating point
"""
self._validate_kwargs(kwargs)
dtype = _assert_float_dtype(dtype)
scale = self.scale
fan_in, fan_out = _compute_fans(shape)
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
if self.mode == "fan_in":
scale /= max(1., fan_in)
elif self.mode == "fan_out":
scale /= max(1., fan_out)
else:
scale /= max(1., (fan_in + fan_out) / 2.)
if self.distribution == "truncated_normal":
# constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
stddev = math.sqrt(scale) / .87962566103423978
return self._random_generator.truncated_normal(shape, 0.0, stddev, dtype)
elif self.distribution == "untruncated_normal":
stddev = math.sqrt(scale)
return self._random_generator.random_normal(shape, 0.0, stddev, dtype)
else:
limit = math.sqrt(3.0 * scale)
return self._random_generator.random_uniform(shape, -limit, limit, dtype)
def get_config(self):
return {
"scale": self.scale,
"mode": self.mode,
"distribution": self.distribution,
"seed": self.seed
}
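# Illustrative sketch (not part of the original TensorFlow source): works
# through the scale -> stddev/limit arithmetic described in the VarianceScaling
# docstring for a dense kernel with fan_in=256 and fan_out=128. The numbers and
# the helper name are hypothetical, chosen only to make the formulas concrete.
def _example_variance_scaling_arithmetic():
  scale, fan_in, fan_out = 2.0, 256., 128.
  # mode="fan_in": n = fan_in, so stddev = sqrt(scale / n) (~0.088 here).
  stddev_fan_in = math.sqrt(scale / fan_in)
  # mode="fan_avg", distribution="uniform": limit = sqrt(3 * scale / n) (~0.177).
  limit_fan_avg = math.sqrt(3. * scale / ((fan_in + fan_out) / 2.))
  return stddev_fan_in, limit_fan_avg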
class Orthogonal(Initializer):
"""Initializer that generates an orthogonal matrix.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
If the shape of the tensor to initialize is two-dimensional, it is initialized
with an orthogonal matrix obtained from the QR decomposition of a matrix of
random numbers drawn from a normal distribution.
If the matrix has fewer rows than columns then the output will have orthogonal
rows. Otherwise, the output will have orthogonal columns.
If the shape of the tensor to initialize is more than two-dimensional,
a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`
is initialized, where `n` is the length of the shape vector.
The matrix is subsequently reshaped to give a tensor of the desired shape.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.initializers.Orthogonal())
>>> v1
<tf.Variable ... shape=(3, 3) ...
>>> v2
<tf.Variable ... shape=(3, 3, 3) ...
>>> make_variables(4, tf.initializers.Orthogonal(gain=0.5))
(<tf.Variable ... shape=(4, 4) dtype=float32...
<tf.Variable ... shape=(4, 4, 4) dtype=float32...
Args:
gain: multiplicative factor to apply to the orthogonal matrix
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
References:
[Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)
([pdf](https://arxiv.org/pdf/1312.6120.pdf))
"""
def __init__(self, gain=1.0, seed=None):
self.gain = gain
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not floating point or the input shape is not
valid.
"""
self._validate_kwargs(kwargs, support_partition=False)
dtype = _assert_float_dtype(dtype)
# Check the shape
if len(shape) < 2:
raise ValueError("The tensor to initialize, specified by argument `shape`"
" must be at least two-dimensional. Received shape="
f"{shape}")
# Flatten the input shape with the last dimension remaining
# its original shape so it works for conv2d
num_rows = 1
for dim in shape[:-1]:
num_rows *= dim
num_cols = shape[-1]
flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))
# Generate a random matrix
a = self._random_generator.random_normal(flat_shape, dtype=dtype)
# Compute the qr factorization
q, r = gen_linalg_ops.qr(a, full_matrices=False)
# Make Q uniform
d = array_ops.diag_part(r)
q *= math_ops.sign(d)
if num_rows < num_cols:
q = array_ops.matrix_transpose(q)
return self.gain * array_ops.reshape(q, shape)
def get_config(self):
return {"gain": self.gain, "seed": self.seed}
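# Illustrative sketch (not part of the original TensorFlow source): verifies the
# orthogonality property described above by forming Q^T Q for a tall matrix,
# which should be close to the 3x3 identity. The helper name is hypothetical and
# the check assumes eager execution.
def _example_orthogonal_check():
  q = Orthogonal(seed=3)(shape=(5, 3), dtype=dtypes.float32)
  # With num_rows >= num_cols the columns are orthonormal, so Q^T Q ~= I.
  return math_ops.matmul(q, q, transpose_a=True)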
class Identity(Initializer):
"""Initializer that generates the identity matrix.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Only usable for generating 2D matrices.
Examples:
>>> def make_variable(k, initializer):
... return tf.Variable(initializer(shape=[k, k], dtype=tf.float32))
>>> make_variable(2, tf.initializers.Identity())
<tf.Variable ... shape=(2, 2) dtype=float32, numpy=
array([[1., 0.],
[0., 1.]], dtype=float32)>
>>> make_variable(3, tf.initializers.Identity(gain=0.5))
<tf.Variable ... shape=(3, 3) dtype=float32, numpy=
array([[0.5, 0. , 0. ],
[0. , 0.5, 0. ],
[0. , 0. , 0.5]], dtype=float32)>
Args:
gain: Multiplicative factor to apply to the identity matrix.
"""
def __init__(self, gain=1.0):
self.gain = gain
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not floating point
ValueError: If the requested shape does not have exactly two axes.
"""
self._validate_kwargs(kwargs, support_partition=False)
dtype = _assert_float_dtype(dtype)
if len(shape) != 2:
      raise ValueError("The tensor to initialize, specified by argument `shape`"
                       " must be exactly two-dimensional. Received shape="
                       f"{shape}")
initializer = linalg_ops_impl.eye(*shape, dtype=dtype)
return self.gain * initializer
def get_config(self):
return {"gain": self.gain}
class GlorotUniform(VarianceScaling):
"""The Glorot uniform initializer, also called Xavier uniform initializer.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Draws samples from a uniform distribution within [-limit, limit] where `limit`
is `sqrt(6 / (fan_in + fan_out))` where `fan_in` is the number of input units
in the weight tensor and `fan_out` is the number of output units in the weight
tensor.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.initializers.GlorotUniform())
>>> v1
<tf.Variable ... shape=(3, 3) ...
>>> v2
<tf.Variable ... shape=(3, 3, 3) ...
>>> make_variables(4, tf.initializers.RandomNormal())
(<tf.Variable ... shape=(4, 4) dtype=float32...
<tf.Variable ... shape=(4, 4, 4) dtype=float32...
Args:
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
References:
[Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
"""
def __init__(self, seed=None):
super(GlorotUniform, self).__init__(
scale=1.0,
mode="fan_avg",
distribution="uniform",
seed=seed)
def get_config(self):
return {"seed": self.seed}
class GlorotNormal(VarianceScaling):
"""The Glorot normal initializer, also called Xavier normal initializer.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Draws samples from a truncated normal distribution centered on 0 with `stddev
= sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number of input units in
the weight tensor and `fan_out` is the number of output units in the weight
tensor.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.initializers.GlorotNormal())
>>> v1
<tf.Variable ... shape=(3, 3) ...
>>> v2
<tf.Variable ... shape=(3, 3, 3) ...
>>> make_variables(4, tf.initializers.RandomNormal())
(<tf.Variable ... shape=(4, 4) dtype=float32...
<tf.Variable ... shape=(4, 4, 4) dtype=float32...
Args:
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
References:
[Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
"""
def __init__(self, seed=None):
super(GlorotNormal, self).__init__(
scale=1.0,
mode="fan_avg",
distribution="truncated_normal",
seed=seed)
def get_config(self):
return {"seed": self.seed}
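# Illustrative sketch (not part of the original TensorFlow source): the Glorot
# bounds spelled out for a kernel with fan_in=300 and fan_out=100, matching the
# formulas quoted in the two docstrings above. Purely illustrative numbers.
def _example_glorot_bounds():
  fan_in, fan_out = 300., 100.
  uniform_limit = math.sqrt(6. / (fan_in + fan_out))  # GlorotUniform, ~0.1225
  normal_stddev = math.sqrt(2. / (fan_in + fan_out))  # GlorotNormal, ~0.0707
  return uniform_limit, normal_stddev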
# Aliases.
# pylint: disable=invalid-name
zeros_initializer = Zeros
ones_initializer = Ones
constant_initializer = Constant
random_uniform_initializer = RandomUniform
random_normal_initializer = RandomNormal
truncated_normal_initializer = TruncatedNormal
variance_scaling_initializer = VarianceScaling
glorot_uniform_initializer = GlorotUniform
glorot_normal_initializer = GlorotNormal
orthogonal_initializer = Orthogonal
identity_initializer = Identity
# pylint: enable=invalid-name
def lecun_normal(seed=None):
"""LeCun normal initializer.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Draws samples from a truncated normal distribution centered on 0 with `stddev
= sqrt(1 / fan_in)` where `fan_in` is the number of input units in the weight
tensor.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.initializers.lecun_normal())
>>> v1
<tf.Variable ... shape=(3, 3) ...
>>> v2
<tf.Variable ... shape=(3, 3, 3) ...
>>> make_variables(4, tf.initializers.RandomNormal())
(<tf.Variable ... shape=(4, 4) dtype=float32...
<tf.Variable ... shape=(4, 4, 4) dtype=float32...
Args:
seed: A Python integer. Used to seed the random generator.
Returns:
A callable Initializer with `shape` and `dtype` arguments which generates a
tensor.
References:
- Self-Normalizing Neural Networks,
[Klambauer et al., 2017]
(https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)
([pdf]
(https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
- Efficient Backprop,
[Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
return VarianceScaling(
scale=1., mode="fan_in", distribution="truncated_normal", seed=seed)
def lecun_uniform(seed=None):
"""LeCun uniform initializer.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Draws samples from a uniform distribution within [-limit, limit] where `limit`
is `sqrt(3 / fan_in)` where `fan_in` is the number of input units in the
weight tensor.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.initializers.lecun_uniform())
>>> v1
<tf.Variable ... shape=(3, 3) ...
>>> v2
<tf.Variable ... shape=(3, 3, 3) ...
>>> make_variables(4, tf.initializers.RandomNormal())
(<tf.Variable ... shape=(4, 4) dtype=float32...
<tf.Variable ... shape=(4, 4, 4) dtype=float32...
Args:
seed: A Python integer. Used to seed the random generator.
Returns:
A callable Initializer with `shape` and `dtype` arguments which generates a
tensor.
References:
- Self-Normalizing Neural Networks,
[Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) # pylint: disable=line-too-long
([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
- Efficient Backprop,
[Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
return VarianceScaling(
scale=1., mode="fan_in", distribution="uniform", seed=seed)
def he_normal(seed=None):
"""He normal initializer.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
It draws samples from a truncated normal distribution centered on 0 with
`stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the
weight tensor.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.initializers.he_normal())
>>> v1
<tf.Variable ... shape=(3, 3) ...
>>> v2
<tf.Variable ... shape=(3, 3, 3) ...
>>> make_variables(4, tf.initializers.RandomNormal())
(<tf.Variable ... shape=(4, 4) dtype=float32...
<tf.Variable ... shape=(4, 4, 4) dtype=float32...
Args:
seed: A Python integer. Used to seed the random generator.
Returns:
A callable Initializer with `shape` and `dtype` arguments which generates a
tensor.
References:
[He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long
([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
"""
return VarianceScaling(
scale=2., mode="fan_in", distribution="truncated_normal", seed=seed)
def he_uniform(seed=None):
"""He uniform variance scaling initializer.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Draws samples from a uniform distribution within [-limit, limit] where `limit`
is `sqrt(6 / fan_in)` where `fan_in` is the number of input units in the
weight tensor.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.initializers.he_uniform())
>>> v1
<tf.Variable ... shape=(3, 3) ...
>>> v2
<tf.Variable ... shape=(3, 3, 3) ...
>>> make_variables(4, tf.initializers.RandomNormal())
(<tf.Variable ... shape=(4, 4) dtype=float32...
<tf.Variable ... shape=(4, 4, 4) dtype=float32...
Args:
seed: A Python integer. Used to seed the random generator.
Returns:
A callable Initializer with `shape` and `dtype` arguments which generates a
tensor.
References:
[He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long
([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
"""
return VarianceScaling(
scale=2., mode="fan_in", distribution="uniform", seed=seed)
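# Illustrative sketch (not part of the original TensorFlow source): the four
# convenience functions above are thin wrappers over VarianceScaling; this
# hypothetical helper makes their (scale, distribution) choices explicit.
def _example_fan_in_initializer_configs():
  return {
      "lecun_normal": lecun_normal().get_config(),    # scale=1, truncated_normal
      "lecun_uniform": lecun_uniform().get_config(),  # scale=1, uniform
      "he_normal": he_normal().get_config(),          # scale=2, truncated_normal
      "he_uniform": he_uniform().get_config(),        # scale=2, uniform
  }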
# Utility functions.
def _assert_float_dtype(dtype):
"""Validate and return floating point type based on `dtype`.
`dtype` must be a floating point type.
Args:
dtype: The data type to validate.
Returns:
Validated type.
Raises:
ValueError: if `dtype` is not a floating point type.
"""
dtype = dtypes.as_dtype(dtype)
if not dtype.is_floating:
raise ValueError("Argument `dtype` is expected to be floating point. "
f"Received: {dtype}.")
return dtype
class _RandomGenerator(object):
"""Random generator that selects appropriate random ops."""
def __init__(self, seed=None):
super(_RandomGenerator, self).__init__()
if seed is not None:
      # Stateless random ops require a 2-int seed.
self.seed = [seed, 0]
else:
self.seed = None
def random_normal(self, shape, mean=0.0, stddev=1, dtype=dtypes.float32):
"""A deterministic random normal if seed is passed."""
if self.seed:
op = stateless_random_ops.stateless_random_normal
else:
op = random_ops.random_normal
return op(
shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)
def random_uniform(self, shape, minval, maxval, dtype):
"""A deterministic random uniform if seed is passed."""
if self.seed:
op = stateless_random_ops.stateless_random_uniform
else:
op = random_ops.random_uniform
return op(
shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed)
def truncated_normal(self, shape, mean, stddev, dtype):
"""A deterministic truncated normal if seed is passed."""
if self.seed:
op = stateless_random_ops.stateless_truncated_normal
else:
op = random_ops.truncated_normal
return op(
shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)
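# Illustrative sketch (not part of the original TensorFlow source): with a seed,
# _RandomGenerator routes to the stateless ops, so repeated calls with identical
# arguments return identical tensors. The helper name is hypothetical and the
# check assumes eager execution.
def _example_seeded_generator_is_deterministic():
  gen = _RandomGenerator(seed=42)
  a = gen.random_uniform(shape=(4,), minval=0., maxval=1., dtype=dtypes.float32)
  b = gen.random_uniform(shape=(4,), minval=0., maxval=1., dtype=dtypes.float32)
  return math_ops.reduce_all(math_ops.equal(a, b))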
# Compatibility aliases
# pylint: disable=invalid-name
zero = zeros = Zeros
one = ones = Ones
constant = Constant
uniform = random_uniform = RandomUniform
normal = random_normal = RandomNormal
truncated_normal = TruncatedNormal
identity = Identity
orthogonal = Orthogonal
glorot_normal = GlorotNormal
glorot_uniform = GlorotUniform
| en | 0.593878 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Initializers for TF 2. Initializer base class: all initializers inherit from this class. Initializers should implement a `__call__` method with the following signature: ```python def __call__(self, shape, dtype=None, **kwargs): # returns a tensor of shape `shape` and dtype `dtype` # containing values drawn from a distribution of your choice. ``` Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. If not provided will return tensor of `tf.float32`. **kwargs: Additional keyword arguments. Accepted values: `partition_shape` and `partition_offset`. Used when creating a single partition in a partitioned variable. `partition_shape` is the shape of the partition (i.e. the shape of the returned tensor) and `partition_offset` is a tuple of `int` specifying the offset of this partition w.r.t each axis. For example, a tensor of shape `(30, 100)` can be partitioned into two partitions: `p0` of shape `(10, 100)` and `p1` of shape `(20, 100)`; if the initializer is called with `partition_shape=(20, 100)` and `partition_offset=(10, 0)`, it should return the value for `p1`. Returns the configuration of the initializer as a JSON-serializable dict. Returns: A JSON-serializable Python dict. Instantiates an initializer from a configuration dictionary. Example: ```python initializer = RandomUniform(-1, 1) config = initializer.get_config() initializer = RandomUniform.from_config(config) ``` Args: config: A Python dictionary. It will typically be the output of `get_config`. Returns: An Initializer instance. Initializer that generates tensors initialized to 0. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.zeros_initializer()) >>> v1 <tf.Variable ... shape=(3,) ... numpy=array([0., 0., 0.], dtype=float32)> >>> v2 <tf.Variable ... shape=(3, 3) ... numpy= array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=float32)> >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.)) (<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ... Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are supported. **kwargs: Additional keyword arguments. Raises: ValuesError: If the dtype is not numeric or boolean. Initializer that generates tensors initialized to 1. 
Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.ones_initializer()) >>> v1 <tf.Variable ... shape=(3,) ... numpy=array([1., 1., 1.], dtype=float32)> >>> v2 <tf.Variable ... shape=(3, 3) ... numpy= array([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]], dtype=float32)> >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.)) (<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ... Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are supported. **kwargs: Additional keyword arguments. Raises: ValuesError: If the dtype is not numeric or boolean. Initializer that generates tensors with constant values. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. `tf.constant_initializer` returns an object which when called returns a tensor populated with the `value` specified in the constructor. This `value` must be convertible to the requested `dtype`. The argument `value` can be a scalar constant value, or a list of values. Scalars broadcast to whichever shape is requested from the initializer. If `value` is a list, then the length of the list must be equal to the number of elements implied by the desired shape of the tensor. If the total number of elements in `value` is not equal to the number of elements required by the tensor shape, the initializer will raise a `TypeError`. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.constant_initializer(2.)) >>> v1 <tf.Variable ... shape=(3,) ... numpy=array([2., 2., 2.], dtype=float32)> >>> v2 <tf.Variable ... shape=(3, 3) ... numpy= array([[2., 2., 2.], [2., 2., 2.], [2., 2., 2.]], dtype=float32)> >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.)) (<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ... >>> value = [0, 1, 2, 3, 4, 5, 6, 7] >>> init = tf.constant_initializer(value) >>> # Fitting shape >>> tf.Variable(init(shape=[2, 4], dtype=tf.float32)) <tf.Variable ... array([[0., 1., 2., 3.], [4., 5., 6., 7.]], dtype=float32)> >>> # Larger shape >>> tf.Variable(init(shape=[3, 4], dtype=tf.float32)) Traceback (most recent call last): ... TypeError: ...value has 8 elements, shape is (3, 4) with 12 elements... >>> # Smaller shape >>> tf.Variable(init(shape=[2, 3], dtype=tf.float32)) Traceback (most recent call last): ... TypeError: ...value has 8 elements, shape is (2, 3) with 6 elements... Args: value: A Python scalar, list or tuple of values, or a N-dimensional numpy array. All elements of the initialized variable will be set to the corresponding value in the `value` argument. Raises: TypeError: If the input `value` is not one of the expected types. Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. 
If not provided the dtype of the tensor created will be the type of the inital value. **kwargs: Additional keyword arguments. Raises: TypeError: If the initializer cannot create a tensor of the requested dtype. Initializer that generates tensors with a uniform distribution. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.ones_initializer()) >>> v1 <tf.Variable ... shape=(3,) ... numpy=array([1., 1., 1.], dtype=float32)> >>> v2 <tf.Variable ... shape=(3, 3) ... numpy= array([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]], dtype=float32)> >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.)) (<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ... Args: minval: A python scalar or a scalar tensor. Lower bound of the range of random values to generate (inclusive). maxval: A python scalar or a scalar tensor. Upper bound of the range of random values to generate (exclusive). seed: A Python integer. Used to create random seeds. See `tf.random.set_seed` for behavior. Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point and integer types are supported. **kwargs: Additional keyword arguments. Raises: ValueError: If the dtype is not numeric. Initializer that generates tensors with a normal distribution. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, ... tf.random_normal_initializer(mean=1., stddev=2.)) >>> v1 <tf.Variable ... shape=(3,) ... numpy=array([...], dtype=float32)> >>> v2 <tf.Variable ... shape=(3, 3) ... numpy= ... >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.)) (<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ... Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate. seed: A Python integer. Used to create random seeds. See `tf.random.set_seed` for behavior. Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. **kwargs: Additional keyword arguments. Raises: ValueError: If the dtype is not floating point Initializer that generates a truncated normal distribution. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. These values are similar to values from a `tf.initializers.RandomNormal` except that values more than two standard deviations from the mean are discarded and re-drawn. This is the recommended initializer for neural network weights and filters. Examples: >>> def make_variables(k, initializer): ... 
return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables( ... 3, tf.initializers.TruncatedNormal(mean=1., stddev=2.)) >>> v1 <tf.Variable ... shape=(3,) ... numpy=array([...], dtype=float32)> >>> v2 <tf.Variable ... shape=(3, 3) ... numpy= ... >>> make_variables(4, tf.initializers.RandomUniform(minval=-1., maxval=1.)) (<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ... Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate. seed: A Python integer. Used to create random seeds. See `tf.random.set_seed` for behavior. Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. **kwargs: Additional keyword arguments. Raises: ValueError: If the dtype is not floating point Initializer capable of adapting its scale to the shape of weights tensors. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. With `distribution="truncated_normal" or "untruncated_normal"`, samples are drawn from a truncated/untruncated normal distribution with a mean of zero and a standard deviation (after truncation, if used) `stddev = sqrt(scale / n)` where n is: - number of input units in the weight tensor, if mode = "fan_in" - number of output units, if mode = "fan_out" - average of the numbers of input and output units, if mode = "fan_avg" With `distribution="uniform"`, samples are drawn from a uniform distribution within [-limit, limit], with `limit = sqrt(3 * scale / n)`. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.VarianceScaling(scale=1.)) >>> v1 <tf.Variable ... shape=(3,) ... numpy=array([...], dtype=float32)> >>> v2 <tf.Variable ... shape=(3, 3) ... numpy= ... >>> make_variables(4, tf.initializers.VarianceScaling(distribution='uniform')) (<tf.Variable...shape=(4,) dtype=float32...>, <tf.Variable...shape=(4, 4) ... Args: scale: Scaling factor (positive float). mode: One of "fan_in", "fan_out", "fan_avg". distribution: Random distribution to use. One of "truncated_normal", "untruncated_normal" and "uniform". seed: A Python integer. Used to create random seeds. See `tf.random.set_seed` for behavior. Raises: ValueError: In case of an invalid value for the "scale", mode" or "distribution" arguments. # Compatibility with keras-team/keras. Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. **kwargs: Additional keyword arguments. Raises: ValueError: If the dtype is not floating point # constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.) Initializer that generates an orthogonal matrix. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. 
If the shape of the tensor to initialize is two-dimensional, it is initialized with an orthogonal matrix obtained from the QR decomposition of a matrix of random numbers drawn from a normal distribution. If the matrix has fewer rows than columns then the output will have orthogonal rows. Otherwise, the output will have orthogonal columns. If the shape of the tensor to initialize is more than two-dimensional, a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])` is initialized, where `n` is the length of the shape vector. The matrix is subsequently reshaped to give a tensor of the desired shape. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.Orthogonal()) >>> v1 <tf.Variable ... shape=(3, 3) ... >>> v2 <tf.Variable ... shape=(3, 3, 3) ... >>> make_variables(4, tf.initializers.Orthogonal(gain=0.5)) (<tf.Variable ... shape=(4, 4) dtype=float32... <tf.Variable ... shape=(4, 4, 4) dtype=float32... Args: gain: multiplicative factor to apply to the orthogonal matrix seed: A Python integer. Used to create random seeds. See `tf.random.set_seed` for behavior. References: [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C) ([pdf](https://arxiv.org/pdf/1312.6120.pdf)) Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. **kwargs: Additional keyword arguments. Raises: ValueError: If the dtype is not floating point or the input shape is not valid. # Check the shape # Flatten the input shape with the last dimension remaining # its original shape so it works for conv2d # Generate a random matrix # Compute the qr factorization # Make Q uniform Initializer that generates the identity matrix. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Only usable for generating 2D matrices. Examples: >>> def make_variable(k, initializer): ... return tf.Variable(initializer(shape=[k, k], dtype=tf.float32)) >>> make_variable(2, tf.initializers.Identity()) <tf.Variable ... shape=(2, 2) dtype=float32, numpy= array([[1., 0.], [0., 1.]], dtype=float32)> >>> make_variable(3, tf.initializers.Identity(gain=0.5)) <tf.Variable ... shape=(3, 3) dtype=float32, numpy= array([[0.5, 0. , 0. ], [0. , 0.5, 0. ], [0. , 0. , 0.5]], dtype=float32)> Args: gain: Multiplicative factor to apply to the identity matrix. Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. **kwargs: Additional keyword arguments. Raises: ValueError: If the dtype is not floating point ValueError: If the requested shape does not have exactly two axes. The Glorot uniform initializer, also called Xavier uniform initializer. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(6 / (fan_in + fan_out))` where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units in the weight tensor. Examples: >>> def make_variables(k, initializer): ... 
return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.GlorotUniform()) >>> v1 <tf.Variable ... shape=(3, 3) ... >>> v2 <tf.Variable ... shape=(3, 3, 3) ... >>> make_variables(4, tf.initializers.RandomNormal()) (<tf.Variable ... shape=(4, 4) dtype=float32... <tf.Variable ... shape=(4, 4, 4) dtype=float32... Args: seed: A Python integer. Used to create random seeds. See `tf.random.set_seed` for behavior. References: [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)) The Glorot normal initializer, also called Xavier normal initializer. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Draws samples from a truncated normal distribution centered on 0 with `stddev = sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units in the weight tensor. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.GlorotNormal()) >>> v1 <tf.Variable ... shape=(3, 3) ... >>> v2 <tf.Variable ... shape=(3, 3, 3) ... >>> make_variables(4, tf.initializers.RandomNormal()) (<tf.Variable ... shape=(4, 4) dtype=float32... <tf.Variable ... shape=(4, 4, 4) dtype=float32... Args: seed: A Python integer. Used to create random seeds. See `tf.random.set_seed` for behavior. References: [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)) # Aliases. # pylint: disable=invalid-name # pylint: enable=invalid-name LeCun normal initializer. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Draws samples from a truncated normal distribution centered on 0 with `stddev = sqrt(1 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.lecun_normal()) >>> v1 <tf.Variable ... shape=(3, 3) ... >>> v2 <tf.Variable ... shape=(3, 3, 3) ... >>> make_variables(4, tf.initializers.RandomNormal()) (<tf.Variable ... shape=(4, 4) dtype=float32... <tf.Variable ... shape=(4, 4, 4) dtype=float32... Args: seed: A Python integer. Used to seed the random generator. Returns: A callable Initializer with `shape` and `dtype` arguments which generates a tensor. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017] (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) ([pdf] (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)) - Efficient Backprop, [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf) LeCun uniform initializer. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. 
Draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(3 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.lecun_uniform()) >>> v1 <tf.Variable ... shape=(3, 3) ... >>> v2 <tf.Variable ... shape=(3, 3, 3) ... >>> make_variables(4, tf.initializers.RandomNormal()) (<tf.Variable ... shape=(4, 4) dtype=float32... <tf.Variable ... shape=(4, 4, 4) dtype=float32... Args: seed: A Python integer. Used to seed the random generator. Returns: A callable Initializer with `shape` and `dtype` arguments which generates a tensor. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) # pylint: disable=line-too-long ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)) - Efficient Backprop, [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf) He normal initializer. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. It draws samples from a truncated normal distribution centered on 0 with `stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.he_normal()) >>> v1 <tf.Variable ... shape=(3, 3) ... >>> v2 <tf.Variable ... shape=(3, 3, 3) ... >>> make_variables(4, tf.initializers.RandomNormal()) (<tf.Variable ... shape=(4, 4) dtype=float32... <tf.Variable ... shape=(4, 4, 4) dtype=float32... Args: seed: A Python integer. Used to seed the random generator. Returns: A callable Initializer with `shape` and `dtype` arguments which generates a tensor. References: [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf)) He uniform variance scaling initializer. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(6 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.he_uniform()) >>> v1 <tf.Variable ... shape=(3, 3) ... >>> v2 <tf.Variable ... shape=(3, 3, 3) ... >>> make_variables(4, tf.initializers.RandomNormal()) (<tf.Variable ... shape=(4, 4) dtype=float32... <tf.Variable ... shape=(4, 4, 4) dtype=float32... Args: seed: A Python integer. Used to seed the random generator. Returns: A callable Initializer with `shape` and `dtype` arguments which generates a tensor. 
References: [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf)) # Utility functions. Validate and return floating point type based on `dtype`. `dtype` must be a floating point type. Args: dtype: The data type to validate. Returns: Validated type. Raises: ValueError: if `dtype` is not a floating point type. Random generator that selects appropriate random ops. # Stateless random ops requires 2-int seed. A deterministic random normal if seed is passed. A deterministic random uniform if seed is passed. A deterministic truncated normal if seed is passed. # Compatibility aliases # pylint: disable=invalid-name | 2.166435 | 2 |
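The flattened docstrings above all describe the same variance-scaling family (Glorot/Xavier, LeCun, He) in terms of `fan_in` and `fan_out`. A minimal NumPy sketch of the stated formulas, independent of TensorFlow and ignoring the truncation the real initializers apply:

import numpy as np

def glorot_normal_stddev(fan_in, fan_out):
    # GlorotNormal: stddev = sqrt(2 / (fan_in + fan_out))
    return np.sqrt(2.0 / (fan_in + fan_out))

def lecun_normal_stddev(fan_in):
    # lecun_normal: stddev = sqrt(1 / fan_in)
    return np.sqrt(1.0 / fan_in)

def he_uniform_limit(fan_in):
    # he_uniform: samples are drawn from U(-limit, limit) with limit = sqrt(6 / fan_in)
    return np.sqrt(6.0 / fan_in)

# Rough, untruncated He-normal draw for a k x k kernel (stddev = sqrt(2 / fan_in)):
k = 3
kernel = np.random.default_rng(0).normal(0.0, np.sqrt(2.0 / k), size=(k, k))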
alura/design_patterns_python/calculador_desconto.py | flaviogf/Cursos | 2 | 6631055 | <reponame>flaviogf/Cursos<gh_stars>1-10
from desconto import DescontoPorCincoItens, SemDesconto
class CalculadorDesconto:
def calcula(self, orcamento):
return DescontoPorCincoItens(SemDesconto()).calcula(orcamento)
| from desconto import DescontoPorCincoItens, SemDesconto
class CalculadorDesconto:
def calcula(self, orcamento):
return DescontoPorCincoItens(SemDesconto()).calcula(orcamento) | none | 1 | 2.475264 | 2 |
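The `desconto` module imported above is not part of this record, so the classes below are hypothetical stand-ins, written only to show how the decorator-style chain behind `CalculadorDesconto.calcula` would plausibly work; `Orcamento` and the 10%/five-items rule are assumptions, not the original implementation.

class SemDesconto:
    def calcula(self, orcamento):
        # End of the chain: no discount applies.
        return 0

class DescontoPorCincoItens:
    def __init__(self, proximo):
        self.proximo = proximo

    def calcula(self, orcamento):
        # Apply a discount for five or more items, otherwise delegate to the next handler.
        if orcamento.total_itens >= 5:
            return orcamento.valor * 0.1
        return self.proximo.calcula(orcamento)

class Orcamento:
    def __init__(self, valor, total_itens):
        self.valor = valor
        self.total_itens = total_itens

# CalculadorDesconto().calcula(Orcamento(500.0, 6)) would then return 50.0.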
|
conftest.py | githubmata/projects | 0 | 6631056 | # -*- coding: utf8 -*-
import pytest
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import WebDriverException, TimeoutException
pytest.app = "https://www.disney.com/"
# pytest.data = { "firstName": "tester", "lastName": "testing", "email address":"<EMAIL>"
load_timeout = 60
def pytest_addoption(parser):
parser.addoption("--browser", action="store", default="chrome", help="Browser for test")
@pytest.fixture(scope='session')
def browser(request):
return request.config.getoption("--browser")
@pytest.fixture
def driver(request, browser):
if "chrome" in browser.lower():
caps = webdriver.DesiredCapabilities.CHROME.copy()
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--ignore-certificate-errors')
caps['acceptInsecureCerts'] = True
b = webdriver.Chrome(desired_capabilities = caps, options=chrome_options)
request.addfinalizer(lambda *args: b.quit())
return b
@pytest.fixture()
def selenium(request, driver):
try:
driver.set_page_load_timeout(load_timeout)
driver.implicitly_wait(5)
driver.get(pytest.app)
except TimeoutException:
print ("browser load timeout")
return driver | # -*- coding: utf8 -*-
import pytest
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import WebDriverException, TimeoutException
pytest.app = "https://www.disney.com/"
# pytest.data = { "firstName": "tester", "lastName": "testing", "email address":"<EMAIL>"
load_timeout = 60
def pytest_addoption(parser):
parser.addoption("--browser", action="store", default="chrome", help="Browser for test")
@pytest.fixture(scope='session')
def browser(request):
return request.config.getoption("--browser")
@pytest.fixture
def driver(request, browser):
if "chrome" in browser.lower():
caps = webdriver.DesiredCapabilities.CHROME.copy()
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--ignore-certificate-errors')
caps['acceptInsecureCerts'] = True
b = webdriver.Chrome(desired_capabilities = caps, options=chrome_options)
request.addfinalizer(lambda *args: b.quit())
return b
@pytest.fixture()
def selenium(request, driver):
try:
driver.set_page_load_timeout(load_timeout)
driver.implicitly_wait(5)
driver.get(pytest.app)
except TimeoutException:
print ("browser load timeout")
return driver | en | 0.443217 | # -*- coding: utf8 -*- # pytest.data = { "firstName": "tester", "lastName": "testing", "email address":"<EMAIL>" | 2.731041 | 3 |
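A hedged example of a test that would consume the `selenium` fixture defined in this conftest; the assertion is illustrative only, since the page served at `pytest.app` is not part of this record. It would be run with, e.g., `pytest --browser chrome`.

# test_home.py (hypothetical)
def test_homepage_loads(selenium):
    # The fixture has already navigated to pytest.app before the test body runs.
    assert selenium.current_url.startswith("https://www.disney.com")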
tests/fields/test_json_field.py | mdanse/wtforms-components | 54 | 6631057 | <reponame>mdanse/wtforms-components
from tests import MultiDict, SimpleFieldTestCase
from wtforms_components import JSONField
class TestJSONField(SimpleFieldTestCase):
field_class = JSONField
def setup_method(self, method):
self.valid_jsons = [
'{"a": {"b": true, "c": "lv", "d": 3}, "e": {"f": {"g": [85]}}}'
]
self.invalid_jsons = [
'{"a": {"b": bzz, "c": "lv", "d": 3}, "e": {"f": {"g": [85]}}}'
]
def test_valid_times(self):
form_class = self.init_form()
for time_ in self.valid_jsons:
form = form_class(MultiDict(test_field=time_))
form.validate()
assert len(form.errors) == 0
def test_invalid_times(self):
form_class = self.init_form()
for time_ in self.invalid_jsons:
form = form_class(MultiDict(test_field=time_))
form.validate()
assert len(form.errors['test_field']) == 1
| from tests import MultiDict, SimpleFieldTestCase
from wtforms_components import JSONField
class TestJSONField(SimpleFieldTestCase):
field_class = JSONField
def setup_method(self, method):
self.valid_jsons = [
'{"a": {"b": true, "c": "lv", "d": 3}, "e": {"f": {"g": [85]}}}'
]
self.invalid_jsons = [
'{"a": {"b": bzz, "c": "lv", "d": 3}, "e": {"f": {"g": [85]}}}'
]
def test_valid_times(self):
form_class = self.init_form()
for time_ in self.valid_jsons:
form = form_class(MultiDict(test_field=time_))
form.validate()
assert len(form.errors) == 0
def test_invalid_times(self):
form_class = self.init_form()
for time_ in self.invalid_jsons:
form = form_class(MultiDict(test_field=time_))
form.validate()
assert len(form.errors['test_field']) == 1 | none | 1 | 2.963247 | 3 |
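The `init_form` helper used above comes from the shared test base class and is not shown in this record; a minimal sketch of the kind of form it presumably builds, assuming plain WTForms usage:

from wtforms import Form
from wtforms_components import JSONField

class JSONTestForm(Form):
    # Same field name the tests post data against.
    test_field = JSONField()

# form = JSONTestForm(MultiDict(test_field='{"a": 1}'))  # MultiDict comes from the test helpers
# form.validate()  # True for well-formed JSON, False otherwise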
|
autokeras/utils.py | MustafaKadioglu/autokeras | 1 | 6631058 | import csv
import os
import pickle
import sys
import tempfile
import zipfile
import warnings
import imageio
import numpy as np
import requests
import torch
import subprocess
import string
import random
from autokeras.constant import Constant
from scipy.ndimage import zoom
class NoImprovementError(Exception):
def __init__(self, message):
self.message = message
def ensure_dir(directory):
"""Create directory if it does not exist."""
if not os.path.exists(directory):
os.makedirs(directory)
def ensure_file_dir(path):
"""Create path if it does not exist."""
ensure_dir(os.path.dirname(path))
def has_file(path):
"""Check if the given path exists."""
return os.path.exists(path)
def pickle_from_file(path):
"""Load the pickle file from the provided path and returns the object."""
return pickle.load(open(path, 'rb'))
def pickle_to_file(obj, path):
"""Save the pickle file to the specified path."""
pickle.dump(obj, open(path, 'wb'))
# TODO cannot detect nvidia-smi in Windows normally. We need a fall back for windows
def get_device():
""" If CUDA is available, use CUDA device, else use CPU device.
When choosing from CUDA devices, this function will choose the one with max memory available.
Returns: string device name.
"""
# TODO: could use gputil in the future
device = 'cpu'
if torch.cuda.is_available():
try:
# smi_out=
# Free : xxxxxx MiB
# Free : xxxxxx MiB
# ....
smi_out = subprocess.check_output('nvidia-smi -q -d Memory | grep -A4 GPU|grep Free', shell=True)
if isinstance(smi_out, bytes):
smi_out = smi_out.decode('utf-8')
except subprocess.SubprocessError:
warnings.warn('Cuda device successfully detected. However, nvidia-smi cannot be invoked')
return 'cpu'
visible_devices = os.getenv('CUDA_VISIBLE_DEVICES', '').split(',')
if len(visible_devices) == 1 and visible_devices[0] == '':
visible_devices = []
visible_devices = [int(x) for x in visible_devices]
memory_available = [int(x.split()[2]) for x in smi_out.splitlines()]
for cuda_index, _ in enumerate(memory_available):
if cuda_index not in visible_devices and visible_devices:
memory_available[cuda_index] = 0
if memory_available:
if max(memory_available) != 0:
device = 'cuda:' + str(memory_available.index(max(memory_available)))
return device
def temp_path_generator():
sys_temp = tempfile.gettempdir()
path = os.path.join(sys_temp, 'autokeras')
return path
def rand_temp_folder_generator():
"""Create and return a temporary directory with the path name '/temp_dir_name/autokeras' (E:g:- /tmp/autokeras)."""
chars = string.ascii_uppercase + string.digits
size = 6
random_suffix = ''.join(random.choice(chars) for _ in range(size))
sys_temp = temp_path_generator()
path = sys_temp + '_' + random_suffix
ensure_dir(path)
return path
def download_file(file_link, file_path):
"""Download the file specified in `file_link` and saves it in `file_path`."""
if not os.path.exists(file_path):
with open(file_path, "wb") as f:
print("Downloading %s" % file_path)
response = requests.get(file_link, stream=True)
total_length = response.headers.get('content-length')
if total_length is None: # no content length header
f.write(response.content)
else:
dl = 0
total_length = int(total_length)
for data in response.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
done = int(50 * dl / total_length)
sys.stdout.write("\r[%s%s]" % ('=' * done, ' ' * (50 - done)))
sys.stdout.flush()
def download_file_with_extract(file_link, file_path, extract_path):
"""Download the file specified in `file_link`, save to `file_path` and extract to the directory `extract_path`."""
if not os.path.exists(extract_path):
download_file(file_link, file_path)
zip_ref = zipfile.ZipFile(file_path, 'r')
print("extracting downloaded file...")
zip_ref.extractall(extract_path)
os.remove(file_path)
print("extracted and removed downloaded zip file")
print("file already extracted in the path %s" % extract_path)
def verbose_print(new_father_id, new_graph, new_model_id):
"""Print information about the operation performed on father model to obtain current model and father's id."""
cell_size = [24, 49]
print('New Model Id', new_model_id)
header = ['Father Model ID', 'Added Operation']
line = '|'.join(str(x).center(cell_size[i]) for i, x in enumerate(header))
print('\n' + '+' + '-' * len(line) + '+')
print('|' + line + '|')
print('+' + '-' * len(line) + '+')
for i in range(len(new_graph.operation_history)):
if i == len(new_graph.operation_history) // 2:
r = [new_father_id, ' '.join(str(item) for item in new_graph.operation_history[i])]
else:
r = [' ', ' '.join(str(item) for item in new_graph.operation_history[i])]
line = '|'.join(str(x).center(cell_size[i]) for i, x in enumerate(r))
print('|' + line + '|')
print('+' + '-' * len(line) + '+')
def validate_xy(x_train, y_train):
"""Validate `x_train`'s type and the shape of `x_train`, `y_train`."""
try:
x_train = x_train.astype('float64')
except ValueError:
raise ValueError('x_train should only contain numerical data.')
if len(x_train.shape) < 2:
        raise ValueError('x_train should have at least 2 dimensions.')
if x_train.shape[0] != y_train.shape[0]:
raise ValueError('x_train and y_train should have the same number of instances.')
def read_csv_file(csv_file_path):
"""Read the csv file and returns two separate list containing file names and their labels.
Args:
csv_file_path: Path to the CSV file.
Returns:
        file_names: List containing file names.
        file_labels: List containing their respective labels.
"""
file_names = []
file_labels = []
with open(csv_file_path, 'r') as files_path:
path_list = csv.DictReader(files_path)
fieldnames = path_list.fieldnames
for path in path_list:
file_names.append(path[fieldnames[0]])
file_labels.append(path[fieldnames[1]])
return file_names, file_labels
def read_image(img_path):
"""Read the image contained in the provided path `image_path`."""
img = imageio.imread(uri=img_path)
return img
def compute_image_resize_params(data):
"""Compute median dimension of all images in data.
    It is used to resize the images later. The number of channels does not change from the original data.
Args:
data: 1-D, 2-D or 3-D images. The Images are expected to have channel last configuration.
Returns:
median shape.
"""
if data is None or len(data.shape) == 0:
return []
if len(data.shape) == len(data[0].shape) + 1 and np.prod(data[0].shape[:-1]) <= Constant.MAX_IMAGE_SIZE:
return data[0].shape
data_shapes = []
for x in data:
data_shapes.append(x.shape)
median_shape = np.median(np.array(data_shapes), axis=0)
median_size = np.prod(median_shape[:-1])
if median_size > Constant.MAX_IMAGE_SIZE:
reduction_factor = np.power(Constant.MAX_IMAGE_SIZE / median_size, 1 / (len(median_shape) - 1))
median_shape[:-1] = median_shape[:-1] * reduction_factor
return median_shape.astype(int)
def resize_image_data(data, resize_shape):
"""Resize images to given dimension.
Args:
data: 1-D, 2-D or 3-D images. The Images are expected to have channel last configuration.
resize_shape: Image resize dimension.
Returns:
data: Reshaped data.
"""
if data is None or len(resize_shape) == 0:
return data
if len(data.shape) > 1 and np.array_equal(data[0].shape, resize_shape):
return data
output_data = []
for im in data:
output_data.append(zoom(input=im, zoom=np.divide(resize_shape, im.shape)))
return np.array(output_data)
def get_system():
"""Get the current system environment. If the current system is not supported, raise an exception.
Returns:
A string to represent the current OS name.
"posix" stands for Linux, Mac or Solaris architecture.
"nt" stands for Windows system.
"""
if 'google.colab' in sys.modules:
return Constant.SYS_GOOGLE_COLAB
if os.name == 'posix':
return Constant.SYS_LINUX
if os.name == 'nt':
return Constant.SYS_WINDOWS
raise EnvironmentError('Unsupported environment')
| import csv
import os
import pickle
import sys
import tempfile
import zipfile
import warnings
import imageio
import numpy as np
import requests
import torch
import subprocess
import string
import random
from autokeras.constant import Constant
from scipy.ndimage import zoom
class NoImprovementError(Exception):
def __init__(self, message):
self.message = message
def ensure_dir(directory):
"""Create directory if it does not exist."""
if not os.path.exists(directory):
os.makedirs(directory)
def ensure_file_dir(path):
"""Create path if it does not exist."""
ensure_dir(os.path.dirname(path))
def has_file(path):
"""Check if the given path exists."""
return os.path.exists(path)
def pickle_from_file(path):
"""Load the pickle file from the provided path and returns the object."""
return pickle.load(open(path, 'rb'))
def pickle_to_file(obj, path):
"""Save the pickle file to the specified path."""
pickle.dump(obj, open(path, 'wb'))
# TODO cannot detect nvidia-smi in Windows normally. We need a fall back for windows
def get_device():
""" If CUDA is available, use CUDA device, else use CPU device.
When choosing from CUDA devices, this function will choose the one with max memory available.
Returns: string device name.
"""
# TODO: could use gputil in the future
device = 'cpu'
if torch.cuda.is_available():
try:
# smi_out=
# Free : xxxxxx MiB
# Free : xxxxxx MiB
# ....
smi_out = subprocess.check_output('nvidia-smi -q -d Memory | grep -A4 GPU|grep Free', shell=True)
if isinstance(smi_out, bytes):
smi_out = smi_out.decode('utf-8')
except subprocess.SubprocessError:
warnings.warn('Cuda device successfully detected. However, nvidia-smi cannot be invoked')
return 'cpu'
visible_devices = os.getenv('CUDA_VISIBLE_DEVICES', '').split(',')
if len(visible_devices) == 1 and visible_devices[0] == '':
visible_devices = []
visible_devices = [int(x) for x in visible_devices]
memory_available = [int(x.split()[2]) for x in smi_out.splitlines()]
for cuda_index, _ in enumerate(memory_available):
if cuda_index not in visible_devices and visible_devices:
memory_available[cuda_index] = 0
if memory_available:
if max(memory_available) != 0:
device = 'cuda:' + str(memory_available.index(max(memory_available)))
return device
def temp_path_generator():
sys_temp = tempfile.gettempdir()
path = os.path.join(sys_temp, 'autokeras')
return path
def rand_temp_folder_generator():
"""Create and return a temporary directory with the path name '/temp_dir_name/autokeras' (E:g:- /tmp/autokeras)."""
chars = string.ascii_uppercase + string.digits
size = 6
random_suffix = ''.join(random.choice(chars) for _ in range(size))
sys_temp = temp_path_generator()
path = sys_temp + '_' + random_suffix
ensure_dir(path)
return path
def download_file(file_link, file_path):
"""Download the file specified in `file_link` and saves it in `file_path`."""
if not os.path.exists(file_path):
with open(file_path, "wb") as f:
print("Downloading %s" % file_path)
response = requests.get(file_link, stream=True)
total_length = response.headers.get('content-length')
if total_length is None: # no content length header
f.write(response.content)
else:
dl = 0
total_length = int(total_length)
for data in response.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
done = int(50 * dl / total_length)
sys.stdout.write("\r[%s%s]" % ('=' * done, ' ' * (50 - done)))
sys.stdout.flush()
def download_file_with_extract(file_link, file_path, extract_path):
"""Download the file specified in `file_link`, save to `file_path` and extract to the directory `extract_path`."""
if not os.path.exists(extract_path):
download_file(file_link, file_path)
zip_ref = zipfile.ZipFile(file_path, 'r')
print("extracting downloaded file...")
zip_ref.extractall(extract_path)
os.remove(file_path)
print("extracted and removed downloaded zip file")
print("file already extracted in the path %s" % extract_path)
def verbose_print(new_father_id, new_graph, new_model_id):
"""Print information about the operation performed on father model to obtain current model and father's id."""
cell_size = [24, 49]
print('New Model Id', new_model_id)
header = ['Father Model ID', 'Added Operation']
line = '|'.join(str(x).center(cell_size[i]) for i, x in enumerate(header))
print('\n' + '+' + '-' * len(line) + '+')
print('|' + line + '|')
print('+' + '-' * len(line) + '+')
for i in range(len(new_graph.operation_history)):
if i == len(new_graph.operation_history) // 2:
r = [new_father_id, ' '.join(str(item) for item in new_graph.operation_history[i])]
else:
r = [' ', ' '.join(str(item) for item in new_graph.operation_history[i])]
line = '|'.join(str(x).center(cell_size[i]) for i, x in enumerate(r))
print('|' + line + '|')
print('+' + '-' * len(line) + '+')
def validate_xy(x_train, y_train):
"""Validate `x_train`'s type and the shape of `x_train`, `y_train`."""
try:
x_train = x_train.astype('float64')
except ValueError:
raise ValueError('x_train should only contain numerical data.')
if len(x_train.shape) < 2:
        raise ValueError('x_train should have at least 2 dimensions.')
if x_train.shape[0] != y_train.shape[0]:
raise ValueError('x_train and y_train should have the same number of instances.')
def read_csv_file(csv_file_path):
"""Read the csv file and returns two separate list containing file names and their labels.
Args:
csv_file_path: Path to the CSV file.
Returns:
        file_names: List containing file names.
        file_labels: List containing their respective labels.
"""
file_names = []
file_labels = []
with open(csv_file_path, 'r') as files_path:
path_list = csv.DictReader(files_path)
fieldnames = path_list.fieldnames
for path in path_list:
file_names.append(path[fieldnames[0]])
file_labels.append(path[fieldnames[1]])
return file_names, file_labels
def read_image(img_path):
"""Read the image contained in the provided path `image_path`."""
img = imageio.imread(uri=img_path)
return img
def compute_image_resize_params(data):
"""Compute median dimension of all images in data.
    It is used to resize the images later. The number of channels does not change from the original data.
Args:
data: 1-D, 2-D or 3-D images. The Images are expected to have channel last configuration.
Returns:
median shape.
"""
if data is None or len(data.shape) == 0:
return []
if len(data.shape) == len(data[0].shape) + 1 and np.prod(data[0].shape[:-1]) <= Constant.MAX_IMAGE_SIZE:
return data[0].shape
data_shapes = []
for x in data:
data_shapes.append(x.shape)
median_shape = np.median(np.array(data_shapes), axis=0)
median_size = np.prod(median_shape[:-1])
if median_size > Constant.MAX_IMAGE_SIZE:
reduction_factor = np.power(Constant.MAX_IMAGE_SIZE / median_size, 1 / (len(median_shape) - 1))
median_shape[:-1] = median_shape[:-1] * reduction_factor
return median_shape.astype(int)
def resize_image_data(data, resize_shape):
"""Resize images to given dimension.
Args:
data: 1-D, 2-D or 3-D images. The Images are expected to have channel last configuration.
resize_shape: Image resize dimension.
Returns:
data: Reshaped data.
"""
if data is None or len(resize_shape) == 0:
return data
if len(data.shape) > 1 and np.array_equal(data[0].shape, resize_shape):
return data
output_data = []
for im in data:
output_data.append(zoom(input=im, zoom=np.divide(resize_shape, im.shape)))
return np.array(output_data)
def get_system():
"""Get the current system environment. If the current system is not supported, raise an exception.
Returns:
A string to represent the current OS name.
"posix" stands for Linux, Mac or Solaris architecture.
"nt" stands for Windows system.
"""
if 'google.colab' in sys.modules:
return Constant.SYS_GOOGLE_COLAB
if os.name == 'posix':
return Constant.SYS_LINUX
if os.name == 'nt':
return Constant.SYS_WINDOWS
raise EnvironmentError('Unsupported environment')
| en | 0.770993 | Create directory if it does not exist. Create path if it does not exist. Check if the given path exists. Load the pickle file from the provided path and returns the object. Save the pickle file to the specified path. # TODO cannot detect nvidia-smi in Windows normally. We need a fall back for windows If CUDA is available, use CUDA device, else use CPU device. When choosing from CUDA devices, this function will choose the one with max memory available. Returns: string device name. # TODO: could use gputil in the future # smi_out= # Free : xxxxxx MiB # Free : xxxxxx MiB # .... Create and return a temporary directory with the path name '/temp_dir_name/autokeras' (E:g:- /tmp/autokeras). Download the file specified in `file_link` and saves it in `file_path`. # no content length header Download the file specified in `file_link`, save to `file_path` and extract to the directory `extract_path`. Print information about the operation performed on father model to obtain current model and father's id. Validate `x_train`'s type and the shape of `x_train`, `y_train`. Read the csv file and returns two separate list containing file names and their labels. Args: csv_file_path: Path to the CSV file. Returns: file_names: List containing files names. file_label: List containing their respective labels. Read the image contained in the provided path `image_path`. Compute median dimension of all images in data. It used to resize the images later. Number of channels do not change from the original data. Args: data: 1-D, 2-D or 3-D images. The Images are expected to have channel last configuration. Returns: median shape. Resize images to given dimension. Args: data: 1-D, 2-D or 3-D images. The Images are expected to have channel last configuration. resize_shape: Image resize dimension. Returns: data: Reshaped data. Get the current system environment. If the current system is not supported, raise an exception. Returns: A string to represent the current OS name. "posix" stands for Linux, Mac or Solaris architecture. "nt" stands for Windows system. | 2.259336 | 2 |
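A small usage sketch for the image-resize helpers above; it assumes they are importable as indicated and that the toy batch stays under `Constant.MAX_IMAGE_SIZE`.

import numpy as np
# from autokeras.utils import compute_image_resize_params, resize_image_data  # assumed import path

images = np.random.rand(4, 32, 32, 3)         # channel-last toy batch
target = compute_image_resize_params(images)  # (32, 32, 3) for a uniform batch under the size cap
resized = resize_image_data(images, target)   # returned unchanged here, since shapes already match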
qurator/dinglehopper/align.py | JKamlah/dinglehopper | 0 | 6631059 | from .edit_distance import *
def align(t1, t2):
"""Align text."""
s1 = list(grapheme_clusters(unicodedata.normalize('NFC', t1)))
s2 = list(grapheme_clusters(unicodedata.normalize('NFC', t2)))
return seq_align(s1, s2)
def seq_align(s1, s2):
"""Align general sequences."""
s1 = list(s1)
s2 = list(s2)
ops = seq_editops(s1, s2)
i = 0
j = 0
while i < len(s1) or j < len(s2):
o = None
try:
ot = ops[0]
if ot[1] == i and ot[2] == j:
ops = ops[1:]
o = ot
except IndexError:
pass
if o:
if o[0] == 'insert':
yield (None, s2[j])
j += 1
elif o[0] == 'delete':
yield (s1[i], None)
i += 1
elif o[0] == 'replace':
yield (s1[i], s2[j])
i += 1
j += 1
else:
yield (s1[i], s2[j])
i += 1
j += 1
| from .edit_distance import *
def align(t1, t2):
"""Align text."""
s1 = list(grapheme_clusters(unicodedata.normalize('NFC', t1)))
s2 = list(grapheme_clusters(unicodedata.normalize('NFC', t2)))
return seq_align(s1, s2)
def seq_align(s1, s2):
"""Align general sequences."""
s1 = list(s1)
s2 = list(s2)
ops = seq_editops(s1, s2)
i = 0
j = 0
while i < len(s1) or j < len(s2):
o = None
try:
ot = ops[0]
if ot[1] == i and ot[2] == j:
ops = ops[1:]
o = ot
except IndexError:
pass
if o:
if o[0] == 'insert':
yield (None, s2[j])
j += 1
elif o[0] == 'delete':
yield (s1[i], None)
i += 1
elif o[0] == 'replace':
yield (s1[i], s2[j])
i += 1
j += 1
else:
yield (s1[i], s2[j])
i += 1
j += 1
| en | 0.394181 | Align text. Align general sequences. | 2.767178 | 3 |
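A quick sketch of what `seq_align` yields; `seq_editops` lives in the package's `edit_distance` module (not shown), so the pairings below are the expected behaviour rather than a verified run.

# list(seq_align("fox", "f0x"))   ->  [('f', 'f'), ('o', '0'), ('x', 'x')]               # one replacement
# list(seq_align("cat", "cart"))  ->  [('c', 'c'), ('a', 'a'), (None, 'r'), ('t', 't')]  # one insertion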
addons/point_of_sale/models/pos_order.py | SHIVJITH/Odoo_Machine_Test | 0 | 6631060 | <reponame>SHIVJITH/Odoo_Machine_Test
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from datetime import timedelta
from functools import partial
import psycopg2
import pytz
import re
from odoo import api, fields, models, tools, _
from odoo.tools import float_is_zero, float_round
from odoo.exceptions import ValidationError, UserError
from odoo.http import request
from odoo.osv.expression import AND
import base64
_logger = logging.getLogger(__name__)
class PosOrder(models.Model):
_name = "pos.order"
_description = "Point of Sale Orders"
_order = "date_order desc, name desc, id desc"
@api.model
def _amount_line_tax(self, line, fiscal_position_id):
taxes = line.tax_ids.filtered(lambda t: t.company_id.id == line.order_id.company_id.id)
taxes = fiscal_position_id.map_tax(taxes, line.product_id, line.order_id.partner_id)
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = taxes.compute_all(price, line.order_id.pricelist_id.currency_id, line.qty, product=line.product_id, partner=line.order_id.partner_id or False)['taxes']
return sum(tax.get('amount', 0.0) for tax in taxes)
@api.model
def _order_fields(self, ui_order):
process_line = partial(self.env['pos.order.line']._order_line_fields, session_id=ui_order['pos_session_id'])
return {
'user_id': ui_order['user_id'] or False,
'session_id': ui_order['pos_session_id'],
'lines': [process_line(l) for l in ui_order['lines']] if ui_order['lines'] else False,
'pos_reference': ui_order['name'],
'sequence_number': ui_order['sequence_number'],
'partner_id': ui_order['partner_id'] or False,
'date_order': ui_order['creation_date'].replace('T', ' ')[:19],
'fiscal_position_id': ui_order['fiscal_position_id'],
'pricelist_id': ui_order['pricelist_id'],
'amount_paid': ui_order['amount_paid'],
'amount_total': ui_order['amount_total'],
'amount_tax': ui_order['amount_tax'],
'amount_return': ui_order['amount_return'],
'company_id': self.env['pos.session'].browse(ui_order['pos_session_id']).company_id.id,
'to_invoice': ui_order['to_invoice'] if "to_invoice" in ui_order else False,
'is_tipped': ui_order.get('is_tipped', False),
'tip_amount': ui_order.get('tip_amount', 0),
}
@api.model
def _payment_fields(self, order, ui_paymentline):
return {
'amount': ui_paymentline['amount'] or 0.0,
'payment_date': ui_paymentline['name'],
'payment_method_id': ui_paymentline['payment_method_id'],
'card_type': ui_paymentline.get('card_type'),
'cardholder_name': ui_paymentline.get('cardholder_name'),
'transaction_id': ui_paymentline.get('transaction_id'),
'payment_status': ui_paymentline.get('payment_status'),
'ticket': ui_paymentline.get('ticket'),
'pos_order_id': order.id,
}
# This deals with orders that belong to a closed session. In order
# to recover from this situation we create a new rescue session,
# making it obvious that something went wrong.
# A new, separate, rescue session is preferred for every such recovery,
# to avoid adding unrelated orders to live sessions.
def _get_valid_session(self, order):
PosSession = self.env['pos.session']
closed_session = PosSession.browse(order['pos_session_id'])
_logger.warning('session %s (ID: %s) was closed but received order %s (total: %s) belonging to it',
closed_session.name,
closed_session.id,
order['name'],
order['amount_total'])
rescue_session = PosSession.search([
('state', 'not in', ('closed', 'closing_control')),
('rescue', '=', True),
('config_id', '=', closed_session.config_id.id),
], limit=1)
if rescue_session:
_logger.warning('reusing recovery session %s for saving order %s', rescue_session.name, order['name'])
return rescue_session
_logger.warning('attempting to create recovery session for saving order %s', order['name'])
new_session = PosSession.create({
'config_id': closed_session.config_id.id,
'name': _('(RESCUE FOR %(session)s)') % {'session': closed_session.name},
'rescue': True, # avoid conflict with live sessions
})
# bypass opening_control (necessary when using cash control)
new_session.action_pos_session_open()
return new_session
@api.model
def _process_order(self, order, draft, existing_order):
"""Create or update an pos.order from a given dictionary.
:param dict order: dictionary representing the order.
:param bool draft: Indicate that the pos_order is not validated yet.
:param existing_order: order to be updated or False.
:type existing_order: pos.order.
:returns: id of created/updated pos.order
:rtype: int
"""
order = order['data']
pos_session = self.env['pos.session'].browse(order['pos_session_id'])
if pos_session.state == 'closing_control' or pos_session.state == 'closed':
order['pos_session_id'] = self._get_valid_session(order).id
pos_order = False
if not existing_order:
pos_order = self.create(self._order_fields(order))
else:
pos_order = existing_order
pos_order.lines.unlink()
order['user_id'] = pos_order.user_id.id
pos_order.write(self._order_fields(order))
pos_order = pos_order.with_company(pos_order.company_id)
self = self.with_company(pos_order.company_id)
self._process_payment_lines(order, pos_order, pos_session, draft)
if not draft:
try:
pos_order.action_pos_order_paid()
except psycopg2.DatabaseError:
# do not hide transactional errors, the order(s) won't be saved!
raise
except Exception as e:
_logger.error('Could not fully process the POS Order: %s', tools.ustr(e))
pos_order._create_order_picking()
if pos_order.to_invoice and pos_order.state == 'paid':
pos_order.action_pos_order_invoice()
return pos_order.id
def _process_payment_lines(self, pos_order, order, pos_session, draft):
"""Create account.bank.statement.lines from the dictionary given to the parent function.
If the payment_line is an updated version of an existing one, the existing payment_line will first be
removed before making a new one.
:param pos_order: dictionary representing the order.
:type pos_order: dict.
:param order: Order object the payment lines should belong to.
:type order: pos.order
:param pos_session: PoS session the order was created in.
:type pos_session: pos.session
:param draft: Indicate that the pos_order is not validated yet.
:type draft: bool.
"""
prec_acc = order.pricelist_id.currency_id.decimal_places
order_bank_statement_lines= self.env['pos.payment'].search([('pos_order_id', '=', order.id)])
order_bank_statement_lines.unlink()
for payments in pos_order['statement_ids']:
order.add_payment(self._payment_fields(order, payments[2]))
order.amount_paid = sum(order.payment_ids.mapped('amount'))
if not draft and not float_is_zero(pos_order['amount_return'], prec_acc):
cash_payment_method = pos_session.payment_method_ids.filtered('is_cash_count')[:1]
if not cash_payment_method:
raise UserError(_("No cash statement found for this session. Unable to record returned cash."))
return_payment_vals = {
'name': _('return'),
'pos_order_id': order.id,
'amount': -pos_order['amount_return'],
'payment_date': fields.Datetime.now(),
'payment_method_id': cash_payment_method.id,
'is_change': True,
}
order.add_payment(return_payment_vals)
def _prepare_invoice_line(self, order_line):
return {
'product_id': order_line.product_id.id,
'quantity': order_line.qty if self.amount_total >= 0 else -order_line.qty,
'discount': order_line.discount,
'price_unit': order_line.price_unit,
'name': order_line.product_id.display_name,
'tax_ids': [(6, 0, order_line.tax_ids_after_fiscal_position.ids)],
'product_uom_id': order_line.product_uom_id.id,
}
def _get_pos_anglo_saxon_price_unit(self, product, partner_id, quantity):
moves = self.filtered(lambda o: o.partner_id.id == partner_id)\
.mapped('picking_ids.move_lines')\
._filter_anglo_saxon_moves(product)\
.sorted(lambda x: x.date)
price_unit = product._compute_average_price(0, quantity, moves)
return price_unit
name = fields.Char(string='Order Ref', required=True, readonly=True, copy=False, default='/')
date_order = fields.Datetime(string='Date', readonly=True, index=True, default=fields.Datetime.now)
user_id = fields.Many2one(
comodel_name='res.users', string='Responsible',
help="Person who uses the cash register. It can be a reliever, a student or an interim employee.",
default=lambda self: self.env.uid,
states={'done': [('readonly', True)], 'invoiced': [('readonly', True)]},
)
amount_tax = fields.Float(string='Taxes', digits=0, readonly=True, required=True)
amount_total = fields.Float(string='Total', digits=0, readonly=True, required=True)
amount_paid = fields.Float(string='Paid', states={'draft': [('readonly', False)]},
readonly=True, digits=0, required=True)
amount_return = fields.Float(string='Returned', digits=0, required=True, readonly=True)
lines = fields.One2many('pos.order.line', 'order_id', string='Order Lines', states={'draft': [('readonly', False)]}, readonly=True, copy=True)
company_id = fields.Many2one('res.company', string='Company', required=True, readonly=True)
pricelist_id = fields.Many2one('product.pricelist', string='Pricelist', required=True, states={
'draft': [('readonly', False)]}, readonly=True)
partner_id = fields.Many2one('res.partner', string='Customer', change_default=True, index=True, states={'draft': [('readonly', False)], 'paid': [('readonly', False)]})
sequence_number = fields.Integer(string='Sequence Number', help='A session-unique sequence number for the order', default=1)
session_id = fields.Many2one(
'pos.session', string='Session', required=True, index=True,
domain="[('state', '=', 'opened')]", states={'draft': [('readonly', False)]},
readonly=True)
config_id = fields.Many2one('pos.config', related='session_id.config_id', string="Point of Sale", readonly=False)
currency_id = fields.Many2one('res.currency', related='config_id.currency_id', string="Currency")
currency_rate = fields.Float("Currency Rate", compute='_compute_currency_rate', compute_sudo=True, store=True, digits=0, readonly=True,
help='The rate of the currency to the currency of rate applicable at the date of the order')
invoice_group = fields.Boolean(related="config_id.module_account", readonly=False)
state = fields.Selection(
[('draft', 'New'), ('cancel', 'Cancelled'), ('paid', 'Paid'), ('done', 'Posted'), ('invoiced', 'Invoiced')],
'Status', readonly=True, copy=False, default='draft')
account_move = fields.Many2one('account.move', string='Invoice', readonly=True, copy=False)
picking_ids = fields.One2many('stock.picking', 'pos_order_id')
picking_count = fields.Integer(compute='_compute_picking_count')
failed_pickings = fields.Boolean(compute='_compute_picking_count')
picking_type_id = fields.Many2one('stock.picking.type', related='session_id.config_id.picking_type_id', string="Operation Type", readonly=False)
note = fields.Text(string='Internal Notes')
nb_print = fields.Integer(string='Number of Print', readonly=True, copy=False, default=0)
pos_reference = fields.Char(string='Receipt Number', readonly=True, copy=False)
sale_journal = fields.Many2one('account.journal', related='session_id.config_id.journal_id', string='Sales Journal', store=True, readonly=True, ondelete='restrict')
fiscal_position_id = fields.Many2one(
comodel_name='account.fiscal.position', string='Fiscal Position',
readonly=True,
states={'draft': [('readonly', False)]},
)
payment_ids = fields.One2many('pos.payment', 'pos_order_id', string='Payments', readonly=True)
session_move_id = fields.Many2one('account.move', string='Session Journal Entry', related='session_id.move_id', readonly=True, copy=False)
to_invoice = fields.Boolean('To invoice')
is_invoiced = fields.Boolean('Is Invoiced', compute='_compute_is_invoiced')
is_tipped = fields.Boolean('Is this already tipped?', readonly=True)
tip_amount = fields.Float(string='Tip Amount', digits=0, readonly=True)
@api.depends('account_move')
def _compute_is_invoiced(self):
for order in self:
order.is_invoiced = bool(order.account_move)
@api.depends('picking_ids', 'picking_ids.state')
def _compute_picking_count(self):
for order in self:
order.picking_count = len(order.picking_ids)
order.failed_pickings = bool(order.picking_ids.filtered(lambda p: p.state != 'done'))
@api.depends('date_order', 'company_id', 'currency_id', 'company_id.currency_id')
def _compute_currency_rate(self):
for order in self:
order.currency_rate = self.env['res.currency']._get_conversion_rate(order.company_id.currency_id, order.currency_id, order.company_id, order.date_order)
@api.onchange('payment_ids', 'lines')
def _onchange_amount_all(self):
for order in self:
currency = order.pricelist_id.currency_id
order.amount_paid = sum(payment.amount for payment in order.payment_ids)
order.amount_return = sum(payment.amount < 0 and payment.amount or 0 for payment in order.payment_ids)
order.amount_tax = currency.round(sum(self._amount_line_tax(line, order.fiscal_position_id) for line in order.lines))
amount_untaxed = currency.round(sum(line.price_subtotal for line in order.lines))
order.amount_total = order.amount_tax + amount_untaxed
def _compute_batch_amount_all(self):
"""
        Does essentially the same thing as `_onchange_amount_all` but only for actually existing records.
        It is intended as a helper method, not as a business one.
        Practical to use for migrations.
"""
amounts = {order_id: {'paid': 0, 'return': 0, 'taxed': 0, 'taxes': 0} for order_id in self.ids}
for order in self.env['pos.payment'].read_group([('pos_order_id', 'in', self.ids)], ['pos_order_id', 'amount'], ['pos_order_id']):
amounts[order['pos_order_id'][0]]['paid'] = order['amount']
for order in self.env['pos.payment'].read_group(['&', ('pos_order_id', 'in', self.ids), ('amount', '<', 0)], ['pos_order_id', 'amount'], ['pos_order_id']):
amounts[order['pos_order_id'][0]]['return'] = order['amount']
for order in self.env['pos.order.line'].read_group([('order_id', 'in', self.ids)], ['order_id', 'price_subtotal', 'price_subtotal_incl'], ['order_id']):
amounts[order['order_id'][0]]['taxed'] = order['price_subtotal_incl']
amounts[order['order_id'][0]]['taxes'] = order['price_subtotal_incl'] - order['price_subtotal']
for order in self:
currency = order.pricelist_id.currency_id
order.write({
'amount_paid': amounts[order.id]['paid'],
'amount_return': amounts[order.id]['return'],
'amount_tax': currency.round(amounts[order.id]['taxes']),
'amount_total': currency.round(amounts[order.id]['taxed'])
})
@api.onchange('partner_id')
def _onchange_partner_id(self):
if self.partner_id:
self.pricelist_id = self.partner_id.property_product_pricelist.id
def unlink(self):
for pos_order in self.filtered(lambda pos_order: pos_order.state not in ['draft', 'cancel']):
raise UserError(_('In order to delete a sale, it must be new or cancelled.'))
return super(PosOrder, self).unlink()
@api.model
def create(self, values):
session = self.env['pos.session'].browse(values['session_id'])
values = self._complete_values_from_session(session, values)
return super(PosOrder, self).create(values)
@api.model
def _complete_values_from_session(self, session, values):
if values.get('state') and values['state'] == 'paid':
values['name'] = session.config_id.sequence_id._next()
values.setdefault('pricelist_id', session.config_id.pricelist_id.id)
values.setdefault('fiscal_position_id', session.config_id.default_fiscal_position_id.id)
values.setdefault('company_id', session.config_id.company_id.id)
return values
def write(self, vals):
for order in self:
if vals.get('state') and vals['state'] == 'paid' and order.name == '/':
vals['name'] = order.config_id.sequence_id._next()
return super(PosOrder, self).write(vals)
def action_stock_picking(self):
self.ensure_one()
action = self.env['ir.actions.act_window']._for_xml_id('stock.action_picking_tree_ready')
action['context'] = {}
action['domain'] = [('id', 'in', self.picking_ids.ids)]
return action
def action_view_invoice(self):
return {
'name': _('Customer Invoice'),
'view_mode': 'form',
'view_id': self.env.ref('account.view_move_form').id,
'res_model': 'account.move',
'context': "{'move_type':'out_invoice'}",
'type': 'ir.actions.act_window',
'res_id': self.account_move.id,
}
def _is_pos_order_paid(self):
return float_is_zero(self._get_rounded_amount(self.amount_total) - self.amount_paid, precision_rounding=self.currency_id.rounding)
def _get_rounded_amount(self, amount):
if self.config_id.cash_rounding:
amount = float_round(amount, precision_rounding=self.config_id.rounding_method.rounding, rounding_method=self.config_id.rounding_method.rounding_method)
currency = self.currency_id
return currency.round(amount) if currency else amount
def _create_invoice(self, move_vals):
self.ensure_one()
new_move = self.env['account.move'].sudo().with_company(self.company_id).with_context(default_move_type=move_vals['move_type']).create(move_vals)
message = _("This invoice has been created from the point of sale session: <a href=# data-oe-model=pos.order data-oe-id=%d>%s</a>") % (self.id, self.name)
new_move.message_post(body=message)
if self.config_id.cash_rounding:
rounding_applied = float_round(self.amount_paid - self.amount_total,
precision_rounding=new_move.currency_id.rounding)
rounding_line = new_move.line_ids.filtered(lambda line: line.is_rounding_line)
if rounding_line and rounding_line.debit > 0:
rounding_line_difference = rounding_line.debit + rounding_applied
elif rounding_line and rounding_line.credit > 0:
rounding_line_difference = -rounding_line.credit + rounding_applied
else:
rounding_line_difference = rounding_applied
if rounding_applied:
if rounding_applied > 0.0:
account_id = new_move.invoice_cash_rounding_id.loss_account_id.id
else:
account_id = new_move.invoice_cash_rounding_id.profit_account_id.id
if rounding_line:
if rounding_line_difference:
rounding_line.with_context(check_move_validity=False).write({
'debit': rounding_applied < 0.0 and -rounding_applied or 0.0,
'credit': rounding_applied > 0.0 and rounding_applied or 0.0,
'account_id': account_id,
'price_unit': rounding_applied,
})
else:
self.env['account.move.line'].with_context(check_move_validity=False).create({
'debit': rounding_applied < 0.0 and -rounding_applied or 0.0,
'credit': rounding_applied > 0.0 and rounding_applied or 0.0,
'quantity': 1.0,
'amount_currency': rounding_applied,
'partner_id': new_move.partner_id.id,
'move_id': new_move.id,
'currency_id': new_move.currency_id if new_move.currency_id != new_move.company_id.currency_id else False,
'company_id': new_move.company_id.id,
'company_currency_id': new_move.company_id.currency_id.id,
'is_rounding_line': True,
'sequence': 9999,
'name': new_move.invoice_cash_rounding_id.name,
'account_id': account_id,
})
else:
if rounding_line:
rounding_line.with_context(check_move_validity=False).unlink()
if rounding_line_difference:
existing_terms_line = new_move.line_ids.filtered(
lambda line: line.account_id.user_type_id.type in ('receivable', 'payable'))
if existing_terms_line.debit > 0:
existing_terms_line_new_val = float_round(
existing_terms_line.debit + rounding_line_difference,
precision_rounding=new_move.currency_id.rounding)
else:
existing_terms_line_new_val = float_round(
-existing_terms_line.credit + rounding_line_difference,
precision_rounding=new_move.currency_id.rounding)
existing_terms_line.write({
'debit': existing_terms_line_new_val > 0.0 and existing_terms_line_new_val or 0.0,
'credit': existing_terms_line_new_val < 0.0 and -existing_terms_line_new_val or 0.0,
})
new_move._recompute_payment_terms_lines()
return new_move
def action_pos_order_paid(self):
self.ensure_one()
# TODO: add support for mix of cash and non-cash payments when both cash_rounding and only_round_cash_method are True
if not self.config_id.cash_rounding \
or self.config_id.only_round_cash_method \
and not any(p.payment_method_id.is_cash_count for p in self.payment_ids):
total = self.amount_total
else:
total = float_round(self.amount_total, precision_rounding=self.config_id.rounding_method.rounding, rounding_method=self.config_id.rounding_method.rounding_method)
isPaid = float_is_zero(total - self.amount_paid, precision_rounding=self.currency_id.rounding)
if not isPaid and not self.config_id.cash_rounding:
raise UserError(_("Order %s is not fully paid.", self.name))
elif not isPaid and self.config_id.cash_rounding:
currency = self.currency_id
if self.config_id.rounding_method.rounding_method == "HALF-UP":
maxDiff = currency.round(self.config_id.rounding_method.rounding / 2)
else:
maxDiff = currency.round(self.config_id.rounding_method.rounding)
diff = currency.round(self.amount_total - self.amount_paid)
if not abs(diff) < maxDiff:
raise UserError(_("Order %s is not fully paid.", self.name))
self.write({'state': 'paid'})
return True
def _prepare_invoice_vals(self):
self.ensure_one()
timezone = pytz.timezone(self._context.get('tz') or self.env.user.tz or 'UTC')
vals = {
'payment_reference': self.name,
'invoice_origin': self.name,
'journal_id': self.session_id.config_id.invoice_journal_id.id,
'move_type': 'out_invoice' if self.amount_total >= 0 else 'out_refund',
'ref': self.name,
'partner_id': self.partner_id.id,
'narration': self.note or '',
# considering partner's sale pricelist's currency
'currency_id': self.pricelist_id.currency_id.id,
'invoice_user_id': self.user_id.id,
'invoice_date': self.date_order.astimezone(timezone).date(),
'fiscal_position_id': self.fiscal_position_id.id,
'invoice_line_ids': [(0, None, self._prepare_invoice_line(line)) for line in self.lines],
'invoice_cash_rounding_id': self.config_id.rounding_method.id
if self.config_id.cash_rounding and (not self.config_id.only_round_cash_method or any(p.payment_method_id.is_cash_count for p in self.payment_ids))
else False
}
return vals
def action_pos_order_invoice(self):
moves = self.env['account.move']
for order in self:
# Force company for all SUPERUSER_ID action
if order.account_move:
moves += order.account_move
continue
if not order.partner_id:
raise UserError(_('Please provide a partner for the sale.'))
move_vals = order._prepare_invoice_vals()
new_move = order._create_invoice(move_vals)
order.write({'account_move': new_move.id, 'state': 'invoiced'})
new_move.sudo().with_company(order.company_id)._post()
moves += new_move
if not moves:
return {}
return {
'name': _('Customer Invoice'),
'view_mode': 'form',
'view_id': self.env.ref('account.view_move_form').id,
'res_model': 'account.move',
'context': "{'move_type':'out_invoice'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': moves and moves.ids[0] or False,
}
# this method is unused, and so is the state 'cancel'
def action_pos_order_cancel(self):
return self.write({'state': 'cancel'})
@api.model
def create_from_ui(self, orders, draft=False):
""" Create and update Orders from the frontend PoS application.
Create new orders and update orders that are in draft status. If an order already exists with a status
        different from 'draft', it will be discarded; otherwise it will be saved to the database. If saved with
'draft' status the order can be overwritten later by this function.
:param orders: dictionary with the orders to be created.
:type orders: dict.
        :param draft: Indicate if the orders are meant to be finalised or temporarily saved.
:type draft: bool.
:Returns: list -- list of db-ids for the created and updated orders.
"""
order_ids = []
for order in orders:
existing_order = False
if 'server_id' in order['data']:
existing_order = self.env['pos.order'].search(['|', ('id', '=', order['data']['server_id']), ('pos_reference', '=', order['data']['name'])], limit=1)
if (existing_order and existing_order.state == 'draft') or not existing_order:
order_ids.append(self._process_order(order, draft, existing_order))
return self.env['pos.order'].search_read(domain = [('id', 'in', order_ids)], fields = ['id', 'pos_reference'])
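    # Illustrative shape of one element of `orders`, inferred from _order_fields() and
    # _payment_fields() above; the values are placeholders, not a captured payload:
    # {
    #     'data': {
    #         'name': 'Order 00001-001-0001',
    #         'pos_session_id': 1,
    #         'user_id': 2,
    #         'partner_id': False,
    #         'pricelist_id': 1,
    #         'fiscal_position_id': False,
    #         'sequence_number': 1,
    #         'creation_date': '2021-01-01T10:00:00',
    #         'lines': [[0, 0, {'product_id': 7, 'qty': 1, 'price_unit': 10.0, ...}]],
    #         'statement_ids': [[0, 0, {'name': '2021-01-01 10:00:00',
    #                                   'payment_method_id': 1, 'amount': 10.0}]],
    #         'amount_paid': 10.0, 'amount_total': 10.0, 'amount_tax': 0.0,
    #         'amount_return': 0.0, 'to_invoice': False,
    #     },
    # }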
def _create_order_picking(self):
self.ensure_one()
if not self.session_id.update_stock_at_closing or (self.company_id.anglo_saxon_accounting and self.to_invoice):
picking_type = self.config_id.picking_type_id
if self.partner_id.property_stock_customer:
destination_id = self.partner_id.property_stock_customer.id
elif not picking_type or not picking_type.default_location_dest_id:
destination_id = self.env['stock.warehouse']._get_partner_locations()[0].id
else:
destination_id = picking_type.default_location_dest_id.id
pickings = self.env['stock.picking']._create_picking_from_pos_order_lines(destination_id, self.lines, picking_type, self.partner_id)
pickings.write({'pos_session_id': self.session_id.id, 'pos_order_id': self.id, 'origin': self.name})
def add_payment(self, data):
"""Create a new payment for the order"""
self.ensure_one()
self.env['pos.payment'].create(data)
self.amount_paid = sum(self.payment_ids.mapped('amount'))
def _prepare_refund_values(self, current_session):
self.ensure_one()
return {
'name': self.name + _(' REFUND'),
'session_id': current_session.id,
'date_order': fields.Datetime.now(),
'pos_reference': self.pos_reference,
'lines': False,
'amount_tax': -self.amount_tax,
'amount_total': -self.amount_total,
'amount_paid': 0,
}
def refund(self):
"""Create a copy of order for refund order"""
refund_orders = self.env['pos.order']
for order in self:
# When a refund is performed, we are creating it in a session having the same config as the original
# order. It can be the same session, or if it has been closed the new one that has been opened.
current_session = order.session_id.config_id.current_session_id
if not current_session:
raise UserError(_('To return product(s), you need to open a session in the POS %s', order.session_id.config_id.display_name))
refund_order = order.copy(
order._prepare_refund_values(current_session)
)
for line in order.lines:
PosOrderLineLot = self.env['pos.pack.operation.lot']
for pack_lot in line.pack_lot_ids:
PosOrderLineLot += pack_lot.copy()
line.copy(line._prepare_refund_data(refund_order, PosOrderLineLot))
refund_orders |= refund_order
return {
'name': _('Return Products'),
'view_mode': 'form',
'res_model': 'pos.order',
'res_id': refund_orders.ids[0],
'view_id': False,
'context': self.env.context,
'type': 'ir.actions.act_window',
'target': 'current',
}
def action_receipt_to_customer(self, name, client, ticket):
if not self:
return False
if not client.get('email'):
return False
message = _("<p>Dear %s,<br/>Here is your electronic ticket for the %s. </p>") % (client['name'], name)
filename = 'Receipt-' + name + '.jpg'
receipt = self.env['ir.attachment'].create({
'name': filename,
'type': 'binary',
'datas': ticket,
'res_model': 'pos.order',
'res_id': self.ids[0],
'store_fname': filename,
'mimetype': 'image/jpeg',
})
mail_values = {
'subject': _('Receipt %s', name),
'body_html': message,
'author_id': self.env.user.partner_id.id,
'email_from': self.env.company.email or self.env.user.email_formatted,
'email_to': client['email'],
'attachment_ids': [(4, receipt.id)],
}
if self.mapped('account_move'):
report = self.env.ref('point_of_sale.pos_invoice_report')._render_qweb_pdf(self.ids[0])
filename = name + '.pdf'
attachment = self.env['ir.attachment'].create({
'name': filename,
'type': 'binary',
'datas': base64.b64encode(report[0]),
'store_fname': filename,
'res_model': 'pos.order',
'res_id': self.ids[0],
'mimetype': 'application/x-pdf'
})
mail_values['attachment_ids'] += [(4, attachment.id)]
mail = self.env['mail.mail'].sudo().create(mail_values)
mail.send()
@api.model
def remove_from_ui(self, server_ids):
""" Remove orders from the frontend PoS application
Remove orders from the server by id.
:param server_ids: list of the id's of orders to remove from the server.
:type server_ids: list.
:returns: list -- list of db-ids for the removed orders.
"""
orders = self.search([('id', 'in', server_ids),('state', '=', 'draft')])
orders.write({'state': 'cancel'})
# TODO Looks like delete cascade is a better solution.
orders.mapped('payment_ids').sudo().unlink()
orders.sudo().unlink()
return orders.ids
@api.model
def search_paid_order_ids(self, config_id, domain, limit, offset):
"""Search for 'paid' orders that satisfy the given domain, limit and offset."""
default_domain = ['&', ('config_id', '=', config_id), '!', '|', ('state', '=', 'draft'), ('state', '=', 'cancelled')]
real_domain = AND([domain, default_domain])
ids = self.search(AND([domain, default_domain]), limit=limit, offset=offset).ids
totalCount = self.search_count(real_domain)
return {'ids': ids, 'totalCount': totalCount}
def _export_for_ui(self, order):
timezone = pytz.timezone(self._context.get('tz') or self.env.user.tz or 'UTC')
return {
'lines': [[0, 0, line] for line in order.lines.export_for_ui()],
'statement_ids': [[0, 0, payment] for payment in order.payment_ids.export_for_ui()],
'name': order.pos_reference,
'uid': re.search('([0-9]|-){14}', order.pos_reference).group(0),
'amount_paid': order.amount_paid,
'amount_total': order.amount_total,
'amount_tax': order.amount_tax,
'amount_return': order.amount_return,
'pos_session_id': order.session_id.id,
'is_session_closed': order.session_id.state == 'closed',
'pricelist_id': order.pricelist_id.id,
'partner_id': order.partner_id.id,
'user_id': order.user_id.id,
'sequence_number': order.sequence_number,
'creation_date': order.date_order.astimezone(timezone),
'fiscal_position_id': order.fiscal_position_id.id,
'to_invoice': order.to_invoice,
'state': order.state,
'account_move': order.account_move.id,
'id': order.id,
'is_tipped': order.is_tipped,
'tip_amount': order.tip_amount,
}
def export_for_ui(self):
""" Returns a list of dict with each item having similar signature as the return of
`export_as_JSON` of models.Order. This is useful for back-and-forth communication
between the pos frontend and backend.
"""
return self.mapped(self._export_for_ui) if self else []
class PosOrderLine(models.Model):
_name = "pos.order.line"
_description = "Point of Sale Order Lines"
_rec_name = "product_id"
def _order_line_fields(self, line, session_id=None):
if line and 'name' not in line[2]:
session = self.env['pos.session'].browse(session_id).exists() if session_id else None
if session and session.config_id.sequence_line_id:
# set name based on the sequence specified on the config
line[2]['name'] = session.config_id.sequence_line_id._next()
else:
# fallback on any pos.order.line sequence
line[2]['name'] = self.env['ir.sequence'].next_by_code('pos.order.line')
if line and 'tax_ids' not in line[2]:
product = self.env['product.product'].browse(line[2]['product_id'])
line[2]['tax_ids'] = [(6, 0, [x.id for x in product.taxes_id])]
# Clean up fields sent by the JS
line = [
line[0], line[1], {k: v for k, v in line[2].items() if k in self.env['pos.order.line']._fields}
]
return line
company_id = fields.Many2one('res.company', string='Company', related="order_id.company_id", store=True)
name = fields.Char(string='Line No', required=True, copy=False)
notice = fields.Char(string='Discount Notice')
product_id = fields.Many2one('product.product', string='Product', domain=[('sale_ok', '=', True)], required=True, change_default=True)
price_unit = fields.Float(string='Unit Price', digits=0)
qty = fields.Float('Quantity', digits='Product Unit of Measure', default=1)
price_subtotal = fields.Float(string='Subtotal w/o Tax', digits=0,
readonly=True, required=True)
price_subtotal_incl = fields.Float(string='Subtotal', digits=0,
readonly=True, required=True)
discount = fields.Float(string='Discount (%)', digits=0, default=0.0)
order_id = fields.Many2one('pos.order', string='Order Ref', ondelete='cascade', required=True)
tax_ids = fields.Many2many('account.tax', string='Taxes', readonly=True)
tax_ids_after_fiscal_position = fields.Many2many('account.tax', compute='_get_tax_ids_after_fiscal_position', string='Taxes to Apply')
pack_lot_ids = fields.One2many('pos.pack.operation.lot', 'pos_order_line_id', string='Lot/serial Number')
product_uom_id = fields.Many2one('uom.uom', string='Product UoM', related='product_id.uom_id')
currency_id = fields.Many2one('res.currency', related='order_id.currency_id')
full_product_name = fields.Char('Full Product Name')
def _prepare_refund_data(self, refund_order, PosOrderLineLot):
"""
This prepares data for refund order line. Inheritance may inject more data here
@param refund_order: the pre-created refund order
@type refund_order: pos.order
@param PosOrderLineLot: the pre-created Pack operation Lot
@type PosOrderLineLot: pos.pack.operation.lot
@return: dictionary of data which is for creating a refund order line from the original line
@rtype: dict
"""
self.ensure_one()
return {
'name': self.name + _(' REFUND'),
'qty': -self.qty,
'order_id': refund_order.id,
'price_subtotal': -self.price_subtotal,
'price_subtotal_incl': -self.price_subtotal_incl,
'pack_lot_ids': PosOrderLineLot,
}
@api.model
def create(self, values):
if values.get('order_id') and not values.get('name'):
# set name based on the sequence specified on the config
config = self.env['pos.order'].browse(values['order_id']).session_id.config_id
if config.sequence_line_id:
values['name'] = config.sequence_line_id._next()
if not values.get('name'):
# fallback on any pos.order sequence
values['name'] = self.env['ir.sequence'].next_by_code('pos.order.line')
return super(PosOrderLine, self).create(values)
def write(self, values):
        if values.get('pack_lot_ids'):  # guard on the same key that is iterated below (the frontend sends 'pack_lot_ids')
for pl in values.get('pack_lot_ids'):
if pl[2].get('server_id'):
pl[2]['id'] = pl[2]['server_id']
del pl[2]['server_id']
return super().write(values)
@api.onchange('price_unit', 'tax_ids', 'qty', 'discount', 'product_id')
def _onchange_amount_line_all(self):
for line in self:
res = line._compute_amount_line_all()
line.update(res)
def _compute_amount_line_all(self):
self.ensure_one()
fpos = self.order_id.fiscal_position_id
tax_ids_after_fiscal_position = fpos.map_tax(self.tax_ids, self.product_id, self.order_id.partner_id)
price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
taxes = tax_ids_after_fiscal_position.compute_all(price, self.order_id.pricelist_id.currency_id, self.qty, product=self.product_id, partner=self.order_id.partner_id)
return {
'price_subtotal_incl': taxes['total_included'],
'price_subtotal': taxes['total_excluded'],
}
@api.onchange('product_id')
def _onchange_product_id(self):
if self.product_id:
if not self.order_id.pricelist_id:
raise UserError(
_('You have to select a pricelist in the sale form !\n'
'Please set one before choosing a product.'))
price = self.order_id.pricelist_id.get_product_price(
self.product_id, self.qty or 1.0, self.order_id.partner_id)
self._onchange_qty()
self.tax_ids = self.product_id.taxes_id.filtered(lambda r: not self.company_id or r.company_id == self.company_id)
tax_ids_after_fiscal_position = self.order_id.fiscal_position_id.map_tax(self.tax_ids, self.product_id, self.order_id.partner_id)
self.price_unit = self.env['account.tax']._fix_tax_included_price_company(price, self.product_id.taxes_id, tax_ids_after_fiscal_position, self.company_id)
@api.onchange('qty', 'discount', 'price_unit', 'tax_ids')
def _onchange_qty(self):
if self.product_id:
if not self.order_id.pricelist_id:
raise UserError(_('You have to select a pricelist in the sale form.'))
price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
self.price_subtotal = self.price_subtotal_incl = price * self.qty
if (self.product_id.taxes_id):
taxes = self.product_id.taxes_id.compute_all(price, self.order_id.pricelist_id.currency_id, self.qty, product=self.product_id, partner=False)
self.price_subtotal = taxes['total_excluded']
self.price_subtotal_incl = taxes['total_included']
@api.depends('order_id', 'order_id.fiscal_position_id')
def _get_tax_ids_after_fiscal_position(self):
for line in self:
line.tax_ids_after_fiscal_position = line.order_id.fiscal_position_id.map_tax(line.tax_ids, line.product_id, line.order_id.partner_id)
def _export_for_ui(self, orderline):
return {
'qty': orderline.qty,
'price_unit': orderline.price_unit,
'price_subtotal': orderline.price_subtotal,
'price_subtotal_incl': orderline.price_subtotal_incl,
'product_id': orderline.product_id.id,
'discount': orderline.discount,
'tax_ids': [[6, False, orderline.tax_ids.mapped(lambda tax: tax.id)]],
'id': orderline.id,
'pack_lot_ids': [[0, 0, lot] for lot in orderline.pack_lot_ids.export_for_ui()],
}
def export_for_ui(self):
return self.mapped(self._export_for_ui) if self else []
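# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Shape of an order-line command as the JS client sends it, and how
# _order_line_fields above completes it. The product id, quantities and the
# session id are placeholders; 'name' and 'tax_ids' are deliberately omitted so
# that the method fills them from the session's sequence and the product's taxes.
def _demo_normalize_ui_line(env, session_id, product_id):
    ui_line = [0, 0, {
        'product_id': product_id,
        'qty': 2,
        'price_unit': 5.0,
        'discount': 0.0,
        'price_subtotal': 10.0,
        'price_subtotal_incl': 10.0,
    }]
    return env['pos.order.line']._order_line_fields(ui_line, session_id=session_id)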
class PosOrderLineLot(models.Model):
_name = "pos.pack.operation.lot"
_description = "Specify product lot/serial number in pos order line"
_rec_name = "lot_name"
pos_order_line_id = fields.Many2one('pos.order.line')
order_id = fields.Many2one('pos.order', related="pos_order_line_id.order_id", readonly=False)
lot_name = fields.Char('Lot Name')
product_id = fields.Many2one('product.product', related='pos_order_line_id.product_id', readonly=False)
def _export_for_ui(self, lot):
return {
'lot_name': lot.lot_name,
}
def export_for_ui(self):
return self.mapped(self._export_for_ui) if self else []
class ReportSaleDetails(models.AbstractModel):
_name = 'report.point_of_sale.report_saledetails'
_description = 'Point of Sale Details'
@api.model
def get_sale_details(self, date_start=False, date_stop=False, config_ids=False, session_ids=False):
""" Serialise the orders of the requested time period, configs and sessions.
:param date_start: The dateTime to start, default today 00:00:00.
:type date_start: str.
:param date_stop: The dateTime to stop, default date_start + 23:59:59.
:type date_stop: str.
:param config_ids: Pos Config id's to include.
:type config_ids: list of numbers.
        :param session_ids: Pos Session id's to include.
:type session_ids: list of numbers.
:returns: dict -- Serialised sales.
"""
domain = [('state', 'in', ['paid','invoiced','done'])]
if (session_ids):
domain = AND([domain, [('session_id', 'in', session_ids)]])
else:
if date_start:
date_start = fields.Datetime.from_string(date_start)
else:
# start by default today 00:00:00
user_tz = pytz.timezone(self.env.context.get('tz') or self.env.user.tz or 'UTC')
today = user_tz.localize(fields.Datetime.from_string(fields.Date.context_today(self)))
date_start = today.astimezone(pytz.timezone('UTC'))
if date_stop:
date_stop = fields.Datetime.from_string(date_stop)
# avoid a date_stop smaller than date_start
if (date_stop < date_start):
date_stop = date_start + timedelta(days=1, seconds=-1)
else:
# stop by default today 23:59:59
date_stop = date_start + timedelta(days=1, seconds=-1)
domain = AND([domain,
[('date_order', '>=', fields.Datetime.to_string(date_start)),
('date_order', '<=', fields.Datetime.to_string(date_stop))]
])
if config_ids:
domain = AND([domain, [('config_id', 'in', config_ids)]])
orders = self.env['pos.order'].search(domain)
user_currency = self.env.company.currency_id
total = 0.0
products_sold = {}
taxes = {}
for order in orders:
if user_currency != order.pricelist_id.currency_id:
total += order.pricelist_id.currency_id._convert(
order.amount_total, user_currency, order.company_id, order.date_order or fields.Date.today())
else:
total += order.amount_total
currency = order.session_id.currency_id
for line in order.lines:
key = (line.product_id, line.price_unit, line.discount)
products_sold.setdefault(key, 0.0)
products_sold[key] += line.qty
if line.tax_ids_after_fiscal_position:
line_taxes = line.tax_ids_after_fiscal_position.sudo().compute_all(line.price_unit * (1-(line.discount or 0.0)/100.0), currency, line.qty, product=line.product_id, partner=line.order_id.partner_id or False)
for tax in line_taxes['taxes']:
taxes.setdefault(tax['id'], {'name': tax['name'], 'tax_amount':0.0, 'base_amount':0.0})
taxes[tax['id']]['tax_amount'] += tax['amount']
taxes[tax['id']]['base_amount'] += tax['base']
else:
taxes.setdefault(0, {'name': _('No Taxes'), 'tax_amount':0.0, 'base_amount':0.0})
taxes[0]['base_amount'] += line.price_subtotal_incl
payment_ids = self.env["pos.payment"].search([('pos_order_id', 'in', orders.ids)]).ids
if payment_ids:
self.env.cr.execute("""
SELECT method.name, sum(amount) total
FROM pos_payment AS payment,
pos_payment_method AS method
WHERE payment.payment_method_id = method.id
AND payment.id IN %s
GROUP BY method.name
""", (tuple(payment_ids),))
payments = self.env.cr.dictfetchall()
else:
payments = []
return {
'currency_precision': user_currency.decimal_places,
'total_paid': user_currency.round(total),
'payments': payments,
'company_name': self.env.company.name,
'taxes': list(taxes.values()),
'products': sorted([{
'product_id': product.id,
'product_name': product.name,
'code': product.default_code,
'quantity': qty,
'price_unit': price_unit,
'discount': discount,
'uom': product.uom_id.name
} for (product, price_unit, discount), qty in products_sold.items()], key=lambda l: l['product_name'])
}
@api.model
def _get_report_values(self, docids, data=None):
data = dict(data or {})
configs = self.env['pos.config'].browse(data['config_ids'])
data.update(self.get_sale_details(data['date_start'], data['date_stop'], configs.ids))
return data
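# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Minimal call to the report model defined above: summarise one day of sales for a
# single PoS config. The date strings follow the format expected by
# fields.Datetime.from_string; the config id is a placeholder.
def _demo_daily_sales(env):
    report = env['report.point_of_sale.report_saledetails']
    return report.get_sale_details(
        date_start='2021-01-01 00:00:00',
        date_stop='2021-01-01 23:59:59',
        config_ids=[1],
    )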
class AccountCashRounding(models.Model):
_inherit = 'account.cash.rounding'
@api.constrains('rounding', 'rounding_method', 'strategy')
def _check_session_state(self):
open_session = self.env['pos.session'].search([('config_id.rounding_method', '=', self.id), ('state', '!=', 'closed')])
if open_session:
raise ValidationError(
_("You are not allowed to change the cash rounding configuration while a pos session using it is already opened."))
| # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from datetime import timedelta
from functools import partial
import psycopg2
import pytz
import re
from odoo import api, fields, models, tools, _
from odoo.tools import float_is_zero, float_round
from odoo.exceptions import ValidationError, UserError
from odoo.http import request
from odoo.osv.expression import AND
import base64
_logger = logging.getLogger(__name__)
class PosOrder(models.Model):
_name = "pos.order"
_description = "Point of Sale Orders"
_order = "date_order desc, name desc, id desc"
@api.model
def _amount_line_tax(self, line, fiscal_position_id):
taxes = line.tax_ids.filtered(lambda t: t.company_id.id == line.order_id.company_id.id)
taxes = fiscal_position_id.map_tax(taxes, line.product_id, line.order_id.partner_id)
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = taxes.compute_all(price, line.order_id.pricelist_id.currency_id, line.qty, product=line.product_id, partner=line.order_id.partner_id or False)['taxes']
return sum(tax.get('amount', 0.0) for tax in taxes)
@api.model
def _order_fields(self, ui_order):
process_line = partial(self.env['pos.order.line']._order_line_fields, session_id=ui_order['pos_session_id'])
return {
'user_id': ui_order['user_id'] or False,
'session_id': ui_order['pos_session_id'],
'lines': [process_line(l) for l in ui_order['lines']] if ui_order['lines'] else False,
'pos_reference': ui_order['name'],
'sequence_number': ui_order['sequence_number'],
'partner_id': ui_order['partner_id'] or False,
'date_order': ui_order['creation_date'].replace('T', ' ')[:19],
'fiscal_position_id': ui_order['fiscal_position_id'],
'pricelist_id': ui_order['pricelist_id'],
'amount_paid': ui_order['amount_paid'],
'amount_total': ui_order['amount_total'],
'amount_tax': ui_order['amount_tax'],
'amount_return': ui_order['amount_return'],
'company_id': self.env['pos.session'].browse(ui_order['pos_session_id']).company_id.id,
'to_invoice': ui_order['to_invoice'] if "to_invoice" in ui_order else False,
'is_tipped': ui_order.get('is_tipped', False),
'tip_amount': ui_order.get('tip_amount', 0),
}
@api.model
def _payment_fields(self, order, ui_paymentline):
return {
'amount': ui_paymentline['amount'] or 0.0,
'payment_date': ui_paymentline['name'],
'payment_method_id': ui_paymentline['payment_method_id'],
'card_type': ui_paymentline.get('card_type'),
'cardholder_name': ui_paymentline.get('cardholder_name'),
'transaction_id': ui_paymentline.get('transaction_id'),
'payment_status': ui_paymentline.get('payment_status'),
'ticket': ui_paymentline.get('ticket'),
'pos_order_id': order.id,
}
# This deals with orders that belong to a closed session. In order
# to recover from this situation we create a new rescue session,
# making it obvious that something went wrong.
# A new, separate, rescue session is preferred for every such recovery,
# to avoid adding unrelated orders to live sessions.
def _get_valid_session(self, order):
PosSession = self.env['pos.session']
closed_session = PosSession.browse(order['pos_session_id'])
_logger.warning('session %s (ID: %s) was closed but received order %s (total: %s) belonging to it',
closed_session.name,
closed_session.id,
order['name'],
order['amount_total'])
rescue_session = PosSession.search([
('state', 'not in', ('closed', 'closing_control')),
('rescue', '=', True),
('config_id', '=', closed_session.config_id.id),
], limit=1)
if rescue_session:
_logger.warning('reusing recovery session %s for saving order %s', rescue_session.name, order['name'])
return rescue_session
_logger.warning('attempting to create recovery session for saving order %s', order['name'])
new_session = PosSession.create({
'config_id': closed_session.config_id.id,
'name': _('(RESCUE FOR %(session)s)') % {'session': closed_session.name},
'rescue': True, # avoid conflict with live sessions
})
# bypass opening_control (necessary when using cash control)
new_session.action_pos_session_open()
return new_session
@api.model
def _process_order(self, order, draft, existing_order):
"""Create or update an pos.order from a given dictionary.
:param dict order: dictionary representing the order.
:param bool draft: Indicate that the pos_order is not validated yet.
:param existing_order: order to be updated or False.
:type existing_order: pos.order.
:returns: id of created/updated pos.order
:rtype: int
"""
order = order['data']
pos_session = self.env['pos.session'].browse(order['pos_session_id'])
if pos_session.state == 'closing_control' or pos_session.state == 'closed':
order['pos_session_id'] = self._get_valid_session(order).id
pos_order = False
if not existing_order:
pos_order = self.create(self._order_fields(order))
else:
pos_order = existing_order
pos_order.lines.unlink()
order['user_id'] = pos_order.user_id.id
pos_order.write(self._order_fields(order))
pos_order = pos_order.with_company(pos_order.company_id)
self = self.with_company(pos_order.company_id)
self._process_payment_lines(order, pos_order, pos_session, draft)
if not draft:
try:
pos_order.action_pos_order_paid()
except psycopg2.DatabaseError:
# do not hide transactional errors, the order(s) won't be saved!
raise
except Exception as e:
_logger.error('Could not fully process the POS Order: %s', tools.ustr(e))
pos_order._create_order_picking()
if pos_order.to_invoice and pos_order.state == 'paid':
pos_order.action_pos_order_invoice()
return pos_order.id
def _process_payment_lines(self, pos_order, order, pos_session, draft):
"""Create account.bank.statement.lines from the dictionary given to the parent function.
If the payment_line is an updated version of an existing one, the existing payment_line will first be
removed before making a new one.
:param pos_order: dictionary representing the order.
:type pos_order: dict.
:param order: Order object the payment lines should belong to.
:type order: pos.order
:param pos_session: PoS session the order was created in.
:type pos_session: pos.session
:param draft: Indicate that the pos_order is not validated yet.
:type draft: bool.
"""
prec_acc = order.pricelist_id.currency_id.decimal_places
order_bank_statement_lines= self.env['pos.payment'].search([('pos_order_id', '=', order.id)])
order_bank_statement_lines.unlink()
for payments in pos_order['statement_ids']:
order.add_payment(self._payment_fields(order, payments[2]))
order.amount_paid = sum(order.payment_ids.mapped('amount'))
if not draft and not float_is_zero(pos_order['amount_return'], prec_acc):
cash_payment_method = pos_session.payment_method_ids.filtered('is_cash_count')[:1]
if not cash_payment_method:
raise UserError(_("No cash statement found for this session. Unable to record returned cash."))
return_payment_vals = {
'name': _('return'),
'pos_order_id': order.id,
'amount': -pos_order['amount_return'],
'payment_date': fields.Datetime.now(),
'payment_method_id': cash_payment_method.id,
'is_change': True,
}
order.add_payment(return_payment_vals)
def _prepare_invoice_line(self, order_line):
return {
'product_id': order_line.product_id.id,
'quantity': order_line.qty if self.amount_total >= 0 else -order_line.qty,
'discount': order_line.discount,
'price_unit': order_line.price_unit,
'name': order_line.product_id.display_name,
'tax_ids': [(6, 0, order_line.tax_ids_after_fiscal_position.ids)],
'product_uom_id': order_line.product_uom_id.id,
}
def _get_pos_anglo_saxon_price_unit(self, product, partner_id, quantity):
moves = self.filtered(lambda o: o.partner_id.id == partner_id)\
.mapped('picking_ids.move_lines')\
._filter_anglo_saxon_moves(product)\
.sorted(lambda x: x.date)
price_unit = product._compute_average_price(0, quantity, moves)
return price_unit
name = fields.Char(string='Order Ref', required=True, readonly=True, copy=False, default='/')
date_order = fields.Datetime(string='Date', readonly=True, index=True, default=fields.Datetime.now)
user_id = fields.Many2one(
comodel_name='res.users', string='Responsible',
help="Person who uses the cash register. It can be a reliever, a student or an interim employee.",
default=lambda self: self.env.uid,
states={'done': [('readonly', True)], 'invoiced': [('readonly', True)]},
)
amount_tax = fields.Float(string='Taxes', digits=0, readonly=True, required=True)
amount_total = fields.Float(string='Total', digits=0, readonly=True, required=True)
amount_paid = fields.Float(string='Paid', states={'draft': [('readonly', False)]},
readonly=True, digits=0, required=True)
amount_return = fields.Float(string='Returned', digits=0, required=True, readonly=True)
lines = fields.One2many('pos.order.line', 'order_id', string='Order Lines', states={'draft': [('readonly', False)]}, readonly=True, copy=True)
company_id = fields.Many2one('res.company', string='Company', required=True, readonly=True)
pricelist_id = fields.Many2one('product.pricelist', string='Pricelist', required=True, states={
'draft': [('readonly', False)]}, readonly=True)
partner_id = fields.Many2one('res.partner', string='Customer', change_default=True, index=True, states={'draft': [('readonly', False)], 'paid': [('readonly', False)]})
sequence_number = fields.Integer(string='Sequence Number', help='A session-unique sequence number for the order', default=1)
session_id = fields.Many2one(
'pos.session', string='Session', required=True, index=True,
domain="[('state', '=', 'opened')]", states={'draft': [('readonly', False)]},
readonly=True)
config_id = fields.Many2one('pos.config', related='session_id.config_id', string="Point of Sale", readonly=False)
currency_id = fields.Many2one('res.currency', related='config_id.currency_id', string="Currency")
currency_rate = fields.Float("Currency Rate", compute='_compute_currency_rate', compute_sudo=True, store=True, digits=0, readonly=True,
            help='Conversion rate between the order currency and the company currency, applicable at the date of the order')
invoice_group = fields.Boolean(related="config_id.module_account", readonly=False)
state = fields.Selection(
[('draft', 'New'), ('cancel', 'Cancelled'), ('paid', 'Paid'), ('done', 'Posted'), ('invoiced', 'Invoiced')],
'Status', readonly=True, copy=False, default='draft')
account_move = fields.Many2one('account.move', string='Invoice', readonly=True, copy=False)
picking_ids = fields.One2many('stock.picking', 'pos_order_id')
picking_count = fields.Integer(compute='_compute_picking_count')
failed_pickings = fields.Boolean(compute='_compute_picking_count')
picking_type_id = fields.Many2one('stock.picking.type', related='session_id.config_id.picking_type_id', string="Operation Type", readonly=False)
note = fields.Text(string='Internal Notes')
nb_print = fields.Integer(string='Number of Print', readonly=True, copy=False, default=0)
pos_reference = fields.Char(string='Receipt Number', readonly=True, copy=False)
sale_journal = fields.Many2one('account.journal', related='session_id.config_id.journal_id', string='Sales Journal', store=True, readonly=True, ondelete='restrict')
fiscal_position_id = fields.Many2one(
comodel_name='account.fiscal.position', string='Fiscal Position',
readonly=True,
states={'draft': [('readonly', False)]},
)
payment_ids = fields.One2many('pos.payment', 'pos_order_id', string='Payments', readonly=True)
session_move_id = fields.Many2one('account.move', string='Session Journal Entry', related='session_id.move_id', readonly=True, copy=False)
to_invoice = fields.Boolean('To invoice')
is_invoiced = fields.Boolean('Is Invoiced', compute='_compute_is_invoiced')
is_tipped = fields.Boolean('Is this already tipped?', readonly=True)
tip_amount = fields.Float(string='Tip Amount', digits=0, readonly=True)
@api.depends('account_move')
def _compute_is_invoiced(self):
for order in self:
order.is_invoiced = bool(order.account_move)
@api.depends('picking_ids', 'picking_ids.state')
def _compute_picking_count(self):
for order in self:
order.picking_count = len(order.picking_ids)
order.failed_pickings = bool(order.picking_ids.filtered(lambda p: p.state != 'done'))
@api.depends('date_order', 'company_id', 'currency_id', 'company_id.currency_id')
def _compute_currency_rate(self):
for order in self:
order.currency_rate = self.env['res.currency']._get_conversion_rate(order.company_id.currency_id, order.currency_id, order.company_id, order.date_order)
@api.onchange('payment_ids', 'lines')
def _onchange_amount_all(self):
for order in self:
currency = order.pricelist_id.currency_id
order.amount_paid = sum(payment.amount for payment in order.payment_ids)
order.amount_return = sum(payment.amount < 0 and payment.amount or 0 for payment in order.payment_ids)
order.amount_tax = currency.round(sum(self._amount_line_tax(line, order.fiscal_position_id) for line in order.lines))
amount_untaxed = currency.round(sum(line.price_subtotal for line in order.lines))
order.amount_total = order.amount_tax + amount_untaxed
def _compute_batch_amount_all(self):
"""
        Does essentially the same thing as `_onchange_amount_all`, but only for actually existing records.
        It is intended as a helper method, not as a business one.
        It is practical to use for migrations.
"""
amounts = {order_id: {'paid': 0, 'return': 0, 'taxed': 0, 'taxes': 0} for order_id in self.ids}
for order in self.env['pos.payment'].read_group([('pos_order_id', 'in', self.ids)], ['pos_order_id', 'amount'], ['pos_order_id']):
amounts[order['pos_order_id'][0]]['paid'] = order['amount']
for order in self.env['pos.payment'].read_group(['&', ('pos_order_id', 'in', self.ids), ('amount', '<', 0)], ['pos_order_id', 'amount'], ['pos_order_id']):
amounts[order['pos_order_id'][0]]['return'] = order['amount']
for order in self.env['pos.order.line'].read_group([('order_id', 'in', self.ids)], ['order_id', 'price_subtotal', 'price_subtotal_incl'], ['order_id']):
amounts[order['order_id'][0]]['taxed'] = order['price_subtotal_incl']
amounts[order['order_id'][0]]['taxes'] = order['price_subtotal_incl'] - order['price_subtotal']
for order in self:
currency = order.pricelist_id.currency_id
order.write({
'amount_paid': amounts[order.id]['paid'],
'amount_return': amounts[order.id]['return'],
'amount_tax': currency.round(amounts[order.id]['taxes']),
'amount_total': currency.round(amounts[order.id]['taxed'])
})
@api.onchange('partner_id')
def _onchange_partner_id(self):
if self.partner_id:
self.pricelist_id = self.partner_id.property_product_pricelist.id
def unlink(self):
for pos_order in self.filtered(lambda pos_order: pos_order.state not in ['draft', 'cancel']):
raise UserError(_('In order to delete a sale, it must be new or cancelled.'))
return super(PosOrder, self).unlink()
@api.model
def create(self, values):
session = self.env['pos.session'].browse(values['session_id'])
values = self._complete_values_from_session(session, values)
return super(PosOrder, self).create(values)
@api.model
def _complete_values_from_session(self, session, values):
if values.get('state') and values['state'] == 'paid':
values['name'] = session.config_id.sequence_id._next()
values.setdefault('pricelist_id', session.config_id.pricelist_id.id)
values.setdefault('fiscal_position_id', session.config_id.default_fiscal_position_id.id)
values.setdefault('company_id', session.config_id.company_id.id)
return values
def write(self, vals):
for order in self:
if vals.get('state') and vals['state'] == 'paid' and order.name == '/':
vals['name'] = order.config_id.sequence_id._next()
return super(PosOrder, self).write(vals)
def action_stock_picking(self):
self.ensure_one()
action = self.env['ir.actions.act_window']._for_xml_id('stock.action_picking_tree_ready')
action['context'] = {}
action['domain'] = [('id', 'in', self.picking_ids.ids)]
return action
def action_view_invoice(self):
return {
'name': _('Customer Invoice'),
'view_mode': 'form',
'view_id': self.env.ref('account.view_move_form').id,
'res_model': 'account.move',
'context': "{'move_type':'out_invoice'}",
'type': 'ir.actions.act_window',
'res_id': self.account_move.id,
}
def _is_pos_order_paid(self):
return float_is_zero(self._get_rounded_amount(self.amount_total) - self.amount_paid, precision_rounding=self.currency_id.rounding)
def _get_rounded_amount(self, amount):
if self.config_id.cash_rounding:
amount = float_round(amount, precision_rounding=self.config_id.rounding_method.rounding, rounding_method=self.config_id.rounding_method.rounding_method)
currency = self.currency_id
return currency.round(amount) if currency else amount
def _create_invoice(self, move_vals):
self.ensure_one()
new_move = self.env['account.move'].sudo().with_company(self.company_id).with_context(default_move_type=move_vals['move_type']).create(move_vals)
message = _("This invoice has been created from the point of sale session: <a href=# data-oe-model=pos.order data-oe-id=%d>%s</a>") % (self.id, self.name)
new_move.message_post(body=message)
if self.config_id.cash_rounding:
rounding_applied = float_round(self.amount_paid - self.amount_total,
precision_rounding=new_move.currency_id.rounding)
rounding_line = new_move.line_ids.filtered(lambda line: line.is_rounding_line)
if rounding_line and rounding_line.debit > 0:
rounding_line_difference = rounding_line.debit + rounding_applied
elif rounding_line and rounding_line.credit > 0:
rounding_line_difference = -rounding_line.credit + rounding_applied
else:
rounding_line_difference = rounding_applied
if rounding_applied:
if rounding_applied > 0.0:
account_id = new_move.invoice_cash_rounding_id.loss_account_id.id
else:
account_id = new_move.invoice_cash_rounding_id.profit_account_id.id
if rounding_line:
if rounding_line_difference:
rounding_line.with_context(check_move_validity=False).write({
'debit': rounding_applied < 0.0 and -rounding_applied or 0.0,
'credit': rounding_applied > 0.0 and rounding_applied or 0.0,
'account_id': account_id,
'price_unit': rounding_applied,
})
else:
self.env['account.move.line'].with_context(check_move_validity=False).create({
'debit': rounding_applied < 0.0 and -rounding_applied or 0.0,
'credit': rounding_applied > 0.0 and rounding_applied or 0.0,
'quantity': 1.0,
'amount_currency': rounding_applied,
'partner_id': new_move.partner_id.id,
'move_id': new_move.id,
'currency_id': new_move.currency_id if new_move.currency_id != new_move.company_id.currency_id else False,
'company_id': new_move.company_id.id,
'company_currency_id': new_move.company_id.currency_id.id,
'is_rounding_line': True,
'sequence': 9999,
'name': new_move.invoice_cash_rounding_id.name,
'account_id': account_id,
})
else:
if rounding_line:
rounding_line.with_context(check_move_validity=False).unlink()
if rounding_line_difference:
existing_terms_line = new_move.line_ids.filtered(
lambda line: line.account_id.user_type_id.type in ('receivable', 'payable'))
if existing_terms_line.debit > 0:
existing_terms_line_new_val = float_round(
existing_terms_line.debit + rounding_line_difference,
precision_rounding=new_move.currency_id.rounding)
else:
existing_terms_line_new_val = float_round(
-existing_terms_line.credit + rounding_line_difference,
precision_rounding=new_move.currency_id.rounding)
existing_terms_line.write({
'debit': existing_terms_line_new_val > 0.0 and existing_terms_line_new_val or 0.0,
'credit': existing_terms_line_new_val < 0.0 and -existing_terms_line_new_val or 0.0,
})
new_move._recompute_payment_terms_lines()
return new_move
def action_pos_order_paid(self):
self.ensure_one()
# TODO: add support for mix of cash and non-cash payments when both cash_rounding and only_round_cash_method are True
if not self.config_id.cash_rounding \
or self.config_id.only_round_cash_method \
and not any(p.payment_method_id.is_cash_count for p in self.payment_ids):
total = self.amount_total
else:
total = float_round(self.amount_total, precision_rounding=self.config_id.rounding_method.rounding, rounding_method=self.config_id.rounding_method.rounding_method)
isPaid = float_is_zero(total - self.amount_paid, precision_rounding=self.currency_id.rounding)
if not isPaid and not self.config_id.cash_rounding:
raise UserError(_("Order %s is not fully paid.", self.name))
elif not isPaid and self.config_id.cash_rounding:
currency = self.currency_id
if self.config_id.rounding_method.rounding_method == "HALF-UP":
maxDiff = currency.round(self.config_id.rounding_method.rounding / 2)
else:
maxDiff = currency.round(self.config_id.rounding_method.rounding)
diff = currency.round(self.amount_total - self.amount_paid)
if not abs(diff) < maxDiff:
raise UserError(_("Order %s is not fully paid.", self.name))
self.write({'state': 'paid'})
return True
def _prepare_invoice_vals(self):
self.ensure_one()
timezone = pytz.timezone(self._context.get('tz') or self.env.user.tz or 'UTC')
vals = {
'payment_reference': self.name,
'invoice_origin': self.name,
'journal_id': self.session_id.config_id.invoice_journal_id.id,
'move_type': 'out_invoice' if self.amount_total >= 0 else 'out_refund',
'ref': self.name,
'partner_id': self.partner_id.id,
'narration': self.note or '',
# considering partner's sale pricelist's currency
'currency_id': self.pricelist_id.currency_id.id,
'invoice_user_id': self.user_id.id,
'invoice_date': self.date_order.astimezone(timezone).date(),
'fiscal_position_id': self.fiscal_position_id.id,
'invoice_line_ids': [(0, None, self._prepare_invoice_line(line)) for line in self.lines],
'invoice_cash_rounding_id': self.config_id.rounding_method.id
if self.config_id.cash_rounding and (not self.config_id.only_round_cash_method or any(p.payment_method_id.is_cash_count for p in self.payment_ids))
else False
}
return vals
def action_pos_order_invoice(self):
moves = self.env['account.move']
for order in self:
# Force company for all SUPERUSER_ID action
if order.account_move:
moves += order.account_move
continue
if not order.partner_id:
raise UserError(_('Please provide a partner for the sale.'))
move_vals = order._prepare_invoice_vals()
new_move = order._create_invoice(move_vals)
order.write({'account_move': new_move.id, 'state': 'invoiced'})
new_move.sudo().with_company(order.company_id)._post()
moves += new_move
if not moves:
return {}
return {
'name': _('Customer Invoice'),
'view_mode': 'form',
'view_id': self.env.ref('account.view_move_form').id,
'res_model': 'account.move',
'context': "{'move_type':'out_invoice'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': moves and moves.ids[0] or False,
}
# this method is unused, and so is the state 'cancel'
def action_pos_order_cancel(self):
return self.write({'state': 'cancel'})
@api.model
def create_from_ui(self, orders, draft=False):
""" Create and update Orders from the frontend PoS application.
Create new orders and update orders that are in draft status. If an order already exists with a status
        different from 'draft', it will be discarded; otherwise it will be saved to the database. If saved with
'draft' status the order can be overwritten later by this function.
:param orders: dictionary with the orders to be created.
:type orders: dict.
        :param draft: Indicate if the orders are meant to be finalised or temporarily saved.
:type draft: bool.
:Returns: list -- list of db-ids for the created and updated orders.
"""
order_ids = []
for order in orders:
existing_order = False
if 'server_id' in order['data']:
existing_order = self.env['pos.order'].search(['|', ('id', '=', order['data']['server_id']), ('pos_reference', '=', order['data']['name'])], limit=1)
if (existing_order and existing_order.state == 'draft') or not existing_order:
order_ids.append(self._process_order(order, draft, existing_order))
return self.env['pos.order'].search_read(domain = [('id', 'in', order_ids)], fields = ['id', 'pos_reference'])
def _create_order_picking(self):
self.ensure_one()
if not self.session_id.update_stock_at_closing or (self.company_id.anglo_saxon_accounting and self.to_invoice):
picking_type = self.config_id.picking_type_id
if self.partner_id.property_stock_customer:
destination_id = self.partner_id.property_stock_customer.id
elif not picking_type or not picking_type.default_location_dest_id:
destination_id = self.env['stock.warehouse']._get_partner_locations()[0].id
else:
destination_id = picking_type.default_location_dest_id.id
pickings = self.env['stock.picking']._create_picking_from_pos_order_lines(destination_id, self.lines, picking_type, self.partner_id)
pickings.write({'pos_session_id': self.session_id.id, 'pos_order_id': self.id, 'origin': self.name})
def add_payment(self, data):
"""Create a new payment for the order"""
self.ensure_one()
self.env['pos.payment'].create(data)
self.amount_paid = sum(self.payment_ids.mapped('amount'))
def _prepare_refund_values(self, current_session):
self.ensure_one()
return {
'name': self.name + _(' REFUND'),
'session_id': current_session.id,
'date_order': fields.Datetime.now(),
'pos_reference': self.pos_reference,
'lines': False,
'amount_tax': -self.amount_tax,
'amount_total': -self.amount_total,
'amount_paid': 0,
}
def refund(self):
"""Create a copy of order for refund order"""
refund_orders = self.env['pos.order']
for order in self:
# When a refund is performed, we are creating it in a session having the same config as the original
# order. It can be the same session, or if it has been closed the new one that has been opened.
current_session = order.session_id.config_id.current_session_id
if not current_session:
raise UserError(_('To return product(s), you need to open a session in the POS %s', order.session_id.config_id.display_name))
refund_order = order.copy(
order._prepare_refund_values(current_session)
)
for line in order.lines:
PosOrderLineLot = self.env['pos.pack.operation.lot']
for pack_lot in line.pack_lot_ids:
PosOrderLineLot += pack_lot.copy()
line.copy(line._prepare_refund_data(refund_order, PosOrderLineLot))
refund_orders |= refund_order
return {
'name': _('Return Products'),
'view_mode': 'form',
'res_model': 'pos.order',
'res_id': refund_orders.ids[0],
'view_id': False,
'context': self.env.context,
'type': 'ir.actions.act_window',
'target': 'current',
}
def action_receipt_to_customer(self, name, client, ticket):
if not self:
return False
if not client.get('email'):
return False
message = _("<p>Dear %s,<br/>Here is your electronic ticket for the %s. </p>") % (client['name'], name)
filename = 'Receipt-' + name + '.jpg'
receipt = self.env['ir.attachment'].create({
'name': filename,
'type': 'binary',
'datas': ticket,
'res_model': 'pos.order',
'res_id': self.ids[0],
'store_fname': filename,
'mimetype': 'image/jpeg',
})
mail_values = {
'subject': _('Receipt %s', name),
'body_html': message,
'author_id': self.env.user.partner_id.id,
'email_from': self.env.company.email or self.env.user.email_formatted,
'email_to': client['email'],
'attachment_ids': [(4, receipt.id)],
}
if self.mapped('account_move'):
report = self.env.ref('point_of_sale.pos_invoice_report')._render_qweb_pdf(self.ids[0])
filename = name + '.pdf'
attachment = self.env['ir.attachment'].create({
'name': filename,
'type': 'binary',
'datas': base64.b64encode(report[0]),
'store_fname': filename,
'res_model': 'pos.order',
'res_id': self.ids[0],
'mimetype': 'application/x-pdf'
})
mail_values['attachment_ids'] += [(4, attachment.id)]
mail = self.env['mail.mail'].sudo().create(mail_values)
mail.send()
@api.model
def remove_from_ui(self, server_ids):
""" Remove orders from the frontend PoS application
Remove orders from the server by id.
:param server_ids: list of the id's of orders to remove from the server.
:type server_ids: list.
:returns: list -- list of db-ids for the removed orders.
"""
orders = self.search([('id', 'in', server_ids),('state', '=', 'draft')])
orders.write({'state': 'cancel'})
# TODO Looks like delete cascade is a better solution.
orders.mapped('payment_ids').sudo().unlink()
orders.sudo().unlink()
return orders.ids
@api.model
def search_paid_order_ids(self, config_id, domain, limit, offset):
"""Search for 'paid' orders that satisfy the given domain, limit and offset."""
        default_domain = ['&', ('config_id', '=', config_id), '!', '|', ('state', '=', 'draft'), ('state', '=', 'cancel')]  # 'cancel' is the selection value defined on pos.order
real_domain = AND([domain, default_domain])
ids = self.search(AND([domain, default_domain]), limit=limit, offset=offset).ids
totalCount = self.search_count(real_domain)
return {'ids': ids, 'totalCount': totalCount}
def _export_for_ui(self, order):
timezone = pytz.timezone(self._context.get('tz') or self.env.user.tz or 'UTC')
return {
'lines': [[0, 0, line] for line in order.lines.export_for_ui()],
'statement_ids': [[0, 0, payment] for payment in order.payment_ids.export_for_ui()],
'name': order.pos_reference,
'uid': re.search('([0-9]|-){14}', order.pos_reference).group(0),
'amount_paid': order.amount_paid,
'amount_total': order.amount_total,
'amount_tax': order.amount_tax,
'amount_return': order.amount_return,
'pos_session_id': order.session_id.id,
'is_session_closed': order.session_id.state == 'closed',
'pricelist_id': order.pricelist_id.id,
'partner_id': order.partner_id.id,
'user_id': order.user_id.id,
'sequence_number': order.sequence_number,
'creation_date': order.date_order.astimezone(timezone),
'fiscal_position_id': order.fiscal_position_id.id,
'to_invoice': order.to_invoice,
'state': order.state,
'account_move': order.account_move.id,
'id': order.id,
'is_tipped': order.is_tipped,
'tip_amount': order.tip_amount,
}
def export_for_ui(self):
""" Returns a list of dict with each item having similar signature as the return of
`export_as_JSON` of models.Order. This is useful for back-and-forth communication
between the pos frontend and backend.
"""
return self.mapped(self._export_for_ui) if self else []
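# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Minimal shape of the payload that create_from_ui above expects from the PoS
# frontend. All ids, amounts and the receipt reference are placeholders; a real
# payload carries more keys (order lines, statement_ids with payment data, etc.).
def _demo_create_from_ui(env, session_id, pricelist_id):
    ui_order = {
        'id': '00001-001-0001',
        'data': {
            'name': 'Order 00001-001-0001',
            'pos_session_id': session_id,
            'sequence_number': 1,
            'user_id': env.uid,
            'partner_id': False,
            'creation_date': '2021-01-01T10:00:00Z',
            'fiscal_position_id': False,
            'pricelist_id': pricelist_id,
            'lines': [],
            'statement_ids': [],
            'amount_paid': 0.0,
            'amount_total': 0.0,
            'amount_tax': 0.0,
            'amount_return': 0.0,
            'to_invoice': False,
        },
    }
    # draft=True keeps the order editable and skips the "fully paid" checks.
    return env['pos.order'].create_from_ui([ui_order], draft=True)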
class PosOrderLine(models.Model):
_name = "pos.order.line"
_description = "Point of Sale Order Lines"
_rec_name = "product_id"
def _order_line_fields(self, line, session_id=None):
if line and 'name' not in line[2]:
session = self.env['pos.session'].browse(session_id).exists() if session_id else None
if session and session.config_id.sequence_line_id:
# set name based on the sequence specified on the config
line[2]['name'] = session.config_id.sequence_line_id._next()
else:
# fallback on any pos.order.line sequence
line[2]['name'] = self.env['ir.sequence'].next_by_code('pos.order.line')
if line and 'tax_ids' not in line[2]:
product = self.env['product.product'].browse(line[2]['product_id'])
line[2]['tax_ids'] = [(6, 0, [x.id for x in product.taxes_id])]
# Clean up fields sent by the JS
line = [
line[0], line[1], {k: v for k, v in line[2].items() if k in self.env['pos.order.line']._fields}
]
return line
company_id = fields.Many2one('res.company', string='Company', related="order_id.company_id", store=True)
name = fields.Char(string='Line No', required=True, copy=False)
notice = fields.Char(string='Discount Notice')
product_id = fields.Many2one('product.product', string='Product', domain=[('sale_ok', '=', True)], required=True, change_default=True)
price_unit = fields.Float(string='Unit Price', digits=0)
qty = fields.Float('Quantity', digits='Product Unit of Measure', default=1)
price_subtotal = fields.Float(string='Subtotal w/o Tax', digits=0,
readonly=True, required=True)
price_subtotal_incl = fields.Float(string='Subtotal', digits=0,
readonly=True, required=True)
discount = fields.Float(string='Discount (%)', digits=0, default=0.0)
order_id = fields.Many2one('pos.order', string='Order Ref', ondelete='cascade', required=True)
tax_ids = fields.Many2many('account.tax', string='Taxes', readonly=True)
tax_ids_after_fiscal_position = fields.Many2many('account.tax', compute='_get_tax_ids_after_fiscal_position', string='Taxes to Apply')
pack_lot_ids = fields.One2many('pos.pack.operation.lot', 'pos_order_line_id', string='Lot/serial Number')
product_uom_id = fields.Many2one('uom.uom', string='Product UoM', related='product_id.uom_id')
currency_id = fields.Many2one('res.currency', related='order_id.currency_id')
full_product_name = fields.Char('Full Product Name')
def _prepare_refund_data(self, refund_order, PosOrderLineLot):
"""
        This prepares data for the refund order line. Inheriting modules may inject more data here.
@param refund_order: the pre-created refund order
@type refund_order: pos.order
@param PosOrderLineLot: the pre-created Pack operation Lot
@type PosOrderLineLot: pos.pack.operation.lot
@return: dictionary of data which is for creating a refund order line from the original line
@rtype: dict
"""
self.ensure_one()
return {
'name': self.name + _(' REFUND'),
'qty': -self.qty,
'order_id': refund_order.id,
'price_subtotal': -self.price_subtotal,
'price_subtotal_incl': -self.price_subtotal_incl,
'pack_lot_ids': PosOrderLineLot,
}
@api.model
def create(self, values):
if values.get('order_id') and not values.get('name'):
# set name based on the sequence specified on the config
config = self.env['pos.order'].browse(values['order_id']).session_id.config_id
if config.sequence_line_id:
values['name'] = config.sequence_line_id._next()
if not values.get('name'):
# fallback on any pos.order sequence
values['name'] = self.env['ir.sequence'].next_by_code('pos.order.line')
return super(PosOrderLine, self).create(values)
def write(self, values):
        if values.get('pack_lot_ids'):  # guard on the same key that is iterated below (the frontend sends 'pack_lot_ids')
for pl in values.get('pack_lot_ids'):
if pl[2].get('server_id'):
pl[2]['id'] = pl[2]['server_id']
del pl[2]['server_id']
return super().write(values)
@api.onchange('price_unit', 'tax_ids', 'qty', 'discount', 'product_id')
def _onchange_amount_line_all(self):
for line in self:
res = line._compute_amount_line_all()
line.update(res)
def _compute_amount_line_all(self):
self.ensure_one()
fpos = self.order_id.fiscal_position_id
tax_ids_after_fiscal_position = fpos.map_tax(self.tax_ids, self.product_id, self.order_id.partner_id)
price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
taxes = tax_ids_after_fiscal_position.compute_all(price, self.order_id.pricelist_id.currency_id, self.qty, product=self.product_id, partner=self.order_id.partner_id)
return {
'price_subtotal_incl': taxes['total_included'],
'price_subtotal': taxes['total_excluded'],
}
@api.onchange('product_id')
def _onchange_product_id(self):
if self.product_id:
if not self.order_id.pricelist_id:
raise UserError(
_('You have to select a pricelist in the sale form !\n'
'Please set one before choosing a product.'))
price = self.order_id.pricelist_id.get_product_price(
self.product_id, self.qty or 1.0, self.order_id.partner_id)
self._onchange_qty()
self.tax_ids = self.product_id.taxes_id.filtered(lambda r: not self.company_id or r.company_id == self.company_id)
tax_ids_after_fiscal_position = self.order_id.fiscal_position_id.map_tax(self.tax_ids, self.product_id, self.order_id.partner_id)
self.price_unit = self.env['account.tax']._fix_tax_included_price_company(price, self.product_id.taxes_id, tax_ids_after_fiscal_position, self.company_id)
@api.onchange('qty', 'discount', 'price_unit', 'tax_ids')
def _onchange_qty(self):
if self.product_id:
if not self.order_id.pricelist_id:
raise UserError(_('You have to select a pricelist in the sale form.'))
price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
self.price_subtotal = self.price_subtotal_incl = price * self.qty
if (self.product_id.taxes_id):
taxes = self.product_id.taxes_id.compute_all(price, self.order_id.pricelist_id.currency_id, self.qty, product=self.product_id, partner=False)
self.price_subtotal = taxes['total_excluded']
self.price_subtotal_incl = taxes['total_included']
@api.depends('order_id', 'order_id.fiscal_position_id')
def _get_tax_ids_after_fiscal_position(self):
for line in self:
line.tax_ids_after_fiscal_position = line.order_id.fiscal_position_id.map_tax(line.tax_ids, line.product_id, line.order_id.partner_id)
def _export_for_ui(self, orderline):
return {
'qty': orderline.qty,
'price_unit': orderline.price_unit,
'price_subtotal': orderline.price_subtotal,
'price_subtotal_incl': orderline.price_subtotal_incl,
'product_id': orderline.product_id.id,
'discount': orderline.discount,
'tax_ids': [[6, False, orderline.tax_ids.mapped(lambda tax: tax.id)]],
'id': orderline.id,
'pack_lot_ids': [[0, 0, lot] for lot in orderline.pack_lot_ids.export_for_ui()],
}
def export_for_ui(self):
return self.mapped(self._export_for_ui) if self else []
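# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Server-side refund of a single order. pos.order.refund() (defined further up)
# copies the order and then runs each of its lines through _prepare_refund_data
# above, negating quantities and subtotals. The order id is a placeholder and an
# open session must exist on the same pos.config.
def _demo_refund_order(env, order_id):
    order = env['pos.order'].browse(order_id)
    action = order.refund()
    # refund() returns an act_window dict pointing at the newly created refund order.
    return env['pos.order'].browse(action['res_id'])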
class PosOrderLineLot(models.Model):
_name = "pos.pack.operation.lot"
_description = "Specify product lot/serial number in pos order line"
_rec_name = "lot_name"
pos_order_line_id = fields.Many2one('pos.order.line')
order_id = fields.Many2one('pos.order', related="pos_order_line_id.order_id", readonly=False)
lot_name = fields.Char('Lot Name')
product_id = fields.Many2one('product.product', related='pos_order_line_id.product_id', readonly=False)
def _export_for_ui(self, lot):
return {
'lot_name': lot.lot_name,
}
def export_for_ui(self):
return self.mapped(self._export_for_ui) if self else []
class ReportSaleDetails(models.AbstractModel):
_name = 'report.point_of_sale.report_saledetails'
_description = 'Point of Sale Details'
@api.model
def get_sale_details(self, date_start=False, date_stop=False, config_ids=False, session_ids=False):
""" Serialise the orders of the requested time period, configs and sessions.
:param date_start: The dateTime to start, default today 00:00:00.
:type date_start: str.
:param date_stop: The dateTime to stop, default date_start + 23:59:59.
:type date_stop: str.
:param config_ids: Pos Config id's to include.
:type config_ids: list of numbers.
        :param session_ids: Pos Session id's to include.
:type session_ids: list of numbers.
:returns: dict -- Serialised sales.
"""
domain = [('state', 'in', ['paid','invoiced','done'])]
if (session_ids):
domain = AND([domain, [('session_id', 'in', session_ids)]])
else:
if date_start:
date_start = fields.Datetime.from_string(date_start)
else:
# start by default today 00:00:00
user_tz = pytz.timezone(self.env.context.get('tz') or self.env.user.tz or 'UTC')
today = user_tz.localize(fields.Datetime.from_string(fields.Date.context_today(self)))
date_start = today.astimezone(pytz.timezone('UTC'))
if date_stop:
date_stop = fields.Datetime.from_string(date_stop)
# avoid a date_stop smaller than date_start
if (date_stop < date_start):
date_stop = date_start + timedelta(days=1, seconds=-1)
else:
# stop by default today 23:59:59
date_stop = date_start + timedelta(days=1, seconds=-1)
domain = AND([domain,
[('date_order', '>=', fields.Datetime.to_string(date_start)),
('date_order', '<=', fields.Datetime.to_string(date_stop))]
])
if config_ids:
domain = AND([domain, [('config_id', 'in', config_ids)]])
orders = self.env['pos.order'].search(domain)
user_currency = self.env.company.currency_id
total = 0.0
products_sold = {}
taxes = {}
for order in orders:
if user_currency != order.pricelist_id.currency_id:
total += order.pricelist_id.currency_id._convert(
order.amount_total, user_currency, order.company_id, order.date_order or fields.Date.today())
else:
total += order.amount_total
currency = order.session_id.currency_id
for line in order.lines:
key = (line.product_id, line.price_unit, line.discount)
products_sold.setdefault(key, 0.0)
products_sold[key] += line.qty
if line.tax_ids_after_fiscal_position:
line_taxes = line.tax_ids_after_fiscal_position.sudo().compute_all(line.price_unit * (1-(line.discount or 0.0)/100.0), currency, line.qty, product=line.product_id, partner=line.order_id.partner_id or False)
for tax in line_taxes['taxes']:
taxes.setdefault(tax['id'], {'name': tax['name'], 'tax_amount':0.0, 'base_amount':0.0})
taxes[tax['id']]['tax_amount'] += tax['amount']
taxes[tax['id']]['base_amount'] += tax['base']
else:
taxes.setdefault(0, {'name': _('No Taxes'), 'tax_amount':0.0, 'base_amount':0.0})
taxes[0]['base_amount'] += line.price_subtotal_incl
payment_ids = self.env["pos.payment"].search([('pos_order_id', 'in', orders.ids)]).ids
if payment_ids:
self.env.cr.execute("""
SELECT method.name, sum(amount) total
FROM pos_payment AS payment,
pos_payment_method AS method
WHERE payment.payment_method_id = method.id
AND payment.id IN %s
GROUP BY method.name
""", (tuple(payment_ids),))
payments = self.env.cr.dictfetchall()
else:
payments = []
return {
'currency_precision': user_currency.decimal_places,
'total_paid': user_currency.round(total),
'payments': payments,
'company_name': self.env.company.name,
'taxes': list(taxes.values()),
'products': sorted([{
'product_id': product.id,
'product_name': product.name,
'code': product.default_code,
'quantity': qty,
'price_unit': price_unit,
'discount': discount,
'uom': product.uom_id.name
} for (product, price_unit, discount), qty in products_sold.items()], key=lambda l: l['product_name'])
}
@api.model
def _get_report_values(self, docids, data=None):
data = dict(data or {})
configs = self.env['pos.config'].browse(data['config_ids'])
data.update(self.get_sale_details(data['date_start'], data['date_stop'], configs.ids))
return data
class AccountCashRounding(models.Model):
_inherit = 'account.cash.rounding'
@api.constrains('rounding', 'rounding_method', 'strategy')
def _check_session_state(self):
open_session = self.env['pos.session'].search([('config_id.rounding_method', '=', self.id), ('state', '!=', 'closed')])
if open_session:
raise ValidationError(
_("You are not allowed to change the cash rounding configuration while a pos session using it is already opened.")) | en | 0.820083 | # -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. # This deals with orders that belong to a closed session. In order # to recover from this situation we create a new rescue session, # making it obvious that something went wrong. # A new, separate, rescue session is preferred for every such recovery, # to avoid adding unrelated orders to live sessions. # avoid conflict with live sessions # bypass opening_control (necessary when using cash control) Create or update an pos.order from a given dictionary. :param dict order: dictionary representing the order. :param bool draft: Indicate that the pos_order is not validated yet. :param existing_order: order to be updated or False. :type existing_order: pos.order. :returns: id of created/updated pos.order :rtype: int # do not hide transactional errors, the order(s) won't be saved! Create account.bank.statement.lines from the dictionary given to the parent function. If the payment_line is an updated version of an existing one, the existing payment_line will first be removed before making a new one. :param pos_order: dictionary representing the order. :type pos_order: dict. :param order: Order object the payment lines should belong to. :type order: pos.order :param pos_session: PoS session the order was created in. :type pos_session: pos.session :param draft: Indicate that the pos_order is not validated yet. :type draft: bool. Does essentially the same thing as `_onchange_amount_all` but only for actually existing records It is intended as a helper method , not as a business one Practical to be used for migrations # data-oe-model=pos.order data-oe-id=%d>%s</a>") % (self.id, self.name) # TODO: add support for mix of cash and non-cash payments when both cash_rounding and only_round_cash_method are True # considering partner's sale pricelist's currency # Force company for all SUPERUSER_ID action # this method is unused, and so is the state 'cancel' Create and update Orders from the frontend PoS application. Create new orders and update orders that are in draft status. If an order already exists with a status diferent from 'draft'it will be discareded, otherwise it will be saved to the database. If saved with 'draft' status the order can be overwritten later by this function. :param orders: dictionary with the orders to be created. :type orders: dict. :param draft: Indicate if the orders are ment to be finalised or temporarily saved. :type draft: bool. :Returns: list -- list of db-ids for the created and updated orders. Create a new payment for the order Create a copy of order for refund order # When a refund is performed, we are creating it in a session having the same config as the original # order. It can be the same session, or if it has been closed the new one that has been opened. Remove orders from the frontend PoS application Remove orders from the server by id. :param server_ids: list of the id's of orders to remove from the server. :type server_ids: list. :returns: list -- list of db-ids for the removed orders. # TODO Looks like delete cascade is a better solution. Search for 'paid' orders that satisfy the given domain, limit and offset. Returns a list of dict with each item having similar signature as the return of `export_as_JSON` of models.Order. This is useful for back-and-forth communication between the pos frontend and backend. 
# set name based on the sequence specified on the config # fallback on any pos.order.line sequence # Clean up fields sent by the JS This prepares data for refund order line. Inheritance may inject more data here @param refund_order: the pre-created refund order @type refund_order: pos.order @param PosOrderLineLot: the pre-created Pack operation Lot @type PosOrderLineLot: pos.pack.operation.lot @return: dictionary of data which is for creating a refund order line from the original line @rtype: dict # set name based on the sequence specified on the config # fallback on any pos.order sequence Serialise the orders of the requested time period, configs and sessions. :param date_start: The dateTime to start, default today 00:00:00. :type date_start: str. :param date_stop: The dateTime to stop, default date_start + 23:59:59. :type date_stop: str. :param config_ids: Pos Config id's to include. :type config_ids: list of numbers. :param session_ids: Pos Config id's to include. :type session_ids: list of numbers. :returns: dict -- Serialised sales. # start by default today 00:00:00 # avoid a date_stop smaller than date_start # stop by default today 23:59:59 SELECT method.name, sum(amount) total FROM pos_payment AS payment, pos_payment_method AS method WHERE payment.payment_method_id = method.id AND payment.id IN %s GROUP BY method.name | 2.142655 | 2 |
rotkehlchen/tests/external_apis/test_coingecko.py | rotkehlchenio/rotkehlchen | 137 | 6631061 | <filename>rotkehlchen/tests/external_apis/test_coingecko.py
import pytest
from rotkehlchen.assets.asset import EthereumToken
from rotkehlchen.constants.assets import A_BTC, A_ETH, A_EUR, A_YFI
from rotkehlchen.errors.asset import UnsupportedAsset
from rotkehlchen.externalapis.coingecko import CoingeckoAssetData
from rotkehlchen.fval import FVal
from rotkehlchen.types import Price
def assert_coin_data_same(given, expected, compare_description=False):
if compare_description:
assert given == expected
# else
assert given.identifier == expected.identifier
assert given.symbol == expected.symbol
assert given.name == expected.name
assert given.image_url == expected.image_url
def test_asset_data(session_coingecko):
expected_data = CoingeckoAssetData(
identifier='bitcoin',
symbol='btc',
name='Bitcoin',
description='',
image_url='https://assets.coingecko.com/coins/images/1/small/bitcoin.png?1547033579',
)
data = session_coingecko.asset_data(A_BTC)
assert_coin_data_same(data, expected_data)
expected_data = CoingeckoAssetData(
identifier='yearn-finance',
symbol='yfi',
name='yearn.finance',
description='Management token for the yearn.finance ecosystem',
image_url='https://assets.coingecko.com/coins/images/11849/small/yfi-192x192.png?1598325330', # noqa: E501
)
data = session_coingecko.asset_data(A_YFI)
assert_coin_data_same(data, expected_data, compare_description=False)
with pytest.raises(UnsupportedAsset):
session_coingecko.asset_data(EthereumToken('<KEY>')) # PRL, a token without coingecko page # noqa: E501
def test_coingecko_historical_price(session_coingecko):
price = session_coingecko.query_historical_price(
from_asset=A_ETH,
to_asset=A_EUR,
timestamp=1483056100,
)
assert price == Price(FVal('7.7478028375650725'))
| <filename>rotkehlchen/tests/external_apis/test_coingecko.py
import pytest
from rotkehlchen.assets.asset import EthereumToken
from rotkehlchen.constants.assets import A_BTC, A_ETH, A_EUR, A_YFI
from rotkehlchen.errors.asset import UnsupportedAsset
from rotkehlchen.externalapis.coingecko import CoingeckoAssetData
from rotkehlchen.fval import FVal
from rotkehlchen.types import Price
def assert_coin_data_same(given, expected, compare_description=False):
if compare_description:
assert given == expected
# else
assert given.identifier == expected.identifier
assert given.symbol == expected.symbol
assert given.name == expected.name
assert given.image_url == expected.image_url
def test_asset_data(session_coingecko):
expected_data = CoingeckoAssetData(
identifier='bitcoin',
symbol='btc',
name='Bitcoin',
description='',
image_url='https://assets.coingecko.com/coins/images/1/small/bitcoin.png?1547033579',
)
data = session_coingecko.asset_data(A_BTC)
assert_coin_data_same(data, expected_data)
expected_data = CoingeckoAssetData(
identifier='yearn-finance',
symbol='yfi',
name='yearn.finance',
description='Management token for the yearn.finance ecosystem',
image_url='https://assets.coingecko.com/coins/images/11849/small/yfi-192x192.png?1598325330', # noqa: E501
)
data = session_coingecko.asset_data(A_YFI)
assert_coin_data_same(data, expected_data, compare_description=False)
with pytest.raises(UnsupportedAsset):
session_coingecko.asset_data(EthereumToken('<KEY>')) # PRL, a token without coingecko page # noqa: E501
def test_coingecko_historical_price(session_coingecko):
price = session_coingecko.query_historical_price(
from_asset=A_ETH,
to_asset=A_EUR,
timestamp=1483056100,
)
assert price == Price(FVal('7.7478028375650725'))
| en | 0.33574 | # else # noqa: E501 # PRL, a token without coingecko page # noqa: E501 | 2.226618 | 2 |
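The test module above depends on a `session_coingecko` fixture defined elsewhere in rotkehlchen's test suite, so it cannot run on its own. Purely as an illustration of the session-scoped-client pattern being exercised (not rotkehlchen's actual fixture or client), a minimal stand-in might look like the sketch below; the `SimpleCoingecko` class and its single endpoint are assumptions made for the example.

# Hypothetical sketch only: rotkehlchen's real Coingecko client and fixture live in its
# own codebase; this stand-in just shows the session-scoped fixture pattern.
import pytest
import requests


class SimpleCoingecko:
    """Minimal stand-in client for Coingecko's public REST API."""

    BASE_URL = 'https://api.coingecko.com/api/v3'

    def coin_data(self, coin_id: str) -> dict:
        # /coins/{id} returns name, symbol, description, image URLs, etc.
        response = requests.get(f'{self.BASE_URL}/coins/{coin_id}', timeout=30)
        response.raise_for_status()
        return response.json()


@pytest.fixture(scope='session')
def session_coingecko_sketch():
    # One client instance is shared by every test in the session, mirroring how a
    # session-scoped `session_coingecko` fixture would normally be used.
    return SimpleCoingecko()


def test_btc_symbol(session_coingecko_sketch):
    data = session_coingecko_sketch.coin_data('bitcoin')
    assert data['symbol'] == 'btc'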
software/glasgow/applet/video/ws2812_output/__init__.py | electroniceel/Glasgow | 1,014 | 6631062 | <reponame>electroniceel/Glasgow<filename>software/glasgow/applet/video/ws2812_output/__init__.py
import logging
import asyncio
from nmigen import *
from ....support.endpoint import *
from ....gateware.pads import *
from ....gateware.pll import *
from ... import *
class VideoWS2812Output(Elaboratable):
def __init__(self, pads):
self.pads = pads
self.out = Signal(len(pads))
def elaborate(self, platform):
m = Module()
for i, pad in enumerate(self.pads):
m.d.comb += [
pad.oe.eq(1),
pad.o.eq(self.out[i]),
]
return m
class VideoWS2812OutputSubtarget(Elaboratable):
def __init__(self, pads, count, out_fifo):
self.pads = pads
self.count = count
self.out_fifo = out_fifo
def elaborate(self, platform):
# Safe timings:
# bit period needs to be > 1250ns and < 7µs
# 0 bits should be 100 - 500 ns
# 1 bits should be > 750ns and < (period - 200ns)
# reset should be >300µs
sys_clk_freq = platform.default_clk_frequency
t_one = int(1 + sys_clk_freq * 750e-9)
t_period = int(max(1 + sys_clk_freq * 1250e-9, 1 + t_one + sys_clk_freq * 200e-9))
assert t_period / sys_clk_freq < 7000e-9
t_zero = int(1 + sys_clk_freq * 100e-9)
assert t_zero < sys_clk_freq * 500e-9
t_reset = int(1 + sys_clk_freq * 300e-6)
m = Module()
m.submodules.output = output = VideoWS2812Output(self.pads)
cyc_ctr = Signal(range(t_reset+1))
bit_ctr = Signal(range(24))
pix_ctr = Signal(range(self.count+1))
word_ctr = Signal(range(max(2, len(self.pads))))
r = Signal(8)
g = Signal(8)
word = Signal(24 * len(self.pads))
with m.FSM():
with m.State("LOAD-R"):
m.d.comb += [
self.out_fifo.r_en.eq(1),
output.out.eq(0),
]
with m.If(self.out_fifo.r_rdy):
m.d.sync += r.eq(self.out_fifo.r_data)
m.next = "LOAD-G"
with m.State("LOAD-G"):
m.d.comb += [
self.out_fifo.r_en.eq(1),
output.out.eq(0),
]
with m.If(self.out_fifo.r_rdy):
m.d.sync += g.eq(self.out_fifo.r_data)
m.next = "LOAD-B"
with m.State("LOAD-B"):
m.d.comb += [
self.out_fifo.r_en.eq(1),
output.out.eq(0),
]
with m.If(self.out_fifo.r_rdy):
m.d.sync += word.eq(Cat(word[24:] if len(self.pads) > 1 else [], self.out_fifo.r_data, r, g))
with m.If(word_ctr == (len(self.pads) - 1)):
m.next = "SEND-WORD"
with m.Else():
m.d.sync += word_ctr.eq(word_ctr + 1)
m.next = "LOAD-R"
with m.State("SEND-WORD"):
with m.If(cyc_ctr < t_zero):
m.d.comb += output.out.eq((1 << len(self.pads)) - 1)
m.d.sync += cyc_ctr.eq(cyc_ctr + 1)
with m.Elif(cyc_ctr < t_one):
m.d.comb += ( o.eq(word[23 + 24 * i]) for i,o in enumerate(output.out) )
m.d.sync += cyc_ctr.eq(cyc_ctr + 1)
with m.Elif(cyc_ctr < t_period):
m.d.comb += output.out.eq(0)
m.d.sync += cyc_ctr.eq(cyc_ctr + 1)
with m.Elif(bit_ctr < 23):
m.d.comb += output.out.eq(0)
m.d.sync += [
cyc_ctr.eq(0),
bit_ctr.eq(bit_ctr + 1),
word.eq(Cat(0, word[:-1])),
]
with m.Elif(pix_ctr < (self.count - 1)):
m.d.comb += output.out.eq(0)
m.d.sync += [
pix_ctr.eq(pix_ctr + 1),
cyc_ctr.eq(0),
bit_ctr.eq(0),
word_ctr.eq(0),
]
m.next = "LOAD-R"
with m.Else():
m.d.comb += output.out.eq(0)
m.d.sync += cyc_ctr.eq(0)
m.next = "RESET"
with m.State("RESET"):
m.d.comb += output.out.eq(0)
m.d.sync += cyc_ctr.eq(cyc_ctr + 1)
with m.If(cyc_ctr == t_reset):
m.d.sync += [
cyc_ctr.eq(0),
pix_ctr.eq(0),
bit_ctr.eq(0),
word_ctr.eq(0),
]
m.next = "LOAD-R"
return m
class VideoWS2812OutputApplet(GlasgowApplet, name="video-ws2812-output"):
logger = logging.getLogger(__name__)
help = "display video via WS2812 LEDs"
description = """
Output RGB frames from a socket to one or more WS2812(B) LED strings.
"""
@classmethod
def add_build_arguments(cls, parser, access):
super().add_build_arguments(parser, access)
access.add_pin_set_argument(parser, "out", width=range(1, 17), required=True)
parser.add_argument(
"-c", "--count", metavar="N", type=int, required=True,
help="set the number of LEDs per string")
def build(self, target, args):
self.mux_interface = iface = target.multiplexer.claim_interface(self, args)
subtarget = iface.add_subtarget(VideoWS2812OutputSubtarget(
pads=[iface.get_pin(pin) for pin in args.pin_set_out],
count=args.count,
out_fifo=iface.get_out_fifo(),
))
return subtarget
@classmethod
def add_run_arguments(cls, parser, access):
super().add_run_arguments(parser, access)
parser.add_argument(
"-b", "--buffer", metavar="N", type=int, default=16,
help="set the number of frames to buffer internally (buffered twice)")
async def run(self, device, args):
buffer_size = len(args.pin_set_out) * args.count * 3 * args.buffer
return await device.demultiplexer.claim_interface(self, self.mux_interface, args, write_buffer_size=buffer_size)
@classmethod
def add_interact_arguments(cls, parser):
ServerEndpoint.add_argument(parser, "endpoint")
async def interact(self, device, args, leds):
frame_size = len(args.pin_set_out) * args.count * 3
buffer_size = frame_size * args.buffer
endpoint = await ServerEndpoint("socket", self.logger, args.endpoint, queue_size=buffer_size)
while True:
try:
data = await asyncio.shield(endpoint.recv(buffer_size))
partial = len(data) % frame_size
while partial:
data += await asyncio.shield(endpoint.recv(frame_size - partial))
partial = len(data) % frame_size
await leds.write(data)
await leds.flush(wait=False)
except asyncio.CancelledError:
pass
# -------------------------------------------------------------------------------------------------
class VideoWS2812OutputAppletTestCase(GlasgowAppletTestCase, applet=VideoWS2812OutputApplet):
@synthesis_test
def test_build(self):
self.assertBuilds(args=["--pins-out", "0:3", "-c", "1024"])
| import logging
import asyncio
from nmigen import *
from ....support.endpoint import *
from ....gateware.pads import *
from ....gateware.pll import *
from ... import *
class VideoWS2812Output(Elaboratable):
def __init__(self, pads):
self.pads = pads
self.out = Signal(len(pads))
def elaborate(self, platform):
m = Module()
for i, pad in enumerate(self.pads):
m.d.comb += [
pad.oe.eq(1),
pad.o.eq(self.out[i]),
]
return m
class VideoWS2812OutputSubtarget(Elaboratable):
def __init__(self, pads, count, out_fifo):
self.pads = pads
self.count = count
self.out_fifo = out_fifo
def elaborate(self, platform):
# Safe timings:
# bit period needs to be > 1250ns and < 7µs
# 0 bits should be 100 - 500 ns
# 1 bits should be > 750ns and < (period - 200ns)
# reset should be >300µs
sys_clk_freq = platform.default_clk_frequency
t_one = int(1 + sys_clk_freq * 750e-9)
t_period = int(max(1 + sys_clk_freq * 1250e-9, 1 + t_one + sys_clk_freq * 200e-9))
assert t_period / sys_clk_freq < 7000e-9
t_zero = int(1 + sys_clk_freq * 100e-9)
assert t_zero < sys_clk_freq * 500e-9
t_reset = int(1 + sys_clk_freq * 300e-6)
m = Module()
m.submodules.output = output = VideoWS2812Output(self.pads)
cyc_ctr = Signal(range(t_reset+1))
bit_ctr = Signal(range(24))
pix_ctr = Signal(range(self.count+1))
word_ctr = Signal(range(max(2, len(self.pads))))
r = Signal(8)
g = Signal(8)
word = Signal(24 * len(self.pads))
with m.FSM():
with m.State("LOAD-R"):
m.d.comb += [
self.out_fifo.r_en.eq(1),
output.out.eq(0),
]
with m.If(self.out_fifo.r_rdy):
m.d.sync += r.eq(self.out_fifo.r_data)
m.next = "LOAD-G"
with m.State("LOAD-G"):
m.d.comb += [
self.out_fifo.r_en.eq(1),
output.out.eq(0),
]
with m.If(self.out_fifo.r_rdy):
m.d.sync += g.eq(self.out_fifo.r_data)
m.next = "LOAD-B"
with m.State("LOAD-B"):
m.d.comb += [
self.out_fifo.r_en.eq(1),
output.out.eq(0),
]
with m.If(self.out_fifo.r_rdy):
m.d.sync += word.eq(Cat(word[24:] if len(self.pads) > 1 else [], self.out_fifo.r_data, r, g))
with m.If(word_ctr == (len(self.pads) - 1)):
m.next = "SEND-WORD"
with m.Else():
m.d.sync += word_ctr.eq(word_ctr + 1)
m.next = "LOAD-R"
with m.State("SEND-WORD"):
with m.If(cyc_ctr < t_zero):
m.d.comb += output.out.eq((1 << len(self.pads)) - 1)
m.d.sync += cyc_ctr.eq(cyc_ctr + 1)
with m.Elif(cyc_ctr < t_one):
m.d.comb += ( o.eq(word[23 + 24 * i]) for i,o in enumerate(output.out) )
m.d.sync += cyc_ctr.eq(cyc_ctr + 1)
with m.Elif(cyc_ctr < t_period):
m.d.comb += output.out.eq(0)
m.d.sync += cyc_ctr.eq(cyc_ctr + 1)
with m.Elif(bit_ctr < 23):
m.d.comb += output.out.eq(0)
m.d.sync += [
cyc_ctr.eq(0),
bit_ctr.eq(bit_ctr + 1),
word.eq(Cat(0, word[:-1])),
]
with m.Elif(pix_ctr < (self.count - 1)):
m.d.comb += output.out.eq(0)
m.d.sync += [
pix_ctr.eq(pix_ctr + 1),
cyc_ctr.eq(0),
bit_ctr.eq(0),
word_ctr.eq(0),
]
m.next = "LOAD-R"
with m.Else():
m.d.comb += output.out.eq(0)
m.d.sync += cyc_ctr.eq(0)
m.next = "RESET"
with m.State("RESET"):
m.d.comb += output.out.eq(0)
m.d.sync += cyc_ctr.eq(cyc_ctr + 1)
with m.If(cyc_ctr == t_reset):
m.d.sync += [
cyc_ctr.eq(0),
pix_ctr.eq(0),
bit_ctr.eq(0),
word_ctr.eq(0),
]
m.next = "LOAD-R"
return m
class VideoWS2812OutputApplet(GlasgowApplet, name="video-ws2812-output"):
logger = logging.getLogger(__name__)
help = "display video via WS2812 LEDs"
description = """
Output RGB frames from a socket to one or more WS2812(B) LED strings.
"""
@classmethod
def add_build_arguments(cls, parser, access):
super().add_build_arguments(parser, access)
access.add_pin_set_argument(parser, "out", width=range(1, 17), required=True)
parser.add_argument(
"-c", "--count", metavar="N", type=int, required=True,
help="set the number of LEDs per string")
def build(self, target, args):
self.mux_interface = iface = target.multiplexer.claim_interface(self, args)
subtarget = iface.add_subtarget(VideoWS2812OutputSubtarget(
pads=[iface.get_pin(pin) for pin in args.pin_set_out],
count=args.count,
out_fifo=iface.get_out_fifo(),
))
return subtarget
@classmethod
def add_run_arguments(cls, parser, access):
super().add_run_arguments(parser, access)
parser.add_argument(
"-b", "--buffer", metavar="N", type=int, default=16,
help="set the number of frames to buffer internally (buffered twice)")
async def run(self, device, args):
buffer_size = len(args.pin_set_out) * args.count * 3 * args.buffer
return await device.demultiplexer.claim_interface(self, self.mux_interface, args, write_buffer_size=buffer_size)
@classmethod
def add_interact_arguments(cls, parser):
ServerEndpoint.add_argument(parser, "endpoint")
async def interact(self, device, args, leds):
frame_size = len(args.pin_set_out) * args.count * 3
buffer_size = frame_size * args.buffer
endpoint = await ServerEndpoint("socket", self.logger, args.endpoint, queue_size=buffer_size)
while True:
try:
data = await asyncio.shield(endpoint.recv(buffer_size))
partial = len(data) % frame_size
while partial:
data += await asyncio.shield(endpoint.recv(frame_size - partial))
partial = len(data) % frame_size
await leds.write(data)
await leds.flush(wait=False)
except asyncio.CancelledError:
pass
# -------------------------------------------------------------------------------------------------
class VideoWS2812OutputAppletTestCase(GlasgowAppletTestCase, applet=VideoWS2812OutputApplet):
@synthesis_test
def test_build(self):
self.assertBuilds(args=["--pins-out", "0:3", "-c", "1024"]) | en | 0.547233 | # Safe timings: # bit period needs to be > 1250ns and < 7µs # 0 bits should be 100 - 500 ns # 1 bits should be > 750ns and < (period - 200ns) # reset should be >300µs Output RGB frames from a socket to one or more WS2812(B) LED strings. # ------------------------------------------------------------------------------------------------- | 2.225807 | 2 |
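The `interact` method above reads raw RGB bytes from a server socket endpoint and forwards whole frames (3 bytes per LED, one triple per output pin, pixel by pixel) to the gateware, which loads them in R, G, B order. The client sketch below streams a moving-dot animation to such an endpoint; the host, port, and LED geometry are assumptions that must match whatever endpoint spec, pin set, and `--count` were passed when the applet was started.

# Illustrative client only: host/port and LED geometry below are placeholders that must
# match the running applet's endpoint argument, --pins-out, and --count settings.
import socket
import time

HOST, PORT = "localhost", 9999   # assumed TCP server endpoint of the running applet
NUM_STRINGS = 1                  # one LED string per output pin
LEDS_PER_STRING = 30


def make_frame(step: int) -> bytes:
    """Build one frame: an R, G, B triple per LED, interleaved across strings."""
    lit = step % LEDS_PER_STRING  # index of the single lit LED in this frame
    frame = bytearray()
    for led in range(LEDS_PER_STRING):
        for _ in range(NUM_STRINGS):
            frame += bytes((255, 0, 0) if led == lit else (0, 0, 0))
    return bytes(frame)


with socket.create_connection((HOST, PORT)) as sock:
    step = 0
    while True:
        sock.sendall(make_frame(step))
        step += 1
        time.sleep(1 / 30)       # roughly 30 frames per second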
Bio/AlignIO/FastaIO.py | ttung/biopython | 0 | 6631063 | # Copyright 2008-2016 by <NAME>. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Bio.AlignIO support for "fasta-m10" output from <NAME>'s FASTA tools.
You are expected to use this module via the Bio.AlignIO functions (or the
Bio.SeqIO functions if you want to work directly with the gapped sequences).
This module contains a parser for the pairwise alignments produced by <NAME> FASTA tools, for use from the Bio.AlignIO interface where it is
referred to as the "fasta-m10" file format (as we only support the machine
readable output format selected with the -m 10 command line option).
This module does NOT cover the generic "fasta" file format originally
developed as an input format to the FASTA tools. The Bio.AlignIO and
Bio.SeqIO both use the Bio.SeqIO.FastaIO module to deal with these files,
which can also be used to store a multiple sequence alignments.
"""
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
from Bio.Alphabet import single_letter_alphabet, generic_dna, generic_protein
def _extract_alignment_region(alignment_seq_with_flanking, annotation):
"""Extract alignment region (PRIVATE).
Helper function for the main parsing code.
To get the actual pairwise alignment sequences, we must first
translate the un-gapped sequence based coordinates into positions
in the gapped sequence (which may have a flanking region shown
using leading - characters). To date, I have never seen any
trailing flanking region shown in the m10 file, but the
following code should also cope with that.
Note that this code seems to work fine even when the "sq_offset"
entries are prsent as a result of using the -X command line option.
"""
align_stripped = alignment_seq_with_flanking.strip("-")
display_start = int(annotation["al_display_start"])
if int(annotation["al_start"]) <= int(annotation["al_stop"]):
start = int(annotation["al_start"]) - display_start
end = int(annotation["al_stop"]) - display_start + 1
else:
# FASTA has flipped this sequence...
start = display_start - int(annotation["al_start"])
end = display_start - int(annotation["al_stop"]) + 1
end += align_stripped.count("-")
if start < 0 or start >= end or end > len(align_stripped):
raise ValueError(
"Problem with sequence start/stop,\n%s[%i:%i]\n%s"
% (alignment_seq_with_flanking, start, end, annotation)
)
return align_stripped[start:end]
def FastaM10Iterator(handle, alphabet=single_letter_alphabet):
"""Alignment iterator for the FASTA tool's pairwise alignment output.
This is for reading the pairwise alignments output by <NAME>'s
FASTA program when called with the -m 10 command line option for machine
readable output. For more details about the FASTA tools, see the website
http://fasta.bioch.virginia.edu/ and the paper:
<NAME> & <NAME> PNAS (1988) 85:2444-2448
This class is intended to be used via the Bio.AlignIO.parse() function
by specifying the format as "fasta-m10" as shown in the following code::
from Bio import AlignIO
handle = ...
for a in AlignIO.parse(handle, "fasta-m10"):
assert len(a) == 2, "Should be pairwise!"
print("Alignment length %i" % a.get_alignment_length())
for record in a:
print("%s %s %s" % (record.seq, record.name, record.id))
Note that this is not a full blown parser for all the information
in the FASTA output - for example, most of the header and all of the
footer is ignored. Also, the alignments are not batched according to
the input queries.
Also note that there can be up to about 30 letters of flanking region
included in the raw FASTA output as contextual information. This is NOT
part of the alignment itself, and is not included in the resulting
MultipleSeqAlignment objects returned.
"""
if alphabet is None:
alphabet = single_letter_alphabet
state_PREAMBLE = -1
state_NONE = 0
state_QUERY_HEADER = 1
state_ALIGN_HEADER = 2
state_ALIGN_QUERY = 3
state_ALIGN_MATCH = 4
state_ALIGN_CONS = 5
def build_hsp():
if not query_tags and not match_tags:
raise ValueError("No data for query %r, match %r" % (query_id, match_id))
assert query_tags, query_tags
assert match_tags, match_tags
evalue = align_tags.get("fa_expect")
q = "?" # Just for printing len(q) in debug below
m = "?" # Just for printing len(m) in debug below
tool = global_tags.get("tool", "").upper()
q = _extract_alignment_region(query_seq, query_tags)
if tool in ["TFASTX"] and len(match_seq) == len(q):
m = match_seq
# Quick hack until I can work out how -, * and / characters
# and the apparent mix of aa and bp coordinates works.
else:
m = _extract_alignment_region(match_seq, match_tags)
if len(q) != len(m):
raise ValueError(
f"""\
Darn... amino acids vs nucleotide coordinates?
tool: {tool}
query_seq: {query_seq}
query_tags: {query_tags}
{q} length: {len(q)}
match_seq: {match_seq}
match_tags: {match_tags}
{m} length: {len(m)}
handle.name: {handle.name}
"""
)
assert alphabet is not None
alignment = MultipleSeqAlignment([], alphabet)
# TODO - Introduce an annotated alignment class?
# See also Bio/AlignIO/MafIO.py for same requirement.
# For now, store the annotation a new private property:
alignment._annotations = {}
# Want to record both the query header tags, and the alignment tags.
for key, value in header_tags.items():
alignment._annotations[key] = value
for key, value in align_tags.items():
alignment._annotations[key] = value
# Query
# =====
record = SeqRecord(
Seq(q, alphabet),
id=query_id,
name="query",
description=query_descr,
annotations={"original_length": int(query_tags["sq_len"])},
)
# TODO - handle start/end coordinates properly. Short term hack for now:
record._al_start = int(query_tags["al_start"])
record._al_stop = int(query_tags["al_stop"])
alignment.append(record)
# TODO - What if a specific alphabet has been requested?
# TODO - Can FASTA output RNA?
if alphabet == single_letter_alphabet and "sq_type" in query_tags:
if query_tags["sq_type"] == "D":
record.seq.alphabet = generic_dna
elif query_tags["sq_type"] == "p":
record.seq.alphabet = generic_protein
if "sq_type" in query_tags:
if query_tags["sq_type"] == "D":
record.annotations["molecule_type"] = "DNA"
elif query_tags["sq_type"] == "p":
record.annotations["molecule_type"] = "protein"
# Match
# =====
record = SeqRecord(
Seq(m, alphabet),
id=match_id,
name="match",
description=match_descr,
annotations={"original_length": int(match_tags["sq_len"])},
)
# TODO - handle start/end coordinates properly. Short term hack for now:
record._al_start = int(match_tags["al_start"])
record._al_stop = int(match_tags["al_stop"])
alignment.append(record)
# This is still a very crude way of dealing with the alphabet:
if alphabet == single_letter_alphabet and "sq_type" in match_tags:
if match_tags["sq_type"] == "D":
record.seq.alphabet = generic_dna
elif match_tags["sq_type"] == "p":
record.seq.alphabet = generic_protein
return alignment
state = state_PREAMBLE
query_id = None
match_id = None
query_descr = ""
match_descr = ""
global_tags = {}
header_tags = {}
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
for line in handle:
if ">>>" in line and not line.startswith(">>>"):
if query_id and match_id:
# This happens on old FASTA output which lacked an end of
# query >>><<< marker line.
yield build_hsp()
state = state_NONE
query_descr = line[line.find(">>>") + 3 :].strip()
query_id = query_descr.split(None, 1)[0]
match_id = None
header_tags = {}
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
elif line.startswith("!! No "):
# e.g.
# !! No library sequences with E() < 0.5
# or on more recent versions,
# No sequences with E() < 0.05
assert state == state_NONE
assert not header_tags
assert not align_tags
assert not match_tags
assert not query_tags
assert match_id is None
assert not query_seq
assert not match_seq
assert not cons_seq
query_id = None
elif line.strip() in [">>><<<", ">>>///"]:
# End of query, possible end of all queries
if query_id and match_id:
yield build_hsp()
state = state_NONE
query_id = None
match_id = None
header_tags = {}
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
elif line.startswith(">>>"):
# Should be start of a match!
assert query_id is not None
assert line[3:].split(", ", 1)[0] == query_id, line
assert match_id is None
assert not header_tags
assert not align_tags
assert not query_tags
assert not match_tags
assert not match_seq
assert not query_seq
assert not cons_seq
state = state_QUERY_HEADER
elif line.startswith(">>"):
# Should now be at start of a match alignment!
if query_id and match_id:
yield build_hsp()
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
match_descr = line[2:].strip()
match_id = match_descr.split(None, 1)[0]
state = state_ALIGN_HEADER
elif line.startswith(">--"):
# End of one HSP
assert query_id and match_id, line
yield build_hsp()
# Clean up read for next HSP
# but reuse header_tags
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
state = state_ALIGN_HEADER
elif line.startswith(">"):
if state == state_ALIGN_HEADER:
# Should be start of query alignment seq...
assert query_id is not None, line
assert match_id is not None, line
assert query_id.startswith(line[1:].split(None, 1)[0]), line
state = state_ALIGN_QUERY
elif state == state_ALIGN_QUERY:
# Should be start of match alignment seq
assert query_id is not None, line
assert match_id is not None, line
assert match_id.startswith(line[1:].split(None, 1)[0]), line
state = state_ALIGN_MATCH
elif state == state_NONE:
# Can get > as the last line of a histogram
pass
else:
raise RuntimeError("state %i got %r" % (state, line))
elif line.startswith("; al_cons"):
assert state == state_ALIGN_MATCH, line
state = state_ALIGN_CONS
# Next line(s) should be consensus seq...
elif line.startswith("; "):
if ": " in line:
key, value = [s.strip() for s in line[2:].split(": ", 1)]
else:
import warnings
from Bio import BiopythonParserWarning
# Seen in lalign36, specifically version 36.3.4 Apr, 2011
# Fixed in version 36.3.5b Oct, 2011(preload8)
warnings.warn(
"Missing colon in line: %r" % line, BiopythonParserWarning
)
try:
key, value = [s.strip() for s in line[2:].split(" ", 1)]
except ValueError:
raise ValueError("Bad line: %r" % line) from None
if state == state_QUERY_HEADER:
header_tags[key] = value
elif state == state_ALIGN_HEADER:
align_tags[key] = value
elif state == state_ALIGN_QUERY:
query_tags[key] = value
elif state == state_ALIGN_MATCH:
match_tags[key] = value
else:
raise RuntimeError("Unexpected state %r, %r" % (state, line))
elif state == state_ALIGN_QUERY:
query_seq += line.strip()
elif state == state_ALIGN_MATCH:
match_seq += line.strip()
elif state == state_ALIGN_CONS:
cons_seq += line.strip("\n")
elif state == state_PREAMBLE:
if line.startswith("#"):
global_tags["command"] = line[1:].strip()
elif line.startswith(" version "):
global_tags["version"] = line[9:].strip()
elif " compares a " in line:
global_tags["tool"] = line[: line.find(" compares a ")].strip()
elif " searches a " in line:
global_tags["tool"] = line[: line.find(" searches a ")].strip()
else:
pass
| # Copyright 2008-2016 by <NAME>. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Bio.AlignIO support for "fasta-m10" output from <NAME>'s FASTA tools.
You are expected to use this module via the Bio.AlignIO functions (or the
Bio.SeqIO functions if you want to work directly with the gapped sequences).
This module contains a parser for the pairwise alignments produced by <NAME> FASTA tools, for use from the Bio.AlignIO interface where it is
referred to as the "fasta-m10" file format (as we only support the machine
readable output format selected with the -m 10 command line option).
This module does NOT cover the generic "fasta" file format originally
developed as an input format to the FASTA tools. The Bio.AlignIO and
Bio.SeqIO both use the Bio.SeqIO.FastaIO module to deal with these files,
which can also be used to store a multiple sequence alignments.
"""
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
from Bio.Alphabet import single_letter_alphabet, generic_dna, generic_protein
def _extract_alignment_region(alignment_seq_with_flanking, annotation):
"""Extract alignment region (PRIVATE).
Helper function for the main parsing code.
To get the actual pairwise alignment sequences, we must first
translate the un-gapped sequence based coordinates into positions
in the gapped sequence (which may have a flanking region shown
using leading - characters). To date, I have never seen any
trailing flanking region shown in the m10 file, but the
following code should also cope with that.
Note that this code seems to work fine even when the "sq_offset"
    entries are present as a result of using the -X command line option.
"""
align_stripped = alignment_seq_with_flanking.strip("-")
display_start = int(annotation["al_display_start"])
if int(annotation["al_start"]) <= int(annotation["al_stop"]):
start = int(annotation["al_start"]) - display_start
end = int(annotation["al_stop"]) - display_start + 1
else:
# FASTA has flipped this sequence...
start = display_start - int(annotation["al_start"])
end = display_start - int(annotation["al_stop"]) + 1
end += align_stripped.count("-")
if start < 0 or start >= end or end > len(align_stripped):
raise ValueError(
"Problem with sequence start/stop,\n%s[%i:%i]\n%s"
% (alignment_seq_with_flanking, start, end, annotation)
)
return align_stripped[start:end]
def FastaM10Iterator(handle, alphabet=single_letter_alphabet):
"""Alignment iterator for the FASTA tool's pairwise alignment output.
This is for reading the pairwise alignments output by <NAME>'s
FASTA program when called with the -m 10 command line option for machine
readable output. For more details about the FASTA tools, see the website
http://fasta.bioch.virginia.edu/ and the paper:
<NAME> & <NAME> PNAS (1988) 85:2444-2448
This class is intended to be used via the Bio.AlignIO.parse() function
by specifying the format as "fasta-m10" as shown in the following code::
from Bio import AlignIO
handle = ...
for a in AlignIO.parse(handle, "fasta-m10"):
assert len(a) == 2, "Should be pairwise!"
print("Alignment length %i" % a.get_alignment_length())
for record in a:
print("%s %s %s" % (record.seq, record.name, record.id))
Note that this is not a full blown parser for all the information
in the FASTA output - for example, most of the header and all of the
footer is ignored. Also, the alignments are not batched according to
the input queries.
Also note that there can be up to about 30 letters of flanking region
included in the raw FASTA output as contextual information. This is NOT
part of the alignment itself, and is not included in the resulting
MultipleSeqAlignment objects returned.
"""
if alphabet is None:
alphabet = single_letter_alphabet
state_PREAMBLE = -1
state_NONE = 0
state_QUERY_HEADER = 1
state_ALIGN_HEADER = 2
state_ALIGN_QUERY = 3
state_ALIGN_MATCH = 4
state_ALIGN_CONS = 5
def build_hsp():
if not query_tags and not match_tags:
raise ValueError("No data for query %r, match %r" % (query_id, match_id))
assert query_tags, query_tags
assert match_tags, match_tags
evalue = align_tags.get("fa_expect")
q = "?" # Just for printing len(q) in debug below
m = "?" # Just for printing len(m) in debug below
tool = global_tags.get("tool", "").upper()
q = _extract_alignment_region(query_seq, query_tags)
if tool in ["TFASTX"] and len(match_seq) == len(q):
m = match_seq
# Quick hack until I can work out how -, * and / characters
# and the apparent mix of aa and bp coordinates works.
else:
m = _extract_alignment_region(match_seq, match_tags)
if len(q) != len(m):
raise ValueError(
f"""\
Darn... amino acids vs nucleotide coordinates?
tool: {tool}
query_seq: {query_seq}
query_tags: {query_tags}
{q} length: {len(q)}
match_seq: {match_seq}
match_tags: {match_tags}
{m} length: {len(m)}
handle.name: {handle.name}
"""
)
assert alphabet is not None
alignment = MultipleSeqAlignment([], alphabet)
# TODO - Introduce an annotated alignment class?
# See also Bio/AlignIO/MafIO.py for same requirement.
# For now, store the annotation a new private property:
alignment._annotations = {}
# Want to record both the query header tags, and the alignment tags.
for key, value in header_tags.items():
alignment._annotations[key] = value
for key, value in align_tags.items():
alignment._annotations[key] = value
# Query
# =====
record = SeqRecord(
Seq(q, alphabet),
id=query_id,
name="query",
description=query_descr,
annotations={"original_length": int(query_tags["sq_len"])},
)
# TODO - handle start/end coordinates properly. Short term hack for now:
record._al_start = int(query_tags["al_start"])
record._al_stop = int(query_tags["al_stop"])
alignment.append(record)
# TODO - What if a specific alphabet has been requested?
# TODO - Can FASTA output RNA?
if alphabet == single_letter_alphabet and "sq_type" in query_tags:
if query_tags["sq_type"] == "D":
record.seq.alphabet = generic_dna
elif query_tags["sq_type"] == "p":
record.seq.alphabet = generic_protein
if "sq_type" in query_tags:
if query_tags["sq_type"] == "D":
record.annotations["molecule_type"] = "DNA"
elif query_tags["sq_type"] == "p":
record.annotations["molecule_type"] = "protein"
# Match
# =====
record = SeqRecord(
Seq(m, alphabet),
id=match_id,
name="match",
description=match_descr,
annotations={"original_length": int(match_tags["sq_len"])},
)
# TODO - handle start/end coordinates properly. Short term hack for now:
record._al_start = int(match_tags["al_start"])
record._al_stop = int(match_tags["al_stop"])
alignment.append(record)
# This is still a very crude way of dealing with the alphabet:
if alphabet == single_letter_alphabet and "sq_type" in match_tags:
if match_tags["sq_type"] == "D":
record.seq.alphabet = generic_dna
elif match_tags["sq_type"] == "p":
record.seq.alphabet = generic_protein
return alignment
state = state_PREAMBLE
query_id = None
match_id = None
query_descr = ""
match_descr = ""
global_tags = {}
header_tags = {}
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
for line in handle:
if ">>>" in line and not line.startswith(">>>"):
if query_id and match_id:
# This happens on old FASTA output which lacked an end of
# query >>><<< marker line.
yield build_hsp()
state = state_NONE
query_descr = line[line.find(">>>") + 3 :].strip()
query_id = query_descr.split(None, 1)[0]
match_id = None
header_tags = {}
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
elif line.startswith("!! No "):
# e.g.
# !! No library sequences with E() < 0.5
# or on more recent versions,
# No sequences with E() < 0.05
assert state == state_NONE
assert not header_tags
assert not align_tags
assert not match_tags
assert not query_tags
assert match_id is None
assert not query_seq
assert not match_seq
assert not cons_seq
query_id = None
elif line.strip() in [">>><<<", ">>>///"]:
# End of query, possible end of all queries
if query_id and match_id:
yield build_hsp()
state = state_NONE
query_id = None
match_id = None
header_tags = {}
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
elif line.startswith(">>>"):
# Should be start of a match!
assert query_id is not None
assert line[3:].split(", ", 1)[0] == query_id, line
assert match_id is None
assert not header_tags
assert not align_tags
assert not query_tags
assert not match_tags
assert not match_seq
assert not query_seq
assert not cons_seq
state = state_QUERY_HEADER
elif line.startswith(">>"):
# Should now be at start of a match alignment!
if query_id and match_id:
yield build_hsp()
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
match_descr = line[2:].strip()
match_id = match_descr.split(None, 1)[0]
state = state_ALIGN_HEADER
elif line.startswith(">--"):
# End of one HSP
assert query_id and match_id, line
yield build_hsp()
# Clean up read for next HSP
# but reuse header_tags
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
state = state_ALIGN_HEADER
elif line.startswith(">"):
if state == state_ALIGN_HEADER:
# Should be start of query alignment seq...
assert query_id is not None, line
assert match_id is not None, line
assert query_id.startswith(line[1:].split(None, 1)[0]), line
state = state_ALIGN_QUERY
elif state == state_ALIGN_QUERY:
# Should be start of match alignment seq
assert query_id is not None, line
assert match_id is not None, line
assert match_id.startswith(line[1:].split(None, 1)[0]), line
state = state_ALIGN_MATCH
elif state == state_NONE:
# Can get > as the last line of a histogram
pass
else:
raise RuntimeError("state %i got %r" % (state, line))
elif line.startswith("; al_cons"):
assert state == state_ALIGN_MATCH, line
state = state_ALIGN_CONS
# Next line(s) should be consensus seq...
elif line.startswith("; "):
if ": " in line:
key, value = [s.strip() for s in line[2:].split(": ", 1)]
else:
import warnings
from Bio import BiopythonParserWarning
# Seen in lalign36, specifically version 36.3.4 Apr, 2011
# Fixed in version 36.3.5b Oct, 2011(preload8)
warnings.warn(
"Missing colon in line: %r" % line, BiopythonParserWarning
)
try:
key, value = [s.strip() for s in line[2:].split(" ", 1)]
except ValueError:
raise ValueError("Bad line: %r" % line) from None
if state == state_QUERY_HEADER:
header_tags[key] = value
elif state == state_ALIGN_HEADER:
align_tags[key] = value
elif state == state_ALIGN_QUERY:
query_tags[key] = value
elif state == state_ALIGN_MATCH:
match_tags[key] = value
else:
raise RuntimeError("Unexpected state %r, %r" % (state, line))
elif state == state_ALIGN_QUERY:
query_seq += line.strip()
elif state == state_ALIGN_MATCH:
match_seq += line.strip()
elif state == state_ALIGN_CONS:
cons_seq += line.strip("\n")
elif state == state_PREAMBLE:
if line.startswith("#"):
global_tags["command"] = line[1:].strip()
elif line.startswith(" version "):
global_tags["version"] = line[9:].strip()
elif " compares a " in line:
global_tags["tool"] = line[: line.find(" compares a ")].strip()
elif " searches a " in line:
global_tags["tool"] = line[: line.find(" searches a ")].strip()
else:
pass
| en | 0.847083 | # Copyright 2008-2016 by <NAME>. All rights reserved. # # This file is part of the Biopython distribution and governed by your # choice of the "Biopython License Agreement" or the "BSD 3-Clause License". # Please see the LICENSE file that should have been included as part of this # package. Bio.AlignIO support for "fasta-m10" output from <NAME>'s FASTA tools. You are expected to use this module via the Bio.AlignIO functions (or the Bio.SeqIO functions if you want to work directly with the gapped sequences). This module contains a parser for the pairwise alignments produced by <NAME> FASTA tools, for use from the Bio.AlignIO interface where it is referred to as the "fasta-m10" file format (as we only support the machine readable output format selected with the -m 10 command line option). This module does NOT cover the generic "fasta" file format originally developed as an input format to the FASTA tools. The Bio.AlignIO and Bio.SeqIO both use the Bio.SeqIO.FastaIO module to deal with these files, which can also be used to store a multiple sequence alignments. Extract alignment region (PRIVATE). Helper function for the main parsing code. To get the actual pairwise alignment sequences, we must first translate the un-gapped sequence based coordinates into positions in the gapped sequence (which may have a flanking region shown using leading - characters). To date, I have never seen any trailing flanking region shown in the m10 file, but the following code should also cope with that. Note that this code seems to work fine even when the "sq_offset" entries are prsent as a result of using the -X command line option. # FASTA has flipped this sequence... Alignment iterator for the FASTA tool's pairwise alignment output. This is for reading the pairwise alignments output by <NAME>'s FASTA program when called with the -m 10 command line option for machine readable output. For more details about the FASTA tools, see the website http://fasta.bioch.virginia.edu/ and the paper: <NAME> & <NAME> PNAS (1988) 85:2444-2448 This class is intended to be used via the Bio.AlignIO.parse() function by specifying the format as "fasta-m10" as shown in the following code:: from Bio import AlignIO handle = ... for a in AlignIO.parse(handle, "fasta-m10"): assert len(a) == 2, "Should be pairwise!" print("Alignment length %i" % a.get_alignment_length()) for record in a: print("%s %s %s" % (record.seq, record.name, record.id)) Note that this is not a full blown parser for all the information in the FASTA output - for example, most of the header and all of the footer is ignored. Also, the alignments are not batched according to the input queries. Also note that there can be up to about 30 letters of flanking region included in the raw FASTA output as contextual information. This is NOT part of the alignment itself, and is not included in the resulting MultipleSeqAlignment objects returned. # Just for printing len(q) in debug below # Just for printing len(m) in debug below # Quick hack until I can work out how -, * and / characters # and the apparent mix of aa and bp coordinates works. \ Darn... amino acids vs nucleotide coordinates? tool: {tool} query_seq: {query_seq} query_tags: {query_tags} {q} length: {len(q)} match_seq: {match_seq} match_tags: {match_tags} {m} length: {len(m)} handle.name: {handle.name} # TODO - Introduce an annotated alignment class? # See also Bio/AlignIO/MafIO.py for same requirement. 
# For now, store the annotation a new private property: # Want to record both the query header tags, and the alignment tags. # Query # ===== # TODO - handle start/end coordinates properly. Short term hack for now: # TODO - What if a specific alphabet has been requested? # TODO - Can FASTA output RNA? # Match # ===== # TODO - handle start/end coordinates properly. Short term hack for now: # This is still a very crude way of dealing with the alphabet: # This happens on old FASTA output which lacked an end of # query >>><<< marker line. # e.g. # !! No library sequences with E() < 0.5 # or on more recent versions, # No sequences with E() < 0.05 # End of query, possible end of all queries # Should be start of a match! # Should now be at start of a match alignment! # End of one HSP # Clean up read for next HSP # but reuse header_tags # Should be start of query alignment seq... # Should be start of match alignment seq # Can get > as the last line of a histogram # Next line(s) should be consensus seq... # Seen in lalign36, specifically version 36.3.4 Apr, 2011 # Fixed in version 36.3.5b Oct, 2011(preload8) | 2.732175 | 3 |
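The FastaM10Iterator docstring already sketches the basic parse loop; the snippet below is a slightly fuller usage example with a placeholder file name, also showing the per-alignment tags kept on the private `_annotations` attribute and the `original_length` annotation set on each record. Being private, `_annotations` may change between Biopython releases.

# Usage sketch; "fasta_output.m10" is a placeholder for real `fasta36 -m 10` output.
from Bio import AlignIO

with open("fasta_output.m10") as handle:
    for alignment in AlignIO.parse(handle, "fasta-m10"):
        query, match = alignment  # always pairwise: query record first, then match
        print(f"{query.id} vs {match.id}, length {alignment.get_alignment_length()}")
        # Alignment-level tags (e.g. fa_expect, sw_score) live on a private attribute:
        for key, value in alignment._annotations.items():
            print(f"  {key}: {value}")
        # Each record also remembers the ungapped length of the original sequence:
        print(f"  query original length: {query.annotations['original_length']}")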
sign-language-hand-gesture-recognition/classify_webcam.py | kriti3108/CoC_PeRKS | 1 | 6631064 | <reponame>kriti3108/CoC_PeRKS
import sys
import os
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import copy
import cv2
# Disable tensorflow compilation warnings
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
sequence=""
def predict(image_data):
sess = tf.Session()
predictions = sess.run(softmax_tensor, \
{'DecodeJpeg/contents:0': image_data})
# Sort to show labels of first prediction in order of confidence
top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
max_score = 0.0
res = ''
for node_id in top_k:
human_string = label_lines[node_id]
score = predictions[0][node_id]
if score > max_score:
max_score = score
res = human_string
return res, max_score
# Loads label file, strips off carriage return
label_lines = [line.rstrip() for line
in tf.gfile.GFile("logs/trained_labels.txt")]
# Unpersists graph from file
with tf.gfile.FastGFile("logs/trained_graph.pb", 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
with tf.Session() as sess:
# Feed the image_data as input to the graph and get first prediction
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
c = 0
cap = cv2.VideoCapture(-1)
res, score = '', 0.0
i = 0
mem = ''
consecutive = 0
sequence = ''
while True:
ret, img = cap.read()
img = cv2.flip(img, 1)
if ret:
x1, y1, x2, y2 = 100, 100, 300, 300
img_cropped = img[y1:y2, x1:x2]
c += 1
image_data = cv2.imencode('.jpg', img_cropped)[1].tostring()
a = cv2.waitKey(1) # waits to see if `esc` is pressed
if i == 4:
res_tmp, score = predict(image_data)
res = res_tmp
i = 0
if mem == res:
consecutive += 1
else:
consecutive = 0
if consecutive == 2 and res not in ['nothing']:
if res == 'space':
sequence += ' '
elif res == 'del':
sequence = sequence[:-1]
else:
sequence += res
consecutive = 0
i += 1
cv2.putText(img, '%s' % (res.upper()), (100,400), cv2.FONT_HERSHEY_SIMPLEX, 4, (255,255,255), 4)
cv2.putText(img, '(score = %.5f)' % (float(score)), (100,450), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255))
mem = res
cv2.rectangle(img, (x1, y1), (x2, y2), (255,0,0), 2)
cv2.imshow("img", img)
img_sequence = np.zeros((200,1200,3), np.uint8)
cv2.putText(img_sequence, '%s' % (sequence.upper()), (30,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
cv2.imshow('sequence', img_sequence)
if a == 27: # when `esc` is pressed
break
    # Print the full sequence recognised during this session
    print(sequence)
    # Release the camera and close the OpenCV windows
cap.release()
cv2.destroyAllWindows()
| import sys
import os
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import copy
import cv2
# Disable tensorflow compilation warnings
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
sequence=""
def predict(image_data):
sess = tf.Session()
predictions = sess.run(softmax_tensor, \
{'DecodeJpeg/contents:0': image_data})
# Sort to show labels of first prediction in order of confidence
top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
max_score = 0.0
res = ''
for node_id in top_k:
human_string = label_lines[node_id]
score = predictions[0][node_id]
if score > max_score:
max_score = score
res = human_string
return res, max_score
# Loads label file, strips off carriage return
label_lines = [line.rstrip() for line
in tf.gfile.GFile("logs/trained_labels.txt")]
# Unpersists graph from file
with tf.gfile.FastGFile("logs/trained_graph.pb", 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
with tf.Session() as sess:
# Feed the image_data as input to the graph and get first prediction
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
c = 0
cap = cv2.VideoCapture(-1)
res, score = '', 0.0
i = 0
mem = ''
consecutive = 0
sequence = ''
while True:
ret, img = cap.read()
img = cv2.flip(img, 1)
if ret:
x1, y1, x2, y2 = 100, 100, 300, 300
img_cropped = img[y1:y2, x1:x2]
c += 1
image_data = cv2.imencode('.jpg', img_cropped)[1].tostring()
a = cv2.waitKey(1) # waits to see if `esc` is pressed
if i == 4:
res_tmp, score = predict(image_data)
res = res_tmp
i = 0
if mem == res:
consecutive += 1
else:
consecutive = 0
if consecutive == 2 and res not in ['nothing']:
if res == 'space':
sequence += ' '
elif res == 'del':
sequence = sequence[:-1]
else:
sequence += res
consecutive = 0
i += 1
cv2.putText(img, '%s' % (res.upper()), (100,400), cv2.FONT_HERSHEY_SIMPLEX, 4, (255,255,255), 4)
cv2.putText(img, '(score = %.5f)' % (float(score)), (100,450), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255))
mem = res
cv2.rectangle(img, (x1, y1), (x2, y2), (255,0,0), 2)
cv2.imshow("img", img)
img_sequence = np.zeros((200,1200,3), np.uint8)
cv2.putText(img_sequence, '%s' % (sequence.upper()), (30,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
cv2.imshow('sequence', img_sequence)
if a == 27: # when `esc` is pressed
break
    # Print the full sequence recognised during this session
    print(sequence)
    # Release the camera and close the OpenCV windows
cap.release()
cv2.destroyAllWindows() | en | 0.839301 | # Disable tensorflow compilation warnings # Sort to show labels of first prediction in order of confidence # Loads label file, strips off carriage return # Unpersists graph from file # Feed the image_data as input to the graph and get first prediction # waits to see if `esc` is pressed # when `esc` is pressed # Following line should... <-- This should work fine now | 2.617869 | 3 |
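The letter-building logic above only commits a prediction once the classifier has returned the same label on consecutive sampled frames, with 'space', 'del', and 'nothing' handled specially. The class below factors that debouncing out into a standalone sketch so it can be exercised without a camera or a TensorFlow graph; the label names follow the script's conventions and are assumptions about the trained label set.

# Stand-alone sketch of the debouncing/sequence-building logic used above.
class SequenceBuilder:
    def __init__(self, required_consecutive: int = 2):
        self.required = required_consecutive
        self.last_label = ''
        self.consecutive = 0
        self.sequence = ''

    def update(self, label: str) -> str:
        """Feed one prediction; append to the sequence once it is stable."""
        if label == self.last_label:
            self.consecutive += 1
        else:
            self.consecutive = 0
        self.last_label = label
        if self.consecutive == self.required and label != 'nothing':
            if label == 'space':
                self.sequence += ' '
            elif label == 'del':
                self.sequence = self.sequence[:-1]
            else:
                self.sequence += label
            self.consecutive = 0
        return self.sequence


builder = SequenceBuilder()
for predicted in ['h', 'h', 'h', 'e', 'e', 'e', 'l', 'l', 'l', 'l', 'l', 'l', 'o', 'o', 'o']:
    text = builder.update(predicted)
print(text)  # -> hello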
serve.py | chughtaimh/slack-slash-commands | 0 | 6631065 | from app import app
def main():
"""Runs webservice"""
from paste import httpserver
httpserver.serve(app, host='127.0.0.1', port='8080')
if __name__ == '__main__':
main()
| from app import app
def main():
"""Runs webservice"""
from paste import httpserver
httpserver.serve(app, host='127.0.0.1', port='8080')
if __name__ == '__main__':
main()
| de | 0.808817 | Runs webservice | 2.101086 | 2 |
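serve.py binds the imported WSGI `app` object with Paste's httpserver. For quick local testing without the Paste dependency, the standard library's wsgiref server can serve the same object; the sketch below is an alternative runner, not part of the project.

# Alternative local runner using only the standard library (not part of the project).
from wsgiref.simple_server import make_server

from app import app  # the same WSGI app object served above


def main():
    """Serve the app on 127.0.0.1:8080 with wsgiref for quick local testing."""
    with make_server('127.0.0.1', 8080, app) as httpd:
        httpd.serve_forever()


if __name__ == '__main__':
    main()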
pypubtrack/__init__.py | the16thpythonist/pypubtrack | 0 | 6631066 | """Top-level package for pypubtrack."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
from pypubtrack.pypubtrack import Pubtrack
| """Top-level package for pypubtrack."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
from pypubtrack.pypubtrack import Pubtrack
| en | 0.67701 | Top-level package for pypubtrack. <NAME> | 1.213774 | 1 |
src/booking/urls.py | dcosentino/django-booking | 1 | 6631067 | <filename>src/booking/urls.py
#-*- coding: utf-8 -*-
from django.conf.urls import *
from views import *
urlpatterns = [
]
| <filename>src/booking/urls.py
#-*- coding: utf-8 -*-
from django.conf.urls import *
from views import *
urlpatterns = [
]
| en | 0.636498 | #-*- coding: utf-8 -*- | 1.172117 | 1 |
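This urls.py declares an empty `urlpatterns` list using the old star-import style. If routes were added in the same pre-2.0 Django idiom, they would typically look like the sketch below; `BookingListView` and `BookingDetailView` are purely hypothetical names, since the app's views module is not shown here.

# Hypothetical example only: the view classes are placeholders, not names taken from
# django-booking's actual views module.
from django.conf.urls import url

from views import BookingListView, BookingDetailView

urlpatterns = [
    url(r'^$', BookingListView.as_view(), name='booking_list'),
    url(r'^(?P<pk>\d+)/$', BookingDetailView.as_view(), name='booking_detail'),
]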
evaluation/dpbench/metrics/pmse.py | Tecnarca/patectsdgym | 2 | 6631068 | import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
def pmse_ratio(data, synthetic_data):
"""
In order to determine how similar the synthetic and real data are
to each other (general quality of synthetic) we can train a
discriminator to attempt to distinguish between real and
synthetic. The poorer the performance of the discriminator, the
more similar the two datasets are.
From "Really Useful Synthetic Data
A Framework To Evaluate The Quality Of
Differentially Private Synthetic Data"
https://arxiv.org/pdf/2004.07740.pdf
:param data: Original data
:type data: pandas DataFrame
:param synthetic_data: Synthetic data we are analyzing
:type synthetic_data: pandas DataFrame
:return: ratio (pmse score)
:rtype: float
"""
n1 = data.shape[0]
n2 = synthetic_data.shape[0]
comb = pd.concat([data, synthetic_data], axis=0, keys=[0, 1]).reset_index(level=[0]).rename(columns={'level_0': 'indicator'})
X_comb = comb.drop('indicator', axis=1)
y_comb = comb['indicator']
X_train, X_test, y_train, y_test = train_test_split(X_comb, y_comb, test_size=0.33, random_state=42)
clf = LogisticRegression(random_state=0).fit(X_train, y_train)
score = clf.predict_proba(X_comb)[:, 1]
observed_utility = sum((score - n2/(n1+n2))**2) / (n1+n2)
expected_utility = clf.coef_.shape[1] * (n1/(n1+n2))**2 * (n2/(n1+n2)) / (n1+n2)
ratio = observed_utility / expected_utility
return ratio | import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
def pmse_ratio(data, synthetic_data):
"""
In order to determine how similar the synthetic and real data are
to each other (general quality of synthetic) we can train a
discriminator to attempt to distinguish between real and
synthetic. The poorer the performance of the discriminator, the
more similar the two datasets are.
From "Really Useful Synthetic Data
A Framework To Evaluate The Quality Of
Differentially Private Synthetic Data"
https://arxiv.org/pdf/2004.07740.pdf
:param data: Original data
:type data: pandas DataFrame
:param synthetic_data: Synthetic data we are analyzing
:type synthetic_data: pandas DataFrame
:return: ratio (pmse score)
:rtype: float
"""
n1 = data.shape[0]
n2 = synthetic_data.shape[0]
comb = pd.concat([data, synthetic_data], axis=0, keys=[0, 1]).reset_index(level=[0]).rename(columns={'level_0': 'indicator'})
X_comb = comb.drop('indicator', axis=1)
y_comb = comb['indicator']
X_train, X_test, y_train, y_test = train_test_split(X_comb, y_comb, test_size=0.33, random_state=42)
clf = LogisticRegression(random_state=0).fit(X_train, y_train)
score = clf.predict_proba(X_comb)[:, 1]
observed_utility = sum((score - n2/(n1+n2))**2) / (n1+n2)
expected_utility = clf.coef_.shape[1] * (n1/(n1+n2))**2 * (n2/(n1+n2)) / (n1+n2)
ratio = observed_utility / expected_utility
return ratio | en | 0.814994 | In order to determine how similar the synthetic and real data are to each other (general quality of synthetic) we can train a discriminator to attempt to distinguish between real and synthetic. The poorer the performance of the discriminator, the more similar the two datasets are. From "Really Useful Synthetic Data A Framework To Evaluate The Quality Of Differentially Private Synthetic Data" https://arxiv.org/pdf/2004.07740.pdf :param data: Original data :type data: pandas DataFrame :param synthetic_data: Synthetic data we are analyzing :type synthetic_data: pandas DataFrame :return: ratio (pmse score) :rtype: float | 3.355882 | 3 |
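pmse_ratio compares the observed propensity-score MSE of the real-vs-synthetic discriminator with its expected value under the null hypothesis that the two datasets are indistinguishable, so values near 1 indicate good general utility and larger values indicate the discriminator can tell the datasets apart. A toy sanity check, assuming `pmse_ratio` from this module is in scope, might look like this:

# Toy usage sketch: two samples from the same distribution should score near 1,
# while an obviously shifted "synthetic" dataset should score much higher.
import numpy as np
import pandas as pd

rng = np.random.default_rng(seed=42)
real = pd.DataFrame(rng.normal(size=(1000, 3)), columns=['a', 'b', 'c'])
synthetic = pd.DataFrame(rng.normal(size=(1000, 3)), columns=['a', 'b', 'c'])
shifted = real + 5.0  # deliberately poor "synthetic" data

print(pmse_ratio(real, synthetic))  # expected to be close to 1
print(pmse_ratio(real, shifted))    # expected to be much larger than 1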
scripts/experiments/multiple-scripts/01_train_models.py | timothygebhard/hsr4hci | 1 | 6631069 | <filename>scripts/experiments/multiple-scripts/01_train_models.py
"""
Train a collection of half-sibling regression models.
"""
# -----------------------------------------------------------------------------
# IMPORTS
# -----------------------------------------------------------------------------
from pathlib import Path
import argparse
import os
import time
from astropy.units import Quantity
from hsr4hci.base_models import BaseModelCreator
from hsr4hci.config import load_config
from hsr4hci.data import load_dataset
from hsr4hci.hdf import save_dict_to_hdf, create_hdf_dir
from hsr4hci.masking import get_roi_mask
from hsr4hci.training import train_all_models
from hsr4hci.units import InstrumentUnitsContext
# -----------------------------------------------------------------------------
# MAIN CODE
# -----------------------------------------------------------------------------
if __name__ == '__main__':
# -------------------------------------------------------------------------
# Preliminaries
# -------------------------------------------------------------------------
script_start = time.time()
print('\nTRAIN HALF-SIBLING REGRESSION MODELS\n', flush=True)
# -------------------------------------------------------------------------
# Set up parser to get command line arguments
# -------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument(
'--experiment-dir',
type=str,
required=True,
metavar='PATH',
help='(Absolute) path to experiment directory.',
)
parser.add_argument(
'--roi-split',
type=int,
default=0,
help='Index of the split to process; must be in [0, n_roi_splits).',
)
parser.add_argument(
'--n-roi-splits',
type=int,
default=1,
help=(
'Number of splits into which the ROI is divided (e.g., for '
'parallel training).'
),
)
parser.add_argument(
'--hdf-location',
type=str,
choices=['local', 'work'],
default='local',
help=(
'Where to create the HDF directory: "locally" or on /work. '
'Note: Unless you are the author of this code and are running an '
'experiment on the cluster at the MPI-IS, you *always* want to '
'use "local" here!'
),
)
args = parser.parse_args()
# -------------------------------------------------------------------------
# Load experiment configuration and data
# -------------------------------------------------------------------------
# Get experiment directory
experiment_dir = Path(os.path.expanduser(args.experiment_dir))
if not experiment_dir.exists():
raise NotADirectoryError(f'{experiment_dir} does not exist!')
# Load experiment config from JSON
print('Loading experiment configuration...', end=' ', flush=True)
config = load_config(experiment_dir / 'config.json')
print('Done!', flush=True)
# Load frames, parallactic angles, etc. from HDF file
print('Loading data set...', end=' ', flush=True)
stack, parang, psf_template, observing_conditions, metadata = load_dataset(
**config['dataset']
)
print('Done!', flush=True)
# -------------------------------------------------------------------------
# Prepare directory for saving HDF files (*before* training)
# -------------------------------------------------------------------------
# Create HDF directory (either locally or on /work)
create_on_work = args.hdf_location == 'work'
hdf_dir = create_hdf_dir(experiment_dir, create_on_work=create_on_work)
# Create a directory for the partial result files (on /work)
partial_dir = hdf_dir / 'partial'
partial_dir.mkdir(exist_ok=True, parents=True)
# -------------------------------------------------------------------------
# Define various useful shortcuts; activate unit conversions
# -------------------------------------------------------------------------
# Quantities related to the size of the data set
n_frames, x_size, y_size = stack.shape
frame_size = (x_size, y_size)
# Metadata of the data set
pixscale = float(metadata['PIXSCALE'])
lambda_over_d = float(metadata['LAMBDA_OVER_D'])
# Other shortcuts
roi_split = args.roi_split
n_roi_splits = args.n_roi_splits
selected_keys = config['observing_conditions']['selected_keys']
max_oc_correlation = float(
config['observing_conditions']['max_correlation']
)
# Define the unit conversion context for this data set
instrument_units_context = InstrumentUnitsContext(
pixscale=Quantity(pixscale, 'arcsec / pixel'),
lambda_over_d=Quantity(lambda_over_d, 'arcsec'),
)
# Construct the mask for the region of interest (ROI)
with instrument_units_context:
roi_mask = get_roi_mask(
mask_size=frame_size,
inner_radius=Quantity(*config['roi_mask']['inner_radius']),
outer_radius=Quantity(*config['roi_mask']['outer_radius']),
)
# -------------------------------------------------------------------------
# Loop over pixels in ROI and learn half-sibling regression models
# -------------------------------------------------------------------------
# Set up a BaseModelCreator to create instances of our base model
base_model_creator = BaseModelCreator(**config['base_model'])
# Train models and get residuals for them
print('\nTraining models:', flush=True)
with instrument_units_context:
partial_residuals = train_all_models(
roi_mask=roi_mask,
stack=stack,
parang=parang,
psf_template=psf_template,
obscon_array=observing_conditions.as_array(selected_keys),
selection_mask_config=config['selection_mask'],
base_model_creator=base_model_creator,
max_oc_correlation=max_oc_correlation,
n_train_splits=config['n_train_splits'],
train_mode=config['train_mode'],
n_signal_times=config['n_signal_times'],
n_roi_splits=n_roi_splits,
roi_split=roi_split,
return_format='partial',
)
print()
# -------------------------------------------------------------------------
# Save residuals to an HDF file
# -------------------------------------------------------------------------
# Finally, save residuals to HDF (in the partial directory)
print('Saving residuals...', end=' ', flush=True)
file_name = f'residuals_{roi_split + 1:04d}-{n_roi_splits:04d}.hdf'
file_path = partial_dir / file_name
save_dict_to_hdf(
dictionary=partial_residuals, file_path=file_path, mode='w'
)
print('Done!', flush=True)
# -------------------------------------------------------------------------
# Postliminaries
# -------------------------------------------------------------------------
print(f'\nThis took {time.time() - script_start:.1f} seconds!\n')
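Because each invocation of this script trains models for only one of the `n_roi_splits` chunks of the region of interest, a complete run launches it once per split (ideally as parallel cluster jobs) and later merges the partial HDF files written to the `partial` directory. The sequential launcher below is only a sketch; the experiment path and split count are placeholders, and the flags are the ones defined by the argument parser above.

# Hypothetical launcher sketch: runs one process per ROI split, sequentially.
# In practice these would be submitted as parallel cluster jobs; the path is a placeholder.
import subprocess

EXPERIMENT_DIR = '/path/to/experiments/my_experiment'  # placeholder
N_ROI_SPLITS = 8

for roi_split in range(N_ROI_SPLITS):
    subprocess.run(
        [
            'python', '01_train_models.py',
            '--experiment-dir', EXPERIMENT_DIR,
            '--roi-split', str(roi_split),
            '--n-roi-splits', str(N_ROI_SPLITS),
        ],
        check=True,
    )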
tests/shelf_tools/test_tool_paste_items.py | Hengle/Houdini-Toolbox | 136 | 6631070 | """Test the tool_paste_items.shelf file."""
# =============================================================================
# TESTS
# =============================================================================
def test_paste_items(mocker, exec_tool_script):
"""Test the paste_items tool."""
mock_paste = mocker.patch("ht.ui.paste.paste_items")
mock_kwargs = mocker.MagicMock(spec=dict)
exec_tool_script("tool_paste_items.shelf", "paste_items", mock_kwargs)
mock_paste.assert_called_with(mock_kwargs)
| """Test the tool_paste_items.shelf file."""
# =============================================================================
# TESTS
# =============================================================================
def test_paste_items(mocker, exec_tool_script):
"""Test the paste_items tool."""
mock_paste = mocker.patch("ht.ui.paste.paste_items")
mock_kwargs = mocker.MagicMock(spec=dict)
exec_tool_script("tool_paste_items.shelf", "paste_items", mock_kwargs)
mock_paste.assert_called_with(mock_kwargs)
| en | 0.347418 | Test the tool_paste_items.shelf file. # ============================================================================= # TESTS # ============================================================================= Test the paste_items tool. | 2.273395 | 2 |
nannies/netapp_balance.py | DEiselt/openstack-nannies | 0 | 6631071 |
#!/usr/bin/env python3
#
# Copyright (c) 2020 SAP SE
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# -*- coding: utf-8 -*-
import re
import argparse
import logging
import time
import os
from helper.netapp import NetAppHelper
from helper.vcenter import *
from helper.openstack import OpenstackHelper
from helper.prometheus_exporter import *
from pyVim.task import WaitForTask
# prometheus export functionality
from prometheus_client import start_http_server, Gauge
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')
def parse_commandline():
parser = argparse.ArgumentParser()
parser.add_argument("--dry-run", action="store_true", help='dry run option not doing anything harmful')
parser.add_argument("--vcenter-host", required=True, help="Vcenter hostname")
parser.add_argument("--vcenter-user", required=True, help="Vcenter username")
parser.add_argument("--vcenter-password", required=True, help="Vcenter user password")
parser.add_argument("--netapp-user", required=True, help="Netapp username")
parser.add_argument("--netapp-password", required=True, help="Netapp user password")
parser.add_argument("--region", required=True, help="(Openstack) region")
# 4600 is about 90% of 5tb
parser.add_argument("--flexvol-size-limit", type=int, required=False, default=4600,
help="Maximum size in gb for a healthy flexvol")
parser.add_argument("--flexvol-lun-min-size", type=int, required=False, default=20,
help="Minimum size (>=) in gb for a volume to move for flexvol balancing")
parser.add_argument("--flexvol-lun-max-size", type=int, required=False, default=1200,
help="Maximum size (<) in gb for a volume to move for flexvol balancing")
parser.add_argument("--flexvol-denylist", nargs='*', required=False, help="ignore those flexvols")
parser.add_argument("--aggr-lun-min-size", type=int, required=False, default=1200,
help="Minimum size (>=) in gb for a volume to move for aggregate balancing")
# 2050 is about 2tb
parser.add_argument("--aggr-lun-max-size", type=int, required=False, default=2050,
help="Maximum size (<=) in gb for a volume to move for aggregate balancing")
parser.add_argument("--aggr-denylist", nargs='*', required=False, help="ignore those aggregates")
parser.add_argument("--max-move-vms", type=int, default=10,
help="Maximum number of VMs to (propose to) move")
parser.add_argument("--min-threshold", type=int, default=60,
help="Target aggregate usage must be below this value to do a move")
parser.add_argument("--max-threshold", type=int, default=70,
help="Source aggregate usage must be above this value to do a move")
parser.add_argument("--max-threshold-hysteresis", type=int, default=5,
help="How much to lower the usage below max-threshold")
parser.add_argument("--interval", type=int, default=360,
help="Interval in minutes between check runs")
parser.add_argument("--prometheus-port", type=int, default=9456,
help="Port to run the prometheus exporter for metrics on")
args = parser.parse_args()
return args
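# Minimal usage sketch (hypothetical host names, region and credentials, for
# illustration only); --dry-run keeps the nanny from actually moving anything:
#
#   python netapp_balance.py --region eu-de-1 \
#       --vcenter-host vc.example.com --vcenter-user nanny --vcenter-password secret \
#       --netapp-user nanny --netapp-password secret \
#       --dry-run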
def prometheus_exporter_setup(args):
nanny_metrics_data = PromDataClass()
nanny_metrics = PromMetricsClass()
nanny_metrics.set_metrics('netapp_balancing_nanny_flexvol_usage',
'space usage per netapp flexvol in bytes', ['flexvol'])
nanny_metrics.set_metrics('netapp_balancing_nanny_flexvol_usage_threshold',
'usage per netapp flexvol above which balancing should be done', ['dummy'])
nanny_metrics.set_metrics('netapp_balancing_nanny_aggregate_usage',
'space usage per netapp aggregate in percent', ['aggregate'])
nanny_metrics.set_metrics('netapp_balancing_nanny_aggregate_usage_threshold',
'usage per netapp aggregate above which balancing should be done', ['dummy'])
nanny_metrics.set_metrics('netapp_balancing_nanny_aggregate_usage_avg',
'average space usage of all aggregates in percent', ['dummy'])
nanny_metrics.set_metrics('netapp_balancing_nanny_move_suggestions',
'number of suggested volume moves', ['type', 'attach_state'])
nanny_metrics.set_metrics('netapp_balancing_nanny_move_suggestions_max',
'maximum number of suggested volume moves', ['dummy'])
nanny_metrics.set_metrics('netapp_balancing_nanny_error_count',
'number of errors we run into during a nanny run', ['error_type'])
REGISTRY.register(CustomCollector(nanny_metrics, nanny_metrics_data))
prometheus_http_start(int(args.prometheus_port))
return nanny_metrics_data
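# Rough sketch of the metric flow as used in this script (based only on the
# calls below; the helper internals may differ): set_metrics() declares a gauge
# and its label names, set_data() records a value for one label combination
# during a run, and sync_data() hands the collected values over to the
# registered CustomCollector so they are served on --prometheus-port.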
# get all vvol netapps via the vcenter
# TODO: correlate this info with openstack info
def get_netapp_hosts(vc, region):
# iterate over datastores to find vvols --> netapps
netapp_hosts = []
dstores = vc.find_all_of_type(vc.vim.Datastore).view
for dstore in dstores:
name = dstore.name.lower()
if name.startswith("vvol_bb"):
# example for the pattern: vvol_bb123
m = re.match("^(?:vvol)_bb(?P<bb>\d+)$", name)
if m:
bbnum = int(m.group('bb'))
# one of our netapps is inconsistent in its naming - handle this here
if bbnum == 56:
stnpa_num = 0
else:
stnpa_num = 1
# e.g. stnpca1-bb123.cc.<region>.cloud.sap - those are the netapp cluster addresses (..np_c_a1..)
netapp_name = "stnpca{}-bb{:03d}.cc.{}.cloud.sap".format(stnpa_num, bbnum, region)
# build a list of netapps
netapp_hosts.append(netapp_name)
if name.startswith("vvol_stnpc"):
# example for the pattern: vVOL_stnpca3_st030
m = re.match("^(?:vvol)_(?P<stname>.*)$", name)
if m:
# e.g. stnpca3-st030.cc.<region>.cloud.sap - those are the netapp cluster addresses (..np_c_a3..)
netapp_name = "{}.cc.{}.cloud.sap".format(str(m.group('stname')).replace('_','-'), region)
# build a list of netapps
netapp_hosts.append(netapp_name)
return netapp_hosts
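# Illustrative mappings produced by the patterns above (hypothetical
# datastore names):
#   'vVOL_BB123'          -> 'stnpca1-bb123.cc.<region>.cloud.sap'
#   'vVOL_BB056'          -> 'stnpca0-bb056.cc.<region>.cloud.sap' (naming exception)
#   'vVOL_stnpca3_st030'  -> 'stnpca3-st030.cc.<region>.cloud.sap'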
# return a list of (netapp_host, aggregate-name, percent-used-capacity, size-total) per aggregate
def get_aggr_usage_list(nh, netapp_host, aggr_denylist, nanny_metrics_data):
aggr_usage = []
# get aggregates
for aggr in nh.get_aggregate_usage():
# print info for aggr_denylisted aggregates
if aggr['aggregate-name'] in aggr_denylist:
log.info("- INFO - aggregate {} is aggr_denylist'ed via cmdline".format(aggr['aggregate-name']))
if aggr['aggr-raid-attributes']['is-root-aggregate'] == 'false' and aggr['aggregate-name'] not in aggr_denylist:
log.info("- INFO - aggregate {} of size {:.0f} gb is at {}% utilization"
.format(aggr['aggregate-name'], int(aggr['aggr-space-attributes']['size-total']) / 1024**3, aggr['aggr-space-attributes']['percent-used-capacity']))
aggr_usage.append((netapp_host, aggr['aggregate-name'],
int(aggr['aggr-space-attributes']['percent-used-capacity']),int(aggr['aggr-space-attributes']['size-total'])))
nanny_metrics_data.set_data('netapp_balancing_nanny_aggregate_usage', int(aggr['aggr-space-attributes']['percent-used-capacity']),[aggr['aggregate-name']])
return aggr_usage
# examples:
# (netapphost1, aggregate01, 50, 10000000)
# return a list of (netapp_host, flexvol_name, containing-aggregate-name, size-used, size-total) per flexvol
def get_flexvol_usage_list(nh, netapp_host, flexvol_denylist, nanny_metrics_data):
flexvol_usage = []
# get flexvols
for flexvol in nh.get_volume_usage():
# print info for flexvol_denylisted aggregates
if flexvol['volume-id-attributes']['name'] in flexvol_denylist:
log.info("- INFO - flexvol {} is flexvol_denylist'ed via cmdline".format(flexvol['volume-id-attributes']['name']))
if flexvol['volume-id-attributes']['name'].lower().startswith('vv') and flexvol['volume-id-attributes']['name'] not in flexvol_denylist:
log.info("- INFO - flexvol {} of size {:.0f} gb of a total size {:.0f} gb"
.format(flexvol['volume-id-attributes']['name'], int(flexvol['volume-space-attributes']['size-used']) / 1024**3, int(flexvol['volume-space-attributes']['size-total']) / 1024**3))
flexvol_usage.append((netapp_host, flexvol['volume-id-attributes']['name'], flexvol['volume-id-attributes']['containing-aggregate-name'],
int(flexvol['volume-space-attributes']['size-used']),int(flexvol['volume-space-attributes']['size-total'])))
nanny_metrics_data.set_data('netapp_balancing_nanny_flexvol_usage', int(flexvol['volume-space-attributes']['size-used']),[flexvol['volume-id-attributes']['name']])
return flexvol_usage
# examples:
# (netapphost1, flexvol01, aggr1, 50, 100)
# (netapphost1, flexvol02, aggr2, 60, 100)
# (netapphost1, flexvol03, aggr1, 60, 100)
# generate the vvol datastore name from the name of an aggregate
def bb_name_from_aggregate_name(netapp_host, aggregate_name):
# example for the pattern for bb connected netapps: aggr_ssd_bb123_1
m = re.match("^(?:aggr_ssd_bb)(?P<bb>\d+)_\d$", aggregate_name)
if m:
# example ds_name: vVOL_BB123
ds_name = 'vVOL_BB' + m.group('bb')
return ds_name
# example for the pattern for not bb connected netapps: aggr_ssd_st030_02
m = re.match("^(?:aggr_ssd_)(?P<stname>st.*)_\d+$", aggregate_name)
if m:
# example ds_name: vVOL_stnpca3_st030
ds_name = 'vVOL_' + str(netapp_host).split('.')[0].replace('-','_')
return ds_name
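# Illustrative examples (hypothetical hosts/aggregates), one per branch above:
#   bb_name_from_aggregate_name('stnpca1-bb123.cc.eu-de-1.cloud.sap', 'aggr_ssd_bb123_1')
#       -> 'vVOL_BB123'
#   bb_name_from_aggregate_name('stnpca3-st030.cc.eu-de-1.cloud.sap', 'aggr_ssd_st030_02')
#       -> 'vVOL_stnpca3_st030'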
def get_vcenter_info(vc):
# get all vms from vcenter
log.info("- INFO - getting information from the vcenter")
vm_view = vc.find_all_of_type(vc.vim.VirtualMachine)
vms = vc.collect_properties(vm_view, vc.vim.VirtualMachine,
['name', 'config.annotation', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'runtime.powerState',
'config.hardware.device'],
include_mors=True)
attached_volumes = []
for vm in vms:
# check if this is a shadow vm
# build list of vvol attachments on non shadow openstack vms
# the double condition is just for safety - they usually should never both match
if vc.is_openstack_vm(vm) and not vc.is_shadow_vm(vm) and not vc.is_snapshot_shadow_vm(vm):
# iterate over all devices
for device in vm['config.hardware.device']:
# TODO maybe use the "continue" method to skip non matching?
# and filter out the virtual disks
if isinstance(device, vc.vim.vm.device.VirtualDisk):
# we are only interested in vvols here
if device.backing.fileName.lower().startswith('[vvol_'):
# add backingObjectId to our list of attached volumes
attached_volumes.append(device.backing.backingObjectId)
return vms, attached_volumes
# we move the lun/volume between the netapps by telling the vcenter to move the attached
# storage of the corresponding shadow vm to another datastore (i.e. another netapp)
def move_shadow_vm(vc, volume_uuid, target_ds, dry_run):
# vm = vc.get_object_by_name(vim.VirtualMachine, volume_uuid)
vm = vc.find_server(volume_uuid)
ds = vc.get_object_by_name(vim.Datastore, target_ds)
spec = vim.VirtualMachineRelocateSpec()
spec.datastore = ds
if not dry_run:
task = vm.RelocateVM_Task(spec)
try:
status = WaitForTask(task, si=vc.api)
except Exception as e:
logging.error("- ERROR - failed to move volume %s to data store %s with error message: %s",
str(volume_uuid), str(target_ds), str(e))
return False
else:
log.info("- INFO - move of volume %s to data store %s successful with status %s",
str(volume_uuid), str(target_ds), str(status))
return True
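# Illustrative call (hypothetical volume uuid and datastore name): relocate the
# shadow vm that backs a cinder volume to another vvol datastore, honoring the
# --dry-run flag:
#   move_shadow_vm(vc, '5c9273c1-1111-2222-3333-444455556666', 'vVOL_BB123', args.dry_run)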
# endless loop of generating move suggestions and wait for the next run
def check_loop(args, nanny_metrics_data):
while True:
log.info("INFO: starting new loop run")
if args.dry_run:
log.info("- INFO - dry-run mode: not doing anything harmful")
# first balance out flexvols
move_suggestions_flexvol(args, nanny_metrics_data)
# then balance out aggregates
move_suggestions_aggr(args, nanny_metrics_data)
# set some fixed threshold value metrics based on the cmdline args
nanny_metrics_data.set_data('netapp_balancing_nanny_flexvol_usage_threshold', args.flexvol_size_limit,['dummy'])
nanny_metrics_data.set_data('netapp_balancing_nanny_aggregate_usage_threshold', args.max_threshold,['dummy'])
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions_max', args.max_move_vms,['dummy'])
# sync metrics to prometheus exporter
nanny_metrics_data.sync_data()
# wait the interval time
log.info("INFO: waiting %s minutes before starting the next loop run", str(args.interval))
time.sleep(60 * int(args.interval))
# print out suggestions for which luns should be moved
# for now this is: suggest to move the largest attached volumes (within the size limits) from the fullest flexvol to the least used aggregate
def move_suggestions_flexvol(args, nanny_metrics_data):
# TODO this might go maybe as we will not need the metrics in some return cases
# a counter for move suggestions
gauge_value_move_suggestions_detached = 0
gauge_value_move_suggestions_attached = 0
# send the empty metric now already in case we are returning before setting a new value
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions', gauge_value_move_suggestions_detached, ['flexvol', 'detached'])
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions', gauge_value_move_suggestions_attached, ['flexvol', 'attached'])
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 0,['flexvol_not_in_range'])
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 0,['flexvol_not_enough_auto'])
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 0,['flexvol_not_enough_manual'])
# used for log output
if args.dry_run:
action_string = "dry-run:"
else:
action_string = "action:"
log.info("- INFO - === flexvol balancing ===")
# create a connection to openstack
log.info("- INFO - connecting to openstack in region %s", args.region)
oh = OpenstackHelper(args.region, os.getenv('OS_USER_DOMAIN_NAME'), os.getenv('OS_PROJECT_DOMAIN_NAME'),
os.getenv('OS_PROJECT_NAME'), os.getenv('OS_USERNAME'), os.getenv('OS_PASSWORD'))
vc = VCenterHelper(host=args.vcenter_host, user=args.vcenter_user, password=args.vcenter_password)
netapp_hosts = get_netapp_hosts(vc, args.region)
# there are some bbs with only vmfs and no vvols
if not netapp_hosts:
log.info("- INFO - netapp flexvol balancing - no vvol datastores found for this vc - giving up")
return
# connect to netapp and get the netapp version
flexvol_usage = []
aggr_usage = []
netapps = {}
for netapp_host in netapp_hosts:
log.info("- INFO - connecting to netapp %s", netapp_host)
netapps[netapp_host] = NetAppHelper(host=netapp_host, user=args.netapp_user, password=args.netapp_password)
nh = netapps[netapp_host]
vi = nh.get_single("system-get-version")
log.info("- INFO - {} is on version {}".format(netapp_host, vi['version']))
# TODO this can go maybe by changing the function to use an empty list by default
# make flexvol_denylist an empty list if not set via cmdline
if args.flexvol_denylist:
flexvol_denylist = args.flexvol_denylist
else:
flexvol_denylist = []
# TODO this can go maybe by changing the function to use an empty list by default
# make aggr_denylist an empty list if not set via cmdline
if args.aggr_denylist:
aggr_denylist = args.aggr_denylist
else:
aggr_denylist = []
# collect flexvol usage across all netapp hosts
flexvol_usage += get_flexvol_usage_list(nh, netapp_host, flexvol_denylist, nanny_metrics_data)
# collect aggregate usage across all netapp hosts
aggr_usage += get_aggr_usage_list(nh, netapp_host, aggr_denylist, nanny_metrics_data)
# sort the flexvols top down to start with the biggest ones
flexvol_usage.sort(key=lambda x: x[3], reverse=True)
# to keep things simple we only work on the largest flexvol on each nanny run
if flexvol_usage:
flexvol_most_used = flexvol_usage[0]
else:
log.warning("- WARNING - either flexvols are not named properly or maybe no flexvols at all")
return
# we only have to balance flexvols in case we are over the limit with the largest one
if flexvol_most_used[3] <= args.flexvol_size_limit * 1024**3:
log.info("- INFO - all flexvols are fine - nothing to be done - largest is {} at size {:.0f} gb".format(flexvol_most_used[1], flexvol_most_used[3] / 1024**3))
return
# TODO check if containing aggr is not least used aggr
nh_flexvol_most_used = netapps[flexvol_most_used[0]]
luns_on_flexvol_most_used = nh_flexvol_most_used.get_luns_for_flexvol(flexvol_most_used[1])
# sort the luns by used-size
luns_on_flexvol_most_used.sort(key=lambda x: int(x['size-used']), reverse=True)
# filter luns for desired size range
luns_on_flexvol_most_used = [lun for lun in luns_on_flexvol_most_used
if args.flexvol_lun_min_size * 1024**3 <= int(lun['size-used']) < args.flexvol_lun_max_size * 1024**3]
# we can only balance if there are any luns to balance on the flexvol within the given min and max regions
if len(luns_on_flexvol_most_used) == 0:
log.info("- PLEASE IGNORE - there are no movable volumes within the current min/max limits on flexvol {} - maybe limits should be adjusted?".format(flexvol_most_used[1]))
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 1,['flexvol_not_in_range'])
return
# sort the aggregates top down to start with the highest usage percentage
aggr_usage.sort(key=lambda x: x[2], reverse=True)
# to keep things simple we only work on the largest aggr on each nanny run
# find aggr with highest usage, aggr with lowest usage (that is not on highest usage host)
# TODO see todo above - we need to check that the least used aggr is not the containing aggr of the most used flexvol
# in that case we want to use the second least used aggregate
aggr_most_used = aggr_usage[0]
aggr_least_used = None
for aggr in reversed(aggr_usage):
# make sure we are not on the same netapp and not on the netapp of the source flexvol
if aggr[0] != aggr_most_used[0] and aggr[0] != flexvol_most_used[0]:
aggr_least_used = aggr
break
# TODO this should be double checked and combined with the todo from above
else:
log.warning("- WARNING - no aggregate found that is not on the same netapp")
return 1
log.info("- INFO - least utilized aggregate is {1} on {0} with a usage of {2}%".format(*aggr_least_used))
log.info("- INFO - using it as target for balancing volume movements")
log.info("- INFO - calculating volume move suggestions for automatic flexvol balancing ... this may take a moment")
# TODO we do not use the comments in the end - we map via vcenter backing
# <uuid>_brick.vmdk - DATA
# <uuid>.vmdk - DATA
# <uuid>_1.vmdk - DATA
comment_re = re.compile(r"^(?P<vmdk>.*\.vmdk) - DATA$")
# /vol/vv0_BB123_01/naa.<netapp uuid>.vmdk
naa_path_re = re.compile(r"^/vol/.*/(?P<naa>naa\..*)\.vmdk$")
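# Illustrative example of what the two patterns extract (hypothetical values):
# a lun comment 'c1d2e3f4-0000-1111-2222-333344445555.vmdk - DATA' yields the
# vmdk name 'c1d2e3f4-0000-1111-2222-333344445555.vmdk', and a lun path
# '/vol/vv0_BB123_01/naa.600a0980383032fa.vmdk' yields the netapp id
# 'naa.600a0980383032fa'.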
vmdks_flexvol = []
for lun in luns_on_flexvol_most_used:
# TODO we do not use the comments in the end - we map via vcenter backing
# looks like not all luns have a comment
if lun.get('comment'):
# does this map to an instance?
comment_match = comment_re.match(lun['comment'])
if not comment_match:
continue
else:
continue
# get the netapp id (naa.xyz...) name
path_match = naa_path_re.match(lun['path'])
if not path_match:
continue
# tuple: vmdk name (encodes the volume uuid of the shadow vm), netapp id (naa.*), size-used, flexvol name
# TODO maybe also add the aggr this is on to keep track of that too ... requires adding that above too
vmdk_flexvol = (comment_match.group('vmdk'), path_match.group('naa'), int(lun['size-used']), lun['volume'])
vmdks_flexvol.append(vmdk_flexvol)
log.debug("==> flexvol vmdk file: {} - netapp id: {} - size {:.0f} gb"
.format(vmdk_flexvol[0], vmdk_flexvol[1], vmdk_flexvol[2] / 1024**3))
# limit to the largest --max-move-vms
# off for debug
#vmdks_flexvol = vmdks_flexvol[:args.max_move_vms]
valid_netapp_ids_flexvol = [vmdk_flexvol[1] for vmdk_flexvol in vmdks_flexvol]
attached_volumes = []
vms, attached_volumes = get_vcenter_info(vc)
# index = netapp-id, value = ( vm-name, attach-state )
vmvmdks_flexvol = dict()
for vm in vms:
# the double condition is just for safety - they usually should never both match
# TODO i think we already check for them to be a shadow vm in get_vcenter_info -> double check
if vc.is_shadow_vm(vm) and not vc.is_openstack_vm(vm):
# find disk backing
for device in vm['config.hardware.device']:
if isinstance(device, vc.vim.vm.device.VirtualDisk):
if device.backing.backingObjectId:
# check if this disk references one of our netapp luns (via naa path thingy)
if device.backing.backingObjectId in valid_netapp_ids_flexvol:
if device.backing.backingObjectId in attached_volumes:
vmvmdks_flexvol[device.backing.backingObjectId] = (vm['name'], 'attached')
else:
vmvmdks_flexvol[device.backing.backingObjectId] = (vm['name'], 'detached')
break
elif vc.is_snapshot_shadow_vm(vm) and not vc.is_openstack_vm(vm):
# find disk backing
for device in vm['config.hardware.device']:
if isinstance(device, vc.vim.vm.device.VirtualDisk):
if device.backing.backingObjectId:
# check if this disk references one of our netapp luns (via naa path thingy)
if device.backing.backingObjectId in valid_netapp_ids_flexvol:
if device.backing.backingObjectId in attached_volumes:
# not sure if this case is actually possible
vmvmdks_flexvol[device.backing.backingObjectId] = (vm['name'], 'snapshot attached')
else:
vmvmdks_flexvol[device.backing.backingObjectId] = (vm['name'], 'snapshot detached')
break
log.debug("==> snapshot shadow vm - netapp id: {} - volume uuid: {}".format(device.backing.backingObjectId, vm['name']))
# calculate to which percentage we want to bring down the most used aggregate
aggr_most_used_target_percentage = args.max_threshold - args.max_threshold_hysteresis
# like the last one but as absolute size instead of percentage and for the least used aggregate
aggr_least_used_target_size = int(aggr_least_used[3]) * (aggr_most_used_target_percentage / 100)
# this is for tracking how much the least used aggregate is used after each lun movement
aggr_least_used_current_size = int(aggr_least_used[3]) * (int(aggr_least_used[2]) / 100)
# this is for tracking the size of the flexvol we are moving stuff away from after each lun movement
flexvol_most_used_current_size = int(flexvol_most_used[3])
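# Worked example with the default thresholds (illustrative numbers): with
# --max-threshold 70 and --max-threshold-hysteresis 5 the target percentage is
# 65; a 100 tb target aggregate currently at 50% could therefore take roughly
# another 15 tb of luns before the loop below stops suggesting moves.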
log.info("- PLEASE IGNORE - below might be some debugging output for the planned automatic move of detached volumes")
for vmdk in vmdks_flexvol:
# if aggr_least_used_current_size >= aggr_least_used_target_size:
# log.info("- INFO - no automatic lun movements as we would fill up {} too much".format(aggr_least_used[1]))
# break
if vmvmdks_flexvol.get(vmdk[1]):
if vmvmdks_flexvol.get(vmdk[1])[1] == 'detached':
if oh.api.block_store.get_volume(vmvmdks_flexvol.get(vmdk[1])[0]).attachments:
log.info("- PLEASE IGNORE - the volume {} seems to be attached to instance {} meanwhile - doing nothing # size {:.0f} gb".format(vmvmdks_flexvol.get(vmdk[1])[0], oh.api.block_store.get_volume(vmvmdks_flexvol.get(vmdk[1])[0]).attachments[0]['server_id'], vmdk[2] / 1024**3))
else:
# this should be DEBUG later
log.debug("==> before - lun size: {:.0f} gb - flexvol usage: {:.0f} gb - target aggr usage: {:.0f} gb".format(vmdk[2] / 1024**3, flexvol_most_used_current_size / 1024**3, aggr_least_used_current_size / 1024**3))
log.info("- PLEASE IGNORE - plan: move volume {} from flexvol {} to {} # size {:.0f} gb".format(vmvmdks_flexvol.get(vmdk[1])[0], flexvol_most_used[1], bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1]), vmdk[2] / 1024**3))
# debug
log.info("- PLEASE IGNORE - {} locking volume {} before moving it".format(action_string, vmvmdks_flexvol.get(vmdk[1])[0]))
if not args.dry_run:
oh.lock_volume(vmvmdks_flexvol.get(vmdk[1])[0])
log.info("- PLEASE IGNORE - {} moving shadow vm of volume {} to {}".format(action_string, vmvmdks_flexvol.get(vmdk[1])[0], bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1])))
if not args.dry_run:
move_shadow_vm(vc, vmvmdks_flexvol.get(vmdk[1])[0], str(bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1])), args.dry_run)
log.info("- PLEASE IGNORE - {} unlocking volume {} after moving it".format(action_string, vmvmdks_flexvol.get(vmdk[1])[0]))
if not args.dry_run:
oh.unlock_volume(vmvmdks_flexvol.get(vmdk[1])[0])
# trying to keep track of the actual usage of the participating flexvols and aggregates
flexvol_most_used_current_size -= int(vmdk[2])
#aggr_least_used_current_size += vmdk[2]
aggr_least_used_current_size += int(vmdk[2])
# this should be DEBUG later
log.debug("==> after - lun size: {:.0f} gb - flexvol usage: {:.0f} gb - target aggr usage: {:.0f} gb".format(vmdk[2] / 1024**3, flexvol_most_used_current_size / 1024**3, aggr_least_used_current_size / 1024**3))
gauge_value_move_suggestions_detached += 1
if gauge_value_move_suggestions_detached >= args.max_move_vms:
log.info("- PLEASE IGNORE - max-move-vms of {} reached - stopping here".format(args.max_move_vms))
break
if aggr_least_used_current_size >= aggr_least_used_target_size:
log.info("- PLEASE IGNORE - further movements would fill up {} too much - stopping here".format(bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1])))
break
if flexvol_most_used_current_size < (args.flexvol_size_limit * 1024**3):
log.info("- PLEASE IGNORE - the size of the flexvol {} is below the limit of {:.0f} gb now - stopping here".format(flexvol_most_used[1], args.flexvol_size_limit))
break
if flexvol_most_used_current_size > (args.flexvol_size_limit * 1024**3):
log.info("- PLEASE IGNORE - there are not enough (or no) detached volumes within the current min/max limits to bring the flexvol {} below the limit of {:.0f} gb - stopping here".format(flexvol_most_used[1], args.flexvol_size_limit))
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 1,['flexvol_not_enough_auto'])
# when the balancing goal is reached an "optional" string is appended to the recommendations
optional_string = ''
overload_warning_printed = 0
log.info("- INFO - volume move target size for flexvol {} is to get below {:.0f} gb".format(flexvol_most_used[1], args.flexvol_size_limit))
log.info("- INFO - volume move suggestions for manual flexvol balancing (from largest in range to smallest):")
# go through all the shadow vms found on the netapp
for vmdk in vmdks_flexvol:
# check if they actually exist in the vcenter
if vmvmdks_flexvol.get(vmdk[1]):
# for attached volumes a move suggestion is printed out
if vmvmdks_flexvol.get(vmdk[1])[1] == 'attached':
if (aggr_least_used_current_size >= aggr_least_used_target_size) and (overload_warning_printed == 0):
# stop if the aggregate we move to gets too full
log.info("- IMPORTANT - please stop with movements here as we would fill up {} too much".format(aggr_least_used[1]))
optional_string = ' (no move)'
overload_warning_printed = 1
if (flexvol_most_used_current_size < (args.flexvol_size_limit * 1024**3)) and (optional_string == ''):
optional_string = ' (optional)'
# this should be DEBUG later
log.debug("==> before - lun size: {:.0f} gb - flexvol usage: {:.0f} gb - target aggr usage: {:.0f} gb".format(vmdk[2] / 1024**3, flexvol_most_used_current_size / 1024**3, aggr_least_used_current_size / 1024**3))
# print out info for the manual volume move
log.info("- INFO - netapp flexvol balancing - ./svmotion_cinder_v2.py {} {} # from flexvol {} on {} - size {:.0f} gb{}".format(vmvmdks_flexvol.get(vmdk[1])[0], bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1]), flexvol_most_used[1], flexvol_most_used[2], vmdk[2] / 1024**3, optional_string))
# trying to keep track of the actual usage of the participating flexvols and aggregates
flexvol_most_used_current_size -= int(vmdk[2])
aggr_least_used_current_size += int(vmdk[2])
# this should be DEBUG later
log.debug("==> after - lun size: {:.0f} gb - flexvol usage: {:.0f} gb - target aggr usage: {:.0f} gb".format(vmdk[2] / 1024**3, flexvol_most_used_current_size / 1024**3, aggr_least_used_current_size / 1024**3))
gauge_value_move_suggestions_attached += 1
if gauge_value_move_suggestions_attached >= args.max_move_vms:
log.info("- IMPORTANT - please stop with movements - max-move-vms of {} reached".format(args.max_move_vms))
optional_string = ' (no move)'
if flexvol_most_used_current_size > (args.flexvol_size_limit * 1024**3):
log.info("- INFO - there are not enough (or no) attached volumes within the current min/max limits to bring the flexvol {} below the limit of {:.0f} gb - maybe limits should be adjusted?".format(flexvol_most_used[1], args.flexvol_size_limit))
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 1,['flexvol_not_enough_manual'])
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions', gauge_value_move_suggestions_detached,['flexvol', 'detached'])
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions', gauge_value_move_suggestions_attached,['flexvol', 'attached'])
# print out suggestions for which volumes should be moved
# for now this is: suggest to move the largest attached volumes from the fullest netapp aggregate to the emptiest
def move_suggestions_aggr(args, nanny_metrics_data):
# a counter for move suggestions
gauge_value_move_suggestions_detached = 0
gauge_value_move_suggestions_attached = 0
# send the empty metric now already in case we are returning before setting a new value
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions', gauge_value_move_suggestions_detached, ['aggregate', 'detached'])
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions', gauge_value_move_suggestions_attached, ['aggregate', 'attached'])
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 0,['aggregate_not_in_range'])
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 0,['aggregate_not_enough_auto'])
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 0,['aggregate_not_enough_manual'])
# used for log output
if args.dry_run:
action_string = "dry-run:"
else:
action_string = "action:"
log.info("- INFO - === aggregate balancing ===")
# create a connection to openstack
log.info("- INFO - connecting to openstack in region %s", args.region)
oh = OpenstackHelper(args.region, os.getenv('OS_USER_DOMAIN_NAME'), os.getenv('OS_PROJECT_DOMAIN_NAME'),
os.getenv('OS_PROJECT_NAME'), os.getenv('OS_USERNAME'), os.getenv('OS_PASSWORD'))
vc = VCenterHelper(host=args.vcenter_host, user=args.vcenter_user, password=args.vcenter_password)
netapp_hosts = get_netapp_hosts(vc, args.region)
# there are some bbs with only vmfs and no vvols
if not netapp_hosts:
log.info("- INFO - netapp aggregate balancing - no vvol datastores found for this vc - giving up")
return
# connect to netapp and get the netapp version
aggr_usage = []
netapps = {}
for netapp_host in netapp_hosts:
log.info("- INFO - connecting to netapp %s", netapp_host)
netapps[netapp_host] = NetAppHelper(host=netapp_host, user=args.netapp_user, password=args.netapp_password)
nh = netapps[netapp_host]
vi = nh.get_single("system-get-version")
log.info("- INFO - {} is on version {}".format(netapp_host, vi['version']))
# make aggr_denylist an empty list if not set via cmdline
if args.aggr_denylist:
aggr_denylist = args.aggr_denylist
else:
aggr_denylist = []
# collect aggregate usage across all netapp hosts
aggr_usage += get_aggr_usage_list(nh, netapp_host, aggr_denylist, nanny_metrics_data)
# sort the aggregates top down to start with the highest usage percentage
aggr_usage.sort(key=lambda x: x[2], reverse=True)
# find aggr with highest usage, aggr with lowest usage (that is not on highest usage host)
aggr_most_used = aggr_usage[0]
aggr_least_used = None
for aggr in reversed(aggr_usage):
if aggr[0] != aggr_most_used[0]:
aggr_least_used = aggr
break
else:
log.warning("- WARNING - no aggregate found that is not on the same netapp")
return 1
log.info("- INFO - most utilized aggregate is {1} on {0} with a usage of {2}%".format(*aggr_most_used))
log.info("- INFO - least utilized aggregate is {1} on {0} with a usage of {2}%".format(*aggr_least_used))
vvol_aggr_total_size = 0
vvol_aggr_used_size = 0
for aggr in aggr_usage:
vvol_aggr_total_size += aggr[3]
vvol_aggr_used_size += aggr[2] / 100 * aggr[3]
vvol_aggr_used_percentage_avg = 100 * vvol_aggr_used_size / vvol_aggr_total_size
log.info("- INFO - average usage of all vvol aggregates is {:.0f}% at a total used size of {:.0f} gb".format(vvol_aggr_used_percentage_avg, vvol_aggr_used_size / 1024**3))
nanny_metrics_data.set_data('netapp_balancing_nanny_aggregate_usage_avg',vvol_aggr_used_percentage_avg ,['dummy'])
if int(aggr_most_used[2]) < args.max_threshold:
log.info("- INFO - netapp aggregate balancing - usage of most used source aggregate {} of {}% is below threshold of {}% - doing nothing".format(aggr_most_used[1], aggr_most_used[2], args.max_threshold))
return
if int(aggr_least_used[2]) > args.min_threshold:
log.info("- INFO - netapp aggregate balancing - usage of least used target aggregate {} of {}% is above threshold of {}% - doing nothing".format(aggr_least_used[1], aggr_least_used[2], args.min_threshold))
return
log.info("- INFO - calculating volume move suggestions for automatic aggregate balancing ... this may take a moment")
log.info("- INFO - getting luns from the netapp")
nh_most_used = netapps[aggr_most_used[0]]
luns = nh_most_used.get_luns_for_aggr(aggr_most_used[1], "vv")
# filter luns for desired size range
luns = [lun for lun in luns
if args.aggr_lun_min_size * 1024**3 <= int(lun['size-used']) <= args.aggr_lun_max_size * 1024**3]
luns.sort(key=lambda lun: int(lun['size-used']), reverse=True)
# we can only balance if there are any luns to balance on the aggregate within the given min and max regions
if len(luns) == 0:
log.info("- IMPORTANT - there are no movable volumes within the current min/max limits on aggregate {} - maybe limits should be adjusted?".format(aggr_most_used[1]))
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 1,['aggregate_not_in_range'])
return
# NOTE we do not use the comments in the end - we map via vcenter backing
# <uuid>_brick.vmdk - DATA
# <uuid>.vmdk - DATA
# <uuid>_1.vmdk - DATA
comment_re = re.compile(r"^(?P<vmdk>.*\.vmdk) - DATA$")
# /vol/vv0_BB123_01/naa.<netapp uuid>.vmdk
naa_path_re = re.compile(r"^/vol/.*/(?P<naa>naa\..*)\.vmdk$")
vmdks = []
for lun in luns:
# TODO i think we no longer use the comments and map via vcenter
# looks like not all luns have a comment
if lun.get('comment'):
# does this map to an instance?
comment_match = comment_re.match(lun['comment'])
if not comment_match:
continue
else:
continue
# get the netapp id (naa.xyz...) name
path_match = naa_path_re.match(lun['path'])
if not path_match:
continue
vmdk = (comment_match.group('vmdk'), path_match.group('naa'), int(lun['size-used']))
vmdks.append(vmdk)
log.debug("==> vmdk file: {} - netapp id: {} - size {:.0f} gb"
.format(vmdk[0], vmdk[1], vmdk[2] / 1024**3))
# limit to the largest --max-move-vms
#vmdks = vmdks[:args.max_move_vms]
valid_netapp_ids = [vmdk[1] for vmdk in vmdks]
attached_volumes = []
vms, attached_volumes = get_vcenter_info(vc)
# index = netapp-id, value = ( vm-name, attach-state )
vmvmdks = dict()
vmvmdks_flexvol = dict()
for vm in vms:
# the double condition is just for safety - they usually should never both match
if vc.is_shadow_vm(vm) and not vc.is_openstack_vm(vm):
# find disk backing
for device in vm['config.hardware.device']:
if isinstance(device, vc.vim.vm.device.VirtualDisk):
if device.backing.backingObjectId:
# check if this disk references one of our netapp luns (via naa path thingy)
if device.backing.backingObjectId in valid_netapp_ids:
if device.backing.backingObjectId in attached_volumes:
vmvmdks[device.backing.backingObjectId] = (vm['name'], 'attached')
else:
vmvmdks[device.backing.backingObjectId] = (vm['name'], 'detached')
break
elif vc.is_snapshot_shadow_vm(vm) and not vc.is_openstack_vm(vm):
# find disk backing
for device in vm['config.hardware.device']:
if isinstance(device, vc.vim.vm.device.VirtualDisk):
if device.backing.backingObjectId:
# check if this disk references one of our netapp luns (via naa path thingy)
if device.backing.backingObjectId in valid_netapp_ids:
if device.backing.backingObjectId in attached_volumes:
# not sure if this case is actually possible
vmvmdks[device.backing.backingObjectId] = (vm['name'], 'snapshot attached')
else:
vmvmdks[device.backing.backingObjectId] = (vm['name'], 'snapshot detached')
break
log.debug("==> snapshot shadow vm - netapp id: {} - volume uuid: {}".format(device.backing.backingObjectId, vm['name']))
# calculate to which percentage we want to bring down the most used aggregate
aggr_most_used_target_percentage = args.max_threshold - args.max_threshold_hysteresis
# like the last one but as absolute size instead of percentage
aggr_most_used_target_size = int(aggr_most_used[3]) * (aggr_most_used_target_percentage / 100)
# like the last one but for the least used aggregate
aggr_least_used_target_size = int(aggr_least_used[3]) * (aggr_most_used_target_percentage / 100)
# this is for tracking how much the most used aggregate is used after each lun movement
aggr_most_used_current_size = int(aggr_most_used[3]) * (int(aggr_most_used[2]) / 100)
# this is for tracking how much the least used aggregate is used after each lun movement
aggr_least_used_current_size = int(aggr_least_used[3]) * (int(aggr_least_used[2]) / 100)
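# Worked example with the default thresholds (illustrative numbers): target
# percentage = 70 - 5 = 65; a 100 tb source aggregate at 75% has to shed about
# 10 tb to reach its target size, while a 100 tb target aggregate at 50% can
# absorb about 15 tb before it reaches its own cap.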
log.info("- PLEASE IGNORE - below might be some debugging output for the planned automatic move of detached volumes")
for vmdk in vmdks:
# if aggr_least_used_current_size >= aggr_least_used_target_size:
# log.info("- INFO - no automatic lun movements as we would fill up {} too much".format(aggr_least_used[1]))
# break
if vmvmdks.get(vmdk[1]):
if vmvmdks.get(vmdk[1])[1] == 'detached':
try:
if oh.api.block_store.get_volume(vmvmdks.get(vmdk[1])[0]).attachments:
log.info("- PLEASE IGNORE - the volume {} seems to be attached to instance {} meanwhile - doing nothing # size {:.0f} gb".format(vmvmdks.get(vmdk[1])[0], oh.api.block_store.get_volume(vmvmdks.get(vmdk[1])[0]).attachments[0]['server_id'], vmdk[2] / 1024**3))
else:
log.info("- PLEASE IGNORE - plan: move volume {} from {} to {} # size {:.0f} gb".format(vmvmdks.get(vmdk[1])[0], bb_name_from_aggregate_name(aggr_most_used[0], aggr_most_used[1]), bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1]), vmdk[2] / 1024**3))
# debug
log.info("- PLEASE IGNORE - {} locking volume {} before moving it".format(action_string, vmvmdks.get(vmdk[1])[0]))
if not args.dry_run:
oh.lock_volume(vmvmdks.get(vmdk[1])[0])
log.info("- PLEASE IGNORE - {} moving shadow vm of volume {} to {}".format(action_string, vmvmdks.get(vmdk[1])[0], bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1])))
if not args.dry_run:
move_shadow_vm(vc, vmvmdks.get(vmdk[1])[0], str(bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1])), args.dry_run)
log.info("- PLEASE IGNORE - {} unlocking volume {} after moving it".format(action_string, vmvmdks.get(vmdk[1])[0]))
if not args.dry_run:
oh.unlock_volume(vmvmdks.get(vmdk[1])[0])
# trying to keep track of the actual usage of the participating aggregates
aggr_most_used_current_size -= vmdk[2]
aggr_least_used_current_size += vmdk[2]
gauge_value_move_suggestions_detached += 1
if gauge_value_move_suggestions_detached == args.max_move_vms:
log.info("- PLEASE IGNORE - max-move-vms of {} reached - stopping here".format(args.max_move_vms))
break
if aggr_least_used_current_size >= aggr_least_used_target_size:
log.info("- PLEASE IGNORE - further movements would fill up {} too much - stopping here".format(bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1])))
break
except Exception as e:
logging.error("- ERROR - exception while trying to get_volume %s from openstack - this should be investigated - error: %s",
str(vmvmdks.get(vmdk[1])[0]), str(e))
if aggr_most_used_current_size > aggr_most_used_target_size:
log.info("- PLEASE IGNORE - there are not enough (or no) detached volumes within the current min/max limits to bring the aggregate {} below the limit of {:.0f} gb - stopping here".format(aggr_most_used[1], aggr_most_used_target_size))
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 1,['aggregate_not_enough_auto'])
# when the balancing goal is reached an "optional" string is appended to the recommendations
optional_string = ''
overload_warning_printed = 0
log.info("- INFO - volume move target usage for aggregate {} is {:.0f}% corresponding to a size of {:.0f} gb".format(aggr_most_used[1], aggr_most_used_target_percentage, aggr_most_used_target_size / 1024**3))
log.info("- INFO - volume move suggestions for manual aggregate balancing (from largest in range to smallest):")
# go through all the shadow vms found on the netapp
for vmdk in vmdks:
# check if they actually exist in the vcenter
if vmvmdks.get(vmdk[1]):
# for attached volumes a move suggestion is printed out
if vmvmdks.get(vmdk[1])[1] == 'attached':
if (aggr_least_used_current_size >= aggr_least_used_target_size) and (overload_warning_printed == 0):
# stop if the aggregate we move to gets too full
log.info("- IMPORTANT - please stop with movements here as we would fill up {} too much".format(aggr_least_used[1]))
optional_string = ' (no move)'
overload_warning_printed = 1
if (aggr_most_used_current_size < aggr_most_used_target_size) and (optional_string == ''):
optional_string = ' (optional)'
# this should be DEBUG later
log.debug("==> before - lun size: {:.0f} gb - source aggr usage: {:.0f} gb - target aggr usage: {:.0f} gb".format(vmdk[2] / 1024**3, aggr_most_used_current_size / 1024**3, aggr_least_used_current_size / 1024**3))
# trying to keep track of the actual usage of the participating aggregates
aggr_most_used_current_size -= vmdk[2]
aggr_least_used_current_size += vmdk[2]
gauge_value_move_suggestions_attached += 1
# print out info for the manual volume move
log.info("- INFO - netapp aggregate balancing - ./svmotion_cinder_v2.py {} {} # from {} - size {:.0f} gb{}".format(vmvmdks.get(vmdk[1])[0], bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1]), aggr_most_used[1], vmdk[2] / 1024**3, optional_string))
# this should be DEBUG later
log.debug("==> after - lun size: {:.0f} gb - source aggr usage: {:.0f} gb - target aggr usage: {:.0f} gb".format(vmdk[2] / 1024**3, aggr_most_used_current_size / 1024**3, aggr_least_used_current_size / 1024**3))
if gauge_value_move_suggestions_attached >= args.max_move_vms:
log.info("- IMPORTANT - please stop with movements - max-move-vms of {} reached".format(args.max_move_vms))
optional_string = ' (no move)'
if aggr_most_used_current_size > aggr_most_used_target_size:
log.info("- INFO - there are not enough (or no) attached volumes within the current min/max limits to bring the aggregate {} below the limit of {:.0f} gb - maybe limits should be adjusted?".format(aggr_most_used[1], aggr_most_used_target_size / 1024**3))
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 1,['aggregate_not_enough_manual'])
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions', gauge_value_move_suggestions_detached,['aggregate', 'detached'])
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions', gauge_value_move_suggestions_attached,['aggregate', 'attached'])
def main():
args = parse_commandline()
nanny_metrics_data = prometheus_exporter_setup(args)
check_loop(args, nanny_metrics_data)
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
#
# Copyright (c) 2020 SAP SE
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# -*- coding: utf-8 -*-
import re
import argparse
import logging
import time
import os
from helper.netapp import NetAppHelper
from helper.vcenter import *
from helper.openstack import OpenstackHelper
from helper.prometheus_exporter import *
from pyVim.task import WaitForTask
# prometheus export functionality
from prometheus_client import start_http_server, Gauge
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')
def parse_commandline():
parser = argparse.ArgumentParser()
parser.add_argument("--dry-run", action="store_true", help='dry run option not doing anything harmful')
parser.add_argument("--vcenter-host", required=True, help="Vcenter hostname")
parser.add_argument("--vcenter-user", required=True, help="Vcenter username")
parser.add_argument("--vcenter-password", required=True, help="Vcenter user password")
parser.add_argument("--netapp-user", required=True, help="Netapp username")
parser.add_argument("--netapp-password", required=True, help="Netapp user password")
parser.add_argument("--region", required=True, help="(Openstack) region")
# 4600 is about 90% of 5tb
parser.add_argument("--flexvol-size-limit", type=int, required=False, default=4600,
help="Maximum size in gb for a healthy flexvol")
parser.add_argument("--flexvol-lun-min-size", type=int, required=False, default=20,
help="Minimum size (>=) in gb for a volume to move for flexvol balancing")
parser.add_argument("--flexvol-lun-max-size", type=int, required=False, default=1200,
help="Maximum size (<) in gb for a volume to move for flexvol balancing")
parser.add_argument("--flexvol-denylist", nargs='*', required=False, help="ignore those flexvols")
parser.add_argument("--aggr-lun-min-size", type=int, required=False, default=1200,
help="Minimum size (>=) in gb for a volume to move for aggregate balancing")
# 2050 is about 2tb
parser.add_argument("--aggr-lun-max-size", type=int, required=False, default=2050,
help="Maximum size (<=) in gb for a volume to move for aggregate balancing")
parser.add_argument("--aggr-denylist", nargs='*', required=False, help="ignore those aggregates")
parser.add_argument("--max-move-vms", type=int, default=10,
help="Maximum number of VMs to (propose to) move")
parser.add_argument("--min-threshold", type=int, default=60,
help="Target aggregate usage must be below this value to do a move")
parser.add_argument("--max-threshold", type=int, default=70,
help="Source aggregate usage must be above this value to do a move")
parser.add_argument("--max-threshold-hysteresis", type=int, default=5,
help="How much to lower the usage below max-threshold")
parser.add_argument("--interval", type=int, default=360,
help="Interval in minutes between check runs")
parser.add_argument("--prometheus-port", type=int, default=9456,
help="Port to run the prometheus exporter for metrics on")
args = parser.parse_args()
return args
def prometheus_exporter_setup(args):
nanny_metrics_data = PromDataClass()
nanny_metrics = PromMetricsClass()
nanny_metrics.set_metrics('netapp_balancing_nanny_flexvol_usage',
'space usage per netapp flexvol in bytes', ['flexvol'])
nanny_metrics.set_metrics('netapp_balancing_nanny_flexvol_usage_threshold',
'usage per netapp flexvol above which balancing should be done', ['dummy'])
nanny_metrics.set_metrics('netapp_balancing_nanny_aggregate_usage',
'space usage per netapp aggregate in percent', ['aggregate'])
nanny_metrics.set_metrics('netapp_balancing_nanny_aggregate_usage_threshold',
'usage per netapp aggregate above which balancing should be done', ['dummy'])
nanny_metrics.set_metrics('netapp_balancing_nanny_aggregate_usage_avg',
'average space usage of all aggregates in percent', ['dummy'])
nanny_metrics.set_metrics('netapp_balancing_nanny_move_suggestions',
'number of suggested volume moves', ['type', 'attach_state'])
nanny_metrics.set_metrics('netapp_balancing_nanny_move_suggestions_max',
'maximum number of suggested volume moves', ['dummy'])
nanny_metrics.set_metrics('netapp_balancing_nanny_error_count',
'number of errors we run into during a nanny run', ['error_type'])
REGISTRY.register(CustomCollector(nanny_metrics, nanny_metrics_data))
prometheus_http_start(int(args.prometheus_port))
return nanny_metrics_data
# get all vvol netapps via the vcenter
# TODO: correlate this info with openstack info
def get_netapp_hosts(vc, region):
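    """Derive the netapp cluster hostnames for a region from the vvol datastore names found in the vcenter."""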
# iterate over datastores to find vvols --> netapps
netapp_hosts = []
dstores = vc.find_all_of_type(vc.vim.Datastore).view
for dstore in dstores:
name = dstore.name.lower()
if name.startswith("vvol_bb"):
# example for the pattern: vvol_bb123
m = re.match("^(?:vvol)_bb(?P<bb>\d+)$", name)
if m:
bbnum = int(m.group('bb'))
# one of our netapps is inconsistent in its naming - handle this here
if bbnum == 56:
stnpa_num = 0
else:
stnpa_num = 1
# e.g. stnpca1-bb123.cc.<region>.cloud.sap - those are the netapp cluster addresses (..np_c_a1..)
netapp_name = "stnpca{}-bb{:03d}.cc.{}.cloud.sap".format(stnpa_num, bbnum, region)
# build a list of netapps
netapp_hosts.append(netapp_name)
if name.startswith("vvol_stnpc"):
# example for the pattern: vVOL_stnpca3_st030
m = re.match("^(?:vvol)_(?P<stname>.*)$", name)
if m:
# e.g. stnpca3-st030.cc.<region>.cloud.sap - those are the netapp cluster addresses (..np_c_a3..)
netapp_name = "{}.cc.{}.cloud.sap".format(str(m.group('stname')).replace('_','-'), region)
# build a list of netapps
netapp_hosts.append(netapp_name)
return netapp_hosts
# return a list of (netapp_host, aggregate-name, percent-used-capacity, size-total) per aggregate
def get_aggr_usage_list(nh, netapp_host, aggr_denylist, nanny_metrics_data):
aggr_usage = []
# get aggregates
for aggr in nh.get_aggregate_usage():
# print info for aggr_denylisted aggregates
if aggr['aggregate-name'] in aggr_denylist:
log.info("- INFO - aggregate {} is aggr_denylist'ed via cmdline".format(aggr['aggregate-name']))
if aggr['aggr-raid-attributes']['is-root-aggregate'] == 'false' and aggr['aggregate-name'] not in aggr_denylist:
log.info("- INFO - aggregate {} of size {:.0f} gb is at {}% utilization"
.format(aggr['aggregate-name'], int(aggr['aggr-space-attributes']['size-total']) / 1024**3, aggr['aggr-space-attributes']['percent-used-capacity']))
aggr_usage.append((netapp_host, aggr['aggregate-name'],
int(aggr['aggr-space-attributes']['percent-used-capacity']),int(aggr['aggr-space-attributes']['size-total'])))
nanny_metrics_data.set_data('netapp_balancing_nanny_aggregate_usage', int(aggr['aggr-space-attributes']['percent-used-capacity']),[aggr['aggregate-name']])
return aggr_usage
# examples:
# (netapphost1, aggregate01, 50, 10000000)
# return a list of (netapp_host, flexvol_name, containing-aggregate-name, size-used, size-total) per flexvol
def get_flexvol_usage_list(nh, netapp_host, flexvol_denylist, nanny_metrics_data):
flexvol_usage = []
# get flexvols
for flexvol in nh.get_volume_usage():
# print info for flexvol_denylisted aggregates
if flexvol['volume-id-attributes']['name'] in flexvol_denylist:
log.info("- INFO - flexvol {} is flexvol_denylist'ed via cmdline".format(flexvol['volume-id-attributes']['name']))
if flexvol['volume-id-attributes']['name'].lower().startswith('vv') and flexvol['volume-id-attributes']['name'] not in flexvol_denylist:
log.info("- INFO - flexvol {} of size {:.0f} gb of a total size {:.0f} gb"
.format(flexvol['volume-id-attributes']['name'], int(flexvol['volume-space-attributes']['size-used']) / 1024**3, int(flexvol['volume-space-attributes']['size-total']) / 1024**3))
flexvol_usage.append((netapp_host, flexvol['volume-id-attributes']['name'], flexvol['volume-id-attributes']['containing-aggregate-name'],
int(flexvol['volume-space-attributes']['size-used']),int(flexvol['volume-space-attributes']['size-total'])))
nanny_metrics_data.set_data('netapp_balancing_nanny_flexvol_usage', int(flexvol['volume-space-attributes']['size-used']),[flexvol['volume-id-attributes']['name']])
return flexvol_usage
# examples:
# (netapphost1, flexvol01, aggr1, 50, 100)
# (netapphost1, flexvol02, aggr2, 60, 100)
# (netapphost1, flexvol03, aggr1, 60, 100)
# generate the vvol datastore name from the name of an aggregate
def bb_name_from_aggregate_name(netapp_host, aggregate_name):
# example for the pattern for bb connected netapps: aggr_ssd_bb123_1
m = re.match("^(?:aggr_ssd_bb)(?P<bb>\d+)_\d$", aggregate_name)
if m:
# example ds_name: vVOL_BB123
ds_name = 'vVOL_BB' + m.group('bb')
return ds_name
# example for the pattern for not bb connected netapps: aggr_ssd_st030_02
m = re.match("^(?:aggr_ssd_)(?P<stname>st.*)_\d+$", aggregate_name)
if m:
# example ds_name: vVOL_stnpca3_st030
ds_name = 'vVOL_' + str(netapp_host).split('.')[0].replace('-','_')
return ds_name
def get_vcenter_info(vc):
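    """Collect vm properties from the vcenter.

    Returns a tuple (vms, attached_volumes) where vms is the list of collected vm
    property dicts and attached_volumes is the list of netapp backingObjectIds of
    vvol disks attached to regular (non shadow) openstack vms.
    """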
# get all vms from vcenter
log.info("- INFO - getting information from the vcenter")
vm_view = vc.find_all_of_type(vc.vim.VirtualMachine)
vms = vc.collect_properties(vm_view, vc.vim.VirtualMachine,
['name', 'config.annotation', 'config.hardware.memoryMB', 'config.hardware.numCPU', 'runtime.powerState',
'config.hardware.device'],
include_mors=True)
attached_volumes = []
for vm in vms:
# check if this is a shadow vm
# build list of vvol attachments on non shadow openstack vms
# the double condition is just for safety - they usually should never both match
if vc.is_openstack_vm(vm) and not vc.is_shadow_vm(vm) and not vc.is_snapshot_shadow_vm(vm):
# iterate over all devices
for device in vm['config.hardware.device']:
# TODO maybe use the "continue" method to skip non matching?
# and filter out the virtual disks
if isinstance(device, vc.vim.vm.device.VirtualDisk):
# we are only interested in vvols here
if device.backing.fileName.lower().startswith('[vvol_'):
# add backingObjectId to our list of attached volumes
attached_volumes.append(device.backing.backingObjectId)
return vms, attached_volumes
# we move the lun/volume between the netapps by telling the vcenter to move the attached
# storage of the corresponding shadow vm to another datastore (i.e. another netapp)
def move_shadow_vm(vc, volume_uuid, target_ds, dry_run):
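    """Relocate the shadow vm of the given volume uuid to the target datastore.

    Returns True on success and False if the relocate task fails. With dry_run set
    no relocation is triggered.
    """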
# vm = vc.get_object_by_name(vim.VirtualMachine, volume_uuid)
vm = vc.find_server(volume_uuid)
ds = vc.get_object_by_name(vim.Datastore, target_ds)
spec = vim.VirtualMachineRelocateSpec()
spec.datastore = ds
if not dry_run:
task = vm.RelocateVM_Task(spec)
try:
status = WaitForTask(task,si=vc.api)
except Exception as e:
logging.error("- ERROR - failed to move volume %s to data store %s with error message: %s",
str(volume_uuid), str(target_ds), str(e))
return False
else:
log.info("- INFO - move of volume %s to data store %s successful with status %s",
str(volume_uuid), str(target_ds), str(status))
return True
# endless loop of generating move suggestions and wait for the next run
def check_loop(args, nanny_metrics_data):
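    """Run the flexvol and aggregate balancing checks in an endless loop and sleep args.interval minutes between runs."""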
while True:
log.info("INFO: starting new loop run")
if args.dry_run:
log.info("- INFO - dry-run mode: not doing anything harmful")
        # first balance out the flexvols
move_suggestions_flexvol(args, nanny_metrics_data)
# then balance out aggregates
move_suggestions_aggr(args, nanny_metrics_data)
# set some fixed threshold value metrics based on the cmdline args
nanny_metrics_data.set_data('netapp_balancing_nanny_flexvol_usage_threshold', args.flexvol_size_limit,['dummy'])
nanny_metrics_data.set_data('netapp_balancing_nanny_aggregate_usage_threshold', args.max_threshold,['dummy'])
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions_max', args.max_move_vms,['dummy'])
# sync metrics to prometheus exporter
nanny_metrics_data.sync_data()
# wait the interval time
log.info("INFO: waiting %s minutes before starting the next loop run", str(args.interval))
time.sleep(60 * int(args.interval))
# print out suggestions which luns should be moved
# for now this is: suggest to move the largest attached volumes from the fullest netapp aggregate to the emptiest
def move_suggestions_flexvol(args, nanny_metrics_data):
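    """Balance the largest flexvol: automatically move detached volumes off it and
    print move suggestions for attached volumes, updating the nanny metrics on the way.
    """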
# TODO this might go maybe as we will not need the metrics in some return cases
# a counter for move suggestions
gauge_value_move_suggestions_detached = 0
gauge_value_move_suggestions_attached = 0
# send the empty metric now already in case we are returning before setting a new value
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions', gauge_value_move_suggestions_detached, ['flexvol', 'detached'])
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions', gauge_value_move_suggestions_attached, ['flexvol', 'attached'])
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 0,['flexvol_not_in_range'])
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 0,['flexvol_not_enough_auto'])
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 0,['flexvol_not_enough_manual'])
# used for log output
if args.dry_run:
action_string = "dry-run:"
else:
action_string = "action:"
log.info("- INFO - === flexvol balancing ===")
# create a connection to openstack
log.info("- INFO - connecting to openstack in region %s", args.region)
oh = OpenstackHelper(args.region, os.getenv('OS_USER_DOMAIN_NAME'), os.getenv('OS_PROJECT_DOMAIN_NAME'),
os.getenv('OS_PROJECT_NAME'), os.getenv('OS_USERNAME'), os.getenv('OS_PASSWORD'))
vc = VCenterHelper(host=args.vcenter_host, user=args.vcenter_user, password=args.vcenter_password)
netapp_hosts = get_netapp_hosts(vc, args.region)
# there are some bbs with only vmfs and no vvols
if not netapp_hosts:
log.info("- INFO - netapp flexvol balancing - no vvol datastores found for this vc - giving up")
return
# connect to netapp and get the netapp version
flexvol_usage = []
aggr_usage = []
netapps = {}
for netapp_host in netapp_hosts:
log.info("- INFO - connecting to netapp %s", netapp_host)
netapps[netapp_host] = NetAppHelper(host=netapp_host, user=args.netapp_user, password=args.netapp_password)
nh = netapps[netapp_host]
vi = nh.get_single("system-get-version")
log.info("- INFO - {} is on version {}".format(netapp_host, vi['version']))
# TODO this can go maybe by changing the function to use an empty list by default
# make flexvol_denylist an empty list if not set via cmdline
if args.flexvol_denylist:
flexvol_denylist = args.flexvol_denylist
else:
flexvol_denylist = []
# TODO this can go maybe by changing the function to use an empty list by default
# make aggr_denylist an empty list if not set via cmdline
if args.aggr_denylist:
aggr_denylist = args.aggr_denylist
else:
aggr_denylist = []
# collect flexvol usage across all netapp hosts
flexvol_usage += get_flexvol_usage_list(nh, netapp_host, flexvol_denylist, nanny_metrics_data)
# collect aggregate usage across all netapp hosts
aggr_usage += get_aggr_usage_list(nh, netapp_host, aggr_denylist, nanny_metrics_data)
# sort the flexvols top down to start with the biggest ones
flexvol_usage.sort(key=lambda x: x[3], reverse=True)
# to keep things simple we only work on the largest flexvol on each nanny run
if flexvol_usage:
flexvol_most_used = flexvol_usage[0]
else:
log.warning("- WARNING - either flexvols are not named properly or maybe no flexvols at all")
return
# we only have to balance flexvols in case we are over the limit with the largest one
if flexvol_most_used[3] <= args.flexvol_size_limit * 1024**3:
log.info("- INFO - all flexvols are fine - nothing to be done - largest is {} at size {:.0f} gb".format(flexvol_most_used[1], flexvol_most_used[3] / 1024**3))
return
# TODO check if containing aggr is not least used aggr
nh_flexvol_most_used = netapps[flexvol_most_used[0]]
luns_on_flexvol_most_used = nh_flexvol_most_used.get_luns_for_flexvol(flexvol_most_used[1])
# sort the luns by used-size
luns_on_flexvol_most_used.sort(key=lambda x: int(x['size-used']), reverse=True)
# filter luns for desired size range
luns_on_flexvol_most_used = [lun for lun in luns_on_flexvol_most_used
if args.flexvol_lun_min_size * 1024**3 <= int(lun['size-used']) < args.flexvol_lun_max_size * 1024**3]
# we can only balance if there are any luns to balance on the flexvol within the given min and max regions
if len(luns_on_flexvol_most_used) == 0:
log.info("- PLEASE IGNORE - there are no movable volumes within the current min/max limits on flexvol {} - maybe limits should be adjusted?".format(flexvol_most_used[1]))
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 1,['flexvol_not_in_range'])
return
# sort the aggregates top down to start with the highest usage percentage
aggr_usage.sort(key=lambda x: x[2], reverse=True)
# to keep things simple we only work on the largest aggr on each nanny run
# find aggr with highest usage, aggr with lowest usage (that is not on highest usage host)
# TODO see todo above - we need to check that the least used aggr is not the containing aggr of the most used flexvol
# in that case we want to use the second least used aggregate
aggr_most_used = aggr_usage[0]
aggr_least_used = None
for aggr in reversed(aggr_usage):
# make sure we are not on the same netapp and not on the netapp of the source flexvol
if aggr[0] != aggr_most_used[0] and aggr[0] != flexvol_most_used[0]:
aggr_least_used = aggr
break
# TODO this should be double checked and combined with the todo from above
else:
log.warning("- WARNING - no aggregate found that is not on the same netapp")
return 1
log.info("- INFO - least utilized aggregate is {1} on {0} with a usage of {2}%".format(*aggr_least_used))
log.info("- INFO - using it as target for balancing volume movements")
log.info("- INFO - calculating volume move suggestions for automatic flexvol balancing ... this may take a moment")
# TODO we do not use the comments in the end - we map via vcenter backing
# <uuid>_brick.vmdk - DATA
# <uuid>.vmdk - DATA
# <uuid>_1.vmdk - DATA
comment_re = re.compile(r"^(?P<vmdk>.*\.vmdk) - DATA$")
# /vol/vv0_BB123_01/naa.<netapp uuid>.vmdk
naa_path_re = re.compile(r"^/vol/.*/(?P<naa>naa\..*)\.vmdk$")
vmdks_flexvol = []
for lun in luns_on_flexvol_most_used:
# TODO we do not use the comments in the end - we map via vcenter backing
# looks like not all luns have a comment
if lun.get('comment'):
# does this map to an instance?
comment_match = comment_re.match(lun['comment'])
if not comment_match:
continue
else:
continue
# get the netapp id (naa.xyz...) name
path_match = naa_path_re.match(lun['path'])
if not path_match:
continue
# shadow vm uuid = volume uuid, netapp id, size-used
# TODO maybe also add the aggr this is on to keep track of that too ... requires adding that above too
vmdk_flexvol = (comment_match.group('vmdk'), path_match.group('naa'), int(lun['size-used']), lun['volume'])
vmdks_flexvol.append(vmdk_flexvol)
log.debug("==> flexvol vmdk file: {} - netapp id: {} - size {:.0f} gb"
.format(vmdk_flexvol[0], vmdk_flexvol[1], vmdk_flexvol[2] / 1024**3))
# limit to the largest --max-move-vms
# off for debug
#vmdks_flexvol = vmdks_flexvol[:args.max_move_vms]
valid_netapp_ids_flexvol = [vmdk_flexvol[1] for vmdk_flexvol in vmdks_flexvol]
attached_volumes = []
vms, attached_volumes = get_vcenter_info(vc)
# index = netapp-id, value = ( vm-name, attach-state )
vmvmdks_flexvol = dict()
for vm in vms:
# the double condition is just for safety - they usually should never both match
# TODO i think we already check for them to be a shadow vm in get_vcenter_info -> double check
if vc.is_shadow_vm(vm) and not vc.is_openstack_vm(vm):
# find disk backing
for device in vm['config.hardware.device']:
if isinstance(device, vc.vim.vm.device.VirtualDisk):
if device.backing.backingObjectId:
# check if this disk references one of our netapp luns (via naa path thingy)
if device.backing.backingObjectId in valid_netapp_ids_flexvol:
if device.backing.backingObjectId in attached_volumes:
vmvmdks_flexvol[device.backing.backingObjectId] = (vm['name'], 'attached')
else:
vmvmdks_flexvol[device.backing.backingObjectId] = (vm['name'], 'detached')
break
elif vc.is_snapshot_shadow_vm(vm) and not vc.is_openstack_vm(vm):
# find disk backing
for device in vm['config.hardware.device']:
if isinstance(device, vc.vim.vm.device.VirtualDisk):
if device.backing.backingObjectId:
# check if this disk references one of our netapp luns (via naa path thingy)
if device.backing.backingObjectId in valid_netapp_ids_flexvol:
if device.backing.backingObjectId in attached_volumes:
# not sure if this case is actually possible
vmvmdks_flexvol[device.backing.backingObjectId] = (vm['name'], 'snapshot attached')
else:
vmvmdks_flexvol[device.backing.backingObjectId] = (vm['name'], 'snapshot detached')
break
log.debug("==> snapshot shadow vm - netapp id: {} - volume uuid: {}".format(device.backing.backingObjectId, vm['name']))
# calculate to which percentage we want to bring down the most used aggregate
aggr_most_used_target_percentage = args.max_threshold - args.max_threshold_hysteresis
# like the last one but as absolute size instead of percentage and for the least used aggregate
aggr_least_used_target_size = int(aggr_least_used[3]) * (aggr_most_used_target_percentage / 100)
    # this is for tracking how much the least used aggregate is used after each lun movement
aggr_least_used_current_size = int(aggr_least_used[3]) * (int(aggr_least_used[2]) / 100)
# this is for tracking the size of the flexvol we are moving stuff away from after each lun movement
flexvol_most_used_current_size = int(flexvol_most_used[3])
log.info("- PLEASE IGNORE - below might be some debugging output for the planned automatic move of detached volumes")
for vmdk in vmdks_flexvol:
# if aggr_least_used_current_size >= aggr_least_used_target_size:
# log.info("- INFO - no automatic lun movements as we would fill up {} too much".format(aggr_least_used[1]))
# break
if vmvmdks_flexvol.get(vmdk[1]):
if vmvmdks_flexvol.get(vmdk[1])[1] == 'detached':
if oh.api.block_store.get_volume(vmvmdks_flexvol.get(vmdk[1])[0]).attachments:
log.info("- PLEASE IGNORE - the volume {} seems to be attached to instance {} meanwhile - doing nothing # size {:.0f} gb".format(vmvmdks_flexvol.get(vmdk[1])[0], oh.api.block_store.get_volume(vmvmdks_flexvol.get(vmdk[1])[0]).attachments[0]['server_id'], vmdk[2] / 1024**3))
else:
# this should be DEBUG later
log.debug("==> before - lun size: {:.0f} gb - flexvol usage: {:.0f} gb - target aggr usage: {:.0f} gb".format(vmdk[2] / 1024**3, flexvol_most_used_current_size / 1024**3, aggr_least_used_current_size / 1024**3))
log.info("- PLEASE IGNORE - plan: move volume {} from flexvol {} to {} # size {:.0f} gb".format(vmvmdks_flexvol.get(vmdk[1])[0], flexvol_most_used[1], bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1]), vmdk[2] / 1024**3))
# debug
log.info("- PLEASE IGNORE - {} locking volume {} before moving it".format(action_string, vmvmdks_flexvol.get(vmdk[1])[0]))
if not args.dry_run:
oh.lock_volume(vmvmdks_flexvol.get(vmdk[1])[0])
log.info("- PLEASE IGNORE - {} moving shadow vm of volume {} to {}".format(action_string, vmvmdks_flexvol.get(vmdk[1])[0], bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1])))
if not args.dry_run:
                        move_shadow_vm(vc, vmvmdks_flexvol.get(vmdk[1])[0], str(bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1])), args.dry_run)
log.info("- PLEASE IGNORE - {} unlocking volume {} after moving it".format(action_string, vmvmdks_flexvol.get(vmdk[1])[0]))
if not args.dry_run:
oh.unlock_volume(vmvmdks_flexvol.get(vmdk[1])[0])
# trying to keep track of the actual usage of the participating flexvols and aggregates
flexvol_most_used_current_size -= int(vmdk[2])
#aggr_least_used_current_size += vmdk[2]
aggr_least_used_current_size += int(vmdk[2])
# this should be DEBUG later
log.debug("==> after - lun size: {:.0f} gb - flexvol usage: {:.0f} gb - target aggr usage: {:.0f} gb".format(vmdk[2] / 1024**3, flexvol_most_used_current_size / 1024**3, aggr_least_used_current_size / 1024**3))
gauge_value_move_suggestions_detached += 1
if gauge_value_move_suggestions_detached >= args.max_move_vms:
log.info("- PLEASE IGNORE - max-move-vms of {} reached - stopping here".format(args.max_move_vms))
break
if aggr_least_used_current_size >= aggr_least_used_target_size:
log.info("- PLEASE IGNORE - further movements would fill up {} too much - stopping here".format(bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1])))
break
if flexvol_most_used_current_size < (args.flexvol_size_limit * 1024**3):
log.info("- PLEASE IGNORE - the size of the flexvol {} is below the limit of {:.0f} gb now - stopping here".format(flexvol_most_used[1], args.flexvol_size_limit))
break
if flexvol_most_used_current_size > (args.flexvol_size_limit * 1024**3):
log.info("- PLEASE IGNORE - there are not enough (or no) detached volumes within the current min/max limits to bring the flexvol {} below the limit of {:.0f} gb - stopping here".format(flexvol_most_used[1], args.flexvol_size_limit))
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 1,['flexvol_not_enough_auto'])
# when the balancing goal is reached an "optional" string is appended to the recommendations
optional_string = ''
overload_warning_printed = 0
log.info("- INFO - volume move target size for flexvol {} is to get below {:.0f} gb".format(flexvol_most_used[1], args.flexvol_size_limit))
log.info("- INFO - volume move suggestions for manual flexvol balancing (from largest in range to smallest):")
# go through all the shadow vms found on the netapp
for vmdk in vmdks_flexvol:
# check if they actually exist in the vcenter
if vmvmdks_flexvol.get(vmdk[1]):
# for attached volumes a move suggestion is printed out
if vmvmdks_flexvol.get(vmdk[1])[1] == 'attached':
if (aggr_least_used_current_size >= aggr_least_used_target_size) and (overload_warning_printed == 0):
# stop if the aggregate we move to gets too full
log.info("- IMPORTANT - please stop with movements here as we would fill up {} too much".format(aggr_least_used[1]))
optional_string = ' (no move)'
overload_warning_printed = 1
if (flexvol_most_used_current_size < (args.flexvol_size_limit * 1024**3)) and (optional_string == ''):
optional_string = ' (optional)'
# this should be DEBUG later
log.debug("==> before - lun size: {:.0f} gb - flexvol usage: {:.0f} gb - target aggr usage: {:.0f} gb".format(vmdk[2] / 1024**3, flexvol_most_used_current_size / 1024**3, aggr_least_used_current_size / 1024**3))
# print out info for the manual volume move
log.info("- INFO - netapp flexvol balancing - ./svmotion_cinder_v2.py {} {} # from flexvol {} on {} - size {:.0f} gb{}".format(vmvmdks_flexvol.get(vmdk[1])[0], bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1]), flexvol_most_used[1], flexvol_most_used[2], vmdk[2] / 1024**3, optional_string))
# trying to keep track of the actual usage of the participating flexvols and aggregates
flexvol_most_used_current_size -= int(vmdk[2])
aggr_least_used_current_size += int(vmdk[2])
# this should be DEBUG later
log.debug("==> after - lun size: {:.0f} gb - flexvol usage: {:.0f} gb - target aggr usage: {:.0f} gb".format(vmdk[2] / 1024**3, flexvol_most_used_current_size / 1024**3, aggr_least_used_current_size / 1024**3))
gauge_value_move_suggestions_attached += 1
if gauge_value_move_suggestions_attached >= args.max_move_vms:
log.info("- IMPORTANT - please stop with movements - max-move-vms of {} reached".format(args.max_move_vms))
optional_string = ' (no move)'
if flexvol_most_used_current_size > (args.flexvol_size_limit * 1024**3):
log.info("- INFO - there are not enough (or no) attached volumes within the current min/max limits to bring the flexvol {} below the limit of {:.0f} gb - maybe limits should be adjusted?".format(flexvol_most_used[1], args.flexvol_size_limit))
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 1,['flexvol_not_enough_manual'])
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions', gauge_value_move_suggestions_detached,['flexvol', 'detached'])
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions', gauge_value_move_suggestions_attached,['flexvol', 'attached'])
# print out suggestions which volumes should be moved
# for now this is: suggest to move the largest attached volumes from the fullest netapp aggregate to the emptiest
def move_suggestions_aggr(args, nanny_metrics_data):
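    """Balance the aggregates: automatically move detached volumes from the most used
    to the least used aggregate and print move suggestions for attached volumes.
    """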
# a counter for move suggestions
gauge_value_move_suggestions_detached = 0
gauge_value_move_suggestions_attached = 0
# send the empty metric now already in case we are returning before setting a new value
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions', gauge_value_move_suggestions_detached, ['aggregate', 'detached'])
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions', gauge_value_move_suggestions_attached, ['aggregate', 'attached'])
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 0,['aggregate_not_in_range'])
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 0,['aggregate_not_enough_auto'])
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 0,['aggregate_not_enough_manual'])
# used for log output
if args.dry_run:
action_string = "dry-run:"
else:
action_string = "action:"
log.info("- INFO - === aggregate balancing ===")
# create a connection to openstack
log.info("- INFO - connecting to openstack in region %s", args.region)
oh = OpenstackHelper(args.region, os.getenv('OS_USER_DOMAIN_NAME'), os.getenv('OS_PROJECT_DOMAIN_NAME'),
os.getenv('OS_PROJECT_NAME'), os.getenv('OS_USERNAME'), os.getenv('OS_PASSWORD'))
vc = VCenterHelper(host=args.vcenter_host, user=args.vcenter_user, password=args.vcenter_password)
netapp_hosts = get_netapp_hosts(vc, args.region)
# there are some bbs with only vmfs and no vvols
if not netapp_hosts:
log.info("- INFO - netapp aggregate balancing - no vvol datastores found for this vc - giving up")
return
# connect to netapp and get the netapp version
aggr_usage = []
netapps = {}
for netapp_host in netapp_hosts:
log.info("- INFO - connecting to netapp %s", netapp_host)
        netapps[netapp_host] = NetAppHelper(host=netapp_host, user=args.netapp_user, password=args.netapp_password)
nh = netapps[netapp_host]
vi = nh.get_single("system-get-version")
log.info("- INFO - {} is on version {}".format(netapp_host, vi['version']))
# make aggr_denylist an empty list if not set via cmdline
if args.aggr_denylist:
aggr_denylist = args.aggr_denylist
else:
aggr_denylist = []
# collect aggregate usage across all netapp hosts
aggr_usage += get_aggr_usage_list(nh, netapp_host, aggr_denylist, nanny_metrics_data)
# sort the aggregates top down to start with the highest usage percentage
aggr_usage.sort(key=lambda x: x[2], reverse=True)
# find aggr with highest usage, aggr with lowest usage (that is not on highest usage host)
aggr_most_used = aggr_usage[0]
aggr_least_used = None
for aggr in reversed(aggr_usage):
if aggr[0] != aggr_most_used[0]:
aggr_least_used = aggr
break
else:
log.warning("- WARNING - no aggregate found that is not on the same netapp")
return 1
log.info("- INFO - most utilized aggregate is {1} on {0} with a usage of {2}%".format(*aggr_most_used))
log.info("- INFO - least utilized aggregate is {1} on {0} with a usage of {2}%".format(*aggr_least_used))
vvol_aggr_total_size = 0
vvol_aggr_used_size = 0
for aggr in aggr_usage:
vvol_aggr_total_size += aggr[3]
vvol_aggr_used_size += aggr[2] / 100 * aggr[3]
vvol_aggr_used_percentage_avg = 100 * vvol_aggr_used_size / vvol_aggr_total_size
log.info("- INFO - average usage of all vvol aggregates is {:.0f}% at a total used size of {:.0f} gb".format(vvol_aggr_used_percentage_avg, vvol_aggr_used_size / 1024**3))
nanny_metrics_data.set_data('netapp_balancing_nanny_aggregate_usage_avg',vvol_aggr_used_percentage_avg ,['dummy'])
if int(aggr_most_used[2]) < args.max_threshold:
log.info("- INFO - netapp aggregate balancing - usage of most used source aggregate {} of {}% is below threshold of {}% - doing nothing".format(aggr_most_used[1], aggr_most_used[2], args.max_threshold))
return
if int(aggr_least_used[2]) > args.min_threshold:
log.info("- INFO - netapp aggregate balancing - usage of least used target aggregate {} of {}% is above threshold of {}% - doing nothing".format(aggr_least_used[1], aggr_least_used[2], args.min_threshold))
return
log.info("- INFO - calculating volume move suggestions for automatic aggregate balancing ... this may take a moment")
log.info("- INFO - getting luns from the netapp")
nh_most_used = netapps[aggr_most_used[0]]
luns = nh_most_used.get_luns_for_aggr(aggr_most_used[1], "vv")
# filter luns for desired size range
luns = [lun for lun in luns
if args.aggr_lun_min_size * 1024**3 <= int(lun['size-used']) <= args.aggr_lun_max_size * 1024**3]
luns.sort(key=lambda lun: int(lun['size-used']), reverse=True)
# we can only balance if there are any luns to balance on the aggregate within the given min and max regions
if len(luns) == 0:
log.info("- IMPORTANT - there are no movable volumes within the current min/max limits on aggregate {} - maybe limits should be adjusted?".format(aggr_most_used[1]))
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 1,['aggregate_not_in_range'])
return
# NOTE we do not use the comments in the end - we map via vcenter backing
# <uuid>_brick.vmdk - DATA
# <uuid>.vmdk - DATA
# <uuid>_1.vmdk - DATA
comment_re = re.compile(r"^(?P<vmdk>.*\.vmdk) - DATA$")
# /vol/vv0_BB123_01/naa.<netapp uuid>.vmdk
naa_path_re = re.compile(r"^/vol/.*/(?P<naa>naa\..*)\.vmdk$")
vmdks = []
for lun in luns:
# TODO i think we no longer use the comments and map via vcenter
# looks like not all luns have a comment
if lun.get('comment'):
# does this map to an instance?
comment_match = comment_re.match(lun['comment'])
if not comment_match:
continue
else:
continue
# get the netapp id (naa.xyz...) name
path_match = naa_path_re.match(lun['path'])
if not path_match:
continue
vmdk = (comment_match.group('vmdk'), path_match.group('naa'), int(lun['size-used']))
vmdks.append(vmdk)
log.debug("==> vmdk file: {} - netapp id: {} - size {:.0f} gb"
.format(vmdk[0], vmdk[1], vmdk[2] / 1024**3))
# limit to the largest --max-move-vms
#vmdks = vmdks[:args.max_move_vms]
valid_netapp_ids = [vmdk[1] for vmdk in vmdks]
attached_volumes = []
vms, attached_volumes = get_vcenter_info(vc)
# index = netapp-id, value = ( vm-name, attach-state )
vmvmdks = dict()
vmvmdks_flexvol = dict()
for vm in vms:
# the double condition is just for safety - they usually should never both match
if vc.is_shadow_vm(vm) and not vc.is_openstack_vm(vm):
# find disk backing
for device in vm['config.hardware.device']:
if isinstance(device, vc.vim.vm.device.VirtualDisk):
if device.backing.backingObjectId:
# check if this disk references one of our netapp luns (via naa path thingy)
if device.backing.backingObjectId in valid_netapp_ids:
if device.backing.backingObjectId in attached_volumes:
vmvmdks[device.backing.backingObjectId] = (vm['name'], 'attached')
else:
vmvmdks[device.backing.backingObjectId] = (vm['name'], 'detached')
break
elif vc.is_snapshot_shadow_vm(vm) and not vc.is_openstack_vm(vm):
# find disk backing
for device in vm['config.hardware.device']:
if isinstance(device, vc.vim.vm.device.VirtualDisk):
if device.backing.backingObjectId:
# check if this disk references one of our netapp luns (via naa path thingy)
if device.backing.backingObjectId in valid_netapp_ids:
if device.backing.backingObjectId in attached_volumes:
# not sure if this case is actually possible
vmvmdks[device.backing.backingObjectId] = (vm['name'], 'snapshot attached')
else:
vmvmdks[device.backing.backingObjectId] = (vm['name'], 'snapshot detached')
break
log.debug("==> snapshot shadow vm - netapp id: {} - volume uuid: {}".format(device.backing.backingObjectId, vm['name']))
# calculate to which percentage we want to bring down the most used aggregate
aggr_most_used_target_percentage = args.max_threshold - args.max_threshold_hysteresis
# like the last one but as absolute size instead of percentage
aggr_most_used_target_size = int(aggr_most_used[3]) * (aggr_most_used_target_percentage / 100)
# like the last one but for the least used aggregate
aggr_least_used_target_size = int(aggr_least_used[3]) * (aggr_most_used_target_percentage / 100)
# this is for tracking how much the most used aggregate is used after each lun movement
aggr_most_used_current_size = int(aggr_most_used[3]) * (int(aggr_most_used[2]) / 100)
    # this is for tracking how much the least used aggregate is used after each lun movement
aggr_least_used_current_size = int(aggr_least_used[3]) * (int(aggr_least_used[2]) / 100)
log.info("- PLEASE IGNORE - below might be some debugging output for the planned automatic move of detached volumes")
for vmdk in vmdks:
# if aggr_least_used_current_size >= aggr_least_used_target_size:
# log.info("- INFO - no automatic lun movements as we would fill up {} too much".format(aggr_least_used[1]))
# break
if vmvmdks.get(vmdk[1]):
if vmvmdks.get(vmdk[1])[1] == 'detached':
try:
if oh.api.block_store.get_volume(vmvmdks.get(vmdk[1])[0]).attachments:
log.info("- PLEASE IGNORE - the volume {} seems to be attached to instance {} meanwhile - doing nothing # size {:.0f} gb".format(vmvmdks.get(vmdk[1])[0], oh.api.block_store.get_volume(vmvmdks.get(vmdk[1])[0]).attachments[0]['server_id'], vmdk[2] / 1024**3))
else:
log.info("- PLEASE IGNORE - plan: move volume {} from {} to {} # size {:.0f} gb".format(vmvmdks.get(vmdk[1])[0], bb_name_from_aggregate_name(aggr_most_used[0], aggr_most_used[1]), bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1]), vmdk[2] / 1024**3))
# debug
log.info("- PLEASE IGNORE - {} locking volume {} before moving it".format(action_string, vmvmdks.get(vmdk[1])[0]))
if not args.dry_run:
oh.lock_volume(vmvmdks.get(vmdk[1])[0])
log.info("- PLEASE IGNORE - {} moving shadow vm of volume {} to {}".format(action_string, vmvmdks.get(vmdk[1])[0], bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1])))
if not args.dry_run:
                            move_shadow_vm(vc, vmvmdks.get(vmdk[1])[0], str(bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1])), args.dry_run)
log.info("- PLEASE IGNORE - {} unlocking volume {} after moving it".format(action_string, vmvmdks.get(vmdk[1])[0]))
if not args.dry_run:
oh.unlock_volume(vmvmdks.get(vmdk[1])[0])
# trying to keep track of the actual usage of the participating aggregates
aggr_most_used_current_size -= vmdk[2]
aggr_least_used_current_size += vmdk[2]
gauge_value_move_suggestions_detached += 1
if gauge_value_move_suggestions_detached == args.max_move_vms:
log.info("- PLEASE IGNORE - max-move-vms of {} reached - stopping here".format(args.max_move_vms))
break
if aggr_least_used_current_size >= aggr_least_used_target_size:
log.info("- PLEASE IGNORE - further movements would fill up {} too much - stopping here".format(bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1])))
break
except Exception as e:
logging.error("- ERROR - exception while trying to get_volume %s from openstack - this should be investigated - error: %s",
str(vmvmdks.get(vmdk[1])[0]), str(e))
if aggr_most_used_current_size > aggr_most_used_target_size:
log.info("- PLEASE IGNORE - there are not enough (or no) detached volumes within the current min/max limits to bring the aggregate {} below the limit of {:.0f} gb - stopping here".format(aggr_most_used[1], aggr_most_used_target_size))
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 1,['aggregate_not_enough_auto'])
# when the balancing goal is reached an "optional" string is appended to the recommendations
optional_string = ''
overload_warning_printed = 0
log.info("- INFO - volume move target usage for aggregate {} is {:.0f}% corresponding to a size of {:.0f} gb".format(aggr_most_used[1], aggr_most_used_target_percentage, aggr_most_used_target_size / 1024**3))
log.info("- INFO - volume move suggestions for manual aggregate balancing (from largest in range to smallest):")
# go through all the shadow vms found on the netapp
for vmdk in vmdks:
# check if they actually exist in the vcenter
if vmvmdks.get(vmdk[1]):
# for attached volumes a move suggestion is printed out
if vmvmdks.get(vmdk[1])[1] == 'attached':
if (aggr_least_used_current_size >= aggr_least_used_target_size) and (overload_warning_printed == 0):
# stop if the aggregate we move to gets too full
log.info("- IMPORTANT - please stop with movements here as we would fill up {} too much".format(aggr_least_used[1]))
optional_string = ' (no move)'
overload_warning_printed = 1
if (aggr_most_used_current_size < aggr_most_used_target_size) and (optional_string == ''):
optional_string = ' (optional)'
# this should be DEBUG later
log.debug("==> before - lun size: {:.0f} gb - source aggr usage: {:.0f} gb - target aggr usage: {:.0f} gb".format(vmdk[2] / 1024**3, aggr_most_used_current_size / 1024**3, aggr_least_used_current_size / 1024**3))
# trying to keep track of the actual usage of the participating aggregates
aggr_most_used_current_size -= vmdk[2]
aggr_least_used_current_size += vmdk[2]
gauge_value_move_suggestions_attached += 1
# print out info for the manual volume move
log.info("- INFO - netapp aggregate balancing - ./svmotion_cinder_v2.py {} {} # from {} - size {:.0f} gb{}".format(vmvmdks.get(vmdk[1])[0], bb_name_from_aggregate_name(aggr_least_used[0], aggr_least_used[1]), aggr_most_used[1], vmdk[2] / 1024**3, optional_string))
# this should be DEBUG later
log.debug("==> after - lun size: {:.0f} gb - source aggr usage: {:.0f} gb - target aggr usage: {:.0f} gb".format(vmdk[2] / 1024**3, aggr_most_used_current_size / 1024**3, aggr_least_used_current_size / 1024**3))
if gauge_value_move_suggestions_attached >= args.max_move_vms:
log.info("- IMPORTANT - please stop with movements - max-move-vms of {} reached".format(args.max_move_vms))
optional_string = ' (no move)'
if aggr_most_used_current_size > aggr_most_used_target_size:
log.info("- INFO - there are not enough (or no) attached volumes within the current min/max limits to bring the aggregate {} below the limit of {:.0f} gb - maybe limits should be adjusted?".format(aggr_most_used[1], aggr_most_used_target_size / 1024**3))
nanny_metrics_data.set_data('netapp_balancing_nanny_error_count', 1,['aggregate_not_enough_manual'])
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions', gauge_value_move_suggestions_detached,['aggregate', 'detached'])
nanny_metrics_data.set_data('netapp_balancing_nanny_move_suggestions', gauge_value_move_suggestions_attached,['aggregate', 'attached'])
def main():
args = parse_commandline()
nanny_metrics_data = prometheus_exporter_setup(args)
check_loop(args, nanny_metrics_data)
if __name__ == '__main__':
    main()
sos_trades_core/sos_processes/test/test_disc1_disc3_simple_multi_scenario/process.py | os-climate/sostrades-core | 8 | 6631072 |
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# mode: python; py-indent-offset: 4; tab-width: 8; coding:utf-8
#-- Generate test 1 process
from sos_trades_core.sos_processes.base_process_builder import BaseProcessBuilder
class ProcessBuilder(BaseProcessBuilder):
# ontology information
_ontology_data = {
'label': 'Core Test Disc1 Disc3 Simple Multi Scenario Process',
'description': '',
'category': '',
'version': '',
}
def get_builders(self):
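        """Create the scatter maps, shared namespaces and discipline builders and return the multi-scenario builder."""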
# scatter build map
ac_map = {'input_name': 'name_list',
'input_type': 'string_list',
'input_ns': 'ns_scatter_scenario',
'output_name': 'ac_name',
'scatter_ns': 'ns_ac',
'gather_ns': 'ns_scenario',
'ns_to_update': ['ns_data_ac']}
self.ee.smaps_manager.add_build_map('name_list', ac_map)
# scenario build map
scenario_map = {'input_name': 'scenario_list',
'input_type': 'string_list',
'input_ns': 'ns_scatter_scenario',
'output_name': 'scenario_name',
'scatter_ns': 'ns_scenario',
'gather_ns': 'ns_scatter_scenario',
'ns_to_update': ['ns_disc3', 'ns_barrierr', 'ns_out_disc3']}
self.ee.smaps_manager.add_build_map(
'scenario_list', scenario_map)
# shared namespace
self.ee.ns_manager.add_ns('ns_barrierr', self.ee.study_name)
self.ee.ns_manager.add_ns(
'ns_scatter_scenario', f'{self.ee.study_name}.multi_scenarios')
self.ee.ns_manager.add_ns(
'ns_disc3', f'{self.ee.study_name}.multi_scenarios.Disc3')
self.ee.ns_manager.add_ns(
'ns_out_disc3', f'{self.ee.study_name}.multi_scenarios')
self.ee.ns_manager.add_ns(
'ns_data_ac', self.ee.study_name)
# instantiate factory # get instantiator from Discipline class
builder_list = self.ee.factory.get_builder_from_process(repo='sos_trades_core.sos_processes.test',
mod_id='test_disc1_scenario')
scatter_list = self.ee.factory.create_multi_scatter_builder_from_list(
'name_list', builder_list=builder_list, autogather=True)
mod_list = 'sos_trades_core.sos_wrapping.test_discs.disc3_scenario.Disc3'
disc3_builder = self.ee.factory.get_builder_from_module(
'Disc3', mod_list)
scatter_list.append(disc3_builder)
multi_scenarios = self.ee.factory.create_simple_multi_scenario_builder(
'multi_scenarios', 'scenario_list', scatter_list, autogather=True, gather_node='Post-processing')
        return multi_scenarios
sispo/sim/compositor.py | oknuutti/sispo | 0 | 6631073 | """
The compositor module combines the different output files of the simulation.
As the simulation module outputs different files for background and foreground
and because the intensity of the Blender-rendered images is not constant, the
compositor is required to fix the intensity issue and add the star background.
"""
from datetime import datetime
import json
from pathlib import Path
import threading
from astropy import constants as const
from astropy import units as u
import cv2
import numpy as np
from . import utils
#Astrometric calibrations
#https://www.cfa.harvard.edu/~dfabricant/huchra/ay145/mags.html
FLUX0_VBAND = 3640 * 1.51E7 * 0.16 * u.ph / (u.s * u.m ** 2) # Photons per m^2
SUN_MAG_VBAND = -26.74 * u.mag # 1 AU distance
SUN_FLUX_VBAND_1AU = np.power(10., -0.4 * SUN_MAG_VBAND.value) * FLUX0_VBAND
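# Interpretation of the constants above, based on the calibration page linked
# above: 3640 Jy is the V-band zero-point flux density, 1.51E7 converts Jy to
# photons / s / m^2 per unit (dlambda/lambda), and 0.16 is the fractional
# V-band width, giving FLUX0_VBAND as a photon flux over the V band.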
class ImageCompositorError(RuntimeError):
"""This is a generic error for the compositor."""
pass
class Frame():
"""Class to wrap all data of a single frame."""
metadata = None
stars = None
sssb_only = None
sssb_const_dist = None
light_ref = None
def __init__(self,
frame_id,
image_dir=None,
stars=None,
sssb_only=None,
sssb_const_dist=None,
light_ref=None):
self.id = frame_id
if None not in (stars, sssb_only, sssb_const_dist, light_ref):
self.stars = stars
self.sssb_only = sssb_only
self.sssb_const_dist = sssb_const_dist
self.light_ref = light_ref
elif image_dir is not None:
self.read_complete_frame(self.id, image_dir)
else:
raise ImageCompositorError("Unable to create frame.")
def calc_ref_intensity(self):
"""Calculates reference intensitiy using the light reference scene."""
(height, width, _) = self.light_ref.shape
h_slice = (height // 2 - 35, height // 2 + 35)
w_slice = (width // 2 - 35, width // 2 + 35)
area = self.light_ref[h_slice[0]:h_slice[1], w_slice[0]:w_slice[1], 0]
intensities = np.mean(area)
return intensities
def calc_stars_stats(self):
"""Calculate star scene parameters."""
star_c_max = []
star_c_sum = []
for i in range(3):
star_c_max.append(np.max(self.stars[:, :, i]))
star_c_sum.append(np.sum(self.stars[:, :, i]))
return (star_c_max, star_c_sum)
def calc_sssb_stats(self, const_dist=False):
"""Calculate SSSB max and sum corrected with alpha channel.
If const_dist is True, stats of const distant images are calculated.
"""
if const_dist:
sssb_max = np.max(
self.sssb_const_dist[:, :, 0] * self.sssb_const_dist[:, :, 3])
sssb_sum = np.sum(
self.sssb_const_dist[:, :, 0] * self.sssb_const_dist[:, :, 3])
else:
sssb_max = np.max(
self.sssb_only[:, :, 0] * self.sssb_only[:, :, 3])
sssb_sum = np.sum(
self.sssb_only[:, :, 0] * self.sssb_only[:, :, 3])
return (sssb_max, sssb_sum)
def read_complete_frame(self, frame_id, image_dir):
"""Reads all images for a given frame id.
This includes Stars, SssbOnly, SssbConstDist, and LightRef.
"""
frame_fmt_str = image_dir / ("{}_" + frame_id + ".exr")
frame_fmt_str = str(frame_fmt_str)
self.metadata = self.read_meta_file(frame_id, image_dir)
filename = frame_fmt_str.format("Stars")
self.stars = utils.read_openexr_image(filename)
filename = frame_fmt_str.format("SssbOnly")
self.sssb_only = utils.read_openexr_image(filename)
filename = frame_fmt_str.format("SssbConstDist")
self.sssb_const_dist = utils.read_openexr_image(filename)
filename = frame_fmt_str.format("LightRef")
self.light_ref = utils.read_openexr_image(filename)
def read_meta_file(self, frame_id, image_dir):
"""Reads metafile of a frame."""
filename = image_dir / ("Metadata_" + frame_id + ".json")
with open(str(filename), "r") as metafile:
metadata = json.load(metafile)
date = datetime.strptime(metadata["date"], "%Y-%m-%dT%H%M%S-%f")
metadata["date"] = date
metadata["distance"] = metadata["distance"] * u.m
metadata["sc_pos"] = np.asarray(metadata["sc_pos"]) * u.m
metadata["sssb_pos"] = np.asarray(metadata["sssb_pos"]) * u.m
return metadata
class ImageCompositor():
"""This class provides functions to combine the final simulation images."""
IMG_MIN_SIZE_INFOBOX = (1200, 1000)
INFOBOX_SIZE = {"default": (400, 100), "min": (200, 50)}
def __init__(self,
res_dir,
img_dir,
instrument,
sssb,
with_infobox,
with_clipping,
ext_logger):
self.logger = ext_logger
self.res_dir = res_dir
self.image_dir = img_dir
self.image_extension = ".exr"
self._threads = []
self.inst = instrument
self.dlmult = 2
self.sssb = sssb
self.with_infobox = with_infobox
self.with_clipping = with_clipping
self.logger.debug(f"Infobox: {with_infobox}. Clip: {with_clipping}.")
def get_frame_ids(self):
"""Extract list of frame ids from file names of SssbOnly scenes."""
scene_name = "SssbOnly"
image_names = scene_name + "*" + self.image_extension
filenames = self.image_dir.glob(image_names)
ids = []
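        # Rendered frames are expected to be named like "SssbOnly_<id>.exr".
        # Note that str.strip() removes a *set of characters*, not a
        # substring, so this parsing assumes frame ids neither start nor end
        # with characters from the scene name or extension (numeric ids are
        # safe).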
for filename in filenames:
filename = str(filename.name).strip(self.image_extension)
filename = filename.strip(scene_name)
ids.append(filename.strip("_"))
return ids
def calc_relative_intensity_curve(self):
"""Calculates the relative intensity curve for all sssb frames."""
only_stats = np.zeros(len(self.frames))
const_dist_stats = np.zeros(len(self.frames))
distances = np.zeros(len(self.frames))
for i, frame in enumerate(self.frames):
only_stats[i] = frame.calc_sssb_stats()[1]
const_dist_stats[i] = frame.calc_sssb_stats(True)[1]
distances[i] = frame.metadata["distance"]
rel_intensity = only_stats / const_dist_stats
ind_sorted = distances.argsort()
distances = distances[ind_sorted]
rel_intensity = rel_intensity[ind_sorted]
for last in range(len(distances)):
if rel_intensity[last] == 0:
break
#last -= 1
rel_intensity = rel_intensity[:last]
return rel_intensity
def compose(self, frames=None, max_threads=3):
"""
Composes different images into final image, uses multi-threading.
!!! CAUTION !!! Call only once at a time.
:type frames: String, Frame or List of Frame
:param frames: FrameID, Frame or list of frames for calibration and
composition.
        :type max_threads: int
        :param max_threads: Maximum number of threads used to compose frames
                            in parallel.
"""
if frames is None:
self.frame_ids = self.get_frame_ids()
frames = []
for frame_id in self.frame_ids:
new_frame = Frame(frame_id, self.image_dir)
frames.append(new_frame)
elif isinstance(frames, str):
frames = [Frame(frames, self.image_dir)]
elif isinstance(frames, Frame):
frames = [frames]
elif isinstance(frames, list) and isinstance(frames[0], Frame):
pass
else:
raise ImageCompositorError(
"Compositor.compose requires frame or list of frames as input")
for frame in frames:
for thr in self._threads:
if not thr.is_alive():
self._threads.pop(self._threads.index(thr))
if len(self._threads) < max_threads - 1:
# Allow up to 2 additional threads
thr = threading.Thread(target=self._compose, args=(frame,))
thr.start()
self._threads.append(thr)
else:
# If too many, also compose in main thread to not drop a frame
self._compose(frame)
for thr in self._threads:
thr.join()
def _compose(self, frame):
"""
Composes raw images and adjusts light intensities.
:type frame: Frame
        :param frame: Frame containing the necessary information for composition.
"""
# Calculate Gaussian standard deviation for approx diffraction pattern
sigma = (self.dlmult * 0.45 * self.inst.wavelength
* self.inst.focal_l / (self.inst.aperture_d
* self.inst.pix_l))
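        # Interpretation: the Airy diffraction pattern is approximated by a
        # Gaussian with sigma of roughly 0.45 * wavelength * f-number
        # (focal length over aperture diameter); dividing by the pixel pitch
        # expresses it in pixels, and dlmult widens it further.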
# SSSB photometry
sc_sun_dist = np.linalg.norm(frame.metadata["sc_pos"]) * u.m
ref_flux = SUN_FLUX_VBAND_1AU * ((const.au / sc_sun_dist) ** 2)
ref_flux *= self.inst.aperture_a * self.inst.pix_a
ref_flux /= ((self.inst.focal_l ** 2) * np.pi)
ref_flux = ref_flux.decompose()
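        # Interpretation: the solar V-band photon flux at 1 AU is scaled by
        # the inverse square of the spacecraft-Sun distance, the aperture
        # area and the pixel solid angle (pixel area over focal length
        # squared); the extra 1/pi is consistent with a Lambertian surface,
        # with the albedo applied further below.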
# Star photometry
starmap_flux = FLUX0_VBAND * frame.metadata["total_flux"]
starmap_flux *= self.inst.aperture_a
starmap_flux = starmap_flux.decompose()
# Calibrate starmap
(_, stars_sums) = frame.calc_stars_stats()
frame.stars[:, :, 0:3] *= starmap_flux.value / stars_sums[0]
# Create composition image array
composed_img = np.zeros(frame.stars.shape, dtype=np.float32)
# Calibrate SSSB, depending on visible size
dist_scale = np.power(1E6 * u.m / frame.metadata["distance"], 2.)
vis_dim = self.sssb["max_dim"] * dist_scale
# Kernel size calculated to equal skimage.filters.gaussian
# Reference:
# https://github.com/scipy/scipy/blob/4bfc152f6ee1ca48c73c06e27f7ef021d729f496/scipy/ndimage/filters.py#L214
        kernel = 2 * int(4 * sigma + 0.5) + 1  # odd size, matching the scipy formula and cv2's requirement
kernel = max(kernel, 5) # Don't use smaller than 5
ksize = (kernel, kernel)
if vis_dim < 0.1:
# Use point source sssb
# Generate point source reference image
sssb_ref = self.create_sssb_ref(self.inst.res)
alpha = frame.sssb_const_dist[:, :, 3]
scale = frame.sssb_const_dist[:, :, 0:3] * alpha
sssb_ref[:, :, 0:3] *= np.sum(scale, axis=-1) * dist_scale
composed_img = (sssb_ref[:, : , 0:3] + frame.stars)
composed_img *= self.inst.quantum_eff
composed_img = cv2.GaussianBlur(composed_img, ksize, sigma)
composed_img += np.random.poisson(composed_img)
composed_max = np.max(composed_img)
ref_sssb_max = np.max(sssb_ref[:, :, 0:3])
if composed_max > ref_sssb_max * 5:
composed_max = ref_sssb_max * 5
else:
# Calibrate sssb images
ref_int = frame.calc_ref_intensity()
sssb_cal_factor = ref_flux * self.sssb["albedo"] / ref_int
sssb_cal_factor = sssb_cal_factor.decompose().value
frame.sssb_only[:, :, 0:3] *= sssb_cal_factor
frame.sssb_const_dist[:, :, 0:3] *= sssb_cal_factor
# Merge images taking alpha channel and q.e. into account
alpha = frame.sssb_only[:, :, 3]
for c in range(3):
sssb = frame.sssb_only[:, :, c]
stars = frame.stars[:, :, c]
composed_img[:, :, c] = alpha * sssb + (1 - alpha) * stars
composed_img[:, :, 0:3] *= self.inst.quantum_eff
composed_img = cv2.GaussianBlur(composed_img, ksize, sigma)
composed_img += np.random.poisson(composed_img)
composed_max = np.max(composed_img)
composed_img[:, :, :] /= composed_max
if self.with_infobox:
infobox_img = composed_img[:, :, 0:3] * 255
infobox_img = infobox_img.astype(np.uint8)
try:
self.add_infobox(infobox_img, frame.metadata)
except ImageCompositorError as e:
self.logger.debug(f"No Infobox could be added. {str(e)}!")
filename = self.res_dir / ("Comp_" + str(frame.id) + ".png")
cv2.imwrite(str(filename), infobox_img)
exrfile = self.image_dir / ("Comp_" + str(frame.id))
else:
exrfile = self.res_dir / ("Comp_" + str(frame.id))
if self.with_clipping:
clipped_img = self.clip_color_depth(composed_img)
filename = self.res_dir / ("Inst_" + str(frame.id) + ".png")
cv2.imwrite(str(filename), clipped_img)
rel_pos = frame.metadata["sc_pos"] - frame.metadata["sssb_pos"]
rel_pos = rel_pos.value / 1000.
filename = str(filename) + ".xyz"
with open(str(filename), "w") as priorfile:
priorfile.write(f"{rel_pos[0]} {rel_pos[1]} {rel_pos[2]}")
exrfile = self.image_dir / ("Comp_" + str(frame.id))
else:
exrfile = self.res_dir / ("Comp_" + str(frame.id))
utils.write_openexr_image(exrfile, composed_img)
def create_sssb_ref(self, res, scale=5):
"""Creates a reference sssb image for calibration.
Sort of natural look by using image increased by factor of scale,
gaussian blur the result and decimate to match size of other images.
opencv resize algorithm needs integer divisable number of pixels
to have the same behaviour as skimage.transform.downscale_local_mean.
Zero-padding of skimage.transform.downscale_local_mean would be
necessary without scaling.
"""
res_x, res_y = res
# Rescale
res_x_sc = res_x * scale
res_y_sc = res_y * scale
sssb_point = np.zeros((res_x_sc, res_y_sc, 4), np.float32)
sig = scale / 2.
kernel = int((4 * sig + 0.5) * 2)
ksize = (kernel, kernel)
# Create point source and blur
sssb_point[res_x_sc//2, res_y_sc//2, :] = [1., 1., 1., 1.]
sssb_point = cv2.GaussianBlur(sssb_point, ksize, sig)
sssb = np.zeros((res_x, res_y, 4), np.float32)
sssb = cv2.resize(sssb_point, None, fx=1/scale, fy=1/scale,
interpolation=cv2.INTER_AREA)
sssb *= (scale * scale)
sssb[:, :, 0:3] /= np.sum(sssb[:, :, 0:3])
return sssb
def add_infobox(self, img, metadata, height=None, width=None):
"""Overlays an infobox to a given image in the lower right corner."""
        # Smallest image size (roughly 1200 x 1000) for which the default
        # 400 x 100 infobox still fits comfortably
y_res, x_res, _ = img.shape
if height is None:
if y_res > self.IMG_MIN_SIZE_INFOBOX[1]:
height = self.INFOBOX_SIZE["default"][1]
else:
scale = y_res / self.IMG_MIN_SIZE_INFOBOX[1]
height = scale * self.INFOBOX_SIZE["default"][1]
height = int(np.ceil(height))
if width is None:
if x_res > self.IMG_MIN_SIZE_INFOBOX[0]:
width = self.INFOBOX_SIZE["default"][0]
else:
scale = x_res / self.IMG_MIN_SIZE_INFOBOX[0]
width = scale * self.INFOBOX_SIZE["default"][0]
width = int(np.ceil(width))
if height is not None or width is not None:
if height > y_res or width > x_res:
raise ImageCompositorError("Infobox is bigger than image.")
            # INFOBOX_SIZE entries are (width, height) tuples
            elif (height < self.INFOBOX_SIZE["min"][1] or
                  width < self.INFOBOX_SIZE["min"][0]):
raise ImageCompositorError("Infobox is too small to read.")
sig = 3
textbox = np.zeros((height * sig, width * sig, 4), np.float32)
pt1 = (0, 0)
pt2 = (width * sig, height * sig)
color = (128, 128, 128, 128)
cv2.rectangle(textbox, pt1, pt2, color, cv2.FILLED)
org_date = (10 * sig, 40 * sig)
org_dist = (10 * sig, 70 * sig)
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
font_size = 1.0 * sig
color = (255, 255, 255, 255)
date = str(metadata["date"])
dist = str(metadata["distance"])
cv2.putText(textbox, date, org_date, font, font_size, color, sig)
cv2.putText(textbox, dist, org_dist, font, font_size, color, sig)
# See link above for explanation
sigma = sig / 2.
kernel = int((4 * sigma + 0.5) * 2)
ksize = (kernel, kernel)
textbox = cv2.GaussianBlur(textbox, ksize, sigma)
textbox = cv2.resize(textbox, (width, height),
interpolation=cv2.INTER_AREA)
alpha_s = textbox[:, :, 3] / 255.0
alpha_l = 1. - alpha_s
for c in range(3):
tb_a = alpha_s * textbox[:, :, c]
img_a = alpha_l * img[y_res-height:y_res+1, x_res-width:x_res+1, c]
img_channel = (tb_a + img_a)
img[y_res-height:y_res+1, x_res-width:x_res+1, c] = img_channel
return img
def clip_color_depth(self, img):
"""Reduces color depth to the instrument color depth."""
max_val = int(2 ** self.inst.color_depth - 1)
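        # Reading of the code below: depths up to 8 bits map directly to
        # uint8, while deeper instruments are first quantised to max_val
        # levels and then stretched over the full uint16 range so the PNG
        # output uses the whole 16-bit scale.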
if max_val <= 255:
img = img[:, :, 0:3] * max_val
img = img.astype(np.uint8)
else:
img = img[:, :, 0:3] * max_val
img = img.astype(np.uint16)
img = np.asarray(img * (65535. / max_val), np.float32)
img = img.astype(np.uint16)
return img
if __name__ == "__main__":
pass
| """
The compositor module combines the different output files of the simulation.
As the simulation module outputs different files for background and foreground
and because the intensity of the Blender-rendered images is not constant, the
compositor is required to fix the intensity issue and add the star background.
"""
from datetime import datetime
import json
from pathlib import Path
import threading
from astropy import constants as const
from astropy import units as u
import cv2
import numpy as np
from . import utils
#Astrometric calibrations
#https://www.cfa.harvard.edu/~dfabricant/huchra/ay145/mags.html
FLUX0_VBAND = 3640 * 1.51E7 * 0.16 * u.ph / (u.s * u.m ** 2) # Photons per m^2
SUN_MAG_VBAND = -26.74 * u.mag # 1 AU distance
SUN_FLUX_VBAND_1AU = np.power(10., -0.4 * SUN_MAG_VBAND.value) * FLUX0_VBAND
class ImageCompositorError(RuntimeError):
"""This is a generic error for the compositor."""
pass
class Frame():
"""Class to wrap all data of a single frame."""
metadata = None
stars = None
sssb_only = None
sssb_const_dist = None
light_ref = None
def __init__(self,
frame_id,
image_dir=None,
stars=None,
sssb_only=None,
sssb_const_dist=None,
light_ref=None):
self.id = frame_id
if None not in (stars, sssb_only, sssb_const_dist, light_ref):
self.stars = stars
self.sssb_only = sssb_only
self.sssb_const_dist = sssb_const_dist
self.light_ref = light_ref
elif image_dir is not None:
self.read_complete_frame(self.id, image_dir)
else:
raise ImageCompositorError("Unable to create frame.")
def calc_ref_intensity(self):
"""Calculates reference intensitiy using the light reference scene."""
(height, width, _) = self.light_ref.shape
h_slice = (height // 2 - 35, height // 2 + 35)
w_slice = (width // 2 - 35, width // 2 + 35)
area = self.light_ref[h_slice[0]:h_slice[1], w_slice[0]:w_slice[1], 0]
intensities = np.mean(area)
return intensities
def calc_stars_stats(self):
"""Calculate star scene parameters."""
star_c_max = []
star_c_sum = []
for i in range(3):
star_c_max.append(np.max(self.stars[:, :, i]))
star_c_sum.append(np.sum(self.stars[:, :, i]))
return (star_c_max, star_c_sum)
def calc_sssb_stats(self, const_dist=False):
"""Calculate SSSB max and sum corrected with alpha channel.
If const_dist is True, stats of const distant images are calculated.
"""
if const_dist:
sssb_max = np.max(
self.sssb_const_dist[:, :, 0] * self.sssb_const_dist[:, :, 3])
sssb_sum = np.sum(
self.sssb_const_dist[:, :, 0] * self.sssb_const_dist[:, :, 3])
else:
sssb_max = np.max(
self.sssb_only[:, :, 0] * self.sssb_only[:, :, 3])
sssb_sum = np.sum(
self.sssb_only[:, :, 0] * self.sssb_only[:, :, 3])
return (sssb_max, sssb_sum)
def read_complete_frame(self, frame_id, image_dir):
"""Reads all images for a given frame id.
This includes Stars, SssbOnly, SssbConstDist, and LightRef.
"""
frame_fmt_str = image_dir / ("{}_" + frame_id + ".exr")
frame_fmt_str = str(frame_fmt_str)
self.metadata = self.read_meta_file(frame_id, image_dir)
filename = frame_fmt_str.format("Stars")
self.stars = utils.read_openexr_image(filename)
filename = frame_fmt_str.format("SssbOnly")
self.sssb_only = utils.read_openexr_image(filename)
filename = frame_fmt_str.format("SssbConstDist")
self.sssb_const_dist = utils.read_openexr_image(filename)
filename = frame_fmt_str.format("LightRef")
self.light_ref = utils.read_openexr_image(filename)
def read_meta_file(self, frame_id, image_dir):
"""Reads metafile of a frame."""
filename = image_dir / ("Metadata_" + frame_id + ".json")
with open(str(filename), "r") as metafile:
metadata = json.load(metafile)
date = datetime.strptime(metadata["date"], "%Y-%m-%dT%H%M%S-%f")
metadata["date"] = date
metadata["distance"] = metadata["distance"] * u.m
metadata["sc_pos"] = np.asarray(metadata["sc_pos"]) * u.m
metadata["sssb_pos"] = np.asarray(metadata["sssb_pos"]) * u.m
return metadata
class ImageCompositor():
"""This class provides functions to combine the final simulation images."""
IMG_MIN_SIZE_INFOBOX = (1200, 1000)
INFOBOX_SIZE = {"default": (400, 100), "min": (200, 50)}
def __init__(self,
res_dir,
img_dir,
instrument,
sssb,
with_infobox,
with_clipping,
ext_logger):
self.logger = ext_logger
self.res_dir = res_dir
self.image_dir = img_dir
self.image_extension = ".exr"
self._threads = []
self.inst = instrument
self.dlmult = 2
self.sssb = sssb
self.with_infobox = with_infobox
self.with_clipping = with_clipping
self.logger.debug(f"Infobox: {with_infobox}. Clip: {with_clipping}.")
def get_frame_ids(self):
"""Extract list of frame ids from file names of SssbOnly scenes."""
scene_name = "SssbOnly"
image_names = scene_name + "*" + self.image_extension
filenames = self.image_dir.glob(image_names)
ids = []
for filename in filenames:
filename = str(filename.name).strip(self.image_extension)
filename = filename.strip(scene_name)
ids.append(filename.strip("_"))
return ids
def calc_relative_intensity_curve(self):
"""Calculates the relative intensity curve for all sssb frames."""
only_stats = np.zeros(len(self.frames))
const_dist_stats = np.zeros(len(self.frames))
distances = np.zeros(len(self.frames))
for i, frame in enumerate(self.frames):
only_stats[i] = frame.calc_sssb_stats()[1]
const_dist_stats[i] = frame.calc_sssb_stats(True)[1]
distances[i] = frame.metadata["distance"]
rel_intensity = only_stats / const_dist_stats
ind_sorted = distances.argsort()
distances = distances[ind_sorted]
rel_intensity = rel_intensity[ind_sorted]
for last in range(len(distances)):
if rel_intensity[last] == 0:
break
#last -= 1
rel_intensity = rel_intensity[:last]
return rel_intensity
def compose(self, frames=None, max_threads=3):
"""
Composes different images into final image, uses multi-threading.
!!! CAUTION !!! Call only once at a time.
:type frames: String, Frame or List of Frame
:param frames: FrameID, Frame or list of frames for calibration and
composition.
        :type max_threads: int
        :param max_threads: Maximum number of threads used to compose frames
                            in parallel.
"""
if frames is None:
self.frame_ids = self.get_frame_ids()
frames = []
for frame_id in self.frame_ids:
new_frame = Frame(frame_id, self.image_dir)
frames.append(new_frame)
elif isinstance(frames, str):
frames = [Frame(frames, self.image_dir)]
elif isinstance(frames, Frame):
frames = [frames]
elif isinstance(frames, list) and isinstance(frames[0], Frame):
pass
else:
raise ImageCompositorError(
"Compositor.compose requires frame or list of frames as input")
for frame in frames:
for thr in self._threads:
if not thr.is_alive():
self._threads.pop(self._threads.index(thr))
if len(self._threads) < max_threads - 1:
# Allow up to 2 additional threads
thr = threading.Thread(target=self._compose, args=(frame,))
thr.start()
self._threads.append(thr)
else:
# If too many, also compose in main thread to not drop a frame
self._compose(frame)
for thr in self._threads:
thr.join()
def _compose(self, frame):
"""
Composes raw images and adjusts light intensities.
:type frame: Frame
        :param frame: Frame containing the necessary information for composition.
"""
# Calculate Gaussian standard deviation for approx diffraction pattern
sigma = (self.dlmult * 0.45 * self.inst.wavelength
* self.inst.focal_l / (self.inst.aperture_d
* self.inst.pix_l))
# SSSB photometry
sc_sun_dist = np.linalg.norm(frame.metadata["sc_pos"]) * u.m
ref_flux = SUN_FLUX_VBAND_1AU * ((const.au / sc_sun_dist) ** 2)
ref_flux *= self.inst.aperture_a * self.inst.pix_a
ref_flux /= ((self.inst.focal_l ** 2) * np.pi)
ref_flux = ref_flux.decompose()
# Star photometry
starmap_flux = FLUX0_VBAND * frame.metadata["total_flux"]
starmap_flux *= self.inst.aperture_a
starmap_flux = starmap_flux.decompose()
# Calibrate starmap
(_, stars_sums) = frame.calc_stars_stats()
frame.stars[:, :, 0:3] *= starmap_flux.value / stars_sums[0]
# Create composition image array
composed_img = np.zeros(frame.stars.shape, dtype=np.float32)
# Calibrate SSSB, depending on visible size
dist_scale = np.power(1E6 * u.m / frame.metadata["distance"], 2.)
vis_dim = self.sssb["max_dim"] * dist_scale
# Kernel size calculated to equal skimage.filters.gaussian
# Reference:
# https://github.com/scipy/scipy/blob/4bfc152f6ee1ca48c73c06e27f7ef021d729f496/scipy/ndimage/filters.py#L214
        kernel = 2 * int(4 * sigma + 0.5) + 1  # odd size, matching the scipy formula and cv2's requirement
kernel = max(kernel, 5) # Don't use smaller than 5
ksize = (kernel, kernel)
if vis_dim < 0.1:
# Use point source sssb
# Generate point source reference image
sssb_ref = self.create_sssb_ref(self.inst.res)
alpha = frame.sssb_const_dist[:, :, 3]
scale = frame.sssb_const_dist[:, :, 0:3] * alpha
sssb_ref[:, :, 0:3] *= np.sum(scale, axis=-1) * dist_scale
composed_img = (sssb_ref[:, : , 0:3] + frame.stars)
composed_img *= self.inst.quantum_eff
composed_img = cv2.GaussianBlur(composed_img, ksize, sigma)
composed_img += np.random.poisson(composed_img)
composed_max = np.max(composed_img)
ref_sssb_max = np.max(sssb_ref[:, :, 0:3])
if composed_max > ref_sssb_max * 5:
composed_max = ref_sssb_max * 5
else:
# Calibrate sssb images
ref_int = frame.calc_ref_intensity()
sssb_cal_factor = ref_flux * self.sssb["albedo"] / ref_int
sssb_cal_factor = sssb_cal_factor.decompose().value
frame.sssb_only[:, :, 0:3] *= sssb_cal_factor
frame.sssb_const_dist[:, :, 0:3] *= sssb_cal_factor
# Merge images taking alpha channel and q.e. into account
alpha = frame.sssb_only[:, :, 3]
for c in range(3):
sssb = frame.sssb_only[:, :, c]
stars = frame.stars[:, :, c]
composed_img[:, :, c] = alpha * sssb + (1 - alpha) * stars
composed_img[:, :, 0:3] *= self.inst.quantum_eff
composed_img = cv2.GaussianBlur(composed_img, ksize, sigma)
composed_img += np.random.poisson(composed_img)
composed_max = np.max(composed_img)
composed_img[:, :, :] /= composed_max
if self.with_infobox:
infobox_img = composed_img[:, :, 0:3] * 255
infobox_img = infobox_img.astype(np.uint8)
try:
self.add_infobox(infobox_img, frame.metadata)
except ImageCompositorError as e:
self.logger.debug(f"No Infobox could be added. {str(e)}!")
filename = self.res_dir / ("Comp_" + str(frame.id) + ".png")
cv2.imwrite(str(filename), infobox_img)
exrfile = self.image_dir / ("Comp_" + str(frame.id))
else:
exrfile = self.res_dir / ("Comp_" + str(frame.id))
if self.with_clipping:
clipped_img = self.clip_color_depth(composed_img)
filename = self.res_dir / ("Inst_" + str(frame.id) + ".png")
cv2.imwrite(str(filename), clipped_img)
rel_pos = frame.metadata["sc_pos"] - frame.metadata["sssb_pos"]
rel_pos = rel_pos.value / 1000.
filename = str(filename) + ".xyz"
with open(str(filename), "w") as priorfile:
priorfile.write(f"{rel_pos[0]} {rel_pos[1]} {rel_pos[2]}")
exrfile = self.image_dir / ("Comp_" + str(frame.id))
else:
exrfile = self.res_dir / ("Comp_" + str(frame.id))
utils.write_openexr_image(exrfile, composed_img)
def create_sssb_ref(self, res, scale=5):
"""Creates a reference sssb image for calibration.
Sort of natural look by using image increased by factor of scale,
gaussian blur the result and decimate to match size of other images.
opencv resize algorithm needs integer divisable number of pixels
to have the same behaviour as skimage.transform.downscale_local_mean.
Zero-padding of skimage.transform.downscale_local_mean would be
necessary without scaling.
"""
res_x, res_y = res
# Rescale
res_x_sc = res_x * scale
res_y_sc = res_y * scale
sssb_point = np.zeros((res_x_sc, res_y_sc, 4), np.float32)
sig = scale / 2.
kernel = int((4 * sig + 0.5) * 2)
ksize = (kernel, kernel)
# Create point source and blur
sssb_point[res_x_sc//2, res_y_sc//2, :] = [1., 1., 1., 1.]
sssb_point = cv2.GaussianBlur(sssb_point, ksize, sig)
sssb = np.zeros((res_x, res_y, 4), np.float32)
sssb = cv2.resize(sssb_point, None, fx=1/scale, fy=1/scale,
interpolation=cv2.INTER_AREA)
sssb *= (scale * scale)
sssb[:, :, 0:3] /= np.sum(sssb[:, :, 0:3])
return sssb
def add_infobox(self, img, metadata, height=None, width=None):
"""Overlays an infobox to a given image in the lower right corner."""
        # Smallest image size (roughly 1200 x 1000) for which the default
        # 400 x 100 infobox still fits comfortably
y_res, x_res, _ = img.shape
if height is None:
if y_res > self.IMG_MIN_SIZE_INFOBOX[1]:
height = self.INFOBOX_SIZE["default"][1]
else:
scale = y_res / self.IMG_MIN_SIZE_INFOBOX[1]
height = scale * self.INFOBOX_SIZE["default"][1]
height = int(np.ceil(height))
if width is None:
if x_res > self.IMG_MIN_SIZE_INFOBOX[0]:
width = self.INFOBOX_SIZE["default"][0]
else:
scale = x_res / self.IMG_MIN_SIZE_INFOBOX[0]
width = scale * self.INFOBOX_SIZE["default"][0]
width = int(np.ceil(width))
if height is not None or width is not None:
if height > y_res or width > x_res:
raise ImageCompositorError("Infobox is bigger than image.")
            # INFOBOX_SIZE entries are (width, height) tuples
            elif (height < self.INFOBOX_SIZE["min"][1] or
                  width < self.INFOBOX_SIZE["min"][0]):
raise ImageCompositorError("Infobox is too small to read.")
sig = 3
textbox = np.zeros((height * sig, width * sig, 4), np.float32)
pt1 = (0, 0)
pt2 = (width * sig, height * sig)
color = (128, 128, 128, 128)
cv2.rectangle(textbox, pt1, pt2, color, cv2.FILLED)
org_date = (10 * sig, 40 * sig)
org_dist = (10 * sig, 70 * sig)
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
font_size = 1.0 * sig
color = (255, 255, 255, 255)
date = str(metadata["date"])
dist = str(metadata["distance"])
cv2.putText(textbox, date, org_date, font, font_size, color, sig)
cv2.putText(textbox, dist, org_dist, font, font_size, color, sig)
# See link above for explanation
sigma = sig / 2.
kernel = int((4 * sigma + 0.5) * 2)
ksize = (kernel, kernel)
textbox = cv2.GaussianBlur(textbox, ksize, sigma)
textbox = cv2.resize(textbox, (width, height),
interpolation=cv2.INTER_AREA)
alpha_s = textbox[:, :, 3] / 255.0
alpha_l = 1. - alpha_s
for c in range(3):
tb_a = alpha_s * textbox[:, :, c]
img_a = alpha_l * img[y_res-height:y_res+1, x_res-width:x_res+1, c]
img_channel = (tb_a + img_a)
img[y_res-height:y_res+1, x_res-width:x_res+1, c] = img_channel
return img
def clip_color_depth(self, img):
"""Reduces color depth to the instrument color depth."""
max_val = int(2 ** self.inst.color_depth - 1)
if max_val <= 255:
img = img[:, :, 0:3] * max_val
img = img.astype(np.uint8)
else:
img = img[:, :, 0:3] * max_val
img = img.astype(np.uint16)
img = np.asarray(img * (65535. / max_val), np.float32)
img = img.astype(np.uint16)
return img
if __name__ == "__main__":
pass
| en | 0.768546 | The compositor module combines the different output files of the simulation. As the simulation module outputs different files for background and foreground and because the intensity of the blender rendered images are not constant, the compositor is required to fix the intensity issue and add the star background. #Astrometric calibrations #https://www.cfa.harvard.edu/~dfabricant/huchra/ay145/mags.html # Photons per m^2 # 1 AU distance This is a generic error for the compositor. Class to wrap all data of a single frame. Calculates reference intensitiy using the light reference scene. Calculate star scene parameters. Calculate SSSB max and sum corrected with alpha channel. If const_dist is True, stats of const distant images are calculated. Reads all images for a given frame id. This includes Stars, SssbOnly, SssbConstDist, and LightRef. Reads metafile of a frame. This class provides functions to combine the final simulation images. Extract list of frame ids from file names of SssbOnly scenes. Calculates the relative intensity curve for all sssb frames. #last -= 1 Composes different images into final image, uses multi-threading. !!! CAUTION !!! Call only once at a time. :type frames: String, Frame or List of Frame :param frames: FrameID, Frame or list of frames for calibration and composition. :type name_suffix: str :param name_suffix: Image suffix for file I/O. Used for constructing file names to read different images of a frame as well as used for composed image output. # Allow up to 2 additional threads # If too many, also compose in main thread to not drop a frame Composes raw images and adjusts light intensities. :type frame: Frame :param frame: Frame containing necessary inormation for composition. # Calculate Gaussian standard deviation for approx diffraction pattern # SSSB photometry # Star photometry # Calibrate starmap # Create composition image array # Calibrate SSSB, depending on visible size # Kernel size calculated to equal skimage.filters.gaussian # Reference: # https://github.com/scipy/scipy/blob/4bfc152f6ee1ca48c73c06e27f7ef021d729f496/scipy/ndimage/filters.py#L214 # Don't use smaller than 5 # Use point source sssb # Generate point source reference image # Calibrate sssb images # Merge images taking alpha channel and q.e. into account Creates a reference sssb image for calibration. Sort of natural look by using image increased by factor of scale, gaussian blur the result and decimate to match size of other images. opencv resize algorithm needs integer divisable number of pixels to have the same behaviour as skimage.transform.downscale_local_mean. Zero-padding of skimage.transform.downscale_local_mean would be necessary without scaling. # Rescale # Create point source and blur Overlays an infobox to a given image in the lower right corner. # ~ Smallest size 1000 1200 for which 100 400 works # See link above for explanation Reduces color depth to the instrument color depth. | 2.299898 | 2 |
tests/__init__.py | duypham2108/dev_st | 67 | 6631074 | """Unit test package for stlearn."""
| """Unit test package for stlearn."""
| en | 0.789885 | Unit test package for stlearn. | 0.786733 | 1 |
plugin.video.foxystreams/resources/lib/ui.py | rrosajp/foxystreams | 1 | 6631075 | <filename>plugin.video.foxystreams/resources/lib/ui.py
from .router import router
import xbmc
import xbmcgui
import xbmcplugin
def build_listitems(names, videos=False):
listitems = []
for name in names:
li = xbmcgui.ListItem(name)
if videos:
li.setProperty('IsPlayable', 'true')
li.setInfo('video', {
'title': name,
'mediatype': 'video',
})
listitems.append(li)
return listitems
def get_user_input(title='Search'):
return xbmcgui.Dialog().input(title)
def directory_view(names_urls, contexts=False, videos=False, folders=False,
more=False, cache=True):
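    # Summary inferred from the code below: names_urls is a sequence of
    # (name, url) tuples, or (name, url, context) tuples when contexts is
    # truthy; folders marks the entries as directories, videos marks them
    # playable, and more=True skips endOfDirectory so that further items can
    # still be added to the listing.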
if names_urls:
if contexts:
names, urls, contexts = zip(*names_urls)
else:
names, urls = zip(*names_urls)
contexts = []
true_list = [folders] * len(names)
listitems = build_listitems(names, videos=videos)
for li, context in zip(listitems, contexts):
li.addContextMenuItems(context)
xbmcplugin.addDirectoryItems(handle=router.handle,
items=list(zip(urls, listitems, true_list)))
if videos:
xbmcplugin.setContent(router.handle, 'videos')
if more:
return
xbmcplugin.endOfDirectory(handle=router.handle, cacheToDisc=cache)
def dialog_select(names):
listitems = build_listitems(names)
return xbmcgui.Dialog().select('Select', listitems)
def notify(message):
xbmc.executebuiltin('Notification(FoxyStreams, {})'.format(message))
def add_torrent(user_debrid, magnet, fn_filter=None):
dialog = xbmcgui.DialogProgressBG()
dialog.create('Adding To Debrid')
status = user_debrid.grab_torrent(magnet, fn_filter=fn_filter)
dialog.close()
if status:
notify('Added Torrent to Debrid')
else:
notify('Failed to add to Debrid')
| <filename>plugin.video.foxystreams/resources/lib/ui.py
from .router import router
import xbmc
import xbmcgui
import xbmcplugin
def build_listitems(names, videos=False):
listitems = []
for name in names:
li = xbmcgui.ListItem(name)
if videos:
li.setProperty('IsPlayable', 'true')
li.setInfo('video', {
'title': name,
'mediatype': 'video',
})
listitems.append(li)
return listitems
def get_user_input(title='Search'):
return xbmcgui.Dialog().input(title)
def directory_view(names_urls, contexts=False, videos=False, folders=False,
more=False, cache=True):
if names_urls:
if contexts:
names, urls, contexts = zip(*names_urls)
else:
names, urls = zip(*names_urls)
contexts = []
true_list = [folders] * len(names)
listitems = build_listitems(names, videos=videos)
for li, context in zip(listitems, contexts):
li.addContextMenuItems(context)
xbmcplugin.addDirectoryItems(handle=router.handle,
items=list(zip(urls, listitems, true_list)))
if videos:
xbmcplugin.setContent(router.handle, 'videos')
if more:
return
xbmcplugin.endOfDirectory(handle=router.handle, cacheToDisc=cache)
def dialog_select(names):
listitems = build_listitems(names)
return xbmcgui.Dialog().select('Select', listitems)
def notify(message):
xbmc.executebuiltin('Notification(FoxyStreams, {})'.format(message))
def add_torrent(user_debrid, magnet, fn_filter=None):
dialog = xbmcgui.DialogProgressBG()
dialog.create('Adding To Debrid')
status = user_debrid.grab_torrent(magnet, fn_filter=fn_filter)
dialog.close()
if status:
notify('Added Torrent to Debrid')
else:
notify('Failed to add to Debrid')
| none | 1 | 2.097019 | 2 |
|
third_party/virtualbox/src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/Xml/CommonXml.py | Fimbure/icebox-1 | 521 | 6631076 | <gh_stars>100-1000
## @file
# This file is used to parse a PCD file of .PKG file
#
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
CommonXml
'''
##
# Import Modules
#
from Core.DistributionPackageClass import DistributionPackageHeaderObject
from Library.String import ConvertNEToNOTEQ
from Library.String import ConvertNOTEQToNE
from Library.String import GetSplitValueList
from Library.String import GetStringOfList
from Library.Xml.XmlRoutines import XmlElement
from Library.Xml.XmlRoutines import XmlElement2
from Library.Xml.XmlRoutines import XmlAttribute
from Library.Xml.XmlRoutines import XmlNode
from Library.Xml.XmlRoutines import XmlList
from Library.Xml.XmlRoutines import CreateXmlElement
from Library.UniClassObject import ConvertSpecialUnicodes
from Library.UniClassObject import GetLanguageCode1766
from Object.POM.CommonObject import FileObject
from Object.POM.CommonObject import MiscFileObject
from Object.POM.CommonObject import UserExtensionObject
from Object.POM.CommonObject import ClonedRecordObject
from Object.POM.CommonObject import LibraryClassObject
from Object.POM.CommonObject import FileNameObject
from Object.POM.ModuleObject import ModuleObject
from Xml.XmlParserMisc import IsRequiredItemListNull
from Xml.XmlParserMisc import GetHelpTextList
import Library.DataType as DataType
##
# ClonedFromXml
#
class ClonedFromXml(object):
def __init__(self):
self.GUID = ''
self.Version = ''
def FromXml(self, Item, Key):
self.GUID = XmlElement(Item, '%s/GUID' % Key)
self.Version = XmlAttribute(XmlNode(Item, '%s/GUID' % Key), 'Version')
if self.GUID == '' and self.Version == '':
return None
ClonedFrom = ClonedRecordObject()
ClonedFrom.SetPackageGuid(self.GUID)
ClonedFrom.SetPackageVersion(self.Version)
return ClonedFrom
def ToXml(self, ClonedFrom, Key):
if self.GUID:
pass
Element1 = CreateXmlElement('GUID', ClonedFrom.GetPackageGuid(), [],
[['Version', ClonedFrom.GetPackageVersion()]])
AttributeList = []
NodeList = [Element1]
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def __str__(self):
return "GUID = %s Version = %s" % (self.GUID, self.Version)
##
# CommonDefinesXml
#
class CommonDefinesXml(object):
def __init__(self):
self.Usage = ''
self.SupArchList = []
self.SupModList = []
self.FeatureFlag = ''
def FromXml(self, Item, Key):
if Key:
pass
self.Usage = XmlAttribute(Item, 'Usage')
self.SupArchList = \
[Arch for Arch in GetSplitValueList(XmlAttribute(Item, 'SupArchList'), DataType.TAB_SPACE_SPLIT) if Arch]
self.SupModList = \
[Mod for Mod in GetSplitValueList(XmlAttribute(Item, 'SupModList'), DataType.TAB_SPACE_SPLIT) if Mod]
self.FeatureFlag = ConvertNOTEQToNE(XmlAttribute(Item, 'FeatureFlag'))
def ToXml(self):
pass
def __str__(self):
return "Usage = %s SupArchList = %s SupModList = %s FeatureFlag = %s" \
% (self.Usage, self.SupArchList, self.SupModList, self.FeatureFlag)
##
# PromptXml
#
class PromptXml(object):
def __init__(self):
self.Prompt = ''
self.Lang = ''
def FromXml(self, Item, Key):
if Key:
pass
self.Prompt = XmlElement2(Item, 'Prompt')
self.Lang = XmlAttribute(Item, 'Lang')
def ToXml(self, Prompt, Key='Prompt'):
if self.Prompt:
pass
return CreateXmlElement('%s' % Key, Prompt.GetString(), [], [['Lang', Prompt.GetLang()]])
def __str__(self):
return "Prompt = %s Lang = %s" % (self.Prompt, self.Lang)
##
# HelpTextXml
#
class HelpTextXml(object):
def __init__(self):
self.HelpText = ''
self.Lang = ''
def FromXml(self, Item, Key):
if Key:
pass
self.HelpText = XmlElement2(Item, 'HelpText')
self.Lang = XmlAttribute(Item, 'Lang')
def ToXml(self, HelpText, Key='HelpText'):
if self.HelpText:
pass
return CreateXmlElement('%s' % Key, HelpText.GetString(), [], [['Lang', HelpText.GetLang()]])
def __str__(self):
return "HelpText = %s Lang = %s" % (self.HelpText, self.Lang)
##
# HeaderXml
#
class HeaderXml(object):
def __init__(self):
self.Name = ''
self.BaseName = ''
self.GUID = ''
self.Version = ''
self.CopyrightList = []
self.LicenseList = []
self.AbstractList = []
self.DescriptionList = []
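    # For orientation, FromXml below consumes a header fragment shaped
    # roughly like the following (illustrative sketch, not the normative
    # schema):
    #   <Header>
    #     <Name BaseName="...">...</Name>
    #     <GUID Version="...">...</GUID>
    #     <Copyright Lang="...">...</Copyright>
    #     <License Lang="...">...</License>
    #     <Abstract Lang="...">...</Abstract>
    #     <Description Lang="...">...</Description>
    #   </Header>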
def FromXml(self, Item, Key, IsRequiredCheck=False, IsStandAlongModule=False):
if not Item and IsRequiredCheck:
XmlTreeLevel = []
if IsStandAlongModule:
XmlTreeLevel = ['DistributionPackage', 'ModuleSurfaceArea']
else:
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'ModuleSurfaceArea']
CheckDict = {'Header':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
self.Name = XmlElement(Item, '%s/Name' % Key)
self.BaseName = XmlAttribute(XmlNode(Item, '%s/Name' % Key), 'BaseName')
self.GUID = XmlElement(Item, '%s/GUID' % Key)
self.Version = XmlAttribute(XmlNode(Item, '%s/GUID' % Key), 'Version')
for SubItem in XmlList(Item, '%s/Abstract' % Key):
HeaderAbstractLang = XmlAttribute(SubItem, 'Lang')
self.AbstractList.append((HeaderAbstractLang, XmlElement(SubItem, '%s/Abstract' % Key)))
for SubItem in XmlList(Item, '%s/Description' % Key):
HeaderDescriptionLang = XmlAttribute(SubItem, 'Lang')
self.DescriptionList.append((HeaderDescriptionLang, XmlElement(SubItem, '%s/Description' % Key)))
for SubItem in XmlList(Item, '%s/Copyright' % Key):
HeaderCopyrightLang = XmlAttribute(SubItem, 'Lang')
self.CopyrightList.append((HeaderCopyrightLang, XmlElement(SubItem, '%s/Copyright' % Key)))
for SubItem in XmlList(Item, '%s/License' % Key):
HeaderLicenseLang = XmlAttribute(SubItem, 'Lang')
self.LicenseList.append((HeaderLicenseLang, XmlElement(SubItem, '%s/License' % Key)))
ModuleHeader = ModuleObject()
ModuleHeader.SetName(self.Name)
ModuleHeader.SetBaseName(self.BaseName)
ModuleHeader.SetGuid(self.GUID)
ModuleHeader.SetVersion(self.Version)
ModuleHeader.SetCopyright(self.CopyrightList)
ModuleHeader.SetLicense(self.LicenseList)
ModuleHeader.SetAbstract(self.AbstractList)
ModuleHeader.SetDescription(self.DescriptionList)
return ModuleHeader
def ToXml(self, Header, Key):
if self.GUID:
pass
Element1 = CreateXmlElement('Name', Header.GetName(), [], [['BaseName', Header.GetBaseName()]])
Element2 = CreateXmlElement('GUID', Header.GetGuid(), [], [['Version', Header.GetVersion()]])
NodeList = [Element1,
Element2,
]
UNIInfAbstractList = []
UNIInfDescriptionList = []
# Get Abstract and Description from Uni File
# if the Uni File exists
if Header.UniFileClassObject is not None:
UniStrDict = Header.UniFileClassObject.OrderedStringList
for Lang in UniStrDict:
for StringDefClassObject in UniStrDict[Lang]:
if not StringDefClassObject.StringValue:
continue
if StringDefClassObject.StringName == DataType.TAB_INF_ABSTRACT:
UNIInfAbstractList.append((GetLanguageCode1766(Lang),
ConvertSpecialUnicodes(StringDefClassObject.StringValue)))
if StringDefClassObject.StringName == DataType.TAB_INF_DESCRIPTION:
UNIInfDescriptionList.append((GetLanguageCode1766(Lang),
ConvertSpecialUnicodes(StringDefClassObject.StringValue)))
# Get Abstract and Description from INF File Header
for (Lang, Value) in Header.GetCopyright():
if Value:
NodeList.append(CreateXmlElement('Copyright', Value, [], []))
for (Lang, Value) in Header.GetLicense():
if Value:
NodeList.append(CreateXmlElement('License', Value, [], []))
for (Lang, Value) in Header.GetAbstract() + UNIInfAbstractList:
if Value:
NodeList.append(CreateXmlElement('Abstract', Value, [], [['Lang', Lang]]))
for (Lang, Value) in Header.GetDescription() + UNIInfDescriptionList:
if Value:
NodeList.append(CreateXmlElement('Description', Value, [], [['Lang', Lang]]))
AttributeList = []
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def __str__(self):
return "Name = %s BaseName = %s GUID = %s Version = %s Copyright = %s \
License = %s Abstract = %s Description = %s" % \
(self.Name, self.BaseName, self.GUID, self.Version, self.CopyrightList, \
self.LicenseList, self.AbstractList, self.DescriptionList)
##
# DistributionPackageHeaderXml
#
class DistributionPackageHeaderXml(object):
def __init__(self):
self.Header = HeaderXml()
self.ReadOnly = ''
self.RePackage = ''
self.Vendor = ''
self.Date = ''
self.Signature = ''
self.XmlSpecification = ''
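    # ReadOnly and RePackage are handled as tri-state values in FromXml
    # below: 'TRUE'/'FALSE' (case-insensitive) map to Python booleans and
    # anything else leaves the default; ToXml mirrors this by emitting the
    # attributes only when they are non-empty.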
def FromXml(self, Item, Key):
if not Item:
return None
self.ReadOnly = XmlAttribute(XmlNode(Item, '%s' % Key), 'ReadOnly')
self.RePackage = XmlAttribute(XmlNode(Item, '%s' % Key), 'RePackage')
self.Vendor = XmlElement(Item, '%s/Vendor' % Key)
self.Date = XmlElement(Item, '%s/Date' % Key)
self.Signature = XmlElement(Item, '%s/Signature' % Key)
self.XmlSpecification = XmlElement(Item, '%s/XmlSpecification' % Key)
self.Header.FromXml(Item, Key)
DistributionPackageHeader = DistributionPackageHeaderObject()
if self.ReadOnly.upper() == 'TRUE':
DistributionPackageHeader.ReadOnly = True
elif self.ReadOnly.upper() == 'FALSE':
DistributionPackageHeader.ReadOnly = False
if self.RePackage.upper() == 'TRUE':
DistributionPackageHeader.RePackage = True
elif self.RePackage.upper() == 'FALSE':
DistributionPackageHeader.RePackage = False
DistributionPackageHeader.Vendor = self.Vendor
DistributionPackageHeader.Date = self.Date
DistributionPackageHeader.Signature = self.Signature
DistributionPackageHeader.XmlSpecification = self.XmlSpecification
DistributionPackageHeader.SetName(self.Header.Name)
DistributionPackageHeader.SetBaseName(self.Header.BaseName)
DistributionPackageHeader.SetGuid(self.Header.GUID)
DistributionPackageHeader.SetVersion(self.Header.Version)
DistributionPackageHeader.SetCopyright(self.Header.CopyrightList)
DistributionPackageHeader.SetLicense(self.Header.LicenseList)
DistributionPackageHeader.SetAbstract(self.Header.AbstractList)
DistributionPackageHeader.SetDescription(self.Header.DescriptionList)
return DistributionPackageHeader
def ToXml(self, DistributionPackageHeader, Key):
if self.Header:
pass
Element1 = CreateXmlElement('Name', \
DistributionPackageHeader.GetName(), [], \
[['BaseName', \
DistributionPackageHeader.GetBaseName()]])
Element2 = CreateXmlElement('GUID', \
DistributionPackageHeader.GetGuid(), [], \
[['Version', \
DistributionPackageHeader.GetVersion()]])
AttributeList = []
if DistributionPackageHeader.ReadOnly != '':
AttributeList.append(['ReadOnly', str(DistributionPackageHeader.ReadOnly).lower()])
if DistributionPackageHeader.RePackage != '':
AttributeList.append(['RePackage', str(DistributionPackageHeader.RePackage).lower()])
if DistributionPackageHeader.GetAbstract():
DPAbstract = DistributionPackageHeader.GetAbstract()[0][1]
else:
DPAbstract = ''
if DistributionPackageHeader.GetDescription():
DPDescription = DistributionPackageHeader.GetDescription()[0][1]
else:
DPDescription = ''
if DistributionPackageHeader.GetCopyright():
DPCopyright = DistributionPackageHeader.GetCopyright()[0][1]
else:
DPCopyright = ''
if DistributionPackageHeader.GetLicense():
DPLicense = DistributionPackageHeader.GetLicense()[0][1]
else:
DPLicense = ''
NodeList = [Element1,
Element2,
['Vendor', DistributionPackageHeader.Vendor],
['Date', DistributionPackageHeader.Date],
['Copyright', DPCopyright],
['License', DPLicense],
['Abstract', DPAbstract],
['Description', DPDescription],
['Signature', DistributionPackageHeader.Signature],
['XmlSpecification', \
DistributionPackageHeader.XmlSpecification],
]
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def __str__(self):
return "ReadOnly = %s RePackage = %s Vendor = %s Date = %s \
Signature = %s XmlSpecification = %s %s" % \
(self.ReadOnly, self.RePackage, self.Vendor, self.Date, \
self.Signature, self.XmlSpecification, self.Header)
##
# PackageHeaderXml
#
class PackageHeaderXml(object):
def __init__(self):
self.Header = HeaderXml()
self.PackagePath = ''
def FromXml(self, Item, Key, PackageObject2):
if not Item:
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea']
CheckDict = {'PackageHeader':None, }
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
self.PackagePath = XmlElement(Item, '%s/PackagePath' % Key)
self.Header.FromXml(Item, Key)
PackageObject2.SetName(self.Header.Name)
PackageObject2.SetBaseName(self.Header.BaseName)
PackageObject2.SetGuid(self.Header.GUID)
PackageObject2.SetVersion(self.Header.Version)
PackageObject2.SetCopyright(self.Header.CopyrightList)
PackageObject2.SetLicense(self.Header.LicenseList)
PackageObject2.SetAbstract(self.Header.AbstractList)
PackageObject2.SetDescription(self.Header.DescriptionList)
PackageObject2.SetPackagePath(self.PackagePath)
def ToXml(self, PackageObject2, Key):
if self.PackagePath:
pass
Element1 = CreateXmlElement('Name', PackageObject2.GetName(), [], \
[['BaseName', PackageObject2.GetBaseName()]])
Element2 = CreateXmlElement('GUID', PackageObject2.GetGuid(), [], \
[['Version', PackageObject2.GetVersion()]])
NodeList = [Element1,
Element2
]
UNIPackageAbrstractList = []
UNIPackageDescriptionList = []
# Get Abstract and Description from Uni File
# if the Uni File exists
if PackageObject2.UniFileClassObject is not None:
UniStrDict = PackageObject2.UniFileClassObject.OrderedStringList
for Lang in UniStrDict:
for StringDefClassObject in UniStrDict[Lang]:
if not StringDefClassObject.StringValue:
continue
if StringDefClassObject.StringName == DataType.TAB_DEC_PACKAGE_ABSTRACT:
UNIPackageAbrstractList.append((GetLanguageCode1766(Lang),
ConvertSpecialUnicodes(StringDefClassObject.StringValue)))
if StringDefClassObject.StringName == DataType.TAB_DEC_PACKAGE_DESCRIPTION:
UNIPackageDescriptionList.append((GetLanguageCode1766(Lang),
ConvertSpecialUnicodes(StringDefClassObject.StringValue)))
# Get Abstract and Description from DEC File Header
for (Lang, Value) in PackageObject2.GetCopyright():
if Value:
NodeList.append(CreateXmlElement(DataType.TAB_HEADER_COPYRIGHT, Value, [], []))
for (Lang, Value) in PackageObject2.GetLicense():
if Value:
NodeList.append(CreateXmlElement(DataType.TAB_HEADER_LICENSE, Value, [], []))
for (Lang, Value) in PackageObject2.GetAbstract() + UNIPackageAbrstractList:
if Value:
NodeList.append(CreateXmlElement(DataType.TAB_HEADER_ABSTRACT, Value, [], [['Lang', Lang]]))
for (Lang, Value) in PackageObject2.GetDescription() + UNIPackageDescriptionList:
if Value:
NodeList.append(CreateXmlElement(DataType.TAB_HEADER_DESCRIPTION, Value, [], [['Lang', Lang]]))
NodeList.append(['PackagePath', PackageObject2.GetPackagePath()])
AttributeList = []
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def __str__(self):
return "PackagePath = %s %s" \
% (self.PackagePath, self.Header)
##
# MiscellaneousFileXml
#
class MiscellaneousFileXml(object):
def __init__(self):
self.Header = HeaderXml()
self.Files = []
##
# This API is used for Package or Module's MiscellaneousFile section
#
def FromXml(self, Item, Key):
if not Item:
return None
self.Header.FromXml(Item, Key)
NewItem = XmlNode(Item, '%s/Header' % Key)
self.Header.FromXml(NewItem, 'Header')
for SubItem in XmlList(Item, '%s/Filename' % Key):
Filename = XmlElement(SubItem, '%s/Filename' % Key)
Executable = XmlAttribute(XmlNode(SubItem, '%s/Filename' % Key), 'Executable')
if Executable.upper() == "TRUE":
Executable = True
elif Executable.upper() == "FALSE":
Executable = False
else:
Executable = ''
self.Files.append([Filename, Executable])
MiscFile = MiscFileObject()
MiscFile.SetCopyright(self.Header.CopyrightList)
MiscFile.SetLicense(self.Header.LicenseList)
MiscFile.SetAbstract(self.Header.AbstractList)
MiscFile.SetDescription(self.Header.DescriptionList)
MiscFileList = []
for File in self.Files:
FileObj = FileObject()
FileObj.SetURI(File[0])
FileObj.SetExecutable(File[1])
MiscFileList.append(FileObj)
MiscFile.SetFileList(MiscFileList)
return MiscFile
##
# This API is used for DistP's tool section
#
def FromXml2(self, Item, Key):
if Item is None:
return None
NewItem = XmlNode(Item, '%s/Header' % Key)
self.Header.FromXml(NewItem, 'Header')
for SubItem in XmlList(Item, '%s/Filename' % Key):
Filename = XmlElement(SubItem, '%s/Filename' % Key)
Executable = \
XmlAttribute(XmlNode(SubItem, '%s/Filename' % Key), 'Executable')
OsType = XmlAttribute(XmlNode(SubItem, '%s/Filename' % Key), 'OS')
if Executable.upper() == "TRUE":
Executable = True
elif Executable.upper() == "FALSE":
Executable = False
else:
Executable = ''
self.Files.append([Filename, Executable, OsType])
MiscFile = MiscFileObject()
MiscFile.SetName(self.Header.Name)
MiscFile.SetCopyright(self.Header.CopyrightList)
MiscFile.SetLicense(self.Header.LicenseList)
MiscFile.SetAbstract(self.Header.AbstractList)
MiscFile.SetDescription(self.Header.DescriptionList)
MiscFileList = []
for File in self.Files:
FileObj = FileObject()
FileObj.SetURI(File[0])
FileObj.SetExecutable(File[1])
FileObj.SetOS(File[2])
MiscFileList.append(FileObj)
MiscFile.SetFileList(MiscFileList)
return MiscFile
##
# This API is used for Package or Module's MiscellaneousFile section
#
def ToXml(self, MiscFile, Key):
if self.Header:
pass
if MiscFile:
if MiscFile.GetAbstract():
DPAbstract = MiscFile.GetAbstract()[0][1]
else:
DPAbstract = ''
if MiscFile.GetDescription():
DPDescription = MiscFile.GetDescription()[0][1]
else:
DPDescription = ''
if MiscFile.GetCopyright():
DPCopyright = MiscFile.GetCopyright()[0][1]
else:
DPCopyright = ''
if MiscFile.GetLicense():
DPLicense = MiscFile.GetLicense()[0][1]
else:
DPLicense = ''
NodeList = [['Copyright', DPCopyright],
['License', DPLicense],
['Abstract', DPAbstract],
['Description', DPDescription],
]
for File in MiscFile.GetFileList():
NodeList.append\
(CreateXmlElement\
('Filename', File.GetURI(), [], \
[['Executable', str(File.GetExecutable()).lower()]]))
Root = CreateXmlElement('%s' % Key, '', NodeList, [])
return Root
##
# This API is used for DistP's tool section
#
def ToXml2(self, MiscFile, Key):
if self.Header:
pass
if MiscFile:
if MiscFile.GetAbstract():
DPAbstract = MiscFile.GetAbstract()[0][1]
else:
DPAbstract = ''
if MiscFile.GetDescription():
DPDescription = MiscFile.GetDescription()[0][1]
else:
DPDescription = ''
if MiscFile.GetCopyright():
DPCopyright = MiscFile.GetCopyright()[0][1]
else:
DPCopyright = ''
if MiscFile.GetLicense():
DPLicense = MiscFile.GetLicense()[0][1]
else:
DPLicense = ''
NodeList = [['Name', MiscFile.GetName()],
['Copyright', DPCopyright],
['License', DPLicense],
['Abstract', DPAbstract],
['Description', DPDescription],
]
HeaderNode = CreateXmlElement('Header', '', NodeList, [])
NodeList = [HeaderNode]
for File in MiscFile.GetFileList():
NodeList.append\
(CreateXmlElement\
('Filename', File.GetURI(), [], \
[['Executable', str(File.GetExecutable()).lower()], \
['OS', File.GetOS()]]))
Root = CreateXmlElement('%s' % Key, '', NodeList, [])
return Root
def __str__(self):
Str = str(self.Header)
for Item in self.Files:
Str = Str + '\n\tFilename:' + str(Item)
return Str
##
# UserExtensionsXml
#
class UserExtensionsXml(object):
def __init__(self):
self.UserId = ''
self.Identifier = ''
self.BinaryAbstractList = []
self.BinaryDescriptionList = []
self.BinaryCopyrightList = []
self.BinaryLicenseList = []
self.LangDefsList = []
self.DefineDict = {}
self.BuildOptionDict = {}
self.IncludesDict = {}
self.SourcesDict = {}
self.BinariesDict = {}
self.SupArchList = []
self.Statement = ''
self.Defines = ''
self.BuildOptions = ''
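    # FromXml below special-cases the reserved binary-header UserId and
    # Identifier (TAB_BINARY_HEADER_USERID / TAB_BINARY_HEADER_IDENTIFIER):
    # only for that combination are the BinaryAbstract, BinaryDescription,
    # BinaryCopyright and BinaryLicense elements collected. FromXml2 is a
    # lightweight variant that only reads UserId and Identifier.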
def FromXml2(self, Item, Key):
self.UserId = XmlAttribute(XmlNode(Item, '%s' % Key), 'UserId')
self.Identifier = XmlAttribute(XmlNode(Item, '%s' % Key), 'Identifier')
UserExtension = UserExtensionObject()
UserExtension.SetUserID(self.UserId)
UserExtension.SetIdentifier(self.Identifier)
return UserExtension
def FromXml(self, Item, Key):
self.UserId = XmlAttribute(XmlNode(Item, '%s' % Key), 'UserId')
self.Identifier = XmlAttribute(XmlNode(Item, '%s' % Key), 'Identifier')
if self.UserId == DataType.TAB_BINARY_HEADER_USERID \
and self.Identifier == DataType.TAB_BINARY_HEADER_IDENTIFIER:
for SubItem in XmlList(Item, '%s/BinaryAbstract' % Key):
BinaryAbstractLang = XmlAttribute(SubItem, 'Lang')
self.BinaryAbstractList.append((BinaryAbstractLang, XmlElement(SubItem, '%s/BinaryAbstract' % Key)))
for SubItem in XmlList(Item, '%s/BinaryDescription' % Key):
BinaryDescriptionLang = XmlAttribute(SubItem, 'Lang')
self.BinaryDescriptionList.append((BinaryDescriptionLang,
XmlElement(SubItem, '%s/BinaryDescription' % Key)))
for SubItem in XmlList(Item, '%s/BinaryCopyright' % Key):
BinaryCopyrightLang = XmlAttribute(SubItem, 'Lang')
self.BinaryCopyrightList.append((BinaryCopyrightLang,
XmlElement(SubItem, '%s/BinaryCopyright' % Key)))
for SubItem in XmlList(Item, '%s/BinaryLicense' % Key):
BinaryLicenseLang = XmlAttribute(SubItem, 'Lang')
self.BinaryLicenseList.append((BinaryLicenseLang,
XmlElement(SubItem, '%s/BinaryLicense' % Key)))
DefineItem = XmlNode(Item, '%s/Define' % Key)
for SubItem in XmlList(DefineItem, 'Define/Statement'):
Statement = XmlElement(SubItem, '%s/Statement' % Key)
self.DefineDict[Statement] = ""
BuildOptionItem = XmlNode(Item, '%s/BuildOption' % Key)
for SubItem in XmlList(BuildOptionItem, 'BuildOption/Statement'):
Statement = XmlElement(SubItem, '%s/Statement' % Key)
Arch = XmlAttribute(XmlNode(SubItem, '%s/Statement' % Key), 'SupArchList')
self.BuildOptionDict[Arch] = Statement
IncludesItem = XmlNode(Item, '%s/Includes' % Key)
for SubItem in XmlList(IncludesItem, 'Includes/Statement'):
Statement = XmlElement(SubItem, '%s/Statement' % Key)
Arch = XmlAttribute(XmlNode(SubItem, '%s/Statement' % Key), 'SupArchList')
self.IncludesDict[Statement] = Arch
SourcesItem = XmlNode(Item, '%s/Sources' % Key)
Tmp = UserExtensionSourceXml()
SourceDict = Tmp.FromXml(SourcesItem, 'Sources')
self.SourcesDict = SourceDict
BinariesItem = XmlNode(Item, '%s/Binaries' % Key)
Tmp = UserExtensionBinaryXml()
BinariesDict = Tmp.FromXml(BinariesItem, 'Binaries')
self.BinariesDict = BinariesDict
self.Statement = XmlElement(Item, 'UserExtensions')
SupArch = XmlAttribute(XmlNode(Item, '%s' % Key), 'SupArchList')
self.SupArchList = [Arch for Arch in GetSplitValueList(SupArch, DataType.TAB_SPACE_SPLIT) if Arch]
UserExtension = UserExtensionObject()
UserExtension.SetUserID(self.UserId)
UserExtension.SetIdentifier(self.Identifier)
UserExtension.SetBinaryAbstract(self.BinaryAbstractList)
UserExtension.SetBinaryDescription(self.BinaryDescriptionList)
UserExtension.SetBinaryCopyright(self.BinaryCopyrightList)
UserExtension.SetBinaryLicense(self.BinaryLicenseList)
UserExtension.SetStatement(self.Statement)
UserExtension.SetSupArchList(self.SupArchList)
UserExtension.SetDefinesDict(self.DefineDict)
UserExtension.SetBuildOptionDict(self.BuildOptionDict)
UserExtension.SetIncludesDict(self.IncludesDict)
UserExtension.SetSourcesDict(self.SourcesDict)
UserExtension.SetBinariesDict(self.BinariesDict)
return UserExtension
def ToXml(self, UserExtension, Key):
if self.UserId:
pass
AttributeList = [['UserId', str(UserExtension.GetUserID())],
['Identifier', str(UserExtension.GetIdentifier())],
['SupArchList', \
GetStringOfList(UserExtension.GetSupArchList())],
]
Root = CreateXmlElement('%s' % Key, UserExtension.GetStatement(), [], \
AttributeList)
if UserExtension.GetIdentifier() == DataType.TAB_BINARY_HEADER_IDENTIFIER and \
UserExtension.GetUserID() == DataType.TAB_BINARY_HEADER_USERID:
for (Lang, Value) in UserExtension.GetBinaryAbstract():
if Value:
ChildElement = CreateXmlElement('BinaryAbstract', Value, [], [['Lang', Lang]])
Root.appendChild(ChildElement)
for (Lang, Value) in UserExtension.GetBinaryDescription():
if Value:
ChildElement = CreateXmlElement('BinaryDescription', Value, [], [['Lang', Lang]])
Root.appendChild(ChildElement)
for (Lang, Value) in UserExtension.GetBinaryCopyright():
if Value:
ChildElement = CreateXmlElement('BinaryCopyright', Value, [], [])
Root.appendChild(ChildElement)
for (Lang, Value) in UserExtension.GetBinaryLicense():
if Value:
ChildElement = CreateXmlElement('BinaryLicense', Value, [], [])
Root.appendChild(ChildElement)
NodeList = []
DefineDict = UserExtension.GetDefinesDict()
if DefineDict:
for Item in DefineDict.keys():
NodeList.append(CreateXmlElement\
('Statement', Item, [], []))
DefineElement = CreateXmlElement('Define', '', NodeList, [])
Root.appendChild(DefineElement)
NodeList = []
BuildOptionDict = UserExtension.GetBuildOptionDict()
if BuildOptionDict:
for Item in BuildOptionDict.keys():
NodeList.append(CreateXmlElement\
('Statement', BuildOptionDict[Item], [], \
[['SupArchList', Item]]))
BuildOptionElement = \
CreateXmlElement('BuildOption', '', NodeList, [])
Root.appendChild(BuildOptionElement)
NodeList = []
IncludesDict = UserExtension.GetIncludesDict()
if IncludesDict:
for Item in IncludesDict.keys():
NodeList.append(CreateXmlElement\
('Statement', Item, [], \
[['SupArchList', IncludesDict[Item]]]))
IncludesElement = CreateXmlElement('Includes', '', NodeList, [])
Root.appendChild(IncludesElement)
NodeList = []
SourcesDict = UserExtension.GetSourcesDict()
if SourcesDict:
Tmp = UserExtensionSourceXml()
Root.appendChild(Tmp.ToXml(SourcesDict, 'Sources'))
NodeList = []
BinariesDict = UserExtension.GetBinariesDict()
if BinariesDict:
Tmp = UserExtensionBinaryXml()
Root.appendChild(Tmp.ToXml(BinariesDict, 'Binaries'))
return Root
def __str__(self):
Str = "UserId = %s Identifier = %s" % (self.UserId, self.Identifier)
Str = Str + '\n\tDefines:' + str(self.Defines)
Str = Str + '\n\tBuildOptions:' + str(self.BuildOptions)
return Str
##
# UserExtensionSourceXml
#
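# Parses/serializes the Sources block carried inside a UserExtensions element.
# Judging from the XPaths used below, the expected layout is roughly (illustrative):
#   <Sources>
#     <SourceFile>
#       <FileName/> <Family/> <FeatureFlag/> <SupArchList/>
#       <SourceFileOtherAttr><TagName/><ToolCode/><Comment/></SourceFileOtherAttr>
#     </SourceFile>
#   </Sources>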
class UserExtensionSourceXml(object):
def __init__(self):
self.UserExtensionSource = ''
def FromXml(self, Item, Key):
if Key:
pass
if self.UserExtensionSource:
pass
Dict = {}
#SourcesItem = XmlNode(Item, '%s/Sources' % Key)
for SubItem in XmlList(Item, 'Sources/SourceFile'):
FileName = XmlElement(SubItem, 'SourceFile/FileName')
Family = XmlElement(SubItem, 'SourceFile/Family')
FeatureFlag = XmlElement(SubItem, 'SourceFile/FeatureFlag')
SupArchStr = XmlElement(SubItem, 'SourceFile/SupArchList')
DictKey = (FileName, Family, FeatureFlag, SupArchStr)
ValueList = []
for ValueNodeItem in XmlList(SubItem, \
'SourceFile/SourceFileOtherAttr'):
TagName = XmlElement(ValueNodeItem, \
'SourceFileOtherAttr/TagName')
ToolCode = XmlElement(ValueNodeItem, \
'SourceFileOtherAttr/ToolCode')
Comment = XmlElement(ValueNodeItem, \
'SourceFileOtherAttr/Comment')
if (TagName == ' ') and (ToolCode == ' ') and (Comment == ' '):
TagName = ''
ToolCode = ''
Comment = ''
ValueList.append((TagName, ToolCode, Comment))
Dict[DictKey] = ValueList
return Dict
def ToXml(self, Dict, Key):
if self.UserExtensionSource:
pass
SourcesNodeList = []
for Item in Dict:
ValueList = Dict[Item]
(FileName, Family, FeatureFlag, SupArchStr) = Item
SourceFileNodeList = []
SourceFileNodeList.append(["FileName", FileName])
SourceFileNodeList.append(["Family", Family])
SourceFileNodeList.append(["FeatureFlag", FeatureFlag])
SourceFileNodeList.append(["SupArchList", SupArchStr])
for (TagName, ToolCode, Comment) in ValueList:
ValueNodeList = []
if not (TagName or ToolCode or Comment):
TagName = ' '
ToolCode = ' '
Comment = ' '
ValueNodeList.append(["TagName", TagName])
ValueNodeList.append(["ToolCode", ToolCode])
ValueNodeList.append(["Comment", Comment])
ValueNodeXml = CreateXmlElement('SourceFileOtherAttr', '', \
ValueNodeList, [])
SourceFileNodeList.append(ValueNodeXml)
SourceFileNodeXml = CreateXmlElement('SourceFile', '', \
SourceFileNodeList, [])
SourcesNodeList.append(SourceFileNodeXml)
Root = CreateXmlElement('%s' % Key, '', SourcesNodeList, [])
return Root
##
# UserExtensionBinaryXml
#
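# Parses/serializes the Binaries block carried inside a UserExtensions element.
# Judging from the XPaths used below, the expected layout is roughly (illustrative):
#   <Binaries>
#     <Binary>
#       <FileName/> <FileType/> <FeatureFlag/> <SupArchList/>
#       <BinaryFileOtherAttr><Target/><Family/><TagName/><Comment/></BinaryFileOtherAttr>
#     </Binary>
#   </Binaries>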
class UserExtensionBinaryXml(object):
def __init__(self):
self.UserExtensionBinary = ''
def FromXml(self, Item, Key):
if Key:
pass
if self.UserExtensionBinary:
pass
Dict = {}
for SubItem in XmlList(Item, 'Binaries/Binary'):
FileName = XmlElement(SubItem, 'Binary/FileName')
FileType = XmlElement(SubItem, 'Binary/FileType')
FFE = XmlElement(SubItem, 'Binary/FeatureFlag')
SupArch = XmlElement(SubItem, 'Binary/SupArchList')
DictKey = (FileName, FileType, ConvertNOTEQToNE(FFE), SupArch)
ValueList = []
for ValueNodeItem in XmlList(SubItem, \
'Binary/BinaryFileOtherAttr'):
Target = XmlElement(ValueNodeItem, \
'BinaryFileOtherAttr/Target')
Family = XmlElement(ValueNodeItem, \
'BinaryFileOtherAttr/Family')
TagName = XmlElement(ValueNodeItem, \
'BinaryFileOtherAttr/TagName')
Comment = XmlElement(ValueNodeItem, \
'BinaryFileOtherAttr/Comment')
if (Target == ' ') and (Family == ' ') and \
(TagName == ' ') and (Comment == ' '):
Target = ''
Family = ''
TagName = ''
Comment = ''
ValueList.append((Target, Family, TagName, Comment))
Dict[DictKey] = ValueList
return Dict
def ToXml(self, Dict, Key):
if self.UserExtensionBinary:
pass
BinariesNodeList = []
for Item in Dict:
ValueList = Dict[Item]
(FileName, FileType, FeatureFlag, SupArch) = Item
FileNodeList = []
FileNodeList.append(["FileName", FileName])
FileNodeList.append(["FileType", FileType])
FileNodeList.append(["FeatureFlag", ConvertNEToNOTEQ(FeatureFlag)])
FileNodeList.append(["SupArchList", SupArch])
for (Target, Family, TagName, Comment) in ValueList:
ValueNodeList = []
if not (Target or Family or TagName or Comment):
Target = ' '
Family = ' '
TagName = ' '
Comment = ' '
ValueNodeList.append(["Target", Target])
ValueNodeList.append(["Family", Family])
ValueNodeList.append(["TagName", TagName])
ValueNodeList.append(["Comment", Comment])
ValueNodeXml = CreateXmlElement('BinaryFileOtherAttr', '', \
ValueNodeList, [])
FileNodeList.append(ValueNodeXml)
FileNodeXml = CreateXmlElement('Binary', '', FileNodeList, [])
BinariesNodeList.append(FileNodeXml)
Root = CreateXmlElement('%s' % Key, '', BinariesNodeList, [])
return Root
##
# LibraryClassXml
#
class LibraryClassXml(object):
def __init__(self):
self.Keyword = ''
self.HeaderFile = ''
self.RecommendedInstanceGuid = ''
self.RecommendedInstanceVersion = ''
self.CommonDefines = CommonDefinesXml()
self.HelpText = []
def FromXml(self, Item, Key):
self.Keyword = XmlAttribute(XmlNode(Item, '%s' % Key), 'Keyword')
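        # The keyword may appear either as a 'Keyword' attribute or as a
        # <Keyword> child element; fall back to the element when the attribute is empty.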
if self.Keyword == '':
self.Keyword = XmlElement(Item, '%s/Keyword' % Key)
self.HeaderFile = XmlElement(Item, '%s/HeaderFile' % Key)
self.CommonDefines.FromXml(XmlNode(Item, '%s' % Key), Key)
for HelpTextItem in XmlList(Item, '%s/HelpText' % Key):
HelpTextObj = HelpTextXml()
HelpTextObj.FromXml(HelpTextItem, '%s/HelpText' % Key)
self.HelpText.append(HelpTextObj)
LibraryClass = LibraryClassObject()
LibraryClass.SetLibraryClass(self.Keyword)
LibraryClass.SetIncludeHeader(self.HeaderFile)
if self.CommonDefines.Usage:
LibraryClass.SetUsage(self.CommonDefines.Usage)
LibraryClass.SetSupArchList(self.CommonDefines.SupArchList)
LibraryClass.SetSupModuleList(self.CommonDefines.SupModList)
LibraryClass.SetFeatureFlag(ConvertNOTEQToNE(self.CommonDefines.FeatureFlag))
LibraryClass.SetHelpTextList(GetHelpTextList(self.HelpText))
return LibraryClass
def ToXml(self, LibraryClass, Key):
if self.HeaderFile:
pass
AttributeList = \
[['Keyword', LibraryClass.GetLibraryClass()],
['SupArchList', GetStringOfList(LibraryClass.GetSupArchList())],
['SupModList', GetStringOfList(LibraryClass.GetSupModuleList())]
]
NodeList = [['HeaderFile', LibraryClass.GetIncludeHeader()]]
for Item in LibraryClass.GetHelpTextList():
Tmp = HelpTextXml()
NodeList.append(Tmp.ToXml(Item))
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def ToXml2(self, LibraryClass, Key):
if self.HeaderFile:
pass
FeatureFlag = ConvertNEToNOTEQ(LibraryClass.GetFeatureFlag())
AttributeList = \
[['Usage', LibraryClass.GetUsage()], \
['SupArchList', GetStringOfList(LibraryClass.GetSupArchList())], \
['SupModList', GetStringOfList(LibraryClass.GetSupModuleList())], \
['FeatureFlag', FeatureFlag]
]
NodeList = [['Keyword', LibraryClass.GetLibraryClass()], ]
for Item in LibraryClass.GetHelpTextList():
Tmp = HelpTextXml()
NodeList.append(Tmp.ToXml(Item))
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def __str__(self):
Str = "Keyword = %s HeaderFile = %s RecommendedInstanceGuid = %s RecommendedInstanceVersion = %s %s" % \
(self.Keyword, self.HeaderFile, self.RecommendedInstanceGuid, self.RecommendedInstanceVersion, \
self.CommonDefines)
for Item in self.HelpText:
Str = Str + "\n\t" + str(Item)
return Str
##
# FilenameXml
#
class FilenameXml(object):
def __init__(self):
self.FileType = ''
self.Filename = ''
self.CommonDefines = CommonDefinesXml()
def FromXml(self, Item, Key):
self.FileType = XmlAttribute(Item, 'FileType')
Guid = XmlAttribute(Item, 'GUID')
self.Filename = XmlElement(Item, 'Filename')
self.CommonDefines.FromXml(Item, Key)
FeatureFlag = ConvertNOTEQToNE(self.CommonDefines.FeatureFlag)
Filename = FileNameObject()
#
# Convert File Type
#
if self.FileType == 'UEFI_IMAGE':
self.FileType = 'PE32'
Filename.SetGuidValue(Guid)
Filename.SetFileType(self.FileType)
Filename.SetFilename(self.Filename)
Filename.SetSupArchList(self.CommonDefines.SupArchList)
Filename.SetFeatureFlag(FeatureFlag)
return Filename
def ToXml(self, Filename, Key):
if self.Filename:
pass
AttributeList = [['SupArchList', \
GetStringOfList(Filename.GetSupArchList())],
['FileType', Filename.GetFileType()],
['FeatureFlag', ConvertNEToNOTEQ(Filename.GetFeatureFlag())],
['GUID', Filename.GetGuidValue()]
]
Root = CreateXmlElement('%s' % Key, Filename.GetFilename(), [], AttributeList)
return Root
def __str__(self):
return "FileType = %s Filename = %s %s" \
% (self.FileType, self.Filename, self.CommonDefines)
| ## @file
# This file is used to parse a PCD file of .PKG file
#
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
CommonXml
'''
##
# Import Modules
#
from Core.DistributionPackageClass import DistributionPackageHeaderObject
from Library.String import ConvertNEToNOTEQ
from Library.String import ConvertNOTEQToNE
from Library.String import GetSplitValueList
from Library.String import GetStringOfList
from Library.Xml.XmlRoutines import XmlElement
from Library.Xml.XmlRoutines import XmlElement2
from Library.Xml.XmlRoutines import XmlAttribute
from Library.Xml.XmlRoutines import XmlNode
from Library.Xml.XmlRoutines import XmlList
from Library.Xml.XmlRoutines import CreateXmlElement
from Library.UniClassObject import ConvertSpecialUnicodes
from Library.UniClassObject import GetLanguageCode1766
from Object.POM.CommonObject import FileObject
from Object.POM.CommonObject import MiscFileObject
from Object.POM.CommonObject import UserExtensionObject
from Object.POM.CommonObject import ClonedRecordObject
from Object.POM.CommonObject import LibraryClassObject
from Object.POM.CommonObject import FileNameObject
from Object.POM.ModuleObject import ModuleObject
from Xml.XmlParserMisc import IsRequiredItemListNull
from Xml.XmlParserMisc import GetHelpTextList
import Library.DataType as DataType
##
# ClonedFromXml
#
class ClonedFromXml(object):
def __init__(self):
self.GUID = ''
self.Version = ''
def FromXml(self, Item, Key):
self.GUID = XmlElement(Item, '%s/GUID' % Key)
self.Version = XmlAttribute(XmlNode(Item, '%s/GUID' % Key), 'Version')
if self.GUID == '' and self.Version == '':
return None
ClonedFrom = ClonedRecordObject()
ClonedFrom.SetPackageGuid(self.GUID)
ClonedFrom.SetPackageVersion(self.Version)
return ClonedFrom
def ToXml(self, ClonedFrom, Key):
if self.GUID:
pass
Element1 = CreateXmlElement('GUID', ClonedFrom.GetPackageGuid(), [],
[['Version', ClonedFrom.GetPackageVersion()]])
AttributeList = []
NodeList = [Element1]
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def __str__(self):
return "GUID = %s Version = %s" % (self.GUID, self.Version)
##
# CommonDefinesXml
#
class CommonDefinesXml(object):
def __init__(self):
self.Usage = ''
self.SupArchList = []
self.SupModList = []
self.FeatureFlag = ''
def FromXml(self, Item, Key):
if Key:
pass
self.Usage = XmlAttribute(Item, 'Usage')
self.SupArchList = \
[Arch for Arch in GetSplitValueList(XmlAttribute(Item, 'SupArchList'), DataType.TAB_SPACE_SPLIT) if Arch]
self.SupModList = \
[Mod for Mod in GetSplitValueList(XmlAttribute(Item, 'SupModList'), DataType.TAB_SPACE_SPLIT) if Mod]
self.FeatureFlag = ConvertNOTEQToNE(XmlAttribute(Item, 'FeatureFlag'))
def ToXml(self):
pass
def __str__(self):
return "Usage = %s SupArchList = %s SupModList = %s FeatureFlag = %s" \
% (self.Usage, self.SupArchList, self.SupModList, self.FeatureFlag)
##
# PromptXml
#
class PromptXml(object):
def __init__(self):
self.Prompt = ''
self.Lang = ''
def FromXml(self, Item, Key):
if Key:
pass
self.Prompt = XmlElement2(Item, 'Prompt')
self.Lang = XmlAttribute(Item, 'Lang')
def ToXml(self, Prompt, Key='Prompt'):
if self.Prompt:
pass
return CreateXmlElement('%s' % Key, Prompt.GetString(), [], [['Lang', Prompt.GetLang()]])
def __str__(self):
return "Prompt = %s Lang = %s" % (self.Prompt, self.Lang)
##
# HelpTextXml
#
class HelpTextXml(object):
def __init__(self):
self.HelpText = ''
self.Lang = ''
def FromXml(self, Item, Key):
if Key:
pass
self.HelpText = XmlElement2(Item, 'HelpText')
self.Lang = XmlAttribute(Item, 'Lang')
def ToXml(self, HelpText, Key='HelpText'):
if self.HelpText:
pass
return CreateXmlElement('%s' % Key, HelpText.GetString(), [], [['Lang', HelpText.GetLang()]])
def __str__(self):
return "HelpText = %s Lang = %s" % (self.HelpText, self.Lang)
##
# HeaderXml
#
class HeaderXml(object):
def __init__(self):
self.Name = ''
self.BaseName = ''
self.GUID = ''
self.Version = ''
self.CopyrightList = []
self.LicenseList = []
self.AbstractList = []
self.DescriptionList = []
def FromXml(self, Item, Key, IsRequiredCheck=False, IsStandAlongModule=False):
if not Item and IsRequiredCheck:
XmlTreeLevel = []
if IsStandAlongModule:
XmlTreeLevel = ['DistributionPackage', 'ModuleSurfaceArea']
else:
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea', 'ModuleSurfaceArea']
CheckDict = {'Header':''}
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
self.Name = XmlElement(Item, '%s/Name' % Key)
self.BaseName = XmlAttribute(XmlNode(Item, '%s/Name' % Key), 'BaseName')
self.GUID = XmlElement(Item, '%s/GUID' % Key)
self.Version = XmlAttribute(XmlNode(Item, '%s/GUID' % Key), 'Version')
for SubItem in XmlList(Item, '%s/Abstract' % Key):
HeaderAbstractLang = XmlAttribute(SubItem, 'Lang')
self.AbstractList.append((HeaderAbstractLang, XmlElement(SubItem, '%s/Abstract' % Key)))
for SubItem in XmlList(Item, '%s/Description' % Key):
HeaderDescriptionLang = XmlAttribute(SubItem, 'Lang')
self.DescriptionList.append((HeaderDescriptionLang, XmlElement(SubItem, '%s/Description' % Key)))
for SubItem in XmlList(Item, '%s/Copyright' % Key):
HeaderCopyrightLang = XmlAttribute(SubItem, 'Lang')
self.CopyrightList.append((HeaderCopyrightLang, XmlElement(SubItem, '%s/Copyright' % Key)))
for SubItem in XmlList(Item, '%s/License' % Key):
HeaderLicenseLang = XmlAttribute(SubItem, 'Lang')
self.LicenseList.append((HeaderLicenseLang, XmlElement(SubItem, '%s/License' % Key)))
ModuleHeader = ModuleObject()
ModuleHeader.SetName(self.Name)
ModuleHeader.SetBaseName(self.BaseName)
ModuleHeader.SetGuid(self.GUID)
ModuleHeader.SetVersion(self.Version)
ModuleHeader.SetCopyright(self.CopyrightList)
ModuleHeader.SetLicense(self.LicenseList)
ModuleHeader.SetAbstract(self.AbstractList)
ModuleHeader.SetDescription(self.DescriptionList)
return ModuleHeader
def ToXml(self, Header, Key):
if self.GUID:
pass
Element1 = CreateXmlElement('Name', Header.GetName(), [], [['BaseName', Header.GetBaseName()]])
Element2 = CreateXmlElement('GUID', Header.GetGuid(), [], [['Version', Header.GetVersion()]])
NodeList = [Element1,
Element2,
]
UNIInfAbstractList = []
UNIInfDescriptionList = []
# Get Abstract and Description from Uni File
# if the Uni File exists
if Header.UniFileClassObject is not None:
UniStrDict = Header.UniFileClassObject.OrderedStringList
for Lang in UniStrDict:
for StringDefClassObject in UniStrDict[Lang]:
if not StringDefClassObject.StringValue:
continue
if StringDefClassObject.StringName == DataType.TAB_INF_ABSTRACT:
UNIInfAbstractList.append((GetLanguageCode1766(Lang),
ConvertSpecialUnicodes(StringDefClassObject.StringValue)))
if StringDefClassObject.StringName == DataType.TAB_INF_DESCRIPTION:
UNIInfDescriptionList.append((GetLanguageCode1766(Lang),
ConvertSpecialUnicodes(StringDefClassObject.StringValue)))
# Get Abstract and Description from INF File Header
for (Lang, Value) in Header.GetCopyright():
if Value:
NodeList.append(CreateXmlElement('Copyright', Value, [], []))
for (Lang, Value) in Header.GetLicense():
if Value:
NodeList.append(CreateXmlElement('License', Value, [], []))
for (Lang, Value) in Header.GetAbstract() + UNIInfAbstractList:
if Value:
NodeList.append(CreateXmlElement('Abstract', Value, [], [['Lang', Lang]]))
for (Lang, Value) in Header.GetDescription() + UNIInfDescriptionList:
if Value:
NodeList.append(CreateXmlElement('Description', Value, [], [['Lang', Lang]]))
AttributeList = []
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def __str__(self):
return "Name = %s BaseName = %s GUID = %s Version = %s Copyright = %s \
License = %s Abstract = %s Description = %s" % \
(self.Name, self.BaseName, self.GUID, self.Version, self.CopyrightList, \
self.LicenseList, self.AbstractList, self.DescriptionList)
##
# DistributionPackageHeaderXml
#
class DistributionPackageHeaderXml(object):
def __init__(self):
self.Header = HeaderXml()
self.ReadOnly = ''
self.RePackage = ''
self.Vendor = ''
self.Date = ''
self.Signature = ''
self.XmlSpecification = ''
def FromXml(self, Item, Key):
if not Item:
return None
self.ReadOnly = XmlAttribute(XmlNode(Item, '%s' % Key), 'ReadOnly')
self.RePackage = XmlAttribute(XmlNode(Item, '%s' % Key), 'RePackage')
self.Vendor = XmlElement(Item, '%s/Vendor' % Key)
self.Date = XmlElement(Item, '%s/Date' % Key)
self.Signature = XmlElement(Item, '%s/Signature' % Key)
self.XmlSpecification = XmlElement(Item, '%s/XmlSpecification' % Key)
self.Header.FromXml(Item, Key)
DistributionPackageHeader = DistributionPackageHeaderObject()
if self.ReadOnly.upper() == 'TRUE':
DistributionPackageHeader.ReadOnly = True
elif self.ReadOnly.upper() == 'FALSE':
DistributionPackageHeader.ReadOnly = False
if self.RePackage.upper() == 'TRUE':
DistributionPackageHeader.RePackage = True
elif self.RePackage.upper() == 'FALSE':
DistributionPackageHeader.RePackage = False
DistributionPackageHeader.Vendor = self.Vendor
DistributionPackageHeader.Date = self.Date
DistributionPackageHeader.Signature = self.Signature
DistributionPackageHeader.XmlSpecification = self.XmlSpecification
DistributionPackageHeader.SetName(self.Header.Name)
DistributionPackageHeader.SetBaseName(self.Header.BaseName)
DistributionPackageHeader.SetGuid(self.Header.GUID)
DistributionPackageHeader.SetVersion(self.Header.Version)
DistributionPackageHeader.SetCopyright(self.Header.CopyrightList)
DistributionPackageHeader.SetLicense(self.Header.LicenseList)
DistributionPackageHeader.SetAbstract(self.Header.AbstractList)
DistributionPackageHeader.SetDescription(self.Header.DescriptionList)
return DistributionPackageHeader
def ToXml(self, DistributionPackageHeader, Key):
if self.Header:
pass
Element1 = CreateXmlElement('Name', \
DistributionPackageHeader.GetName(), [], \
[['BaseName', \
DistributionPackageHeader.GetBaseName()]])
Element2 = CreateXmlElement('GUID', \
DistributionPackageHeader.GetGuid(), [], \
[['Version', \
DistributionPackageHeader.GetVersion()]])
AttributeList = []
if DistributionPackageHeader.ReadOnly != '':
AttributeList.append(['ReadOnly', str(DistributionPackageHeader.ReadOnly).lower()])
if DistributionPackageHeader.RePackage != '':
AttributeList.append(['RePackage', str(DistributionPackageHeader.RePackage).lower()])
if DistributionPackageHeader.GetAbstract():
DPAbstract = DistributionPackageHeader.GetAbstract()[0][1]
else:
DPAbstract = ''
if DistributionPackageHeader.GetDescription():
DPDescription = DistributionPackageHeader.GetDescription()[0][1]
else:
DPDescription = ''
if DistributionPackageHeader.GetCopyright():
DPCopyright = DistributionPackageHeader.GetCopyright()[0][1]
else:
DPCopyright = ''
if DistributionPackageHeader.GetLicense():
DPLicense = DistributionPackageHeader.GetLicense()[0][1]
else:
DPLicense = ''
NodeList = [Element1,
Element2,
['Vendor', DistributionPackageHeader.Vendor],
['Date', DistributionPackageHeader.Date],
['Copyright', DPCopyright],
['License', DPLicense],
['Abstract', DPAbstract],
['Description', DPDescription],
['Signature', DistributionPackageHeader.Signature],
['XmlSpecification', \
DistributionPackageHeader.XmlSpecification],
]
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def __str__(self):
return "ReadOnly = %s RePackage = %s Vendor = %s Date = %s \
Signature = %s XmlSpecification = %s %s" % \
(self.ReadOnly, self.RePackage, self.Vendor, self.Date, \
self.Signature, self.XmlSpecification, self.Header)
##
# PackageHeaderXml
#
class PackageHeaderXml(object):
def __init__(self):
self.Header = HeaderXml()
self.PackagePath = ''
def FromXml(self, Item, Key, PackageObject2):
if not Item:
XmlTreeLevel = ['DistributionPackage', 'PackageSurfaceArea']
CheckDict = {'PackageHeader':None, }
IsRequiredItemListNull(CheckDict, XmlTreeLevel)
self.PackagePath = XmlElement(Item, '%s/PackagePath' % Key)
self.Header.FromXml(Item, Key)
PackageObject2.SetName(self.Header.Name)
PackageObject2.SetBaseName(self.Header.BaseName)
PackageObject2.SetGuid(self.Header.GUID)
PackageObject2.SetVersion(self.Header.Version)
PackageObject2.SetCopyright(self.Header.CopyrightList)
PackageObject2.SetLicense(self.Header.LicenseList)
PackageObject2.SetAbstract(self.Header.AbstractList)
PackageObject2.SetDescription(self.Header.DescriptionList)
PackageObject2.SetPackagePath(self.PackagePath)
def ToXml(self, PackageObject2, Key):
if self.PackagePath:
pass
Element1 = CreateXmlElement('Name', PackageObject2.GetName(), [], \
[['BaseName', PackageObject2.GetBaseName()]])
Element2 = CreateXmlElement('GUID', PackageObject2.GetGuid(), [], \
[['Version', PackageObject2.GetVersion()]])
NodeList = [Element1,
Element2
]
UNIPackageAbrstractList = []
UNIPackageDescriptionList = []
# Get Abstract and Description from Uni File
# if the Uni File exists
if PackageObject2.UniFileClassObject is not None:
UniStrDict = PackageObject2.UniFileClassObject.OrderedStringList
for Lang in UniStrDict:
for StringDefClassObject in UniStrDict[Lang]:
if not StringDefClassObject.StringValue:
continue
if StringDefClassObject.StringName == DataType.TAB_DEC_PACKAGE_ABSTRACT:
UNIPackageAbrstractList.append((GetLanguageCode1766(Lang),
ConvertSpecialUnicodes(StringDefClassObject.StringValue)))
if StringDefClassObject.StringName == DataType.TAB_DEC_PACKAGE_DESCRIPTION:
UNIPackageDescriptionList.append((GetLanguageCode1766(Lang),
ConvertSpecialUnicodes(StringDefClassObject.StringValue)))
# Get Abstract and Description from DEC File Header
for (Lang, Value) in PackageObject2.GetCopyright():
if Value:
NodeList.append(CreateXmlElement(DataType.TAB_HEADER_COPYRIGHT, Value, [], []))
for (Lang, Value) in PackageObject2.GetLicense():
if Value:
NodeList.append(CreateXmlElement(DataType.TAB_HEADER_LICENSE, Value, [], []))
for (Lang, Value) in PackageObject2.GetAbstract() + UNIPackageAbrstractList:
if Value:
NodeList.append(CreateXmlElement(DataType.TAB_HEADER_ABSTRACT, Value, [], [['Lang', Lang]]))
for (Lang, Value) in PackageObject2.GetDescription() + UNIPackageDescriptionList:
if Value:
NodeList.append(CreateXmlElement(DataType.TAB_HEADER_DESCRIPTION, Value, [], [['Lang', Lang]]))
NodeList.append(['PackagePath', PackageObject2.GetPackagePath()])
AttributeList = []
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def __str__(self):
return "PackagePath = %s %s" \
% (self.PackagePath, self.Header)
##
# MiscellaneousFileXml
#
class MiscellaneousFileXml(object):
def __init__(self):
self.Header = HeaderXml()
self.Files = []
##
# This API is used for Package or Module's MiscellaneousFile section
#
def FromXml(self, Item, Key):
if not Item:
return None
self.Header.FromXml(Item, Key)
NewItem = XmlNode(Item, '%s/Header' % Key)
self.Header.FromXml(NewItem, 'Header')
for SubItem in XmlList(Item, '%s/Filename' % Key):
Filename = XmlElement(SubItem, '%s/Filename' % Key)
Executable = XmlAttribute(XmlNode(SubItem, '%s/Filename' % Key), 'Executable')
if Executable.upper() == "TRUE":
Executable = True
elif Executable.upper() == "FALSE":
Executable = False
else:
Executable = ''
self.Files.append([Filename, Executable])
MiscFile = MiscFileObject()
MiscFile.SetCopyright(self.Header.CopyrightList)
MiscFile.SetLicense(self.Header.LicenseList)
MiscFile.SetAbstract(self.Header.AbstractList)
MiscFile.SetDescription(self.Header.DescriptionList)
MiscFileList = []
for File in self.Files:
FileObj = FileObject()
FileObj.SetURI(File[0])
FileObj.SetExecutable(File[1])
MiscFileList.append(FileObj)
MiscFile.SetFileList(MiscFileList)
return MiscFile
##
# This API is used for DistP's tool section
#
def FromXml2(self, Item, Key):
if Item is None:
return None
NewItem = XmlNode(Item, '%s/Header' % Key)
self.Header.FromXml(NewItem, 'Header')
for SubItem in XmlList(Item, '%s/Filename' % Key):
Filename = XmlElement(SubItem, '%s/Filename' % Key)
Executable = \
XmlAttribute(XmlNode(SubItem, '%s/Filename' % Key), 'Executable')
OsType = XmlAttribute(XmlNode(SubItem, '%s/Filename' % Key), 'OS')
if Executable.upper() == "TRUE":
Executable = True
elif Executable.upper() == "FALSE":
Executable = False
else:
Executable = ''
self.Files.append([Filename, Executable, OsType])
MiscFile = MiscFileObject()
MiscFile.SetName(self.Header.Name)
MiscFile.SetCopyright(self.Header.CopyrightList)
MiscFile.SetLicense(self.Header.LicenseList)
MiscFile.SetAbstract(self.Header.AbstractList)
MiscFile.SetDescription(self.Header.DescriptionList)
MiscFileList = []
for File in self.Files:
FileObj = FileObject()
FileObj.SetURI(File[0])
FileObj.SetExecutable(File[1])
FileObj.SetOS(File[2])
MiscFileList.append(FileObj)
MiscFile.SetFileList(MiscFileList)
return MiscFile
##
# This API is used for Package or Module's MiscellaneousFile section
#
def ToXml(self, MiscFile, Key):
if self.Header:
pass
if MiscFile:
if MiscFile.GetAbstract():
DPAbstract = MiscFile.GetAbstract()[0][1]
else:
DPAbstract = ''
if MiscFile.GetDescription():
DPDescription = MiscFile.GetDescription()[0][1]
else:
DPDescription = ''
if MiscFile.GetCopyright():
DPCopyright = MiscFile.GetCopyright()[0][1]
else:
DPCopyright = ''
if MiscFile.GetLicense():
DPLicense = MiscFile.GetLicense()[0][1]
else:
DPLicense = ''
NodeList = [['Copyright', DPCopyright],
['License', DPLicense],
['Abstract', DPAbstract],
['Description', DPDescription],
]
for File in MiscFile.GetFileList():
NodeList.append\
(CreateXmlElement\
('Filename', File.GetURI(), [], \
[['Executable', str(File.GetExecutable()).lower()]]))
Root = CreateXmlElement('%s' % Key, '', NodeList, [])
return Root
##
# This API is used for DistP's tool section
#
def ToXml2(self, MiscFile, Key):
if self.Header:
pass
if MiscFile:
if MiscFile.GetAbstract():
DPAbstract = MiscFile.GetAbstract()[0][1]
else:
DPAbstract = ''
if MiscFile.GetDescription():
DPDescription = MiscFile.GetDescription()[0][1]
else:
DPDescription = ''
if MiscFile.GetCopyright():
DPCopyright = MiscFile.GetCopyright()[0][1]
else:
DPCopyright = ''
if MiscFile.GetLicense():
DPLicense = MiscFile.GetLicense()[0][1]
else:
DPLicense = ''
NodeList = [['Name', MiscFile.GetName()],
['Copyright', DPCopyright],
['License', DPLicense],
['Abstract', DPAbstract],
['Description', DPDescription],
]
HeaderNode = CreateXmlElement('Header', '', NodeList, [])
NodeList = [HeaderNode]
for File in MiscFile.GetFileList():
NodeList.append\
(CreateXmlElement\
('Filename', File.GetURI(), [], \
[['Executable', str(File.GetExecutable()).lower()], \
['OS', File.GetOS()]]))
Root = CreateXmlElement('%s' % Key, '', NodeList, [])
return Root
def __str__(self):
Str = str(self.Header)
for Item in self.Files:
Str = Str + '\n\tFilename:' + str(Item)
return Str
##
# UserExtensionsXml
#
class UserExtensionsXml(object):
def __init__(self):
self.UserId = ''
self.Identifier = ''
self.BinaryAbstractList = []
self.BinaryDescriptionList = []
self.BinaryCopyrightList = []
self.BinaryLicenseList = []
self.LangDefsList = []
self.DefineDict = {}
self.BuildOptionDict = {}
self.IncludesDict = {}
self.SourcesDict = {}
self.BinariesDict = {}
self.SupArchList = []
self.Statement = ''
self.Defines = ''
self.BuildOptions = ''
def FromXml2(self, Item, Key):
self.UserId = XmlAttribute(XmlNode(Item, '%s' % Key), 'UserId')
self.Identifier = XmlAttribute(XmlNode(Item, '%s' % Key), 'Identifier')
UserExtension = UserExtensionObject()
UserExtension.SetUserID(self.UserId)
UserExtension.SetIdentifier(self.Identifier)
return UserExtension
def FromXml(self, Item, Key):
self.UserId = XmlAttribute(XmlNode(Item, '%s' % Key), 'UserId')
self.Identifier = XmlAttribute(XmlNode(Item, '%s' % Key), 'Identifier')
if self.UserId == DataType.TAB_BINARY_HEADER_USERID \
and self.Identifier == DataType.TAB_BINARY_HEADER_IDENTIFIER:
for SubItem in XmlList(Item, '%s/BinaryAbstract' % Key):
BinaryAbstractLang = XmlAttribute(SubItem, 'Lang')
self.BinaryAbstractList.append((BinaryAbstractLang, XmlElement(SubItem, '%s/BinaryAbstract' % Key)))
for SubItem in XmlList(Item, '%s/BinaryDescription' % Key):
BinaryDescriptionLang = XmlAttribute(SubItem, 'Lang')
self.BinaryDescriptionList.append((BinaryDescriptionLang,
XmlElement(SubItem, '%s/BinaryDescription' % Key)))
for SubItem in XmlList(Item, '%s/BinaryCopyright' % Key):
BinaryCopyrightLang = XmlAttribute(SubItem, 'Lang')
self.BinaryCopyrightList.append((BinaryCopyrightLang,
XmlElement(SubItem, '%s/BinaryCopyright' % Key)))
for SubItem in XmlList(Item, '%s/BinaryLicense' % Key):
BinaryLicenseLang = XmlAttribute(SubItem, 'Lang')
self.BinaryLicenseList.append((BinaryLicenseLang,
XmlElement(SubItem, '%s/BinaryLicense' % Key)))
DefineItem = XmlNode(Item, '%s/Define' % Key)
for SubItem in XmlList(DefineItem, 'Define/Statement'):
Statement = XmlElement(SubItem, '%s/Statement' % Key)
self.DefineDict[Statement] = ""
BuildOptionItem = XmlNode(Item, '%s/BuildOption' % Key)
for SubItem in XmlList(BuildOptionItem, 'BuildOption/Statement'):
Statement = XmlElement(SubItem, '%s/Statement' % Key)
Arch = XmlAttribute(XmlNode(SubItem, '%s/Statement' % Key), 'SupArchList')
self.BuildOptionDict[Arch] = Statement
IncludesItem = XmlNode(Item, '%s/Includes' % Key)
for SubItem in XmlList(IncludesItem, 'Includes/Statement'):
Statement = XmlElement(SubItem, '%s/Statement' % Key)
Arch = XmlAttribute(XmlNode(SubItem, '%s/Statement' % Key), 'SupArchList')
self.IncludesDict[Statement] = Arch
SourcesItem = XmlNode(Item, '%s/Sources' % Key)
Tmp = UserExtensionSourceXml()
SourceDict = Tmp.FromXml(SourcesItem, 'Sources')
self.SourcesDict = SourceDict
BinariesItem = XmlNode(Item, '%s/Binaries' % Key)
Tmp = UserExtensionBinaryXml()
BinariesDict = Tmp.FromXml(BinariesItem, 'Binaries')
self.BinariesDict = BinariesDict
self.Statement = XmlElement(Item, 'UserExtensions')
SupArch = XmlAttribute(XmlNode(Item, '%s' % Key), 'SupArchList')
self.SupArchList = [Arch for Arch in GetSplitValueList(SupArch, DataType.TAB_SPACE_SPLIT) if Arch]
UserExtension = UserExtensionObject()
UserExtension.SetUserID(self.UserId)
UserExtension.SetIdentifier(self.Identifier)
UserExtension.SetBinaryAbstract(self.BinaryAbstractList)
UserExtension.SetBinaryDescription(self.BinaryDescriptionList)
UserExtension.SetBinaryCopyright(self.BinaryCopyrightList)
UserExtension.SetBinaryLicense(self.BinaryLicenseList)
UserExtension.SetStatement(self.Statement)
UserExtension.SetSupArchList(self.SupArchList)
UserExtension.SetDefinesDict(self.DefineDict)
UserExtension.SetBuildOptionDict(self.BuildOptionDict)
UserExtension.SetIncludesDict(self.IncludesDict)
UserExtension.SetSourcesDict(self.SourcesDict)
UserExtension.SetBinariesDict(self.BinariesDict)
return UserExtension
def ToXml(self, UserExtension, Key):
if self.UserId:
pass
AttributeList = [['UserId', str(UserExtension.GetUserID())],
['Identifier', str(UserExtension.GetIdentifier())],
['SupArchList', \
GetStringOfList(UserExtension.GetSupArchList())],
]
Root = CreateXmlElement('%s' % Key, UserExtension.GetStatement(), [], \
AttributeList)
if UserExtension.GetIdentifier() == DataType.TAB_BINARY_HEADER_IDENTIFIER and \
UserExtension.GetUserID() == DataType.TAB_BINARY_HEADER_USERID:
for (Lang, Value) in UserExtension.GetBinaryAbstract():
if Value:
ChildElement = CreateXmlElement('BinaryAbstract', Value, [], [['Lang', Lang]])
Root.appendChild(ChildElement)
for (Lang, Value) in UserExtension.GetBinaryDescription():
if Value:
ChildElement = CreateXmlElement('BinaryDescription', Value, [], [['Lang', Lang]])
Root.appendChild(ChildElement)
for (Lang, Value) in UserExtension.GetBinaryCopyright():
if Value:
ChildElement = CreateXmlElement('BinaryCopyright', Value, [], [])
Root.appendChild(ChildElement)
for (Lang, Value) in UserExtension.GetBinaryLicense():
if Value:
ChildElement = CreateXmlElement('BinaryLicense', Value, [], [])
Root.appendChild(ChildElement)
NodeList = []
DefineDict = UserExtension.GetDefinesDict()
if DefineDict:
for Item in DefineDict.keys():
NodeList.append(CreateXmlElement\
('Statement', Item, [], []))
DefineElement = CreateXmlElement('Define', '', NodeList, [])
Root.appendChild(DefineElement)
NodeList = []
BuildOptionDict = UserExtension.GetBuildOptionDict()
if BuildOptionDict:
for Item in BuildOptionDict.keys():
NodeList.append(CreateXmlElement\
('Statement', BuildOptionDict[Item], [], \
[['SupArchList', Item]]))
BuildOptionElement = \
CreateXmlElement('BuildOption', '', NodeList, [])
Root.appendChild(BuildOptionElement)
NodeList = []
IncludesDict = UserExtension.GetIncludesDict()
if IncludesDict:
for Item in IncludesDict.keys():
NodeList.append(CreateXmlElement\
('Statement', Item, [], \
[['SupArchList', IncludesDict[Item]]]))
IncludesElement = CreateXmlElement('Includes', '', NodeList, [])
Root.appendChild(IncludesElement)
NodeList = []
SourcesDict = UserExtension.GetSourcesDict()
if SourcesDict:
Tmp = UserExtensionSourceXml()
Root.appendChild(Tmp.ToXml(SourcesDict, 'Sources'))
NodeList = []
BinariesDict = UserExtension.GetBinariesDict()
if BinariesDict:
Tmp = UserExtensionBinaryXml()
Root.appendChild(Tmp.ToXml(BinariesDict, 'Binaries'))
return Root
def __str__(self):
Str = "UserId = %s Identifier = %s" % (self.UserId, self.Identifier)
Str = Str + '\n\tDefines:' + str(self.Defines)
Str = Str + '\n\tBuildOptions:' + str(self.BuildOptions)
return Str
##
# UserExtensionSourceXml
#
class UserExtensionSourceXml(object):
def __init__(self):
self.UserExtensionSource = ''
def FromXml(self, Item, Key):
if Key:
pass
if self.UserExtensionSource:
pass
Dict = {}
#SourcesItem = XmlNode(Item, '%s/Sources' % Key)
for SubItem in XmlList(Item, 'Sources/SourceFile'):
FileName = XmlElement(SubItem, 'SourceFile/FileName')
Family = XmlElement(SubItem, 'SourceFile/Family')
FeatureFlag = XmlElement(SubItem, 'SourceFile/FeatureFlag')
SupArchStr = XmlElement(SubItem, 'SourceFile/SupArchList')
DictKey = (FileName, Family, FeatureFlag, SupArchStr)
ValueList = []
for ValueNodeItem in XmlList(SubItem, \
'SourceFile/SourceFileOtherAttr'):
TagName = XmlElement(ValueNodeItem, \
'SourceFileOtherAttr/TagName')
ToolCode = XmlElement(ValueNodeItem, \
'SourceFileOtherAttr/ToolCode')
Comment = XmlElement(ValueNodeItem, \
'SourceFileOtherAttr/Comment')
if (TagName == ' ') and (ToolCode == ' ') and (Comment == ' '):
TagName = ''
ToolCode = ''
Comment = ''
ValueList.append((TagName, ToolCode, Comment))
Dict[DictKey] = ValueList
return Dict
def ToXml(self, Dict, Key):
if self.UserExtensionSource:
pass
SourcesNodeList = []
for Item in Dict:
ValueList = Dict[Item]
(FileName, Family, FeatureFlag, SupArchStr) = Item
SourceFileNodeList = []
SourceFileNodeList.append(["FileName", FileName])
SourceFileNodeList.append(["Family", Family])
SourceFileNodeList.append(["FeatureFlag", FeatureFlag])
SourceFileNodeList.append(["SupArchList", SupArchStr])
for (TagName, ToolCode, Comment) in ValueList:
ValueNodeList = []
if not (TagName or ToolCode or Comment):
TagName = ' '
ToolCode = ' '
Comment = ' '
ValueNodeList.append(["TagName", TagName])
ValueNodeList.append(["ToolCode", ToolCode])
ValueNodeList.append(["Comment", Comment])
ValueNodeXml = CreateXmlElement('SourceFileOtherAttr', '', \
ValueNodeList, [])
SourceFileNodeList.append(ValueNodeXml)
SourceFileNodeXml = CreateXmlElement('SourceFile', '', \
SourceFileNodeList, [])
SourcesNodeList.append(SourceFileNodeXml)
Root = CreateXmlElement('%s' % Key, '', SourcesNodeList, [])
return Root
##
# UserExtensionBinaryXml
#
class UserExtensionBinaryXml(object):
def __init__(self):
self.UserExtensionBinary = ''
def FromXml(self, Item, Key):
if Key:
pass
if self.UserExtensionBinary:
pass
Dict = {}
for SubItem in XmlList(Item, 'Binaries/Binary'):
FileName = XmlElement(SubItem, 'Binary/FileName')
FileType = XmlElement(SubItem, 'Binary/FileType')
FFE = XmlElement(SubItem, 'Binary/FeatureFlag')
SupArch = XmlElement(SubItem, 'Binary/SupArchList')
DictKey = (FileName, FileType, ConvertNOTEQToNE(FFE), SupArch)
ValueList = []
for ValueNodeItem in XmlList(SubItem, \
'Binary/BinaryFileOtherAttr'):
Target = XmlElement(ValueNodeItem, \
'BinaryFileOtherAttr/Target')
Family = XmlElement(ValueNodeItem, \
'BinaryFileOtherAttr/Family')
TagName = XmlElement(ValueNodeItem, \
'BinaryFileOtherAttr/TagName')
Comment = XmlElement(ValueNodeItem, \
'BinaryFileOtherAttr/Comment')
if (Target == ' ') and (Family == ' ') and \
(TagName == ' ') and (Comment == ' '):
Target = ''
Family = ''
TagName = ''
Comment = ''
ValueList.append((Target, Family, TagName, Comment))
Dict[DictKey] = ValueList
return Dict
def ToXml(self, Dict, Key):
if self.UserExtensionBinary:
pass
BinariesNodeList = []
for Item in Dict:
ValueList = Dict[Item]
(FileName, FileType, FeatureFlag, SupArch) = Item
FileNodeList = []
FileNodeList.append(["FileName", FileName])
FileNodeList.append(["FileType", FileType])
FileNodeList.append(["FeatureFlag", ConvertNEToNOTEQ(FeatureFlag)])
FileNodeList.append(["SupArchList", SupArch])
for (Target, Family, TagName, Comment) in ValueList:
ValueNodeList = []
if not (Target or Family or TagName or Comment):
Target = ' '
Family = ' '
TagName = ' '
Comment = ' '
ValueNodeList.append(["Target", Target])
ValueNodeList.append(["Family", Family])
ValueNodeList.append(["TagName", TagName])
ValueNodeList.append(["Comment", Comment])
ValueNodeXml = CreateXmlElement('BinaryFileOtherAttr', '', \
ValueNodeList, [])
FileNodeList.append(ValueNodeXml)
FileNodeXml = CreateXmlElement('Binary', '', FileNodeList, [])
BinariesNodeList.append(FileNodeXml)
Root = CreateXmlElement('%s' % Key, '', BinariesNodeList, [])
return Root
##
# LibraryClassXml
#
class LibraryClassXml(object):
def __init__(self):
self.Keyword = ''
self.HeaderFile = ''
self.RecommendedInstanceGuid = ''
self.RecommendedInstanceVersion = ''
self.CommonDefines = CommonDefinesXml()
self.HelpText = []
def FromXml(self, Item, Key):
self.Keyword = XmlAttribute(XmlNode(Item, '%s' % Key), 'Keyword')
if self.Keyword == '':
self.Keyword = XmlElement(Item, '%s/Keyword' % Key)
self.HeaderFile = XmlElement(Item, '%s/HeaderFile' % Key)
self.CommonDefines.FromXml(XmlNode(Item, '%s' % Key), Key)
for HelpTextItem in XmlList(Item, '%s/HelpText' % Key):
HelpTextObj = HelpTextXml()
HelpTextObj.FromXml(HelpTextItem, '%s/HelpText' % Key)
self.HelpText.append(HelpTextObj)
LibraryClass = LibraryClassObject()
LibraryClass.SetLibraryClass(self.Keyword)
LibraryClass.SetIncludeHeader(self.HeaderFile)
if self.CommonDefines.Usage:
LibraryClass.SetUsage(self.CommonDefines.Usage)
LibraryClass.SetSupArchList(self.CommonDefines.SupArchList)
LibraryClass.SetSupModuleList(self.CommonDefines.SupModList)
LibraryClass.SetFeatureFlag(ConvertNOTEQToNE(self.CommonDefines.FeatureFlag))
LibraryClass.SetHelpTextList(GetHelpTextList(self.HelpText))
return LibraryClass
def ToXml(self, LibraryClass, Key):
if self.HeaderFile:
pass
AttributeList = \
[['Keyword', LibraryClass.GetLibraryClass()],
['SupArchList', GetStringOfList(LibraryClass.GetSupArchList())],
['SupModList', GetStringOfList(LibraryClass.GetSupModuleList())]
]
NodeList = [['HeaderFile', LibraryClass.GetIncludeHeader()]]
for Item in LibraryClass.GetHelpTextList():
Tmp = HelpTextXml()
NodeList.append(Tmp.ToXml(Item))
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def ToXml2(self, LibraryClass, Key):
if self.HeaderFile:
pass
FeatureFlag = ConvertNEToNOTEQ(LibraryClass.GetFeatureFlag())
AttributeList = \
[['Usage', LibraryClass.GetUsage()], \
['SupArchList', GetStringOfList(LibraryClass.GetSupArchList())], \
['SupModList', GetStringOfList(LibraryClass.GetSupModuleList())], \
['FeatureFlag', FeatureFlag]
]
NodeList = [['Keyword', LibraryClass.GetLibraryClass()], ]
for Item in LibraryClass.GetHelpTextList():
Tmp = HelpTextXml()
NodeList.append(Tmp.ToXml(Item))
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def __str__(self):
Str = "Keyword = %s HeaderFile = %s RecommendedInstanceGuid = %s RecommendedInstanceVersion = %s %s" % \
(self.Keyword, self.HeaderFile, self.RecommendedInstanceGuid, self.RecommendedInstanceVersion, \
self.CommonDefines)
for Item in self.HelpText:
Str = Str + "\n\t" + str(Item)
return Str
##
# FilenameXml
#
class FilenameXml(object):
def __init__(self):
self.FileType = ''
self.Filename = ''
self.CommonDefines = CommonDefinesXml()
def FromXml(self, Item, Key):
self.FileType = XmlAttribute(Item, 'FileType')
Guid = XmlAttribute(Item, 'GUID')
self.Filename = XmlElement(Item, 'Filename')
self.CommonDefines.FromXml(Item, Key)
FeatureFlag = ConvertNOTEQToNE(self.CommonDefines.FeatureFlag)
Filename = FileNameObject()
#
# Convert File Type
#
if self.FileType == 'UEFI_IMAGE':
self.FileType = 'PE32'
Filename.SetGuidValue(Guid)
Filename.SetFileType(self.FileType)
Filename.SetFilename(self.Filename)
Filename.SetSupArchList(self.CommonDefines.SupArchList)
Filename.SetFeatureFlag(FeatureFlag)
return Filename
def ToXml(self, Filename, Key):
if self.Filename:
pass
AttributeList = [['SupArchList', \
GetStringOfList(Filename.GetSupArchList())],
['FileType', Filename.GetFileType()],
['FeatureFlag', ConvertNEToNOTEQ(Filename.GetFeatureFlag())],
['GUID', Filename.GetGuidValue()]
]
Root = CreateXmlElement('%s' % Key, Filename.GetFilename(), [], AttributeList)
return Root
def __str__(self):
return "FileType = %s Filename = %s %s" \
% (self.FileType, self.Filename, self.CommonDefines) | en | 0.63888 | ## @file # This file is used to parse a PCD file of .PKG file # # Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR> # # This program and the accompanying materials are licensed and made available # under the terms and conditions of the BSD License which accompanies this # distribution. The full text of the license may be found at # http://opensource.org/licenses/bsd-license.php # # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. # CommonXml ## # Import Modules # ## # ClonedFromXml # ## # CommonDefinesXml # ## # PromptXml # ## # HelpTextXml # ## # HeaderXml # # Get Abstract and Description from Uni File # if the Uni File exists # Get Abstract and Description from INF File Header ## # DistributionPackageHeaderXml # ## # PackageHeaderXml # # Get Abstract and Description from Uni File # if the Uni File exists # Get Abstract and Description from DEC File Header ## # MiscellaneousFileXml # ## # This API is used for Package or Module's MiscellaneousFile section # ## # This API is used for DistP's tool section # ## # This API is used for Package or Module's MiscellaneousFile section # ## # This API is used for DistP's tool section # ## # UserExtensionsXml # ## # UserExtensionSourceXml # #SourcesItem = XmlNode(Item, '%s/Sources' % Key) ## # UserExtensionBinaryXml # ## # LibraryClassXml # ## # FilenameXml # # # Convert File Type # | 1.651989 | 2 |
GA_classifier/MLP.py | raj-1411/Deep-Convolutional-Neural-Networks-improvisation-with-applied-Genetic-Algorithm | 0 | 6631077 | from sklearn.neural_network import MLPClassifier
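# Fits a scikit-learn MLP classifier on a feature matrix whose last column holds
# the class label and returns predictions for the validation features.
# Typical call (illustrative): val_pred = model_classif(train_space, val_space)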
def model_classif(train_space, val_space):
model = MLPClassifier()
model.fit(train_space[:,:-1], train_space[:,-1])
    val_pred = model.predict(val_space[:, :-1])
return val_pred | from sklearn.neural_network import MLPClassifier
def model_classif(train_space, val_space):
model = MLPClassifier()
model.fit(train_space[:,:-1], train_space[:,-1])
    val_pred = model.predict(val_space[:, :-1])
return val_pred | none | 1 | 3.031672 | 3 |
|
tensorflow/contrib/opt/python/training/lars_optimizer_test.py | uve/tensorflow | 0 | 6631078 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0. Licensed to the Apache
# Software Foundation. You may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for Layer-wise Adaptive Rate Scaling optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import lars_optimizer as lo
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
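# The tests below check the optimizer against a numpy reference of the
# layer-wise adaptive rate scaling (LARS) update:
#   trust_ratio = eeta * ||w|| / (||g|| + weight_decay * ||w|| + epsilon)
#   scaled_lr   = learning_rate * trust_ratio
#   velocity    = momentum * velocity + grad
#   w          -= scaled_lr * velocity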
class LARSOptimizerTest(test.TestCase):
def testLARSGradientOneStep(self):
for _ in range(10):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session() as sess:
shape = [3, 3]
var_np = np.ones(shape)
grad_np = np.ones(shape)
lr_np = 0.1
m_np = 0.9
wd_np = 0.1
ep_np = 1e-5
eeta = 0.1
vel_np = np.zeros(shape)
var = variables.Variable(var_np, dtype=dtype)
grad = variables.Variable(grad_np, dtype=dtype)
opt = lo.LARSOptimizer(
learning_rate=lr_np,
momentum=m_np,
weight_decay=wd_np,
eeta=eeta,
epsilon=ep_np)
step = opt.apply_gradients([(grad, var)])
variables.global_variables_initializer().run()
pre_var = sess.run(var)
pre_vel = sess.run(opt.get_slot(var, 'momentum'))
self.assertAllClose(var_np, pre_var)
self.assertAllClose(vel_np, pre_vel)
step.run()
post_var = sess.run(var)
post_vel = sess.run(opt.get_slot(var, 'momentum'))
w_norm = np.linalg.norm(var_np.flatten(), ord=2)
g_norm = np.linalg.norm(grad_np.flatten(), ord=2)
trust_ratio = eeta * w_norm / (g_norm + wd_np * w_norm + ep_np)
scaled_lr = lr_np * trust_ratio
vel_np = m_np * vel_np + grad_np
var_np -= scaled_lr * vel_np
self.assertAllClose(var_np, post_var)
self.assertAllClose(vel_np, post_vel)
def testLARSGradientMultiStep(self):
for _ in range(10):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session() as sess:
shape = [3, 3]
var_np = np.ones(shape)
grad_np = np.ones(shape)
lr_np = 0.1
m_np = 0.9
wd_np = 0.1
ep_np = 1e-5
eeta = 0.1
vel_np = np.zeros(shape)
var = variables.Variable(var_np, dtype=dtype)
grad = variables.Variable(grad_np, dtype=dtype)
opt = lo.LARSOptimizer(
learning_rate=lr_np,
momentum=m_np,
eeta=eeta,
weight_decay=wd_np,
epsilon=ep_np)
step = opt.apply_gradients([(grad, var)])
variables.global_variables_initializer().run()
pre_var = sess.run(var)
pre_vel = sess.run(opt.get_slot(var, 'momentum'))
self.assertAllClose(var_np, pre_var)
self.assertAllClose(vel_np, pre_vel)
for _ in range(10):
step.run()
post_var = sess.run(var)
post_vel = sess.run(opt.get_slot(var, 'momentum'))
w_norm = np.linalg.norm(var_np.flatten(), ord=2)
g_norm = np.linalg.norm(grad_np.flatten(), ord=2)
trust_ratio = eeta * w_norm / (g_norm + wd_np * w_norm + ep_np)
scaled_lr = lr_np * trust_ratio
vel_np = m_np * vel_np + grad_np
var_np -= scaled_lr * vel_np
self.assertAllClose(var_np, post_var)
self.assertAllClose(vel_np, post_vel)
if __name__ == '__main__':
test.main()
| # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0. Licensed to the Apache
# Software Foundation. You may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for Layer-wise Adaptive Rate Scaling optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import lars_optimizer as lo
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class LARSOptimizerTest(test.TestCase):
def testLARSGradientOneStep(self):
for _ in range(10):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session() as sess:
shape = [3, 3]
var_np = np.ones(shape)
grad_np = np.ones(shape)
lr_np = 0.1
m_np = 0.9
wd_np = 0.1
ep_np = 1e-5
eeta = 0.1
vel_np = np.zeros(shape)
var = variables.Variable(var_np, dtype=dtype)
grad = variables.Variable(grad_np, dtype=dtype)
opt = lo.LARSOptimizer(
learning_rate=lr_np,
momentum=m_np,
weight_decay=wd_np,
eeta=eeta,
epsilon=ep_np)
step = opt.apply_gradients([(grad, var)])
variables.global_variables_initializer().run()
pre_var = sess.run(var)
pre_vel = sess.run(opt.get_slot(var, 'momentum'))
self.assertAllClose(var_np, pre_var)
self.assertAllClose(vel_np, pre_vel)
step.run()
post_var = sess.run(var)
post_vel = sess.run(opt.get_slot(var, 'momentum'))
w_norm = np.linalg.norm(var_np.flatten(), ord=2)
g_norm = np.linalg.norm(grad_np.flatten(), ord=2)
trust_ratio = eeta * w_norm / (g_norm + wd_np * w_norm + ep_np)
scaled_lr = lr_np * trust_ratio
vel_np = m_np * vel_np + grad_np
var_np -= scaled_lr * vel_np
self.assertAllClose(var_np, post_var)
self.assertAllClose(vel_np, post_vel)
def testLARSGradientMultiStep(self):
for _ in range(10):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session() as sess:
shape = [3, 3]
var_np = np.ones(shape)
grad_np = np.ones(shape)
lr_np = 0.1
m_np = 0.9
wd_np = 0.1
ep_np = 1e-5
eeta = 0.1
vel_np = np.zeros(shape)
var = variables.Variable(var_np, dtype=dtype)
grad = variables.Variable(grad_np, dtype=dtype)
opt = lo.LARSOptimizer(
learning_rate=lr_np,
momentum=m_np,
eeta=eeta,
weight_decay=wd_np,
epsilon=ep_np)
step = opt.apply_gradients([(grad, var)])
variables.global_variables_initializer().run()
pre_var = sess.run(var)
pre_vel = sess.run(opt.get_slot(var, 'momentum'))
self.assertAllClose(var_np, pre_var)
self.assertAllClose(vel_np, pre_vel)
for _ in range(10):
step.run()
post_var = sess.run(var)
post_vel = sess.run(opt.get_slot(var, 'momentum'))
w_norm = np.linalg.norm(var_np.flatten(), ord=2)
g_norm = np.linalg.norm(grad_np.flatten(), ord=2)
trust_ratio = eeta * w_norm / (g_norm + wd_np * w_norm + ep_np)
scaled_lr = lr_np * trust_ratio
vel_np = m_np * vel_np + grad_np
var_np -= scaled_lr * vel_np
self.assertAllClose(var_np, post_var)
self.assertAllClose(vel_np, post_vel)
if __name__ == '__main__':
test.main() | en | 0.804283 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0. Licensed to the Apache # Software Foundation. You may not use this file except in compliance with the # License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Test for Layer-wise Adaptive Rate Scaling optimizer. | 2.115706 | 2 |
medrecords_app/forms.py | davidwrq/medrecords | 0 | 6631079 | from django import forms
from django.contrib.auth import get_user_model
from medrecords_app.models import MedicalRecord
User = get_user_model()
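# Creation form for MedicalRecord: the 'patient' choice field is restricted to
# accounts with user_type '2' and every visible widget receives the Bootstrap
# 'form-control' CSS class.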
class MedicalRecordCreationForm(forms.ModelForm):
class Meta:
model = MedicalRecord
fields = "__all__"
def __init__(self, *args, **kwargs):
super(MedicalRecordCreationForm, self).__init__(*args, **kwargs)
self.fields['patient'].queryset = User.objects.filter(user_type='2')
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control'
| from django import forms
from django.contrib.auth import get_user_model
from medrecords_app.models import MedicalRecord
User = get_user_model()
class MedicalRecordCreationForm(forms.ModelForm):
class Meta:
model = MedicalRecord
fields = "__all__"
def __init__(self, *args, **kwargs):
super(MedicalRecordCreationForm, self).__init__(*args, **kwargs)
self.fields['patient'].queryset = User.objects.filter(user_type='2')
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control'
| none | 1 | 2.269588 | 2 |
|
junction_manager.py | h3ct0r/junction_detection_occgrid | 0 | 6631080 |
#!/usr/bin/env python
""" Junction Manager"""
from copy import copy
import rospy
from threading import Lock
from nav_msgs.msg import OccupancyGrid
import numpy as np
import cv2
import time
import collections
class JunctionManager(object):
def __init__(self):
self.lock = Lock()
self.map_deque = collections.deque(maxlen=1) # size of maps to join, default to 1 (not joining anything)
self.latest_joined_map = None
self.font = cv2.FONT_HERSHEY_SIMPLEX
# Parameters
self.robot_name = rospy.get_namespace().split("/")[1]
self.robot_type = self.robot_name[:-1]
# Subscribers
self.latest_costmap = None
self._costmap_sub_topic_name = '/spot1/move_base/global_costmap/costmap_throttle'
self.costmap_sub = rospy.Subscriber(
self._costmap_sub_topic_name,
OccupancyGrid,
self.handle_costmap_cb
)
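        # The costmap is read from a throttled copy of move_base's global
        # costmap; note the topic above is hard-coded to the 'spot1' namespace.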
def run(self):
""" main entry point """
rate = rospy.Rate(5)
while not self.is_initialized():
rospy.logwarn("Waiting for initialization...")
rate.sleep()
while not rospy.is_shutdown():
self.join_maps()
if self.latest_joined_map is None:
rate.sleep()
continue
self.junction_detection_opencv()
rate.sleep()
def is_initialized(self):
""" check for initial data needed for this node """
try:
rospy.wait_for_message(self._costmap_sub_topic_name, OccupancyGrid, timeout=5)
except rospy.ROSException as rex:
rospy.logwarn(rex)
return False
return True
@staticmethod
def map_to_img(occ_grid):
""" convert nav_msgs/OccupancyGrid to OpenCV mat
small noise in the occ grid is removed by
thresholding on the occupancy probability (> 50%)
"""
data = occ_grid.data
w = occ_grid.info.width
h = occ_grid.info.height
img = np.zeros((h, w, 1), np.uint8)
img += 255 # start with a white canvas instead of a black one
# occupied cells (0 - 100 prob range)
# free cells (0)
# unknown -1
for i in range(0, h):
for j in range(0, w):
if data[i * w + j] >= 50:
img[i, j] = 0
elif 0 < data[i * w + j] < 50:
img[i, j] = 255
elif data[i * w + j] == -1:
img[i, j] = 205
# crop borders if performing map stitching
# img = img[20:380, 20:380]
return img
@staticmethod
def remove_isolated_pixels(img, is_debug=False):
""" remove isolated components
        using an 8-point connectivity;
        components whose area is less than (the standard deviation
        of all component areas) / 4.0 are removed TODO: check this heuristic?
"""
        # 8-point connectivity (up, down, left, right and diagonals)
        # 4-point connectivity (only up, down, left, right)
connectivity = 8
output = cv2.connectedComponentsWithStats(img, connectivity, cv2.CV_32S)
num_labels = output[0]
labels = output[1]
stats = output[2]
new_image = img.copy()
areas = [stats[label, cv2.CC_STAT_AREA] for label in range(num_labels)]
std_area = np.std(areas) / 4.0
if is_debug:
print("areas:", areas, "std:", std_area)
for label in range(num_labels):
# remove pixels from smaller connected components
# smaller than the std dev of the total areas
area = stats[label, cv2.CC_STAT_AREA]
if area < std_area:
new_image[labels == label] = 0
if is_debug:
cv2.imshow("new_image", new_image)
time.sleep(0.2)
return new_image
@staticmethod
def get_skeleton_intersection(skeleton):
""" Given a skeletonised image, it will give the coordinates of the intersections of the skeleton.
https://stackoverflow.com/questions/41705405/finding-intersections-of-a-skeletonised-image-in-python-opencv
Keyword arguments:
skeleton -- the skeletonised image to detect the intersections of
Returns:
List of 2-tuples (x,y) containing the intersection coordinates
"""
def neighbours(x, y, image):
""" Return 8-neighbours of image point P1(x,y), in a clockwise order """
img = image
x_1, y_1, x1, y1 = x - 1, y - 1, x + 1, y + 1;
return [img[x_1][y], img[x_1][y1], img[x][y1], img[x1][y1], img[x1][y], img[x1][y_1], img[x][y_1],
img[x_1][y_1]]
# A biiiiiig list of valid intersections 2 3 4
# These are in the format shown to the right 1 C 5
# 8 7 6
validIntersection = [[0, 1, 0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 1, 0, 0, 1], [1, 0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 0, 0, 1], [1, 0, 1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 0, 1], [0, 1, 0, 1, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1, 0, 1],
[1, 0, 1, 0, 0, 0, 1, 0], [1, 0, 1, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0, 1, 0],
[1, 0, 0, 0, 1, 0, 1, 0], [1, 0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 1, 1, 1],
[1, 1, 0, 0, 1, 0, 0, 1], [0, 1, 1, 1, 0, 0, 1, 0], [1, 0, 1, 1, 0, 0, 1, 0],
[1, 0, 1, 0, 0, 1, 1, 0], [1, 0, 1, 1, 0, 1, 1, 0], [0, 1, 1, 0, 1, 0, 1, 1],
[1, 1, 0, 1, 1, 0, 1, 0], [1, 1, 0, 0, 1, 0, 1, 0], [0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 1, 0, 1, 1], [1, 0, 0, 1, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1, 0, 1],
[1, 0, 1, 0, 1, 1, 0, 0], [1, 0, 1, 0, 1, 0, 0, 1], [0, 1, 0, 0, 1, 0, 1, 1],
[0, 1, 1, 0, 1, 0, 0, 1], [1, 1, 0, 1, 0, 0, 1, 0], [0, 1, 0, 1, 1, 0, 1, 0],
[0, 0, 1, 0, 1, 1, 0, 1], [1, 0, 1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 1, 0, 0]]
image = skeleton.copy()
image = image / 255
intersections = list()
for x in range(1, len(image) - 1):
for y in range(1, len(image[x]) - 1):
# If we have a white pixel
if image[x][y] == 1:
neigh = neighbours(x, y, image)
valid = True
if neigh in validIntersection:
intersections.append((y, x))
# Filter intersections to make sure we don't count them twice or ones that are very close together
for point1 in intersections:
for point2 in intersections:
if (((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2) < 10 ** 2) and (point1 != point2):
intersections.remove(point2)
# Remove duplicates
intersections = list(set(intersections))
return intersections
@staticmethod
def merge_imgs(img1, img2):
""" merge two map images in a particularly fast way (for python anyways)
        we only consider white and black areas for merging, unknown areas are not
merged
"""
from itertools import product
h = img1.shape[0]
w = img1.shape[1]
h2 = img2.shape[0]
w2 = img2.shape[1]
for pos in product(range(h), range(w)):
if pos[0] > h2 - 1 or pos[1] > w2 - 1:
continue
pixel = img2.item(pos)
if pixel != 205:
img1.itemset(pos, pixel)
return img1
@staticmethod
def count_branches(centroid, skeletonized_img, radius=50, is_debug=False):
""" Get number of branches given a skeletonized image and a branch point
we use a radius to detect collisions between the circle and the branches
"""
n_branches = 0
h = skeletonized_img.shape[0]
w = skeletonized_img.shape[1]
color_debug = cv2.cvtColor(skeletonized_img, cv2.COLOR_GRAY2RGB)
pts = cv2.ellipse2Poly(
(int(centroid[0]), int(centroid[1])),
(radius, radius),
0, 0, 360, 1)
if is_debug:
for p in pts:
if p[1] > h - 1 or p[0] > w - 1:
continue
color_debug[int(p[1]), int(p[0])] = [0, 0, 255]
non_zero = False
for p in pts:
if p[0] > h - 1 or p[1] > w - 1:
continue
pixel = abs(skeletonized_img.item(p[1], p[0]))
if not non_zero and pixel > 0.0:
non_zero = True
n_branches += 1
elif non_zero and pixel < 1.0:
non_zero = False
if is_debug:
if pixel > 0.0:
color_debug[int(p[1]), int(p[0])] = [255, 0, 0]
if is_debug:
cv2.imshow('color_debug', color_debug)
cv2.waitKey(1)
return n_branches
def junction_detection_opencv(self):
""" use opencv to filter the occupancy grid and extract
a workable skeleton of the traversable area
intersections are detected by a library of possible intersections
"""
costmap_mat = self.latest_joined_map
cv2.imshow("costmap_mat", costmap_mat)
_, occ_area = cv2.threshold(costmap_mat, 0, 100, cv2.THRESH_BINARY_INV)
_, free_area = cv2.threshold(costmap_mat, 250, 255, cv2.THRESH_BINARY)
# inverted obstacle mask, obstacles are converted from black to white
black_pixels = np.where(
(occ_area[:] > 0)
)
occ_area[black_pixels] = 255
# cv2.imshow("occ_area", occ_area)
# cv2.imshow("free_area", free_area)
for i in xrange(5):
occ_area = cv2.medianBlur(occ_area, 7);
# cv2.imshow("occ_area2", occ_area)
for i in xrange(5):
free_area = cv2.medianBlur(free_area, 7);
# cv2.imshow("free_area2", free_area)
kernel = np.ones((3, 3), np.uint8)
dilation_occ = cv2.morphologyEx(occ_area, cv2.MORPH_DILATE, kernel, iterations=10)
# cv2.imshow("dilation_occ", dilation_occ)
dilation_free = cv2.dilate(free_area, kernel, iterations=5)
# cv2.imshow("dilation_free", dilation_free)
# remove the inflated obstacles to the free navigable area
diff_im = cv2.subtract(dilation_free, dilation_occ)
# cv2.imshow("diff_im", diff_im)
filtered_diff = JunctionManager.remove_isolated_pixels(diff_im, is_debug=False)
cv2.imshow("filtered_diff", filtered_diff)
dist_result = cv2.distanceTransform(filtered_diff, distanceType=cv2.DIST_L2,
maskSize=5, dstType=cv2.CV_8U)
dist_normalized = cv2.normalize(dist_result, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX,
dtype=cv2.CV_8U)
cv2.imshow("dist_normalized", dist_normalized)
# threshold the normalized distance map to remove outliers and low intensity regions
_, dist_no_small_branches = cv2.threshold(dist_normalized, 65, 255, cv2.THRESH_BINARY)
# cv2.imshow("dist_no_small_branches", dist_no_small_branches)
dist_biggest_component = JunctionManager.remove_isolated_pixels(dist_no_small_branches, is_debug=False)
# cv2.imshow("dist_biggest_component", dist_biggest_component)
dist_filtered = cv2.bitwise_and(dist_normalized, dist_biggest_component)
# cv2.imshow("dist_filtered", dist_filtered)
thinned_dist = cv2.ximgproc.thinning(dist_biggest_component)
#cv2.imshow("thinned_dist", thinned_dist)
roi_img = thinned_dist.copy()
roi_mask = np.zeros_like(roi_img)
roi_radius = 140
roi_mask = cv2.circle(roi_mask,
(int(roi_img.shape[0] / 2.0), int(roi_img.shape[1] / 2.0)),
roi_radius,
(255, 255, 255),
-1)
roi_img = cv2.bitwise_and(roi_img, roi_mask)
cv2.imshow("roi_img", roi_img)
# estimate corners and intersections
corners = JunctionManager.get_skeleton_intersection(roi_img)
color = cv2.cvtColor(thinned_dist, cv2.COLOR_GRAY2RGB) # color image for debugging purposes
# dilate skeleton to improve the detection of branches
roi_dilated = cv2.dilate(roi_img, np.ones((3, 3), np.uint8), iterations=1)
#cv2.imshow("roi_dilated", roi_dilated)
super_branch_found = False
for idx in range(len(corners)):
corner = corners[idx]
# simple heuristic to detect super branches (a true intersection)
n_branches = JunctionManager.count_branches(corner, roi_dilated)
if n_branches >= 4.0:
super_branch_found = True
branch_label = 'id {}: {} (SUPER)'.format(idx, n_branches)
else:
branch_label = 'id {}: {}'.format(idx, n_branches)
cv2.putText(color, branch_label,
(int(corner[0]), int(corner[1])),
self.font, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
color = cv2.circle(color, (int(corner[0]), int(corner[1])), 3, [0, 0, 255], -1)
color = cv2.circle(color, (int(corner[0]), int(corner[1])), 60, [0, 0, 255], 1)
if super_branch_found:
rospy.loginfo("Super branch found!")
cv2.imshow("color", color)
cv2.waitKey(1)
def handle_costmap_cb(self, msg):
""" receive the occupancy grid map and register it """
self.latest_costmap = msg
self.map_deque.append(self.latest_costmap)
def join_maps(self, is_debug=False):
        """ join/stitch multiple occupancy grid maps using the relative
transformation between them given by the map.info.origin location
"""
local_map = self.map_deque[0]
resolution = local_map.info.resolution
last_origin_x = local_map.info.origin.position.x / float(resolution)
last_origin_y = local_map.info.origin.position.y / float(resolution)
self.latest_joined_map = self.map_to_img(local_map)
accumulated_diff_x = 0
accumulated_diff_y = 0
for cur_map in list(self.map_deque)[1:]:
cur_origin_x = cur_map.info.origin.position.x / float(resolution)
cur_origin_y = cur_map.info.origin.position.y / float(resolution)
diff_x = cur_origin_x - last_origin_x
diff_y = cur_origin_y - last_origin_y
top = 0 if diff_y < 0 else abs(diff_y)
bottom = 0 if diff_y > 0 else abs(diff_y)
left = 0 if diff_x > 0 else abs(diff_x)
right = 0 if diff_x < 0 else abs(diff_x)
if is_debug:
print("top:{} bottom:{} left:{} right:{}".format(top, bottom, left, right))
self.latest_joined_map = cv2.copyMakeBorder(self.latest_joined_map,
int(round(top)), int(round(bottom)), int(round(left)),
int(round(right)),
cv2.BORDER_CONSTANT, value=205)
top = 0 if accumulated_diff_y < 0 else abs(accumulated_diff_y)
bottom = 0 if accumulated_diff_y > 0 else abs(accumulated_diff_y)
left = 0 if accumulated_diff_x > 0 else abs(accumulated_diff_x)
right = 0 if accumulated_diff_x < 0 else abs(accumulated_diff_x)
# print("top:{} bottom:{} left:{} right:{}".format(top, bottom, left, right))
cur_img = self.map_to_img(cur_map)
cur_img = cv2.copyMakeBorder(cur_img,
int(round(top)), int(round(bottom)), int(round(left)), int(round(right)),
cv2.BORDER_CONSTANT, value=205)
accumulated_diff_x += diff_x
accumulated_diff_y += diff_y
if is_debug:
print("joined:({}, {}); accumulated:({}, {});".format(diff_x, diff_y, accumulated_diff_x,
accumulated_diff_y))
M = np.float32([
[1, 0, accumulated_diff_x],
[0, 1, accumulated_diff_y]
])
cur_img = cv2.warpAffine(cur_img, M, (cur_img.shape[1], cur_img.shape[0]), borderValue=205)
self.latest_joined_map = JunctionManager.merge_imgs(self.latest_joined_map, cur_img)
last_origin_x = cur_origin_x
last_origin_y = cur_origin_y
if is_debug:
cv2.imshow("cur_img", cur_img)
# cv2.imshow("latest_joined_map2", self.latest_joined_map)
cv2.waitKey(1)
def main():
rospy.init_node('junction_manager')
manager = JunctionManager()
manager.run()
if __name__ == '__main__':
main()
| #!/usr/bin/env python
""" Junction Manager"""
from copy import copy
import rospy
from threading import Lock
from nav_msgs.msg import OccupancyGrid
import numpy as np
import cv2
import time
import collections
class JunctionManager(object):
def __init__(self):
self.lock = Lock()
self.map_deque = collections.deque(maxlen=1) # size of maps to join, default to 1 (not joining anything)
self.latest_joined_map = None
self.font = cv2.FONT_HERSHEY_SIMPLEX
# Parameters
self.robot_name = rospy.get_namespace().split("/")[1]
self.robot_type = self.robot_name[:-1]
# Subscribers
self.latest_costmap = None
self._costmap_sub_topic_name = '/spot1/move_base/global_costmap/costmap_throttle'
self.costmap_sub = rospy.Subscriber(
self._costmap_sub_topic_name,
OccupancyGrid,
self.handle_costmap_cb
)
def run(self):
""" main entry point """
rate = rospy.Rate(5)
while not self.is_initialized():
rospy.logwarn("Waiting for initialization...")
rate.sleep()
while not rospy.is_shutdown():
self.join_maps()
if self.latest_joined_map is None:
rate.sleep()
continue
self.junction_detection_opencv()
rate.sleep()
def is_initialized(self):
""" check for initial data needed for this node """
try:
rospy.wait_for_message(self._costmap_sub_topic_name, OccupancyGrid, timeout=5)
except rospy.ROSException as rex:
rospy.logwarn(rex)
return False
return True
@staticmethod
def map_to_img(occ_grid):
""" convert nav_msgs/OccupancyGrid to OpenCV mat
small noise in the occ grid is removed by
thresholding on the occupancy probability (> 50%)
"""
data = occ_grid.data
w = occ_grid.info.width
h = occ_grid.info.height
img = np.zeros((h, w, 1), np.uint8)
img += 255 # start with a white canvas instead of a black one
# occupied cells (0 - 100 prob range)
# free cells (0)
# unknown -1
for i in range(0, h):
for j in range(0, w):
if data[i * w + j] >= 50:
img[i, j] = 0
elif 0 < data[i * w + j] < 50:
img[i, j] = 255
elif data[i * w + j] == -1:
img[i, j] = 205
# crop borders if performing map stitching
# img = img[20:380, 20:380]
return img
@staticmethod
def remove_isolated_pixels(img, is_debug=False):
""" remove isolated components
        using an 8-point connectivity;
        components whose area is less than (the standard deviation
        of all component areas) / 4.0 are removed TODO: check this heuristic?
"""
        # 8-point connectivity (up, down, left, right and diagonals)
        # 4-point connectivity (only up, down, left, right)
connectivity = 8
output = cv2.connectedComponentsWithStats(img, connectivity, cv2.CV_32S)
num_labels = output[0]
labels = output[1]
stats = output[2]
new_image = img.copy()
areas = [stats[label, cv2.CC_STAT_AREA] for label in range(num_labels)]
std_area = np.std(areas) / 4.0
if is_debug:
print("areas:", areas, "std:", std_area)
for label in range(num_labels):
# remove pixels from smaller connected components
# smaller than the std dev of the total areas
area = stats[label, cv2.CC_STAT_AREA]
if area < std_area:
new_image[labels == label] = 0
if is_debug:
cv2.imshow("new_image", new_image)
time.sleep(0.2)
return new_image
@staticmethod
def get_skeleton_intersection(skeleton):
""" Given a skeletonised image, it will give the coordinates of the intersections of the skeleton.
https://stackoverflow.com/questions/41705405/finding-intersections-of-a-skeletonised-image-in-python-opencv
Keyword arguments:
skeleton -- the skeletonised image to detect the intersections of
Returns:
List of 2-tuples (x,y) containing the intersection coordinates
"""
def neighbours(x, y, image):
""" Return 8-neighbours of image point P1(x,y), in a clockwise order """
img = image
x_1, y_1, x1, y1 = x - 1, y - 1, x + 1, y + 1;
return [img[x_1][y], img[x_1][y1], img[x][y1], img[x1][y1], img[x1][y], img[x1][y_1], img[x][y_1],
img[x_1][y_1]]
# A biiiiiig list of valid intersections 2 3 4
# These are in the format shown to the right 1 C 5
# 8 7 6
validIntersection = [[0, 1, 0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 1, 0, 0, 1], [1, 0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 0, 0, 1], [1, 0, 1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 0, 1], [0, 1, 0, 1, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1, 0, 1],
[1, 0, 1, 0, 0, 0, 1, 0], [1, 0, 1, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0, 1, 0],
[1, 0, 0, 0, 1, 0, 1, 0], [1, 0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 1, 1, 1],
[1, 1, 0, 0, 1, 0, 0, 1], [0, 1, 1, 1, 0, 0, 1, 0], [1, 0, 1, 1, 0, 0, 1, 0],
[1, 0, 1, 0, 0, 1, 1, 0], [1, 0, 1, 1, 0, 1, 1, 0], [0, 1, 1, 0, 1, 0, 1, 1],
[1, 1, 0, 1, 1, 0, 1, 0], [1, 1, 0, 0, 1, 0, 1, 0], [0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 1, 0, 1, 1], [1, 0, 0, 1, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1, 0, 1],
[1, 0, 1, 0, 1, 1, 0, 0], [1, 0, 1, 0, 1, 0, 0, 1], [0, 1, 0, 0, 1, 0, 1, 1],
[0, 1, 1, 0, 1, 0, 0, 1], [1, 1, 0, 1, 0, 0, 1, 0], [0, 1, 0, 1, 1, 0, 1, 0],
[0, 0, 1, 0, 1, 1, 0, 1], [1, 0, 1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 1, 0, 0]]
image = skeleton.copy()
image = image / 255
intersections = list()
for x in range(1, len(image) - 1):
for y in range(1, len(image[x]) - 1):
# If we have a white pixel
if image[x][y] == 1:
neigh = neighbours(x, y, image)
valid = True
if neigh in validIntersection:
intersections.append((y, x))
# Filter intersections to make sure we don't count them twice or ones that are very close together
for point1 in intersections:
for point2 in intersections:
if (((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2) < 10 ** 2) and (point1 != point2):
intersections.remove(point2)
# Remove duplicates
intersections = list(set(intersections))
return intersections
@staticmethod
def merge_imgs(img1, img2):
""" merge two map images in a particularly fast way (for python anyways)
        we only consider white and black areas for merging, unknown areas are not
merged
"""
from itertools import product
h = img1.shape[0]
w = img1.shape[1]
h2 = img2.shape[0]
w2 = img2.shape[1]
for pos in product(range(h), range(w)):
if pos[0] > h2 - 1 or pos[1] > w2 - 1:
continue
pixel = img2.item(pos)
if pixel != 205:
img1.itemset(pos, pixel)
return img1
@staticmethod
def count_branches(centroid, skeletonized_img, radius=50, is_debug=False):
""" Get number of branches given a skeletonized image and a branch point
we use a radius to detect collisions between the circle and the branches
"""
n_branches = 0
h = skeletonized_img.shape[0]
w = skeletonized_img.shape[1]
color_debug = cv2.cvtColor(skeletonized_img, cv2.COLOR_GRAY2RGB)
pts = cv2.ellipse2Poly(
(int(centroid[0]), int(centroid[1])),
(radius, radius),
0, 0, 360, 1)
if is_debug:
for p in pts:
if p[1] > h - 1 or p[0] > w - 1:
continue
color_debug[int(p[1]), int(p[0])] = [0, 0, 255]
non_zero = False
for p in pts:
if p[0] > h - 1 or p[1] > w - 1:
continue
pixel = abs(skeletonized_img.item(p[1], p[0]))
if not non_zero and pixel > 0.0:
non_zero = True
n_branches += 1
elif non_zero and pixel < 1.0:
non_zero = False
if is_debug:
if pixel > 0.0:
color_debug[int(p[1]), int(p[0])] = [255, 0, 0]
if is_debug:
cv2.imshow('color_debug', color_debug)
cv2.waitKey(1)
return n_branches
def junction_detection_opencv(self):
""" use opencv to filter the occupancy grid and extract
a workable skeleton of the traversable area
intersections are detected by a library of possible intersections
"""
costmap_mat = self.latest_joined_map
cv2.imshow("costmap_mat", costmap_mat)
_, occ_area = cv2.threshold(costmap_mat, 0, 100, cv2.THRESH_BINARY_INV)
_, free_area = cv2.threshold(costmap_mat, 250, 255, cv2.THRESH_BINARY)
# inverted obstacle mask, obstacles are converted from black to white
black_pixels = np.where(
(occ_area[:] > 0)
)
occ_area[black_pixels] = 255
# cv2.imshow("occ_area", occ_area)
# cv2.imshow("free_area", free_area)
for i in xrange(5):
occ_area = cv2.medianBlur(occ_area, 7);
# cv2.imshow("occ_area2", occ_area)
for i in xrange(5):
free_area = cv2.medianBlur(free_area, 7);
# cv2.imshow("free_area2", free_area)
kernel = np.ones((3, 3), np.uint8)
dilation_occ = cv2.morphologyEx(occ_area, cv2.MORPH_DILATE, kernel, iterations=10)
# cv2.imshow("dilation_occ", dilation_occ)
dilation_free = cv2.dilate(free_area, kernel, iterations=5)
# cv2.imshow("dilation_free", dilation_free)
# remove the inflated obstacles to the free navigable area
diff_im = cv2.subtract(dilation_free, dilation_occ)
# cv2.imshow("diff_im", diff_im)
filtered_diff = JunctionManager.remove_isolated_pixels(diff_im, is_debug=False)
cv2.imshow("filtered_diff", filtered_diff)
dist_result = cv2.distanceTransform(filtered_diff, distanceType=cv2.DIST_L2,
maskSize=5, dstType=cv2.CV_8U)
dist_normalized = cv2.normalize(dist_result, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX,
dtype=cv2.CV_8U)
cv2.imshow("dist_normalized", dist_normalized)
# threshold the normalized distance map to remove outliers and low intensity regions
_, dist_no_small_branches = cv2.threshold(dist_normalized, 65, 255, cv2.THRESH_BINARY)
# cv2.imshow("dist_no_small_branches", dist_no_small_branches)
dist_biggest_component = JunctionManager.remove_isolated_pixels(dist_no_small_branches, is_debug=False)
# cv2.imshow("dist_biggest_component", dist_biggest_component)
dist_filtered = cv2.bitwise_and(dist_normalized, dist_biggest_component)
# cv2.imshow("dist_filtered", dist_filtered)
thinned_dist = cv2.ximgproc.thinning(dist_biggest_component)
#cv2.imshow("thinned_dist", thinned_dist)
roi_img = thinned_dist.copy()
roi_mask = np.zeros_like(roi_img)
roi_radius = 140
roi_mask = cv2.circle(roi_mask,
(int(roi_img.shape[0] / 2.0), int(roi_img.shape[1] / 2.0)),
roi_radius,
(255, 255, 255),
-1)
roi_img = cv2.bitwise_and(roi_img, roi_mask)
cv2.imshow("roi_img", roi_img)
# estimate corners and intersections
corners = JunctionManager.get_skeleton_intersection(roi_img)
color = cv2.cvtColor(thinned_dist, cv2.COLOR_GRAY2RGB) # color image for debugging purposes
# dilate skeleton to improve the detection of branches
roi_dilated = cv2.dilate(roi_img, np.ones((3, 3), np.uint8), iterations=1)
#cv2.imshow("roi_dilated", roi_dilated)
super_branch_found = False
for idx in range(len(corners)):
corner = corners[idx]
# simple heuristic to detect super branches (a true intersection)
n_branches = JunctionManager.count_branches(corner, roi_dilated)
if n_branches >= 4.0:
super_branch_found = True
branch_label = 'id {}: {} (SUPER)'.format(idx, n_branches)
else:
branch_label = 'id {}: {}'.format(idx, n_branches)
cv2.putText(color, branch_label,
(int(corner[0]), int(corner[1])),
self.font, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
color = cv2.circle(color, (int(corner[0]), int(corner[1])), 3, [0, 0, 255], -1)
color = cv2.circle(color, (int(corner[0]), int(corner[1])), 60, [0, 0, 255], 1)
if super_branch_found:
rospy.loginfo("Super branch found!")
cv2.imshow("color", color)
cv2.waitKey(1)
def handle_costmap_cb(self, msg):
""" receive the occupancy grid map and register it """
self.latest_costmap = msg
self.map_deque.append(self.latest_costmap)
def join_maps(self, is_debug=False):
        """ join/stitch multiple occupancy grid maps using the relative
transformation between them given by the map.info.origin location
"""
local_map = self.map_deque[0]
resolution = local_map.info.resolution
last_origin_x = local_map.info.origin.position.x / float(resolution)
last_origin_y = local_map.info.origin.position.y / float(resolution)
self.latest_joined_map = self.map_to_img(local_map)
accumulated_diff_x = 0
accumulated_diff_y = 0
for cur_map in list(self.map_deque)[1:]:
cur_origin_x = cur_map.info.origin.position.x / float(resolution)
cur_origin_y = cur_map.info.origin.position.y / float(resolution)
diff_x = cur_origin_x - last_origin_x
diff_y = cur_origin_y - last_origin_y
top = 0 if diff_y < 0 else abs(diff_y)
bottom = 0 if diff_y > 0 else abs(diff_y)
left = 0 if diff_x > 0 else abs(diff_x)
right = 0 if diff_x < 0 else abs(diff_x)
if is_debug:
print("top:{} bottom:{} left:{} right:{}".format(top, bottom, left, right))
self.latest_joined_map = cv2.copyMakeBorder(self.latest_joined_map,
int(round(top)), int(round(bottom)), int(round(left)),
int(round(right)),
cv2.BORDER_CONSTANT, value=205)
top = 0 if accumulated_diff_y < 0 else abs(accumulated_diff_y)
bottom = 0 if accumulated_diff_y > 0 else abs(accumulated_diff_y)
left = 0 if accumulated_diff_x > 0 else abs(accumulated_diff_x)
right = 0 if accumulated_diff_x < 0 else abs(accumulated_diff_x)
# print("top:{} bottom:{} left:{} right:{}".format(top, bottom, left, right))
cur_img = self.map_to_img(cur_map)
cur_img = cv2.copyMakeBorder(cur_img,
int(round(top)), int(round(bottom)), int(round(left)), int(round(right)),
cv2.BORDER_CONSTANT, value=205)
accumulated_diff_x += diff_x
accumulated_diff_y += diff_y
if is_debug:
print("joined:({}, {}); accumulated:({}, {});".format(diff_x, diff_y, accumulated_diff_x,
accumulated_diff_y))
M = np.float32([
[1, 0, accumulated_diff_x],
[0, 1, accumulated_diff_y]
])
cur_img = cv2.warpAffine(cur_img, M, (cur_img.shape[1], cur_img.shape[0]), borderValue=205)
self.latest_joined_map = JunctionManager.merge_imgs(self.latest_joined_map, cur_img)
last_origin_x = cur_origin_x
last_origin_y = cur_origin_y
if is_debug:
cv2.imshow("cur_img", cur_img)
# cv2.imshow("latest_joined_map2", self.latest_joined_map)
cv2.waitKey(1)
def main():
rospy.init_node('junction_manager')
manager = JunctionManager()
manager.run()
if __name__ == '__main__':
main() | en | 0.771274 | #!/usr/bin/env python Junction Manager # size of maps to join, default to 1 (not joining anything) # Parameters # Subscribers main entry point check for initial data needed for this node convert nav_msgs/OccupancyGrid to OpenCV mat small noise in the occ grid is removed by thresholding on the occupancy probability (> 50%) # start with a white canvas instead of a black one # occupied cells (0 - 100 prob range) # free cells (0) # unknown -1 # crop borders if performing map stitching # img = img[20:380, 20:380] remove isolated components using a 8 point conectivity small areas that are less than the standard deviation / 4.0 off all areas are removed TODO: check this heuristic? # 8 points conectivity (up, down, left, right and diagonals) # 4 points conectivity (only up, down, left, right) # remove pixels from smaller connected components # smaller than the std dev of the total areas Given a skeletonised image, it will give the coordinates of the intersections of the skeleton. https://stackoverflow.com/questions/41705405/finding-intersections-of-a-skeletonised-image-in-python-opencv Keyword arguments: skeleton -- the skeletonised image to detect the intersections of Returns: List of 2-tuples (x,y) containing the intersection coordinates Return 8-neighbours of image point P1(x,y), in a clockwise order # A biiiiiig list of valid intersections 2 3 4 # These are in the format shown to the right 1 C 5 # 8 7 6 # If we have a white pixel # Filter intersections to make sure we don't count them twice or ones that are very close together # Remove duplicates merge two map images in a particularly fast way (for python anyways) we only consider white and black areas for merging, unkown areas are not merged Get number of branches given a skeletonized image and a branch point we use a radius to detect collisions between the circle and the branches use opencv to filter the occupancy grid and extract a workable skeleton of the traversable area intersections are detected by a library of possible intersections # inverted obstacle mask, obstacles are converted from black to white # cv2.imshow("occ_area", occ_area) # cv2.imshow("free_area", free_area) # cv2.imshow("occ_area2", occ_area) # cv2.imshow("free_area2", free_area) # cv2.imshow("dilation_occ", dilation_occ) # cv2.imshow("dilation_free", dilation_free) # remove the inflated obstacles to the free navigable area # cv2.imshow("diff_im", diff_im) # threshold the normalized distance map to remove outliers and low intensity regions # cv2.imshow("dist_no_small_branches", dist_no_small_branches) # cv2.imshow("dist_biggest_component", dist_biggest_component) # cv2.imshow("dist_filtered", dist_filtered) #cv2.imshow("thinned_dist", thinned_dist) # estimate corners and intersections # color image for debugging purposes # dilate skeleton to improve the detection of branches #cv2.imshow("roi_dilated", roi_dilated) # simple heuristic to detect super branches (a true intersection) receive the occupancy grid map and register it join/stich multiple occupancy grid maps using the relative transformation between them given by the map.info.origin location # print("top:{} bottom:{} left:{} right:{}".format(top, bottom, left, right)) # cv2.imshow("latest_joined_map2", self.latest_joined_map) | 2.312369 | 2 |
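# Illustrative sketch (not part of the original node): the per-pixel loops in
# map_to_img() can be vectorized with NumPy while keeping the same conventions
# as above (occupied prob >= 50 -> 0, free/low-prob -> 255, unknown -1 -> 205).
# The occ_grid interface (data, info.width, info.height) is assumed to be
# nav_msgs/OccupancyGrid, as used by the class above.
import numpy as np

def occ_grid_to_img_vectorized(occ_grid):
    w = occ_grid.info.width
    h = occ_grid.info.height
    data = np.asarray(occ_grid.data, dtype=np.int16).reshape(h, w)
    img = np.full((h, w, 1), 255, dtype=np.uint8)  # start from a white canvas
    img[data >= 50] = 0    # occupied cells become black
    img[data == -1] = 205  # unknown cells become gray
    return img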
python/sketch1/universe.py | ivukotic/GrayMatter | 0 | 6631081 | import random
import threading
import logging
from tkinter import Tk, Label, StringVar, BOTTOM
from brain import Brain
from code import Code
from environment import Environment
from vizualize import platno
import configuration as conf
logging.basicConfig(level=logging.DEBUG, format='(%(threadName)-10s) %(message)s', )
class Universe:
def __init__(self, sx=conf.Universe["size_x"], sy=conf.Universe['size_y'], population_size=conf.Universe['population_size']):
self.apsolute_time = 0
self.environment = Environment()
self.sx = sx
self.sy = sy
self.population_size = population_size
self.population = []
self.start_event = threading.Event() # these two events start/restart brain threads processing.
self.continue_event = threading.Event()
for _i in range(population_size):
self.create_brain(_i)
self.init_graphics()
for _t in range(conf.Universe['max_age']):
self.tick()
def __str__(self):
return '--- ' + str(self.apsolute_time) + ' ---'
def init_graphics(self):
self.display = platno()
self.str_age = StringVar()
self.age_label = Label(self.display.canvas, textvariable=self.str_age, fg="black")
self.age_label.pack(side=BOTTOM)
self.balls = []
for b in self.population:
(x, y) = b.getPosition()
self.balls.append(self.display.canvas.create_oval(x - 2, y - 2, x + 2, y + 2))
self.display.update()
def create_brain(self, index):
c = Code()
c.initial_generation()
b = Brain(index, c, self.start_event, self.continue_event)
b.setPosition([random.randint(0, self.sx), random.randint(0, self.sy)])
self.population.append(b)
b.start()
def updateStates(self):
# wait until all brains delivered tick result.
while True:
if len(Brain.actions) == self.population_size:
break
Brain.actions.clear()
# logging.debug('do actual collection')
toRemove = []
for bi, b in enumerate(self.population):
# b.prnt()
if b.getQuality() <= 0:
toRemove.append(bi)
x, y = b.getPosition()
dx, dy = b.getMove()
(reward, new_position, view) = self.environment.get_response([x, y], [dx, dy])
# print('pos:', nx, ny, 'direct:', dx, dy, 'reward:', reward, view)
b.setPosition(new_position)
b.process_response(reward, view)
for i in sorted(toRemove, reverse=True):
            del self.population[i]
def tick(self):
self.continue_event.clear()
self.start_event.set()
self.apsolute_time += 1
self.updateStates()
self.start_event.clear()
self.continue_event.set()
if conf.viz:
self.showUniverse()
self.print_out()
if conf.special_show_at == self.apsolute_time:
self.print_detailed()
def print_out(self):
_ind, level = conf.print_opt['Universe']
if level == 0:
return
print(self)
if level > 1:
print('population size:', len(self.population))
def print_detailed(self):
self.population[0].print_detailed()
def showUniverse(self):
ax = []
ay = []
asize = []
for bi, b in enumerate(self.population):
x, y = b.getPosition()
size = b.getQuality()
self.display.canvas.coords(self.balls[bi], x - 2, y - 2, x + 2, y + 2)
self.str_age.set('t: ' + str(self.apsolute_time))
self.display.update()
if __name__ == "__main__":
un = Universe()
| import random
import threading
import logging
from tkinter import Tk, Label, StringVar, BOTTOM
from brain import Brain
from code import Code
from environment import Environment
from vizualize import platno
import configuration as conf
logging.basicConfig(level=logging.DEBUG, format='(%(threadName)-10s) %(message)s', )
class Universe:
def __init__(self, sx=conf.Universe["size_x"], sy=conf.Universe['size_y'], population_size=conf.Universe['population_size']):
self.apsolute_time = 0
self.environment = Environment()
self.sx = sx
self.sy = sy
self.population_size = population_size
self.population = []
self.start_event = threading.Event() # these two events start/restart brain threads processing.
self.continue_event = threading.Event()
for _i in range(population_size):
self.create_brain(_i)
self.init_graphics()
for _t in range(conf.Universe['max_age']):
self.tick()
def __str__(self):
return '--- ' + str(self.apsolute_time) + ' ---'
def init_graphics(self):
self.display = platno()
self.str_age = StringVar()
self.age_label = Label(self.display.canvas, textvariable=self.str_age, fg="black")
self.age_label.pack(side=BOTTOM)
self.balls = []
for b in self.population:
(x, y) = b.getPosition()
self.balls.append(self.display.canvas.create_oval(x - 2, y - 2, x + 2, y + 2))
self.display.update()
def create_brain(self, index):
c = Code()
c.initial_generation()
b = Brain(index, c, self.start_event, self.continue_event)
b.setPosition([random.randint(0, self.sx), random.randint(0, self.sy)])
self.population.append(b)
b.start()
def updateStates(self):
# wait until all brains delivered tick result.
while True:
if len(Brain.actions) == self.population_size:
break
Brain.actions.clear()
# logging.debug('do actual collection')
toRemove = []
for bi, b in enumerate(self.population):
# b.prnt()
if b.getQuality() <= 0:
toRemove.append(bi)
x, y = b.getPosition()
dx, dy = b.getMove()
(reward, new_position, view) = self.environment.get_response([x, y], [dx, dy])
# print('pos:', nx, ny, 'direct:', dx, dy, 'reward:', reward, view)
b.setPosition(new_position)
b.process_response(reward, view)
for i in sorted(toRemove, reverse=True):
            del self.population[i]
def tick(self):
self.continue_event.clear()
self.start_event.set()
self.apsolute_time += 1
self.updateStates()
self.start_event.clear()
self.continue_event.set()
if conf.viz:
self.showUniverse()
self.print_out()
if conf.special_show_at == self.apsolute_time:
self.print_detailed()
def print_out(self):
_ind, level = conf.print_opt['Universe']
if level == 0:
return
print(self)
if level > 1:
print('population size:', len(self.population))
def print_detailed(self):
self.population[0].print_detailed()
def showUniverse(self):
ax = []
ay = []
asize = []
for bi, b in enumerate(self.population):
x, y = b.getPosition()
size = b.getQuality()
self.display.canvas.coords(self.balls[bi], x - 2, y - 2, x + 2, y + 2)
self.str_age.set('t: ' + str(self.apsolute_time))
self.display.update()
if __name__ == "__main__":
un = Universe()
| en | 0.62539 | # these two events start/restart brain threads processing. # wait until all brains delivered tick result. # logging.debug('do actual collection') # b.prnt() # print('pos:', nx, ny, 'direct:', dx, dy, 'reward:', reward, view) | 2.93566 | 3 |
test_api/WPM_OMRI.py | loucif/mcdm-cv | 0 | 6631082 |
# Filename: wpm_OMRI.py
# Description: wpm_OMRI method
from numpy import *
import BWM
import timeit
from normalisation_fuc import omri_normalisation
# Step 1+2: normalise and compute the values S_i
def wpm_omri(D, w, ab):
"""
D is the matrix of the population,
w is the criteria's weights array
p is the criteria min/max values,
"""
#calculate normalised dataset
nD = omri_normalisation(D, ab)
s = zeros(D.shape[0])
# calculate fij and sum wj*fij
for i in range(D.shape[0]):
k = 0
for j in range(D.shape[1]):
nD[i,j] = nD[i,j] ** w[j]
s[i] = prod(nD[i,:])
return s | # Filename: wpm_OMRI.py
# Description: wpm_OMRI method
from numpy import *
import BWM
import timeit
from normalisation_fuc import omri_normalisation
# Step 1+2: normalise and compute the values S_i
def wpm_omri(D, w, ab):
"""
D is the matrix of the population,
w is the criteria's weights array
p is the criteria min/max values,
"""
#calculate normalised dataset
nD = omri_normalisation(D, ab)
s = zeros(D.shape[0])
# calculate fij and sum wj*fij
for i in range(D.shape[0]):
k = 0
for j in range(D.shape[1]):
nD[i,j] = nD[i,j] ** w[j]
s[i] = prod(nD[i,:])
return s | en | 0.699414 | # Filename: wpm_OMRI.py # Description: wpm_OMRI method # Step 1+2: normalise and compute the values S_i D is the matrix of the population, w is the criteria's weights array p is the criteria min/max values, #calculate normalised dataset # calculate fij and sum wj*fij | 3.082231 | 3 |
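# Illustrative sketch (not part of the original module): a tiny weighted-product
# run on a 3x2 decision matrix. omri_normalisation() lives in a local module that
# is not shown here, so a plain max-normalisation stands in for it below; treat
# the demo names as hypothetical placeholders rather than the project's API.
if __name__ == "__main__":
    import numpy as np

    D_demo = np.array([[7.0, 2.0],
                       [9.0, 5.0],
                       [6.0, 3.0]])
    w_demo = np.array([0.6, 0.4])

    nD_demo = D_demo / D_demo.max(axis=0)        # stand-in normalisation
    s_demo = np.prod(nD_demo ** w_demo, axis=1)  # S_i = prod_j n_ij ** w_j
    print(s_demo)  # higher S_i ranks better when both criteria are benefit-type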
postgresqleu/util/management/commands/check_messaging_integrations.py | bradfordboyle/pgeu-system | 11 | 6631083 |
#
# Script to (optionally) validate all messaging integrations
#
# This can include for example checking that webhooks are still existing,
# and marked as valid. Actual implementation depends on the messaging provider.
#
from django.core.management.base import BaseCommand
import datetime
import io
import sys
from collections import defaultdict
from postgresqleu.util.messaging import get_messaging
from postgresqleu.confreg.models import MessagingProvider
class Command(BaseCommand):
help = 'Validate messaging integrations'
class ScheduledJob:
scheduled_time = datetime.time(4, 19)
default_notify_on_success = True
@classmethod
def should_run(self):
return MessagingProvider.objects.filter(active=True).exists()
def handle(self, *args, **options):
s = io.StringIO()
err = False
state = defaultdict(dict)
for provider in MessagingProvider.objects.filter(active=True).order_by('classname'):
impl = get_messaging(provider)
try:
result, out = impl.check_messaging_config(state[provider.classname])
except Exception as e:
result = False
out = "EXCEPTION: {}\n".format(e)
if out:
s.write("{}\n".format(provider.internalname))
s.write("{}\n".format('-' * len(provider.internalname)))
s.write(out)
s.write("\n\n")
if not result:
err = True
if s.tell() != 0:
print(s.getvalue())
if err:
sys.exit(1)
| #
# Script to (optionally) validate all messaging integrations
#
# This can include for example checking that webhooks are still existing,
# and marked as valid. Actual implementation depends on the messaging provider.
#
from django.core.management.base import BaseCommand
import datetime
import io
import sys
from collections import defaultdict
from postgresqleu.util.messaging import get_messaging
from postgresqleu.confreg.models import MessagingProvider
class Command(BaseCommand):
help = 'Validate messaging integrations'
class ScheduledJob:
scheduled_time = datetime.time(4, 19)
default_notify_on_success = True
@classmethod
def should_run(self):
return MessagingProvider.objects.filter(active=True).exists()
def handle(self, *args, **options):
s = io.StringIO()
err = False
state = defaultdict(dict)
for provider in MessagingProvider.objects.filter(active=True).order_by('classname'):
impl = get_messaging(provider)
try:
result, out = impl.check_messaging_config(state[provider.classname])
except Exception as e:
result = False
out = "EXCEPTION: {}\n".format(e)
if out:
s.write("{}\n".format(provider.internalname))
s.write("{}\n".format('-' * len(provider.internalname)))
s.write(out)
s.write("\n\n")
if not result:
err = True
if s.tell() != 0:
print(s.getvalue())
if err:
sys.exit(1) | en | 0.87929 | # # Script to (optionally) validate all messaging integrations # # This can include for example checking that webhooks are still existing, # and marked as valid. Actual implementation depends on the messaging provider. # | 2.125007 | 2 |
Adafruit_Python_MAX31855/setup.py | wadooddaoud/Gast_Iot_Sensor_Development | 41 | 6631084 |
try:
# Try using ez_setup to install setuptools if not already installed.
from ez_setup import use_setuptools
use_setuptools()
except ImportError:
# Ignore import error and assume Python 3 which already has setuptools.
pass
from setuptools import setup, find_packages
setup(name = 'Adafruit_MAX31855',
version = '1.6.1',
author = '<NAME>',
author_email = '<EMAIL>',
description = 'Library for accessing the MAX31855 thermocouple temperature sensor on a Raspberry Pi or Beaglebone Black.',
license = 'MIT',
url = 'https://github.com/adafruit/Adafruit_Python_MAX31855/',
dependency_links = ['https://github.com/adafruit/Adafruit_Python_GPIO/tarball/master#egg=Adafruit-GPIO-0.6.5'],
install_requires = ['Adafruit-GPIO>=0.6.5'],
packages = find_packages())
| <filename>Adafruit_Python_MAX31855/setup.py<gh_stars>10-100
try:
# Try using ez_setup to install setuptools if not already installed.
from ez_setup import use_setuptools
use_setuptools()
except ImportError:
# Ignore import error and assume Python 3 which already has setuptools.
pass
from setuptools import setup, find_packages
setup(name = 'Adafruit_MAX31855',
version = '1.6.1',
author = '<NAME>',
author_email = '<EMAIL>',
description = 'Library for accessing the MAX31855 thermocouple temperature sensor on a Raspberry Pi or Beaglebone Black.',
license = 'MIT',
url = 'https://github.com/adafruit/Adafruit_Python_MAX31855/',
dependency_links = ['https://github.com/adafruit/Adafruit_Python_GPIO/tarball/master#egg=Adafruit-GPIO-0.6.5'],
install_requires = ['Adafruit-GPIO>=0.6.5'],
packages = find_packages())
| en | 0.789815 | # Try using ez_setup to install setuptools if not already installed. # Ignore import error and assume Python 3 which already has setuptools. #egg=Adafruit-GPIO-0.6.5'], | 1.786969 | 2 |
ietf/stats/resources.py | ekr/ietfdb | 2 | 6631085 | # Autogenerated by the makeresources management command 2017-02-15 10:10 PST
from tastypie.resources import ModelResource
from tastypie.fields import ToManyField # pyflakes:ignore
from tastypie.constants import ALL, ALL_WITH_RELATIONS # pyflakes:ignore
from tastypie.cache import SimpleCache
from ietf import api
from ietf.api import ToOneField # pyflakes:ignore
from ietf.stats.models import CountryAlias, AffiliationIgnoredEnding, AffiliationAlias, MeetingRegistration
from ietf.name.resources import CountryNameResource
class CountryAliasResource(ModelResource):
country = ToOneField(CountryNameResource, 'country')
class Meta:
queryset = CountryAlias.objects.all()
serializer = api.Serializer()
cache = SimpleCache()
#resource_name = 'countryalias'
filtering = {
"id": ALL,
"alias": ALL,
"country": ALL_WITH_RELATIONS,
}
api.stats.register(CountryAliasResource())
class AffiliationIgnoredEndingResource(ModelResource):
class Meta:
queryset = AffiliationIgnoredEnding.objects.all()
serializer = api.Serializer()
cache = SimpleCache()
#resource_name = 'affiliationignoredending'
filtering = {
"id": ALL,
"ending": ALL,
}
api.stats.register(AffiliationIgnoredEndingResource())
class AffiliationAliasResource(ModelResource):
class Meta:
queryset = AffiliationAlias.objects.all()
serializer = api.Serializer()
cache = SimpleCache()
#resource_name = 'affiliationalias'
filtering = {
"id": ALL,
"alias": ALL,
"name": ALL,
}
api.stats.register(AffiliationAliasResource())
class MeetingRegistrationResource(ModelResource):
class Meta:
queryset = MeetingRegistration.objects.all()
serializer = api.Serializer()
cache = SimpleCache()
#resource_name = 'meetingregistration'
filtering = {
"id": ALL,
"meeting": ALL_WITH_RELATIONS,
"first_name": ALL,
"last_name": ALL,
"affiliation": ALL,
"country_code": ALL,
"email": ALL,
"person": ALL_WITH_RELATIONS
}
api.stats.register(MeetingRegistrationResource())
| # Autogenerated by the makeresources management command 2017-02-15 10:10 PST
from tastypie.resources import ModelResource
from tastypie.fields import ToManyField # pyflakes:ignore
from tastypie.constants import ALL, ALL_WITH_RELATIONS # pyflakes:ignore
from tastypie.cache import SimpleCache
from ietf import api
from ietf.api import ToOneField # pyflakes:ignore
from ietf.stats.models import CountryAlias, AffiliationIgnoredEnding, AffiliationAlias, MeetingRegistration
from ietf.name.resources import CountryNameResource
class CountryAliasResource(ModelResource):
country = ToOneField(CountryNameResource, 'country')
class Meta:
queryset = CountryAlias.objects.all()
serializer = api.Serializer()
cache = SimpleCache()
#resource_name = 'countryalias'
filtering = {
"id": ALL,
"alias": ALL,
"country": ALL_WITH_RELATIONS,
}
api.stats.register(CountryAliasResource())
class AffiliationIgnoredEndingResource(ModelResource):
class Meta:
queryset = AffiliationIgnoredEnding.objects.all()
serializer = api.Serializer()
cache = SimpleCache()
#resource_name = 'affiliationignoredending'
filtering = {
"id": ALL,
"ending": ALL,
}
api.stats.register(AffiliationIgnoredEndingResource())
class AffiliationAliasResource(ModelResource):
class Meta:
queryset = AffiliationAlias.objects.all()
serializer = api.Serializer()
cache = SimpleCache()
#resource_name = 'affiliationalias'
filtering = {
"id": ALL,
"alias": ALL,
"name": ALL,
}
api.stats.register(AffiliationAliasResource())
class MeetingRegistrationResource(ModelResource):
class Meta:
queryset = MeetingRegistration.objects.all()
serializer = api.Serializer()
cache = SimpleCache()
#resource_name = 'meetingregistration'
filtering = {
"id": ALL,
"meeting": ALL_WITH_RELATIONS,
"first_name": ALL,
"last_name": ALL,
"affiliation": ALL,
"country_code": ALL,
"email": ALL,
"person": ALL_WITH_RELATIONS
}
api.stats.register(MeetingRegistrationResource())
| en | 0.541816 | # Autogenerated by the makeresources management command 2017-02-15 10:10 PST # pyflakes:ignore # pyflakes:ignore # pyflakes:ignore #resource_name = 'countryalias' #resource_name = 'affiliationignoredending' #resource_name = 'affiliationalias' #resource_name = 'meetingregistration' | 1.869038 | 2 |
solver/solver_basic.py | cyclone923/sokoban_pygame | 0 | 6631086 | from game.logic import FLOOR, WALL, WORKER_ON_FLOOR, DOCK, BOX_ON_DOCK, BOX, WORKER_ON_DOCK
from collections import namedtuple
class SokobanSolverBasic(object):
def __init__(self, map):
self.walls = set()
self.playabel = set()
self.docks = set()
self.init_boxes_loc = set()
self.init_worker_loc = None
self.control_mapping = {(1, 0): "DOWN", (-1, 0): "UP", (0, 1): "RIGHT", (0,-1): "LEFT"}
self.Point = namedtuple("Point", ["x", "y"])
for i, row in enumerate(map):
for j, cell in enumerate(row):
if cell == WALL:
self.walls.add(self.Point(i,j))
else:
self.playabel.add(self.Point(i,j))
if cell == WORKER_ON_FLOOR:
assert self.init_worker_loc is None
self.init_worker_loc = (self.Point(i,j))
elif cell == WORKER_ON_DOCK:
assert self.init_worker_loc is None
self.init_worker_loc = (self.Point(i,j))
self.docks.add(self.Point(i,j))
elif cell == DOCK:
self.docks.add(self.Point(i,j))
elif cell == BOX_ON_DOCK:
self.init_boxes_loc.add(self.Point(i,j))
self.docks.add(self.Point(i,j))
elif cell == BOX:
self.init_boxes_loc.add(self.Point(i,j))
else:
assert cell == FLOOR
assert len(self.init_boxes_loc) == len(self.docks)
self.solver = None
def get_one_step_move(self, x, y):
reachabel = [self.Point(x-1,y), self.Point(x,y-1), self.Point(x,y+1), self.Point(x+1,y)]
reachabel = set(filter(lambda point: (point.x, point.y) not in self.walls, reachabel))
return reachabel
def solve_for_one(self):
pass
| from game.logic import FLOOR, WALL, WORKER_ON_FLOOR, DOCK, BOX_ON_DOCK, BOX, WORKER_ON_DOCK
from collections import namedtuple
class SokobanSolverBasic(object):
def __init__(self, map):
self.walls = set()
self.playabel = set()
self.docks = set()
self.init_boxes_loc = set()
self.init_worker_loc = None
self.control_mapping = {(1, 0): "DOWN", (-1, 0): "UP", (0, 1): "RIGHT", (0,-1): "LEFT"}
self.Point = namedtuple("Point", ["x", "y"])
for i, row in enumerate(map):
for j, cell in enumerate(row):
if cell == WALL:
self.walls.add(self.Point(i,j))
else:
self.playabel.add(self.Point(i,j))
if cell == WORKER_ON_FLOOR:
assert self.init_worker_loc is None
self.init_worker_loc = (self.Point(i,j))
elif cell == WORKER_ON_DOCK:
assert self.init_worker_loc is None
self.init_worker_loc = (self.Point(i,j))
self.docks.add(self.Point(i,j))
elif cell == DOCK:
self.docks.add(self.Point(i,j))
elif cell == BOX_ON_DOCK:
self.init_boxes_loc.add(self.Point(i,j))
self.docks.add(self.Point(i,j))
elif cell == BOX:
self.init_boxes_loc.add(self.Point(i,j))
else:
assert cell == FLOOR
assert len(self.init_boxes_loc) == len(self.docks)
self.solver = None
def get_one_step_move(self, x, y):
reachabel = [self.Point(x-1,y), self.Point(x,y-1), self.Point(x,y+1), self.Point(x+1,y)]
reachabel = set(filter(lambda point: (point.x, point.y) not in self.walls, reachabel))
return reachabel
def solve_for_one(self):
pass
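# Illustrative sketch (not part of the original solver): solve_for_one() is left
# unimplemented above, so this only shows how the same one-step expansion idea
# could feed a plain BFS over worker positions (box pushes ignored). The helper
# below is hypothetical; it assumes walls is a set of (x, y)-style points that
# fully enclose the playable area, as built in __init__ above.
from collections import deque

def reachable_cells(start, walls):
    seen = {start}
    frontier = deque([start])
    while frontier:
        x, y = frontier.popleft()
        for nxt in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
            if nxt not in walls and nxt not in seen:
                seen.add(nxt)
                frontier.append(nxt)
    return seen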
| none | 1 | 3.077729 | 3 |
|
scripts/addons/mouselook_navigation/dairin0d/utils_arrangement.py | Tilapiatsu/blender-custom_conf | 2 | 6631087 |
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ***** END GPL LICENSE BLOCK *****
import math
import numbers
import itertools
import numpy as np
import bpy
from mathutils import Vector, Matrix, Quaternion, Euler, Color
from .utils_math import divide, ndrange, nan_min, nan_max, replace_nan
from .bounds import Bounds
#============================================================================#
Number = numbers.Number
_axis_name_map = {
"x":0, "X":0,
"y":1, "Y":1,
"z":2, "Z":2,
}
def _map_axes(axes):
for axis in axes:
if not isinstance(axis, str): yield axis
yield _axis_name_map[axis]
"""
TODO:
grid() -> (start indices, end indices)
// non-grid layouts are all-or-nothing and have O(n) or even worse complexity
// rebuilding on each frame is inefficient, but using a cached version would require keeping track of all dependencies
waterfall: pack items into N bins, trying to keep overall size[axis] as even as possible
justified (2d only):
a) pack into row until width is reached, then begin next row
b) pre-sort by bin packing efficiency (?)
atlas packing?
// note: in blender ui, only grid is possible, since template_icon() can only scale uniformly
"""
class Arranger:
_box_synonyms = {'BOX', 'CUBE', 'RECT', 'SQUARE'}
_ball_synonyms = {'BALL', 'SPHERE', 'CIRCLE'}
@classmethod
def arrange(cls, bboxes, axes=None, scale=1.0, fit_size=None, adjust_size=None):
"""
axes can be:
* dict: axis indices/names, and their modes
* iterable: axis indices/names (mode = BOX)
* str: axes mode (axis indices = all)
* None: axis indices = all, mode = BOX
axis modes: MIN, MAX, CENTER/MID, BOX/CUBE/RECT/SQUARE, BALL/SPHERE/CIRCLE
"""
axes, axis_modes = cls.__get_axes_modes(axes)
if not axes: return None, None # no work to be done
init_offset = cls.__get_offset_initializer(axes, axis_modes)
scales = []
offsets = []
cell_size = np.zeros(1)
for bbox in bboxes:
center, size = bbox.center, bbox.abs_size
cell_scale = scale * (1.0 if fit_size is None else divide(fit_size, size[axes].max()))
offset = init_offset(center, size, cell_scale)
scales.append(cell_scale)
offsets.append(offset)
count = len(size)
if len(cell_size) < count: cell_size.resize(count)
cell_size[:count] = np.maximum(cell_size[:count], size * cell_scale)
if not offsets: return offsets, scales
indices_box, indices_ball = cls.__get_shape_indices(axes, axis_modes, cell_size)
if indices_box or indices_ball:
# E.g.: ensure cell is larger than minimum, add a margin, etc.
if adjust_size: cell_size = np.array(adjust_size(cell_size))
if indices_box: cls._arrange_box(indices_box, cell_size, offsets)
if indices_ball: cls._arrange_ball(indices_ball, cell_size, offsets)
return offsets, scales
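    # Typical use (illustrative note, not from the original add-on code): bboxes
    # are Bounds-like objects exposing .center and .abs_size as numpy vectors,
    # and each returned offset/scale pair is meant to be applied as "scale first,
    # then translate", matching the note inside __get_offset_initializer() below:
    #   offsets, scales = Arranger.arrange(bounds_list, axes='BOX', fit_size=1.0)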
@classmethod
def __get_axes_modes(cls, axes):
if isinstance(axes, dict):
axes = sorted(axes.items())
axis_modes = [v for k, v in axes if v]
axes = list(_map_axes(k for k, v in axes if v))
elif isinstance(axes, str):
axis_modes = axes
axes = Ellipsis
else:
axis_modes = 'BOX'
axes = (Ellipsis if axes is None else sorted(set(_map_axes(axes))))
return axes, axis_modes
@classmethod
def __get_offset_initializer(cls, axes, axis_modes):
if isinstance(axis_modes, str):
is_min = (axis_modes == 'MIN')
is_max = (axis_modes == 'MAX')
is_mid = not (is_min or is_max)
indices_min = (Ellipsis if is_min else None)
indices_max = (Ellipsis if is_max else None)
indices_mid = (Ellipsis if is_mid else None)
else:
indices_min = []
indices_max = []
indices_mid = []
for axis, axis_mode in zip(axes, axis_modes):
if axis_mode == 'MIN':
indices_min.append(axis)
elif axis_mode == 'MAX':
indices_max.append(axis)
else: # CENTER, BOX, BALL...
indices_mid.append(axis)
use_extents = bool(indices_min or indices_max)
def init_offset(center, size, cell_scale):
offset = np.zeros(len(size))
# In order for this to work properly, world matrix
# needs to be scaled and then translated
scaled_center = (-cell_scale) * center
if indices_mid: offset[indices_mid] += scaled_center[indices_mid]
if use_extents:
scaled_extents = (-cell_scale * 0.5) * size
if indices_min: offset[indices_min] += scaled_center[indices_min] + scaled_extents[indices_min]
if indices_max: offset[indices_max] += scaled_center[indices_max] - scaled_extents[indices_max]
return offset
return init_offset
@classmethod
def __get_shape_indices(cls, axes, axis_modes, cell_size):
indices_box = []
indices_ball = []
if isinstance(axis_modes, str):
if axis_modes in cls._box_synonyms:
indices_box.extend(range(len(cell_size)))
elif axis_modes in cls._ball_synonyms:
indices_ball.extend(range(len(cell_size)))
else:
for axis, axis_mode in zip(axes, axis_modes):
if axis_mode in cls._box_synonyms:
indices_box.append(axis)
elif axis_mode in cls._ball_synonyms:
indices_ball.append(axis)
return indices_box, indices_ball
@classmethod
def _arrange_ball(cls, axes, cell_size, offsets):
cell_count = len(offsets)
sizes = cell_size[axes]
counts = cls._optimal_ball_counts(sizes, cell_count)
candidates = []
for ndindex in ndrange(-counts, counts+1):
offset = ndindex * sizes
candidates.append((np.linalg.norm(offset), tuple(offset)))
candidates.sort() # Note: numpy arrays cannot be sorted (ValueError)
for cell_index in range(cell_count):
offsets[cell_index][axes] += candidates[cell_index][1]
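    # Note on the method above: lattice positions are visited in order of
    # increasing distance from the origin, so the first len(offsets) cells
    # approximate a ball/sphere/circle-shaped cluster centred on the origin.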
@classmethod
def _optimal_ball_counts(cls, sizes, cell_count):
n = len(sizes) # dimensions
ratios = sizes / sizes.max()
volume = cell_count * np.prod(ratios)
# https://en.wikipedia.org/wiki/Volume_of_an_n-ball
coef = math.pi**(n/2) / math.gamma(n/2 + 1)
radius = (volume / coef) ** (1.0 / n)
counts = [(math.ceil(radius / ratio) + 1) for ratio in ratios]
return np.array(counts)
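    # Derivation note for the method above: the n-ball volume is
    #   V_n(r) = coef * r**n,  coef = pi**(n/2) / gamma(n/2 + 1)
    # Cell edges are measured in units of the largest cell edge (ratios <= 1),
    # so solving coef * r**n = cell_count * prod(ratios) for r gives the lattice
    # radius, and each axis needs about ceil(r / ratio) + 1 cells to cover it.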
@classmethod
def _arrange_box(cls, axes, cell_size, offsets):
cell_count = len(offsets)
sizes = cell_size[axes]
counts = cls._optimal_box_counts(sizes, cell_count)
offset_bbox = -0.5 * sizes * (counts - 1)
cell_index = 0
for ndindex in ndrange(counts):
offsets[cell_index][axes] += offset_bbox + ndindex * sizes
cell_index += 1
if cell_index >= cell_count: break
@classmethod
def _optimal_box_counts(cls, sizes, cell_count):
axes_count = len(sizes)
if axes_count == 1: return np.array([cell_count])
sizes_indices = sorted(zip(sizes, range(axes_count)))
sorted_sizes = [size for size, i in sizes_indices]
indices = [i for size, i in sizes_indices]
calc_cost = (lambda counts: np.linalg.norm(sorted_sizes * counts))
counts = np.ones(axes_count)
counts[0] = cell_count
cost = calc_cost(counts)
# Search for optimal box sides via gradient descent
while True:
best_counts = counts
best_cost = cost
for i in range(1, axes_count):
new_counts = np.array(counts)
new_counts[i] += 1
new_counts[0] = math.ceil(cell_count / np.prod(new_counts[1:]))
new_cost = calc_cost(new_counts)
if new_cost < best_cost:
best_cost = new_cost
best_counts = new_counts
if best_cost == cost: break
counts = best_counts
cost = best_cost
return counts[indices]
#============================================================================#
__grid_index_funcs = {
'OVERLAP': (lambda v0, v1, dv: (math.floor(v0), math.ceil(v1))),
'INSIDE': (lambda v0, v1, dv: (math.ceil(v0), math.floor(v1))),
'SIZE_FLOOR': (lambda v0, v1, dv: (0, math.floor(dv))),
'SIZE_CEIL': (lambda v0, v1, dv: (0, math.ceil(dv))),
'SIZE_ROUND': (lambda v0, v1, dv: (0, round(dv))),
}
@classmethod
def __grid_indices_parse_modes(cls, modes):
mode_default = 'OVERLAP'
if modes is None:
modes = {}
elif isinstance(modes, dict):
modes = {_axis_name_map.get(k, k): v for k, v in modes.items()}
elif isinstance(modes, str):
mode_default = modes
modes = {}
else:
modes = dict(enumerate(modes))
return modes, mode_default
@classmethod
def __grid_indices_limit(cls, array, limit, func):
if isinstance(limit, Number):
array[:] = [func(v, limit) for v in array]
elif limit is not None:
array[:] = [func(v, vm) for v, vm in zip(array, limit)]
@classmethod
def grid_indices(cls, bbox, cell, modes=None, min_count=None, max_count=None):
modes, mode_default = cls.__grid_indices_parse_modes(modes)
bbox_rel = bbox.relative(cell)
rel_min = bbox_rel.min
rel_max = bbox_rel.max
rel_size = bbox_rel.size
indices = np.zeros((2, bbox_rel.dimension), int)
for axis in range(bbox_rel.dimension):
mode = modes.get(axis, mode_default)
index_func = cls.__grid_index_funcs.get(mode, None)
if not index_func: raise ValueError(f"Unrecognized axis mode {mode} at modes[{axis}]")
indices[:, axis] = index_func(rel_min[axis], rel_max[axis], rel_size[axis])
indices[1] -= indices[0]
cls.__grid_indices_limit(indices[1], min_count, nan_max)
cls.__grid_indices_limit(indices[1], max_count, nan_min)
return indices
@classmethod
def __grid_parse_alignments(cls, alignments, dimension):
if not alignments: alignments = 'ABS'
if isinstance(alignments, str):
alignments = [alignments] * dimension
elif len(alignments) < dimension:
alignments = list(alignments)
alignments.extend(['ABS'] * (dimension - len(alignments)))
return alignments
@classmethod
def __grid_parse_flat(cls, flat):
if isinstance(flat, Number) and not isinstance(flat, bool):
return (0, flat) # bool is a Number in python
if hasattr(flat, "__len__"):
n = len(flat)
flat_min = (flat[0] if n > 0 else None)
flat_max = (flat[1] if n > 1 else None)
if flat_min is None: flat_min = -math.inf
if flat_max is None: flat_max = math.inf
return (flat_min, flat_max)
return (-math.inf, math.inf)
@classmethod
def __grid_parse_sorting(cls, sorting, dimension):
if (sorting is None) or (len(sorting) == 0):
return [(i, False) for i in range(dimension)]
result = []
axes = set()
for sort_item in sorting:
invert = False
if isinstance(sort_item, int):
axis = sort_item
elif isinstance(sort_item, str):
axis = _axis_name_map.get(sort_item, None)
                if axis is None: continue
elif len(sort_item) == 0:
continue
else:
axis = sort_item[0]
if isinstance(axis, str):
axis = _axis_name_map.get(axis, None)
                    if axis is None: continue
invert = (len(sort_item) > 1) and (sort_item[1] < 0)
if (axis < 0) or (axis >= dimension): continue
if axis in axes: continue
axes.add(axis)
result.append((axis, invert))
return result or [(i, False) for i in range(dimension)]
@classmethod
def __grid_index_converters(cls, sorting, indices_count):
dimension = len(indices_count)
sort_i_max = len(sorting) - 1
stride = 1
index_scale = np.zeros(dimension, int)
index_offset = 0
nd_mods = np.ones(dimension, int)
nd_divs = np.ones(dimension, int)
nd_invs = np.zeros(dimension, bool)
last_axis = -1
for sort_i, sort_item in enumerate(sorting):
axis, invert = sort_item
index_scale[axis] = (-1 if invert else 1) * stride
if invert and (sort_i < sort_i_max):
index_offset += (indices_count[axis] - 1) * stride
nd_mods[axis] = indices_count[axis]
nd_divs[axis] = stride
nd_invs[axis] = invert
last_axis = axis
stride *= indices_count[axis]
nd_mods1 = nd_mods - 1
dot = np.dot
def to_flat(ndindex, use_invert=True):
ndindex = np.array(ndindex)
for axis in range(dimension):
if axis != last_axis:
ndindex[axis] = max(min(ndindex[axis], indices_count[axis]-1), 0)
if use_invert: return dot(ndindex, index_scale) + index_offset
return dot(ndindex, np.abs(index_scale))
def to_ndim(index, use_invert=True):
ndindex = index // nd_divs
for axis in range(dimension):
if axis != last_axis:
if use_invert and nd_invs[axis]:
ndindex[axis] = nd_mods1[axis] - (ndindex[axis] % nd_mods[axis])
else:
ndindex[axis] %= nd_mods[axis]
elif use_invert and nd_invs[axis]:
ndindex[axis] = -ndindex[axis]
return ndindex
return to_flat, to_ndim
@classmethod
def grid(cls, bbox, cell, indices, alignments=None, flat=None, sorting=None):
# alignment(s): ABS, MIN/LEFT, MAX/RIGHT, CENTER, JUSTIFY/DISTRIBUTE
# flat is None: n-dimensional index; otherwise, scalar index (grow along the sorting[-1] axis)
# flat is not None: True (-inf, inf); int (0, count); tuple (start, count)
# sorting: [(axis, direction), ...]
if (indices is None) or isinstance(indices, (dict, str)):
indices = cls.grid_indices(cell, bbox, modes=indices)
indices_min, indices_count = indices
grid_info = dict(bbox=bbox, indices=indices)
grid_info.update(alignments=alignments, flat=flat, sorting=sorting)
if flat is None:
def index_iterator():
for ndindex in ndrange(indices_count):
ndindex += indices_min
yield ndindex, ndindex
else:
index_min, index_max, to_flat, to_ndim = cls._grid_flat_prepare(flat, sorting, indices)
grid_info.update(index_min=index_min, index_max=index_max, to_flat=to_flat, to_ndim=to_ndim)
def index_iterator():
for index in range(index_min, index_max):
ndindex = to_ndim(index, True)
yield index, ndindex
if isinstance(cell, Bounds):
cell_min, cell_size = cell.min, cell.size
else:
cell_min, cell_size = np.zeros(len(cell)), np.asarray(cell)
pos_min, pos_step = cls._grid_calc_pos(cell_min, cell_size, bbox, indices, alignments)
grid_info.update(cell_min=cell_min, cell_size=cell_size, pos_min=pos_min, pos_step=pos_step)
def ndim_to_bounds(ndindex):
return Bounds.MinSize(pos_min + ndindex * pos_step, cell_size)
def point_to_ndim(point):
return np.floor((np.asarray(point) - pos_min) / pos_step).astype(int)
def grid_iterator():
for index, ndindex in index_iterator():
yield index, Bounds.MinSize(pos_min + ndindex * pos_step, cell_size)
grid_info.update(ndim_to_bounds=ndim_to_bounds, point_to_ndim=point_to_ndim, iterator=grid_iterator)
return grid_info
@classmethod
def _grid_flat_prepare(cls, flat, sorting, indices):
indices_min, indices_count = indices
dimension = len(indices_count)
flat_min, flat_count = cls.__grid_parse_flat(flat)
sorting = cls.__grid_parse_sorting(sorting, dimension)
to_flat, to_ndim = cls.__grid_index_converters(sorting, indices_count)
flat_max = flat_min + flat_count
index_min = to_flat(indices_min, False)
index_max = to_flat(indices_min + indices_count - 1, False) + 1
# Convert to float when comparing so that there won't be
# "RuntimeWarning: invalid value encountered in greater"
if flat_min > float(index_min): index_min = int(flat_min)
if flat_max < float(index_max): index_max = int(flat_max)
if index_min < index_max:
limits = to_ndim(index_max - 1, False) - to_ndim(index_min, False) + 1
indices_count = np.array(indices_count) # make a copy, just in case
update_ndim = False
for axis, invert in reversed(sorting):
indices_count[axis] = limits[axis]
if limits[axis] > 1: break
update_ndim = True
if update_ndim:
to_flat, to_ndim = cls.__grid_index_converters(sorting, indices_count)
return index_min, index_max, to_flat, to_ndim
@classmethod
def _grid_calc_pos(cls, cell_min, cell_size, bbox, indices, alignments):
indices_min, indices_count = indices
dimension = len(indices_count)
alignments = cls.__grid_parse_alignments(alignments, dimension)
size_diff = bbox.size - cell_size * indices_count
bbox_min = bbox.min - cell_size * indices_min
bbox_max = bbox_min + size_diff
bbox_mid = bbox_min + size_diff * 0.5
just_size = cell_size + size_diff / indices_count
just_min = bbox_min + (just_size - cell_size) * 0.5
pos_min, pos_step = np.zeros((2, dimension))
for axis, alignment in enumerate(alignments):
if alignment == 'ABS':
pos_min[axis] = cell_min[axis]
pos_step[axis] = cell_size[axis]
elif alignment == 'MIN':
pos_min[axis] = bbox_min[axis]
pos_step[axis] = cell_size[axis]
elif alignment == 'MAX':
pos_min[axis] = bbox_max[axis]
pos_step[axis] = cell_size[axis]
elif alignment == 'CENTER':
pos_min[axis] = bbox_mid[axis]
pos_step[axis] = cell_size[axis]
else: # JUSTIFY / DISTRIBUTE
pos_min[axis] = just_min[axis]
pos_step[axis] = just_size[axis]
return pos_min, pos_step
@classmethod
def grid_size(cls, indices, cell, padding=None, flat=None, sorting=None):
indices_min, indices_count = indices
dimension = len(indices_count)
if flat is not None:
flat_min, flat_count = cls.__grid_parse_flat(flat)
sorting = cls.__grid_parse_sorting(sorting, dimension)
to_flat, to_ndim = cls.__grid_index_converters(sorting, indices_count)
index_min, index_max = flat_min, flat_min + flat_count
if index_min < index_max:
limits = to_ndim(index_max - 1, False) - to_ndim(index_min, False) + 1
indices_count = np.array(indices_count) # make a copy, just in case
for axis, invert in reversed(sorting):
indices_count[axis] = limits[axis]
if limits[axis] > 1: break
if padding is None:
padding = np.zeros(dimension)
elif isinstance(padding, Number):
padding = np.full(dimension, padding)
cell_size = (cell.size if isinstance(cell, Bounds) else np.asarray(cell))
return cell_size * indices_count + padding * np.maximum(indices_count - 1, 0)
| <filename>scripts/addons/mouselook_navigation/dairin0d/utils_arrangement.py
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ***** END GPL LICENSE BLOCK *****
import math
import numbers
import itertools
import numpy as np
import bpy
from mathutils import Vector, Matrix, Quaternion, Euler, Color
from .utils_math import divide, ndrange, nan_min, nan_max, replace_nan
from .bounds import Bounds
#============================================================================#
Number = numbers.Number
_axis_name_map = {
"x":0, "X":0,
"y":1, "Y":1,
"z":2, "Z":2,
}
def _map_axes(axes):
for axis in axes:
if not isinstance(axis, str): yield axis
        else: yield _axis_name_map[axis]
"""
TODO:
grid() -> (start indices, end indices)
// non-grid layouts are all-or-nothing and have O(n) or even worse complexity
// rebuilding on each frame is inefficient, but using a cached version would require keeping track of all dependencies
waterfall: pack items into N bins, trying to keep overall size[axis] as even as possible
justified (2d only):
a) pack into row until width is reached, then begin next row
b) pre-sort by bin packing efficiency (?)
atlas packing?
// note: in blender ui, only grid is possible, since template_icon() can only scale uniformly
"""
class Arranger:
_box_synonyms = {'BOX', 'CUBE', 'RECT', 'SQUARE'}
_ball_synonyms = {'BALL', 'SPHERE', 'CIRCLE'}
@classmethod
def arrange(cls, bboxes, axes=None, scale=1.0, fit_size=None, adjust_size=None):
"""
axes can be:
* dict: axis indices/names, and their modes
* iterable: axis indices/names (mode = BOX)
* str: axes mode (axis indices = all)
* None: axis indices = all, mode = BOX
axis modes: MIN, MAX, CENTER/MID, BOX/CUBE/RECT/SQUARE, BALL/SPHERE/CIRCLE
"""
axes, axis_modes = cls.__get_axes_modes(axes)
if not axes: return None, None # no work to be done
init_offset = cls.__get_offset_initializer(axes, axis_modes)
scales = []
offsets = []
cell_size = np.zeros(1)
for bbox in bboxes:
center, size = bbox.center, bbox.abs_size
cell_scale = scale * (1.0 if fit_size is None else divide(fit_size, size[axes].max()))
offset = init_offset(center, size, cell_scale)
scales.append(cell_scale)
offsets.append(offset)
count = len(size)
if len(cell_size) < count: cell_size.resize(count)
cell_size[:count] = np.maximum(cell_size[:count], size * cell_scale)
if not offsets: return offsets, scales
indices_box, indices_ball = cls.__get_shape_indices(axes, axis_modes, cell_size)
if indices_box or indices_ball:
# E.g.: ensure cell is larger than minimum, add a margin, etc.
if adjust_size: cell_size = np.array(adjust_size(cell_size))
if indices_box: cls._arrange_box(indices_box, cell_size, offsets)
if indices_ball: cls._arrange_ball(indices_ball, cell_size, offsets)
return offsets, scales
@classmethod
def __get_axes_modes(cls, axes):
if isinstance(axes, dict):
axes = sorted(axes.items())
axis_modes = [v for k, v in axes if v]
axes = list(_map_axes(k for k, v in axes if v))
elif isinstance(axes, str):
axis_modes = axes
axes = Ellipsis
else:
axis_modes = 'BOX'
axes = (Ellipsis if axes is None else sorted(set(_map_axes(axes))))
return axes, axis_modes
@classmethod
def __get_offset_initializer(cls, axes, axis_modes):
if isinstance(axis_modes, str):
is_min = (axis_modes == 'MIN')
is_max = (axis_modes == 'MAX')
is_mid = not (is_min or is_max)
indices_min = (Ellipsis if is_min else None)
indices_max = (Ellipsis if is_max else None)
indices_mid = (Ellipsis if is_mid else None)
else:
indices_min = []
indices_max = []
indices_mid = []
for axis, axis_mode in zip(axes, axis_modes):
if axis_mode == 'MIN':
indices_min.append(axis)
elif axis_mode == 'MAX':
indices_max.append(axis)
else: # CENTER, BOX, BALL...
indices_mid.append(axis)
use_extents = bool(indices_min or indices_max)
def init_offset(center, size, cell_scale):
offset = np.zeros(len(size))
# In order for this to work properly, world matrix
# needs to be scaled and then translated
scaled_center = (-cell_scale) * center
if indices_mid: offset[indices_mid] += scaled_center[indices_mid]
if use_extents:
scaled_extents = (-cell_scale * 0.5) * size
if indices_min: offset[indices_min] += scaled_center[indices_min] + scaled_extents[indices_min]
if indices_max: offset[indices_max] += scaled_center[indices_max] - scaled_extents[indices_max]
return offset
return init_offset
@classmethod
def __get_shape_indices(cls, axes, axis_modes, cell_size):
indices_box = []
indices_ball = []
if isinstance(axis_modes, str):
if axis_modes in cls._box_synonyms:
indices_box.extend(range(len(cell_size)))
elif axis_modes in cls._ball_synonyms:
indices_ball.extend(range(len(cell_size)))
else:
for axis, axis_mode in zip(axes, axis_modes):
if axis_mode in cls._box_synonyms:
indices_box.append(axis)
elif axis_mode in cls._ball_synonyms:
indices_ball.append(axis)
return indices_box, indices_ball
@classmethod
def _arrange_ball(cls, axes, cell_size, offsets):
cell_count = len(offsets)
sizes = cell_size[axes]
counts = cls._optimal_ball_counts(sizes, cell_count)
candidates = []
for ndindex in ndrange(-counts, counts+1):
offset = ndindex * sizes
candidates.append((np.linalg.norm(offset), tuple(offset)))
candidates.sort() # Note: numpy arrays cannot be sorted (ValueError)
for cell_index in range(cell_count):
offsets[cell_index][axes] += candidates[cell_index][1]
@classmethod
def _optimal_ball_counts(cls, sizes, cell_count):
n = len(sizes) # dimensions
ratios = sizes / sizes.max()
volume = cell_count * np.prod(ratios)
# https://en.wikipedia.org/wiki/Volume_of_an_n-ball
coef = math.pi**(n/2) / math.gamma(n/2 + 1)
radius = (volume / coef) ** (1.0 / n)
counts = [(math.ceil(radius / ratio) + 1) for ratio in ratios]
return np.array(counts)
@classmethod
def _arrange_box(cls, axes, cell_size, offsets):
cell_count = len(offsets)
sizes = cell_size[axes]
counts = cls._optimal_box_counts(sizes, cell_count)
offset_bbox = -0.5 * sizes * (counts - 1)
cell_index = 0
for ndindex in ndrange(counts):
offsets[cell_index][axes] += offset_bbox + ndindex * sizes
cell_index += 1
if cell_index >= cell_count: break
@classmethod
def _optimal_box_counts(cls, sizes, cell_count):
axes_count = len(sizes)
if axes_count == 1: return np.array([cell_count])
sizes_indices = sorted(zip(sizes, range(axes_count)))
sorted_sizes = [size for size, i in sizes_indices]
indices = [i for size, i in sizes_indices]
calc_cost = (lambda counts: np.linalg.norm(sorted_sizes * counts))
counts = np.ones(axes_count)
counts[0] = cell_count
cost = calc_cost(counts)
        # Greedy search over integer box side counts, minimizing the layout's overall diagonal
while True:
best_counts = counts
best_cost = cost
for i in range(1, axes_count):
new_counts = np.array(counts)
new_counts[i] += 1
new_counts[0] = math.ceil(cell_count / np.prod(new_counts[1:]))
new_cost = calc_cost(new_counts)
if new_cost < best_cost:
best_cost = new_cost
best_counts = new_counts
if best_cost == cost: break
counts = best_counts
cost = best_cost
return counts[indices]
#============================================================================#
__grid_index_funcs = {
'OVERLAP': (lambda v0, v1, dv: (math.floor(v0), math.ceil(v1))),
'INSIDE': (lambda v0, v1, dv: (math.ceil(v0), math.floor(v1))),
'SIZE_FLOOR': (lambda v0, v1, dv: (0, math.floor(dv))),
'SIZE_CEIL': (lambda v0, v1, dv: (0, math.ceil(dv))),
'SIZE_ROUND': (lambda v0, v1, dv: (0, round(dv))),
}
@classmethod
def __grid_indices_parse_modes(cls, modes):
mode_default = 'OVERLAP'
if modes is None:
modes = {}
elif isinstance(modes, dict):
modes = {_axis_name_map.get(k, k): v for k, v in modes.items()}
elif isinstance(modes, str):
mode_default = modes
modes = {}
else:
modes = dict(enumerate(modes))
return modes, mode_default
@classmethod
def __grid_indices_limit(cls, array, limit, func):
if isinstance(limit, Number):
array[:] = [func(v, limit) for v in array]
elif limit is not None:
array[:] = [func(v, vm) for v, vm in zip(array, limit)]
@classmethod
def grid_indices(cls, bbox, cell, modes=None, min_count=None, max_count=None):
modes, mode_default = cls.__grid_indices_parse_modes(modes)
bbox_rel = bbox.relative(cell)
rel_min = bbox_rel.min
rel_max = bbox_rel.max
rel_size = bbox_rel.size
indices = np.zeros((2, bbox_rel.dimension), int)
for axis in range(bbox_rel.dimension):
mode = modes.get(axis, mode_default)
index_func = cls.__grid_index_funcs.get(mode, None)
if not index_func: raise ValueError(f"Unrecognized axis mode {mode} at modes[{axis}]")
indices[:, axis] = index_func(rel_min[axis], rel_max[axis], rel_size[axis])
indices[1] -= indices[0]
cls.__grid_indices_limit(indices[1], min_count, nan_max)
cls.__grid_indices_limit(indices[1], max_count, nan_min)
return indices
@classmethod
def __grid_parse_alignments(cls, alignments, dimension):
if not alignments: alignments = 'ABS'
if isinstance(alignments, str):
alignments = [alignments] * dimension
elif len(alignments) < dimension:
alignments = list(alignments)
alignments.extend(['ABS'] * (dimension - len(alignments)))
return alignments
@classmethod
def __grid_parse_flat(cls, flat):
if isinstance(flat, Number) and not isinstance(flat, bool):
return (0, flat) # bool is a Number in python
if hasattr(flat, "__len__"):
n = len(flat)
flat_min = (flat[0] if n > 0 else None)
flat_max = (flat[1] if n > 1 else None)
if flat_min is None: flat_min = -math.inf
if flat_max is None: flat_max = math.inf
return (flat_min, flat_max)
return (-math.inf, math.inf)
@classmethod
def __grid_parse_sorting(cls, sorting, dimension):
if (sorting is None) or (len(sorting) == 0):
return [(i, False) for i in range(dimension)]
result = []
axes = set()
for sort_item in sorting:
invert = False
if isinstance(sort_item, int):
axis = sort_item
elif isinstance(sort_item, str):
axis = _axis_name_map.get(sort_item, None)
                if axis is None: continue
elif len(sort_item) == 0:
continue
else:
axis = sort_item[0]
if isinstance(axis, str):
axis = _axis_name_map.get(axis, None)
                    if axis is None: continue
invert = (len(sort_item) > 1) and (sort_item[1] < 0)
if (axis < 0) or (axis >= dimension): continue
if axis in axes: continue
axes.add(axis)
result.append((axis, invert))
return result or [(i, False) for i in range(dimension)]
@classmethod
def __grid_index_converters(cls, sorting, indices_count):
dimension = len(indices_count)
sort_i_max = len(sorting) - 1
stride = 1
index_scale = np.zeros(dimension, int)
index_offset = 0
nd_mods = np.ones(dimension, int)
nd_divs = np.ones(dimension, int)
nd_invs = np.zeros(dimension, bool)
last_axis = -1
for sort_i, sort_item in enumerate(sorting):
axis, invert = sort_item
index_scale[axis] = (-1 if invert else 1) * stride
if invert and (sort_i < sort_i_max):
index_offset += (indices_count[axis] - 1) * stride
nd_mods[axis] = indices_count[axis]
nd_divs[axis] = stride
nd_invs[axis] = invert
last_axis = axis
stride *= indices_count[axis]
nd_mods1 = nd_mods - 1
dot = np.dot
def to_flat(ndindex, use_invert=True):
ndindex = np.array(ndindex)
for axis in range(dimension):
if axis != last_axis:
ndindex[axis] = max(min(ndindex[axis], indices_count[axis]-1), 0)
if use_invert: return dot(ndindex, index_scale) + index_offset
return dot(ndindex, np.abs(index_scale))
def to_ndim(index, use_invert=True):
ndindex = index // nd_divs
for axis in range(dimension):
if axis != last_axis:
if use_invert and nd_invs[axis]:
ndindex[axis] = nd_mods1[axis] - (ndindex[axis] % nd_mods[axis])
else:
ndindex[axis] %= nd_mods[axis]
elif use_invert and nd_invs[axis]:
ndindex[axis] = -ndindex[axis]
return ndindex
return to_flat, to_ndim
@classmethod
def grid(cls, bbox, cell, indices, alignments=None, flat=None, sorting=None):
# alignment(s): ABS, MIN/LEFT, MAX/RIGHT, CENTER, JUSTIFY/DISTRIBUTE
# flat is None: n-dimensional index; otherwise, scalar index (grow along the sorting[-1] axis)
# flat is not None: True (-inf, inf); int (0, count); tuple (start, count)
# sorting: [(axis, direction), ...]
if (indices is None) or isinstance(indices, (dict, str)):
indices = cls.grid_indices(cell, bbox, modes=indices)
indices_min, indices_count = indices
grid_info = dict(bbox=bbox, indices=indices)
grid_info.update(alignments=alignments, flat=flat, sorting=sorting)
if flat is None:
def index_iterator():
for ndindex in ndrange(indices_count):
ndindex += indices_min
yield ndindex, ndindex
else:
index_min, index_max, to_flat, to_ndim = cls._grid_flat_prepare(flat, sorting, indices)
grid_info.update(index_min=index_min, index_max=index_max, to_flat=to_flat, to_ndim=to_ndim)
def index_iterator():
for index in range(index_min, index_max):
ndindex = to_ndim(index, True)
yield index, ndindex
if isinstance(cell, Bounds):
cell_min, cell_size = cell.min, cell.size
else:
cell_min, cell_size = np.zeros(len(cell)), np.asarray(cell)
pos_min, pos_step = cls._grid_calc_pos(cell_min, cell_size, bbox, indices, alignments)
grid_info.update(cell_min=cell_min, cell_size=cell_size, pos_min=pos_min, pos_step=pos_step)
def ndim_to_bounds(ndindex):
return Bounds.MinSize(pos_min + ndindex * pos_step, cell_size)
def point_to_ndim(point):
return np.floor((np.asarray(point) - pos_min) / pos_step).astype(int)
def grid_iterator():
for index, ndindex in index_iterator():
yield index, Bounds.MinSize(pos_min + ndindex * pos_step, cell_size)
grid_info.update(ndim_to_bounds=ndim_to_bounds, point_to_ndim=point_to_ndim, iterator=grid_iterator)
return grid_info
@classmethod
def _grid_flat_prepare(cls, flat, sorting, indices):
indices_min, indices_count = indices
dimension = len(indices_count)
flat_min, flat_count = cls.__grid_parse_flat(flat)
sorting = cls.__grid_parse_sorting(sorting, dimension)
to_flat, to_ndim = cls.__grid_index_converters(sorting, indices_count)
flat_max = flat_min + flat_count
index_min = to_flat(indices_min, False)
index_max = to_flat(indices_min + indices_count - 1, False) + 1
# Convert to float when comparing so that there won't be
# "RuntimeWarning: invalid value encountered in greater"
if flat_min > float(index_min): index_min = int(flat_min)
if flat_max < float(index_max): index_max = int(flat_max)
if index_min < index_max:
limits = to_ndim(index_max - 1, False) - to_ndim(index_min, False) + 1
indices_count = np.array(indices_count) # make a copy, just in case
update_ndim = False
for axis, invert in reversed(sorting):
indices_count[axis] = limits[axis]
if limits[axis] > 1: break
update_ndim = True
if update_ndim:
to_flat, to_ndim = cls.__grid_index_converters(sorting, indices_count)
return index_min, index_max, to_flat, to_ndim
@classmethod
def _grid_calc_pos(cls, cell_min, cell_size, bbox, indices, alignments):
indices_min, indices_count = indices
dimension = len(indices_count)
alignments = cls.__grid_parse_alignments(alignments, dimension)
size_diff = bbox.size - cell_size * indices_count
bbox_min = bbox.min - cell_size * indices_min
bbox_max = bbox_min + size_diff
bbox_mid = bbox_min + size_diff * 0.5
just_size = cell_size + size_diff / indices_count
just_min = bbox_min + (just_size - cell_size) * 0.5
pos_min, pos_step = np.zeros((2, dimension))
for axis, alignment in enumerate(alignments):
if alignment == 'ABS':
pos_min[axis] = cell_min[axis]
pos_step[axis] = cell_size[axis]
elif alignment == 'MIN':
pos_min[axis] = bbox_min[axis]
pos_step[axis] = cell_size[axis]
elif alignment == 'MAX':
pos_min[axis] = bbox_max[axis]
pos_step[axis] = cell_size[axis]
elif alignment == 'CENTER':
pos_min[axis] = bbox_mid[axis]
pos_step[axis] = cell_size[axis]
else: # JUSTIFY / DISTRIBUTE
pos_min[axis] = just_min[axis]
pos_step[axis] = just_size[axis]
return pos_min, pos_step
@classmethod
def grid_size(cls, indices, cell, padding=None, flat=None, sorting=None):
indices_min, indices_count = indices
dimension = len(indices_count)
if flat is not None:
flat_min, flat_count = cls.__grid_parse_flat(flat)
sorting = cls.__grid_parse_sorting(sorting, dimension)
to_flat, to_ndim = cls.__grid_index_converters(sorting, indices_count)
index_min, index_max = flat_min, flat_min + flat_count
if index_min < index_max:
limits = to_ndim(index_max - 1, False) - to_ndim(index_min, False) + 1
indices_count = np.array(indices_count) # make a copy, just in case
for axis, invert in reversed(sorting):
indices_count[axis] = limits[axis]
if limits[axis] > 1: break
if padding is None:
padding = np.zeros(dimension)
elif isinstance(padding, Number):
padding = np.full(dimension, padding)
cell_size = (cell.size if isinstance(cell, Bounds) else np.asarray(cell))
return cell_size * indices_count + padding * np.maximum(indices_count - 1, 0)
| en | 0.806342 | # ***** BEGIN GPL LICENSE BLOCK ***** # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # ***** END GPL LICENSE BLOCK ***** #============================================================================# TODO: grid() -> (start indices, end indices) // non-grid layouts are all-or-nothing and have O(n) or even worse complexity // rebuilding on each frame is inefficient, but using a cached version would require keeping track of all dependencies waterfall: pack items into N bins, trying to keep overall size[axis] as even as possible justified (2d only): a) pack into row until width is reached, then begin next row b) pre-sort by bin packing efficiency (?) atlas packing? // note: in blender ui, only grid is possible, since template_icon() can only scale uniformly axes can be: * dict: axis indices/names, and their modes * iterable: axis indices/names (mode = BOX) * str: axes mode (axis indices = all) * None: axis indices = all, mode = BOX axis modes: MIN, MAX, CENTER/MID, BOX/CUBE/RECT/SQUARE, BALL/SPHERE/CIRCLE # no work to be done # E.g.: ensure cell is larger than minimum, add a margin, etc. # CENTER, BOX, BALL... # In order for this to work properly, world matrix # needs to be scaled and then translated # Note: numpy arrays cannot be sorted (ValueError) # dimensions # https://en.wikipedia.org/wiki/Volume_of_an_n-ball # Search for optimal box sides via gradient descent #============================================================================# # bool is a Number in python # alignment(s): ABS, MIN/LEFT, MAX/RIGHT, CENTER, JUSTIFY/DISTRIBUTE # flat is None: n-dimensional index; otherwise, scalar index (grow along the sorting[-1] axis) # flat is not None: True (-inf, inf); int (0, count); tuple (start, count) # sorting: [(axis, direction), ...] # Convert to float when comparing so that there won't be # "RuntimeWarning: invalid value encountered in greater" # make a copy, just in case # JUSTIFY / DISTRIBUTE # make a copy, just in case | 1.962651 | 2 |
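A minimal usage sketch for the Arranger helpers in the record above, assuming the addon module (and its utils_math / bounds dependencies) is importable; the stub bounding-box class below is an assumption that only mimics the two attributes arrange() actually reads, not the addon's real Bounds API.

import numpy as np
# from mouselook_navigation.dairin0d.utils_arrangement import Arranger  # hypothetical import path

class _StubBBox:
    # Stand-in exposing only .center and .abs_size, which is all Arranger.arrange() touches.
    def __init__(self, center, size):
        self.center = np.asarray(center, dtype=float)
        self.abs_size = np.asarray(size, dtype=float)

bboxes = [_StubBBox((0, 0, 0), (1, 2, 1)),
          _StubBBox((5, 1, 0), (2, 1, 1)),
          _StubBBox((-3, 2, 4), (1, 1, 3))]
# Pack the three objects into a roughly cubic layout, scaling each to fit a unit cell:
offsets, scales = Arranger.arrange(bboxes, axes='BOX', fit_size=1.0)
# Per the comment in __get_offset_initializer, each object's world matrix is scaled by
# scales[i] first and then translated by offsets[i] to land in its grid cell.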
PaddleCV/rcnn/models/model_builder.py | suoych/models | 0 | 6631088 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.initializer import Constant
from paddle.fluid.initializer import Normal
from paddle.fluid.initializer import MSRA
from paddle.fluid.regularizer import L2Decay
from config import cfg
class RCNN(object):
def __init__(self,
add_conv_body_func=None,
add_roi_box_head_func=None,
mode='train',
use_pyreader=True,
use_random=True):
self.add_conv_body_func = add_conv_body_func
self.add_roi_box_head_func = add_roi_box_head_func
self.mode = mode
self.use_pyreader = use_pyreader
self.use_random = use_random
self.checkpoints = []
def build_model(self, image_shape):
self.build_input(image_shape)
body_conv = self.add_conv_body_func(self.image)
# RPN
self.rpn_heads(body_conv)
# Fast RCNN
self.fast_rcnn_heads(body_conv)
if self.mode != 'train':
self.eval_bbox()
# Mask RCNN
if cfg.MASK_ON:
self.mask_rcnn_heads(body_conv)
def loss(self):
losses = []
# Fast RCNN loss
loss_cls, loss_bbox = self.fast_rcnn_loss()
# RPN loss
rpn_cls_loss, rpn_reg_loss = self.rpn_loss()
losses = [loss_cls, loss_bbox, rpn_cls_loss, rpn_reg_loss]
rkeys = ['loss', 'loss_cls', 'loss_bbox', \
'loss_rpn_cls', 'loss_rpn_bbox',]
if cfg.MASK_ON:
loss_mask = self.mask_rcnn_loss()
losses = losses + [loss_mask]
rkeys = rkeys + ["loss_mask"]
loss = fluid.layers.sum(losses)
rloss = [loss] + losses
return rloss, rkeys
def eval_mask_out(self):
return self.mask_fcn_logits
def eval_bbox_out(self):
return self.pred_result
def build_input(self, image_shape):
if self.use_pyreader:
in_shapes = [[-1] + image_shape, [-1, 4], [-1, 1], [-1, 1],
[-1, 3], [-1, 1]]
lod_levels = [0, 1, 1, 1, 0, 0]
dtypes = [
'float32', 'float32', 'int32', 'int32', 'float32', 'int64'
]
if cfg.MASK_ON:
in_shapes.append([-1, 2])
lod_levels.append(3)
dtypes.append('float32')
self.py_reader = fluid.layers.py_reader(
capacity=64,
shapes=in_shapes,
lod_levels=lod_levels,
dtypes=dtypes,
use_double_buffer=True)
ins = fluid.layers.read_file(self.py_reader)
self.image = ins[0]
self.gt_box = ins[1]
self.gt_label = ins[2]
self.is_crowd = ins[3]
self.im_info = ins[4]
self.im_id = ins[5]
if cfg.MASK_ON:
self.gt_masks = ins[6]
else:
self.image = fluid.layers.data(
name='image', shape=image_shape, dtype='float32')
self.gt_box = fluid.layers.data(
name='gt_box', shape=[4], dtype='float32', lod_level=1)
self.gt_label = fluid.layers.data(
name='gt_label', shape=[1], dtype='int32', lod_level=1)
self.is_crowd = fluid.layers.data(
name='is_crowd', shape=[1], dtype='int32', lod_level=1)
self.im_info = fluid.layers.data(
name='im_info', shape=[3], dtype='float32')
self.im_id = fluid.layers.data(
name='im_id', shape=[1], dtype='int64')
if cfg.MASK_ON:
self.gt_masks = fluid.layers.data(
name='gt_masks', shape=[2], dtype='float32', lod_level=3)
def feeds(self):
if self.mode == 'infer':
return [self.image, self.im_info]
if self.mode == 'val':
return [self.image, self.im_info, self.im_id]
if not cfg.MASK_ON:
return [
self.image, self.gt_box, self.gt_label, self.is_crowd,
self.im_info, self.im_id
]
return [
self.image, self.gt_box, self.gt_label, self.is_crowd, self.im_info,
self.im_id, self.gt_masks
]
def eval_bbox(self):
self.im_scale = fluid.layers.slice(
self.im_info, [1], starts=[2], ends=[3])
im_scale_lod = fluid.layers.sequence_expand(self.im_scale,
self.rpn_rois)
boxes = self.rpn_rois / im_scale_lod
cls_prob = fluid.layers.softmax(self.cls_score, use_cudnn=False)
bbox_pred_reshape = fluid.layers.reshape(self.bbox_pred,
(-1, cfg.class_num, 4))
decoded_box = fluid.layers.box_coder(
prior_box=boxes,
prior_box_var=cfg.bbox_reg_weights,
target_box=bbox_pred_reshape,
code_type='decode_center_size',
box_normalized=False,
axis=1)
cliped_box = fluid.layers.box_clip(
input=decoded_box, im_info=self.im_info)
self.pred_result = fluid.layers.multiclass_nms(
bboxes=cliped_box,
scores=cls_prob,
score_threshold=cfg.TEST.score_thresh,
nms_top_k=-1,
nms_threshold=cfg.TEST.nms_thresh,
keep_top_k=cfg.TEST.detections_per_im,
normalized=False)
def rpn_heads(self, rpn_input):
# RPN hidden representation
dim_out = rpn_input.shape[1]
rpn_conv = fluid.layers.conv2d(
input=rpn_input,
num_filters=dim_out,
filter_size=3,
stride=1,
padding=1,
act='relu',
name='conv_rpn',
param_attr=ParamAttr(
name="conv_rpn_w", initializer=Normal(
loc=0., scale=0.01)),
bias_attr=ParamAttr(
name="conv_rpn_b", learning_rate=2., regularizer=L2Decay(0.)))
self.checkpoints.append(rpn_conv)
self.anchor, self.var = fluid.layers.anchor_generator(
input=rpn_conv,
anchor_sizes=cfg.anchor_sizes,
aspect_ratios=cfg.aspect_ratio,
variance=cfg.variances,
stride=cfg.rpn_stride)
num_anchor = self.anchor.shape[2]
# Proposal classification scores
self.rpn_cls_score = fluid.layers.conv2d(
rpn_conv,
num_filters=num_anchor,
filter_size=1,
stride=1,
padding=0,
act=None,
name='rpn_cls_score',
param_attr=ParamAttr(
name="rpn_cls_logits_w", initializer=Normal(
loc=0., scale=0.01)),
bias_attr=ParamAttr(
name="rpn_cls_logits_b",
learning_rate=2.,
regularizer=L2Decay(0.)))
self.checkpoints.append(self.rpn_cls_score)
# Proposal bbox regression deltas
self.rpn_bbox_pred = fluid.layers.conv2d(
rpn_conv,
num_filters=4 * num_anchor,
filter_size=1,
stride=1,
padding=0,
act=None,
name='rpn_bbox_pred',
param_attr=ParamAttr(
name="rpn_bbox_pred_w", initializer=Normal(
loc=0., scale=0.01)),
bias_attr=ParamAttr(
name="rpn_bbox_pred_b",
learning_rate=2.,
regularizer=L2Decay(0.)))
self.checkpoints.append(self.rpn_bbox_pred)
rpn_cls_score_prob = fluid.layers.sigmoid(
self.rpn_cls_score, name='rpn_cls_score_prob')
self.checkpoints.append(rpn_cls_score_prob)
param_obj = cfg.TRAIN if self.mode == 'train' else cfg.TEST
pre_nms_top_n = param_obj.rpn_pre_nms_top_n
post_nms_top_n = param_obj.rpn_post_nms_top_n
nms_thresh = param_obj.rpn_nms_thresh
min_size = param_obj.rpn_min_size
eta = param_obj.rpn_eta
self.rpn_rois, self.rpn_roi_probs = fluid.layers.generate_proposals(
scores=rpn_cls_score_prob,
bbox_deltas=self.rpn_bbox_pred,
im_info=self.im_info,
anchors=self.anchor,
variances=self.var,
pre_nms_top_n=pre_nms_top_n,
post_nms_top_n=post_nms_top_n,
nms_thresh=nms_thresh,
min_size=min_size,
eta=eta)
if self.mode == 'train':
outs = fluid.layers.generate_proposal_labels(
rpn_rois=self.rpn_rois,
gt_classes=self.gt_label,
is_crowd=self.is_crowd,
gt_boxes=self.gt_box,
im_info=self.im_info,
batch_size_per_im=cfg.TRAIN.batch_size_per_im,
fg_fraction=cfg.TRAIN.fg_fractrion,
fg_thresh=cfg.TRAIN.fg_thresh,
bg_thresh_hi=cfg.TRAIN.bg_thresh_hi,
bg_thresh_lo=cfg.TRAIN.bg_thresh_lo,
bbox_reg_weights=cfg.bbox_reg_weights,
class_nums=cfg.class_num,
use_random=self.use_random)
self.rois = outs[0]
self.labels_int32 = outs[1]
self.bbox_targets = outs[2]
self.bbox_inside_weights = outs[3]
self.bbox_outside_weights = outs[4]
if cfg.MASK_ON:
mask_out = fluid.layers.generate_mask_labels(
im_info=self.im_info,
gt_classes=self.gt_label,
is_crowd=self.is_crowd,
gt_segms=self.gt_masks,
rois=self.rois,
labels_int32=self.labels_int32,
num_classes=cfg.class_num,
resolution=cfg.resolution)
self.mask_rois = mask_out[0]
self.roi_has_mask_int32 = mask_out[1]
self.mask_int32 = mask_out[2]
def fast_rcnn_heads(self, roi_input):
if self.mode == 'train':
pool_rois = self.rois
else:
pool_rois = self.rpn_rois
self.res5_2_sum = self.add_roi_box_head_func(roi_input, pool_rois)
rcnn_out = fluid.layers.pool2d(
self.res5_2_sum, pool_type='avg', pool_size=7, name='res5_pool')
self.cls_score = fluid.layers.fc(input=rcnn_out,
size=cfg.class_num,
act=None,
name='cls_score',
param_attr=ParamAttr(
name='cls_score_w',
initializer=Normal(
loc=0.0, scale=0.001)),
bias_attr=ParamAttr(
name='cls_score_b',
learning_rate=2.,
regularizer=L2Decay(0.)))
self.bbox_pred = fluid.layers.fc(input=rcnn_out,
size=4 * cfg.class_num,
act=None,
name='bbox_pred',
param_attr=ParamAttr(
name='bbox_pred_w',
initializer=Normal(
loc=0.0, scale=0.01)),
bias_attr=ParamAttr(
name='bbox_pred_b',
learning_rate=2.,
regularizer=L2Decay(0.)))
def SuffixNet(self, conv5):
mask_out = fluid.layers.conv2d_transpose(
input=conv5,
num_filters=cfg.dim_reduced,
filter_size=2,
stride=2,
act='relu',
param_attr=ParamAttr(
name='conv5_mask_w', initializer=MSRA(uniform=False)),
bias_attr=ParamAttr(
name='conv5_mask_b', learning_rate=2., regularizer=L2Decay(0.)))
act_func = None
if self.mode != 'train':
act_func = 'sigmoid'
mask_fcn_logits = fluid.layers.conv2d(
input=mask_out,
num_filters=cfg.class_num,
filter_size=1,
act=act_func,
param_attr=ParamAttr(
name='mask_fcn_logits_w', initializer=MSRA(uniform=False)),
bias_attr=ParamAttr(
name="mask_fcn_logits_b",
learning_rate=2.,
regularizer=L2Decay(0.)))
if self.mode != 'train':
mask_fcn_logits = fluid.layers.lod_reset(mask_fcn_logits,
self.pred_result)
return mask_fcn_logits
def mask_rcnn_heads(self, mask_input):
pass
if self.mode == 'train':
conv5 = fluid.layers.gather(self.res5_2_sum,
self.roi_has_mask_int32)
self.mask_fcn_logits = self.SuffixNet(conv5)
else:
pred_res_shape = fluid.layers.shape(self.pred_result)
shape = fluid.layers.reduce_prod(pred_res_shape)
shape = fluid.layers.reshape(shape, [1, 1])
ones = fluid.layers.fill_constant([1, 1], value=1, dtype='int32')
cond = fluid.layers.equal(x=shape, y=ones)
print("IfElse!!!")
ie = fluid.layers.IfElse(cond)
with ie.true_block():
pred_res_null = ie.input(self.pred_result)
ie.output(pred_res_null)
with ie.false_block():
pred_res = ie.input(self.pred_result)
pred_boxes = fluid.layers.slice(
pred_res, [1], starts=[2], ends=[6])
im_scale_lod = fluid.layers.sequence_expand(self.im_scale,
pred_boxes)
mask_rois = pred_boxes * im_scale_lod
conv5 = self.add_roi_box_head_func(mask_input, mask_rois)
mask_fcn = self.SuffixNet(conv5)
ie.output(mask_fcn)
self.mask_fcn_logits = ie()[0]
def mask_rcnn_loss(self):
mask_label = fluid.layers.cast(x=self.mask_int32, dtype='float32')
reshape_dim = cfg.class_num * cfg.resolution * cfg.resolution
mask_fcn_logits_reshape = fluid.layers.reshape(self.mask_fcn_logits,
(-1, reshape_dim))
loss_mask = fluid.layers.sigmoid_cross_entropy_with_logits(
x=mask_fcn_logits_reshape,
label=mask_label,
ignore_index=-1,
normalize=True)
loss_mask = fluid.layers.reduce_sum(loss_mask, name='loss_mask')
return loss_mask
def fast_rcnn_loss(self):
labels_int64 = fluid.layers.cast(x=self.labels_int32, dtype='int64')
labels_int64.stop_gradient = True
loss_cls = fluid.layers.softmax_with_cross_entropy(
logits=self.cls_score,
label=labels_int64,
numeric_stable_mode=True, )
loss_cls = fluid.layers.reduce_mean(loss_cls)
loss_bbox = fluid.layers.smooth_l1(
x=self.bbox_pred,
y=self.bbox_targets,
inside_weight=self.bbox_inside_weights,
outside_weight=self.bbox_outside_weights,
sigma=1.0)
loss_bbox = fluid.layers.reduce_mean(loss_bbox)
return loss_cls, loss_bbox
def rpn_loss(self):
rpn_cls_score_reshape = fluid.layers.transpose(
self.rpn_cls_score, perm=[0, 2, 3, 1])
rpn_bbox_pred_reshape = fluid.layers.transpose(
self.rpn_bbox_pred, perm=[0, 2, 3, 1])
anchor_reshape = fluid.layers.reshape(self.anchor, shape=(-1, 4))
var_reshape = fluid.layers.reshape(self.var, shape=(-1, 4))
rpn_cls_score_reshape = fluid.layers.reshape(
x=rpn_cls_score_reshape, shape=(0, -1, 1))
rpn_bbox_pred_reshape = fluid.layers.reshape(
x=rpn_bbox_pred_reshape, shape=(0, -1, 4))
score_pred, loc_pred, score_tgt, loc_tgt, bbox_weight = \
fluid.layers.rpn_target_assign(
bbox_pred=rpn_bbox_pred_reshape,
cls_logits=rpn_cls_score_reshape,
anchor_box=anchor_reshape,
anchor_var=var_reshape,
gt_boxes=self.gt_box,
is_crowd=self.is_crowd,
im_info=self.im_info,
rpn_batch_size_per_im=cfg.TRAIN.rpn_batch_size_per_im,
rpn_straddle_thresh=cfg.TRAIN.rpn_straddle_thresh,
rpn_fg_fraction=cfg.TRAIN.rpn_fg_fraction,
rpn_positive_overlap=cfg.TRAIN.rpn_positive_overlap,
rpn_negative_overlap=cfg.TRAIN.rpn_negative_overlap,
use_random=self.use_random)
score_tgt = fluid.layers.cast(x=score_tgt, dtype='float32')
rpn_cls_loss = fluid.layers.sigmoid_cross_entropy_with_logits(
x=score_pred, label=score_tgt)
rpn_cls_loss = fluid.layers.reduce_mean(
rpn_cls_loss, name='loss_rpn_cls')
rpn_reg_loss = fluid.layers.smooth_l1(
x=loc_pred,
y=loc_tgt,
sigma=3.0,
inside_weight=bbox_weight,
outside_weight=bbox_weight)
rpn_reg_loss = fluid.layers.reduce_sum(
rpn_reg_loss, name='loss_rpn_bbox')
score_shape = fluid.layers.shape(score_tgt)
score_shape = fluid.layers.cast(x=score_shape, dtype='float32')
norm = fluid.layers.reduce_prod(score_shape)
norm.stop_gradient = True
rpn_reg_loss = rpn_reg_loss / norm
return rpn_cls_loss, rpn_reg_loss
| # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.initializer import Constant
from paddle.fluid.initializer import Normal
from paddle.fluid.initializer import MSRA
from paddle.fluid.regularizer import L2Decay
from config import cfg
class RCNN(object):
def __init__(self,
add_conv_body_func=None,
add_roi_box_head_func=None,
mode='train',
use_pyreader=True,
use_random=True):
self.add_conv_body_func = add_conv_body_func
self.add_roi_box_head_func = add_roi_box_head_func
self.mode = mode
self.use_pyreader = use_pyreader
self.use_random = use_random
self.checkpoints = []
def build_model(self, image_shape):
self.build_input(image_shape)
body_conv = self.add_conv_body_func(self.image)
# RPN
self.rpn_heads(body_conv)
# Fast RCNN
self.fast_rcnn_heads(body_conv)
if self.mode != 'train':
self.eval_bbox()
# Mask RCNN
if cfg.MASK_ON:
self.mask_rcnn_heads(body_conv)
def loss(self):
losses = []
# Fast RCNN loss
loss_cls, loss_bbox = self.fast_rcnn_loss()
# RPN loss
rpn_cls_loss, rpn_reg_loss = self.rpn_loss()
losses = [loss_cls, loss_bbox, rpn_cls_loss, rpn_reg_loss]
rkeys = ['loss', 'loss_cls', 'loss_bbox', \
'loss_rpn_cls', 'loss_rpn_bbox',]
if cfg.MASK_ON:
loss_mask = self.mask_rcnn_loss()
losses = losses + [loss_mask]
rkeys = rkeys + ["loss_mask"]
loss = fluid.layers.sum(losses)
rloss = [loss] + losses
return rloss, rkeys
def eval_mask_out(self):
return self.mask_fcn_logits
def eval_bbox_out(self):
return self.pred_result
def build_input(self, image_shape):
if self.use_pyreader:
in_shapes = [[-1] + image_shape, [-1, 4], [-1, 1], [-1, 1],
[-1, 3], [-1, 1]]
lod_levels = [0, 1, 1, 1, 0, 0]
dtypes = [
'float32', 'float32', 'int32', 'int32', 'float32', 'int64'
]
if cfg.MASK_ON:
in_shapes.append([-1, 2])
lod_levels.append(3)
dtypes.append('float32')
self.py_reader = fluid.layers.py_reader(
capacity=64,
shapes=in_shapes,
lod_levels=lod_levels,
dtypes=dtypes,
use_double_buffer=True)
ins = fluid.layers.read_file(self.py_reader)
self.image = ins[0]
self.gt_box = ins[1]
self.gt_label = ins[2]
self.is_crowd = ins[3]
self.im_info = ins[4]
self.im_id = ins[5]
if cfg.MASK_ON:
self.gt_masks = ins[6]
else:
self.image = fluid.layers.data(
name='image', shape=image_shape, dtype='float32')
self.gt_box = fluid.layers.data(
name='gt_box', shape=[4], dtype='float32', lod_level=1)
self.gt_label = fluid.layers.data(
name='gt_label', shape=[1], dtype='int32', lod_level=1)
self.is_crowd = fluid.layers.data(
name='is_crowd', shape=[1], dtype='int32', lod_level=1)
self.im_info = fluid.layers.data(
name='im_info', shape=[3], dtype='float32')
self.im_id = fluid.layers.data(
name='im_id', shape=[1], dtype='int64')
if cfg.MASK_ON:
self.gt_masks = fluid.layers.data(
name='gt_masks', shape=[2], dtype='float32', lod_level=3)
def feeds(self):
if self.mode == 'infer':
return [self.image, self.im_info]
if self.mode == 'val':
return [self.image, self.im_info, self.im_id]
if not cfg.MASK_ON:
return [
self.image, self.gt_box, self.gt_label, self.is_crowd,
self.im_info, self.im_id
]
return [
self.image, self.gt_box, self.gt_label, self.is_crowd, self.im_info,
self.im_id, self.gt_masks
]
def eval_bbox(self):
self.im_scale = fluid.layers.slice(
self.im_info, [1], starts=[2], ends=[3])
im_scale_lod = fluid.layers.sequence_expand(self.im_scale,
self.rpn_rois)
boxes = self.rpn_rois / im_scale_lod
cls_prob = fluid.layers.softmax(self.cls_score, use_cudnn=False)
bbox_pred_reshape = fluid.layers.reshape(self.bbox_pred,
(-1, cfg.class_num, 4))
decoded_box = fluid.layers.box_coder(
prior_box=boxes,
prior_box_var=cfg.bbox_reg_weights,
target_box=bbox_pred_reshape,
code_type='decode_center_size',
box_normalized=False,
axis=1)
cliped_box = fluid.layers.box_clip(
input=decoded_box, im_info=self.im_info)
self.pred_result = fluid.layers.multiclass_nms(
bboxes=cliped_box,
scores=cls_prob,
score_threshold=cfg.TEST.score_thresh,
nms_top_k=-1,
nms_threshold=cfg.TEST.nms_thresh,
keep_top_k=cfg.TEST.detections_per_im,
normalized=False)
def rpn_heads(self, rpn_input):
# RPN hidden representation
dim_out = rpn_input.shape[1]
rpn_conv = fluid.layers.conv2d(
input=rpn_input,
num_filters=dim_out,
filter_size=3,
stride=1,
padding=1,
act='relu',
name='conv_rpn',
param_attr=ParamAttr(
name="conv_rpn_w", initializer=Normal(
loc=0., scale=0.01)),
bias_attr=ParamAttr(
name="conv_rpn_b", learning_rate=2., regularizer=L2Decay(0.)))
self.checkpoints.append(rpn_conv)
self.anchor, self.var = fluid.layers.anchor_generator(
input=rpn_conv,
anchor_sizes=cfg.anchor_sizes,
aspect_ratios=cfg.aspect_ratio,
variance=cfg.variances,
stride=cfg.rpn_stride)
num_anchor = self.anchor.shape[2]
# Proposal classification scores
self.rpn_cls_score = fluid.layers.conv2d(
rpn_conv,
num_filters=num_anchor,
filter_size=1,
stride=1,
padding=0,
act=None,
name='rpn_cls_score',
param_attr=ParamAttr(
name="rpn_cls_logits_w", initializer=Normal(
loc=0., scale=0.01)),
bias_attr=ParamAttr(
name="rpn_cls_logits_b",
learning_rate=2.,
regularizer=L2Decay(0.)))
self.checkpoints.append(self.rpn_cls_score)
# Proposal bbox regression deltas
self.rpn_bbox_pred = fluid.layers.conv2d(
rpn_conv,
num_filters=4 * num_anchor,
filter_size=1,
stride=1,
padding=0,
act=None,
name='rpn_bbox_pred',
param_attr=ParamAttr(
name="rpn_bbox_pred_w", initializer=Normal(
loc=0., scale=0.01)),
bias_attr=ParamAttr(
name="rpn_bbox_pred_b",
learning_rate=2.,
regularizer=L2Decay(0.)))
self.checkpoints.append(self.rpn_bbox_pred)
rpn_cls_score_prob = fluid.layers.sigmoid(
self.rpn_cls_score, name='rpn_cls_score_prob')
self.checkpoints.append(rpn_cls_score_prob)
param_obj = cfg.TRAIN if self.mode == 'train' else cfg.TEST
pre_nms_top_n = param_obj.rpn_pre_nms_top_n
post_nms_top_n = param_obj.rpn_post_nms_top_n
nms_thresh = param_obj.rpn_nms_thresh
min_size = param_obj.rpn_min_size
eta = param_obj.rpn_eta
self.rpn_rois, self.rpn_roi_probs = fluid.layers.generate_proposals(
scores=rpn_cls_score_prob,
bbox_deltas=self.rpn_bbox_pred,
im_info=self.im_info,
anchors=self.anchor,
variances=self.var,
pre_nms_top_n=pre_nms_top_n,
post_nms_top_n=post_nms_top_n,
nms_thresh=nms_thresh,
min_size=min_size,
eta=eta)
if self.mode == 'train':
outs = fluid.layers.generate_proposal_labels(
rpn_rois=self.rpn_rois,
gt_classes=self.gt_label,
is_crowd=self.is_crowd,
gt_boxes=self.gt_box,
im_info=self.im_info,
batch_size_per_im=cfg.TRAIN.batch_size_per_im,
fg_fraction=cfg.TRAIN.fg_fractrion,
fg_thresh=cfg.TRAIN.fg_thresh,
bg_thresh_hi=cfg.TRAIN.bg_thresh_hi,
bg_thresh_lo=cfg.TRAIN.bg_thresh_lo,
bbox_reg_weights=cfg.bbox_reg_weights,
class_nums=cfg.class_num,
use_random=self.use_random)
self.rois = outs[0]
self.labels_int32 = outs[1]
self.bbox_targets = outs[2]
self.bbox_inside_weights = outs[3]
self.bbox_outside_weights = outs[4]
if cfg.MASK_ON:
mask_out = fluid.layers.generate_mask_labels(
im_info=self.im_info,
gt_classes=self.gt_label,
is_crowd=self.is_crowd,
gt_segms=self.gt_masks,
rois=self.rois,
labels_int32=self.labels_int32,
num_classes=cfg.class_num,
resolution=cfg.resolution)
self.mask_rois = mask_out[0]
self.roi_has_mask_int32 = mask_out[1]
self.mask_int32 = mask_out[2]
def fast_rcnn_heads(self, roi_input):
if self.mode == 'train':
pool_rois = self.rois
else:
pool_rois = self.rpn_rois
self.res5_2_sum = self.add_roi_box_head_func(roi_input, pool_rois)
rcnn_out = fluid.layers.pool2d(
self.res5_2_sum, pool_type='avg', pool_size=7, name='res5_pool')
self.cls_score = fluid.layers.fc(input=rcnn_out,
size=cfg.class_num,
act=None,
name='cls_score',
param_attr=ParamAttr(
name='cls_score_w',
initializer=Normal(
loc=0.0, scale=0.001)),
bias_attr=ParamAttr(
name='cls_score_b',
learning_rate=2.,
regularizer=L2Decay(0.)))
self.bbox_pred = fluid.layers.fc(input=rcnn_out,
size=4 * cfg.class_num,
act=None,
name='bbox_pred',
param_attr=ParamAttr(
name='bbox_pred_w',
initializer=Normal(
loc=0.0, scale=0.01)),
bias_attr=ParamAttr(
name='bbox_pred_b',
learning_rate=2.,
regularizer=L2Decay(0.)))
def SuffixNet(self, conv5):
mask_out = fluid.layers.conv2d_transpose(
input=conv5,
num_filters=cfg.dim_reduced,
filter_size=2,
stride=2,
act='relu',
param_attr=ParamAttr(
name='conv5_mask_w', initializer=MSRA(uniform=False)),
bias_attr=ParamAttr(
name='conv5_mask_b', learning_rate=2., regularizer=L2Decay(0.)))
act_func = None
if self.mode != 'train':
act_func = 'sigmoid'
mask_fcn_logits = fluid.layers.conv2d(
input=mask_out,
num_filters=cfg.class_num,
filter_size=1,
act=act_func,
param_attr=ParamAttr(
name='mask_fcn_logits_w', initializer=MSRA(uniform=False)),
bias_attr=ParamAttr(
name="mask_fcn_logits_b",
learning_rate=2.,
regularizer=L2Decay(0.)))
if self.mode != 'train':
mask_fcn_logits = fluid.layers.lod_reset(mask_fcn_logits,
self.pred_result)
return mask_fcn_logits
def mask_rcnn_heads(self, mask_input):
pass
if self.mode == 'train':
conv5 = fluid.layers.gather(self.res5_2_sum,
self.roi_has_mask_int32)
self.mask_fcn_logits = self.SuffixNet(conv5)
else:
pred_res_shape = fluid.layers.shape(self.pred_result)
shape = fluid.layers.reduce_prod(pred_res_shape)
shape = fluid.layers.reshape(shape, [1, 1])
ones = fluid.layers.fill_constant([1, 1], value=1, dtype='int32')
cond = fluid.layers.equal(x=shape, y=ones)
print("IfElse!!!")
ie = fluid.layers.IfElse(cond)
with ie.true_block():
pred_res_null = ie.input(self.pred_result)
ie.output(pred_res_null)
with ie.false_block():
pred_res = ie.input(self.pred_result)
pred_boxes = fluid.layers.slice(
pred_res, [1], starts=[2], ends=[6])
im_scale_lod = fluid.layers.sequence_expand(self.im_scale,
pred_boxes)
mask_rois = pred_boxes * im_scale_lod
conv5 = self.add_roi_box_head_func(mask_input, mask_rois)
mask_fcn = self.SuffixNet(conv5)
ie.output(mask_fcn)
self.mask_fcn_logits = ie()[0]
def mask_rcnn_loss(self):
mask_label = fluid.layers.cast(x=self.mask_int32, dtype='float32')
reshape_dim = cfg.class_num * cfg.resolution * cfg.resolution
mask_fcn_logits_reshape = fluid.layers.reshape(self.mask_fcn_logits,
(-1, reshape_dim))
loss_mask = fluid.layers.sigmoid_cross_entropy_with_logits(
x=mask_fcn_logits_reshape,
label=mask_label,
ignore_index=-1,
normalize=True)
loss_mask = fluid.layers.reduce_sum(loss_mask, name='loss_mask')
return loss_mask
def fast_rcnn_loss(self):
labels_int64 = fluid.layers.cast(x=self.labels_int32, dtype='int64')
labels_int64.stop_gradient = True
loss_cls = fluid.layers.softmax_with_cross_entropy(
logits=self.cls_score,
label=labels_int64,
numeric_stable_mode=True, )
loss_cls = fluid.layers.reduce_mean(loss_cls)
loss_bbox = fluid.layers.smooth_l1(
x=self.bbox_pred,
y=self.bbox_targets,
inside_weight=self.bbox_inside_weights,
outside_weight=self.bbox_outside_weights,
sigma=1.0)
loss_bbox = fluid.layers.reduce_mean(loss_bbox)
return loss_cls, loss_bbox
def rpn_loss(self):
rpn_cls_score_reshape = fluid.layers.transpose(
self.rpn_cls_score, perm=[0, 2, 3, 1])
rpn_bbox_pred_reshape = fluid.layers.transpose(
self.rpn_bbox_pred, perm=[0, 2, 3, 1])
anchor_reshape = fluid.layers.reshape(self.anchor, shape=(-1, 4))
var_reshape = fluid.layers.reshape(self.var, shape=(-1, 4))
rpn_cls_score_reshape = fluid.layers.reshape(
x=rpn_cls_score_reshape, shape=(0, -1, 1))
rpn_bbox_pred_reshape = fluid.layers.reshape(
x=rpn_bbox_pred_reshape, shape=(0, -1, 4))
score_pred, loc_pred, score_tgt, loc_tgt, bbox_weight = \
fluid.layers.rpn_target_assign(
bbox_pred=rpn_bbox_pred_reshape,
cls_logits=rpn_cls_score_reshape,
anchor_box=anchor_reshape,
anchor_var=var_reshape,
gt_boxes=self.gt_box,
is_crowd=self.is_crowd,
im_info=self.im_info,
rpn_batch_size_per_im=cfg.TRAIN.rpn_batch_size_per_im,
rpn_straddle_thresh=cfg.TRAIN.rpn_straddle_thresh,
rpn_fg_fraction=cfg.TRAIN.rpn_fg_fraction,
rpn_positive_overlap=cfg.TRAIN.rpn_positive_overlap,
rpn_negative_overlap=cfg.TRAIN.rpn_negative_overlap,
use_random=self.use_random)
score_tgt = fluid.layers.cast(x=score_tgt, dtype='float32')
rpn_cls_loss = fluid.layers.sigmoid_cross_entropy_with_logits(
x=score_pred, label=score_tgt)
rpn_cls_loss = fluid.layers.reduce_mean(
rpn_cls_loss, name='loss_rpn_cls')
rpn_reg_loss = fluid.layers.smooth_l1(
x=loc_pred,
y=loc_tgt,
sigma=3.0,
inside_weight=bbox_weight,
outside_weight=bbox_weight)
rpn_reg_loss = fluid.layers.reduce_sum(
rpn_reg_loss, name='loss_rpn_bbox')
score_shape = fluid.layers.shape(score_tgt)
score_shape = fluid.layers.cast(x=score_shape, dtype='float32')
norm = fluid.layers.reduce_prod(score_shape)
norm.stop_gradient = True
rpn_reg_loss = rpn_reg_loss / norm
return rpn_cls_loss, rpn_reg_loss
| en | 0.8189 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. # RPN # Fast RCNN # Mask RCNN # Fast RCNN loss # RPN loss # RPN hidden representation # Proposal classification scores # Proposal bbox regression deltas | 1.971057 | 2 |
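A hedged sketch of how the RCNN builder above is typically driven on the training side; the backbone and RoI-head builder callables are supplied by the caller (e.g. from a companion resnet module), and the optimizer settings below are illustrative rather than the repository's defaults.

import paddle.fluid as fluid

def build_train_program(conv_body_fn, roi_head_fn, image_shape):
    # conv_body_fn / roi_head_fn: backbone and RoI box-head builders passed in by the caller.
    # image_shape: e.g. [3, 800, 1333] as (C, H, W).
    model = RCNN(add_conv_body_func=conv_body_fn,
                 add_roi_box_head_func=roi_head_fn,
                 mode='train',
                 use_pyreader=True)
    model.build_model(image_shape)
    rloss, rkeys = model.loss()  # rloss[0] is the summed total loss
    optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)  # illustrative values
    optimizer.minimize(rloss[0])
    return model, rloss, rkeys
# In real use this is executed under fluid.program_guard() on a fresh Program, and
# model.py_reader is decorated with a batch generator before the executor runs.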
scripts/util/system_util.py | tksharpless/facebook360_dep | 6 | 6631089 | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""General systems utility used across all scripts.
Defines functions for abstracting away command line interfaces and for defining a globally
accessible map of image paths. Any script that wishes to reference the path of an image
type (e.g. color or disparity) should do so through this utility rather than hard-coding it.
Example:
To reference the image type path (as do many of the render scripts):
>>> from system_util import image_type_paths
>>> input_root = "/path/to/data"
>>> color_path = os.path.join(input_root, image_type_paths["color"])
Attributes:
image_type_paths (map[str, str]): Map from image type to its location in the standard
structure. The paths are relative to the respective root an image type is defined
for (e.g. "color" with respect to the input root and "disparity" to the output). For
a full list of the input types, see: source/util/ImageTypes.h.
"""
import os
import re
import signal
import subprocess
import sys
import tarfile
from enum import Enum
from itertools import chain
from functools import reduce
from pathlib import Path
facebook360_dep_root = str(Path(os.path.abspath(__file__)).parents[2])
class OSType(Enum):
"""Enum for referencing operating systems.
Attributes:
LINUX (int): Any Linux distro.
MAC (int): Any version of Mac OS X.
WINDOWS (int): Any version of Windows.
"""
WINDOWS = 1
MAC = 2
LINUX = 3
def get_os_type_local(platform=None):
if not platform:
platform = sys.platform
if platform.startswith("win"):
platform = "windows"
if "darwin" in platform:
os_type = OSType.MAC
elif "windows" in platform:
os_type = OSType.WINDOWS
elif "linux" in platform:
os_type = OSType.LINUX
else:
raise Exception("Unsupported OS!")
return os_type
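# For illustration (based on the branches above): get_os_type_local("darwin") -> OSType.MAC,
# get_os_type_local("linux") -> OSType.LINUX, and any platform string starting with "win"
# (e.g. "win32") -> OSType.WINDOWS.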
def _set_image_type_paths():
"""Creates up the image type paths map.
Returns:
(map[str, str]): Map from image type to its location in the standard structure.
The paths are relative to the respective root an image type is defined
for (e.g. "color" with respect to the input root and "disparity" to the output). For
a full list of the input types, see: source/util/ImageTypes.h.
"""
derp_util_h = os.path.join(facebook360_dep_root, "source", "util", "ImageTypes.h")
with open(derp_util_h) as f:
content = f.readlines()
content = [x.strip() for x in content if x.strip().startswith("X(")]
image_type_paths = {}
for line in content:
# ImageType definitions have format: X(type, value)
m = re.search(r"X\((.*), \"(.*)\"\)", line)
image_type_paths[m.group(1)] = m.group(2)
return image_type_paths
image_type_paths = _set_image_type_paths()
def get_flags(source):
"""Gets flags from a source file.
Args:
source (str): Path to the source file (could be any extension).
Returns:
list[dict[str, _]]: List of maps with keys "type", "name", "default", and "descr" for the
respective fields corresponding to the flag.
"""
flags = []
_, ext = os.path.splitext(source)
if ext == ".py":
delim = "flags.DEFINE_"
else:
delim = "DEFINE_"
split_comma_regex = r",(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
cppflag_to_pygflag = {"int32": "integer", "double": "float", "bool": "boolean"}
with open(source) as f:
lines = f.readlines()
flag_lines = "".join(lines).replace("\n", "").split(delim)[1:]
for flag_line in flag_lines:
flag_type, flag_def = flag_line.split("(", 1)
flag_contents = re.compile(split_comma_regex).split(flag_def)
NUM_FLAG_FIELDS = 3
if len(flag_contents) < NUM_FLAG_FIELDS:
continue
flag = {}
flag["type"] = flag_type
flag["name"] = flag_contents[0].strip().replace('"', "")
flag["default"] = flag_contents[1].strip()
flag["descr"] = flag_contents[2].rsplit(")", 1)[0].strip()
if flag["type"] in cppflag_to_pygflag:
flag["type"] = cppflag_to_pygflag[flag["type"]]
if flag["type"] == "boolean":
flag["default"] = False if (flag["default"] == "false") else True
elif flag["type"] == "integer":
try:
flag["default"] = int(flag["default"])
except Exception:
pass
elif flag["type"] == "float":
try:
flag["default"] = float(flag["default"])
except Exception:
pass
flags.append(flag)
return flags
def get_flags_from_flagfile(flagfile_fn):
flags = {}
for line in open(flagfile_fn, "r"):
m = re.findall("--(.*)=(.*)", line)
if len(m) == 1 and len(m[0]) == 2:
flags[m[0][0]] = m[0][1]
return flags
def gen_args_from_flags(flags):
"""Constructs CLI arguments from flags, assuming the format in res/test/.
Returns:
str: Space-separated string of CLI arguments (e.g.
"--example1 <X> --example2 <Y>")
"""
flags_string = []
for flag_name, flag_value in flags.items():
flags_string.append(f"--{flag_name}={flag_value}")
args_string = " ".join(flags_string)
return args_string
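# Illustrative round trip for the two helpers above (editor's sketch; the flagfile
# path and flag names are hypothetical, not taken from the repository):
#
# Given a flagfile containing lines such as
#   --first=20
#   --second=example
# get_flags_from_flagfile returns string-valued entries, and gen_args_from_flags
# turns such a dict back into a single CLI argument string.
#
# >>> flags = get_flags_from_flagfile("res/test/example.flags")
# >>> flags
# {'first': '20', 'second': 'example'}
# >>> gen_args_from_flags(flags)
# '--first=20 --second=example'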
def list_only_visible_files(src_dir):
"""Gets the visible files in a directory.
Args:
src_dir (str): Path to the directory.
Returns:
list[str]: Names of visible files (not full paths).
"""
return [
f
for f in os.listdir(src_dir)
if not f.startswith(".") and os.path.isfile(src_dir + "/" + f)
]
def list_only_visible_dirs(src_dir):
"""Gets the visible directories in a directory.
Args:
src_dir (str): Path to the directory.
Returns:
list[str]: Names of visible directories (not full paths).
"""
return [
f
for f in os.listdir(src_dir)
if not f.startswith(".") and os.path.isdir(src_dir + "/" + f)
]
def start_subprocess(name, cmd):
"""Synchronously runs a named command.
Args:
name (str): Process name.
cmd (str): Command to execute.
Returns:
int: Return code of execution.
"""
global current_process
current_process = subprocess.Popen(cmd, shell=True)
current_process.name = name
current_process.communicate()
return current_process.returncode
def list_only_visible_files_recursive(src_dir):
"""Recursively gets the visible files in a directory.
Args:
src_dir (str): Path to the directory.
Returns:
list[str]: Names of visible files (not full paths).
"""
return reduce(
lambda x, y: x + y,
[
list(
map(
lambda x: root + "/" + x,
(f for f in files if not f.startswith(".")),
)
)
for root, dirs, files in os.walk(src_dir)
],
)
def intersect_lists(*args):
return set(args[0]).intersection(*args)
def merge_lists(*args):
return list(set(chain(*args)))
def extract_tar(filename, dst=None):
"""Extracts a tar file.
Args:
filename (str): Path to the tar file.
dst (str, optional): Directory where extraction should produce results.
Returns:
str: Path to the extracted directory.
"""
if dst is None:
dst = os.getcwd()
dir_name = os.path.splitext(filename)[0]
extract_directory = os.path.join(dst, dir_name)
tar_filename_path = os.path.join(os.getcwd(), filename)
t = tarfile.open(tar_filename_path)
t.extractall(dst)
return extract_directory
def run_command(
shell_string, run_silently=False, stream=True, run_async=False, file_fn=None
):
"""Run a shell command.
Args:
shell_string (str): Command to be run.
run_silently (bool, optional): Whether or not to show stdout.
stream (bool, optional): Whether or not to stream stdout. If run_silently is
set to True, if the file_fn is not empty, or if running on a Windows host,
            this argument cannot be overridden (will be False).
run_async (bool, optional): Whether or not to run command asynchronously. No output
is returned if running async.
file_fn (str, optional): Filename of where output should be saved.
Returns:
str: stdout from executing command.
"""
if run_async:
with open(os.devnull, "w") as FNULL:
subprocess.Popen(
shell_string, shell=True, stdout=FNULL, stderr=subprocess.STDOUT
)
return ""
if run_silently or file_fn is not None or os.name == "nt":
stream = False
try:
sh_fn = sh_stream if stream else sh_buffered
if run_silently:
with open(os.devnull, "w") as f:
output = sh_fn(shell_string, file=f)
else:
if file_fn is not None:
with open(file_fn, "w") as f:
output = sh_fn(shell_string, file=f)
else:
output = sh_fn(shell_string)
return output
except Exception:
if not run_silently:
print(f"Failed to run program: {shell_string}")
raise
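# Illustrative calls to run_command (editor's sketch; the commands shown are
# arbitrary examples, not taken from the repository):
#
# >>> out = run_command("echo hello", run_silently=True)      # buffered, console kept quiet
# >>> out = run_command("echo hello", file_fn="/tmp/run.log")  # stdout also written to the file
# >>> out = run_command("sleep 10", run_async=True)            # fire and forget; returns "" immediately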
def sh_buffered(arg, file=sys.stdout, use_shell=True):
"""Run a shell command with buffered output.
Args:
arg (str): Command to run.
file (file, optional): File handler to write stdout to.
use_shell (bool, optional): Whether or not to execute in a shell.
Returns:
str: stdout from executing command.
"""
if file:
print(f"$ {arg}", file=file)
try:
output = subprocess.check_output(arg, shell=use_shell, stderr=file)
except subprocess.CalledProcessError as e:
error = e.output.decode("utf-8")
if error:
print(error, file=sys.stderr)
raise
output = output.decode("utf-8")
if file:
print(output, file=file)
return output.rstrip()
def sh_stream(arg, file=sys.stdout, use_shell=None):
"""Run a shell command with streaming output.
Args:
arg (str): Command to run.
file (file, optional): File handler to write stdout to.
use_shell (bool, optional): Whether or not to execute in a shell.
Returns:
str: stdout from executing command.
"""
if file:
print(f"$ {arg}", file=file)
process = subprocess.Popen(
arg, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
outputs = []
for c in iter(lambda: process.stdout.read(1), b""):
decoded_c = c.decode("utf-8")
sys.stdout.write(decoded_c)
outputs.append(decoded_c)
exit_code = process.poll()
if exit_code != 0:
raise subprocess.CalledProcessError(exit_code, arg)
return "".join(outputs)
def get_catchable_signals():
"""Defines list of signals that can be caught (OS dependent).
Returns:
list[signal.signal]: Signals that can be caught.
"""
catchable_sigs = [
signal.SIGINT,
signal.SIGILL,
signal.SIGFPE,
signal.SIGSEGV,
signal.SIGTERM,
]
    # these additional POSIX-only signals are not available on Windows
    if get_os_type_local() != OSType.WINDOWS:
catchable_sigs.extend(
[
signal.SIGHUP,
signal.SIGQUIT,
signal.SIGTRAP,
signal.SIGKILL,
signal.SIGBUS,
signal.SIGSYS,
signal.SIGPIPE,
]
)
return catchable_sigs
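# Illustrative use of get_catchable_signals (editor's sketch, not part of the
# original module): install one handler for the returned signals. Some entries
# (e.g. SIGKILL on POSIX) cannot actually be re-bound, hence the defensive
# try/except.
#
# >>> def _log_signal(signum, frame):
# ...     logger.warning("caught signal %d", signum)
# >>> for sig in get_catchable_signals():
# ...     try:
# ...         signal.signal(sig, _log_signal)
# ...     except (OSError, ValueError):
# ...         pass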
| #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""General systems utility used across all scripts.
Defines functions for abstracting away command line interfaces and for defining a globally
accessible map of image paths. Any script that wishes to reference the path of an image
type (e.g. color or disparity) should do so through this utility rather than hard-coding it.
Example:
To reference the image type path (as do many of the render scripts):
>>> from system_util import image_type_paths
>>> input_root = "/path/to/data"
>>> color_path = os.path.join(input_root, image_type_paths["color"])
Attributes:
image_type_paths (map[str, str]): Map from image type to its location in the standard
structure. The paths are relative to the respective root an image type is defined
for (e.g. "color" with respect to the input root and "disparity" to the output). For
a full list of the input types, see: source/util/ImageTypes.h.
"""
import os
import re
import signal
import subprocess
import sys
import tarfile
from enum import Enum
from itertools import chain
from functools import reduce
from pathlib import Path
facebook360_dep_root = str(Path(os.path.abspath(__file__)).parents[2])
class OSType(Enum):
"""Enum for referencing operating systems.
Attributes:
LINUX (int): Any Linux distro.
MAC (int): Any version of Mac OS X.
WINDOWS (int): Any version of Windows.
"""
WINDOWS = 1
MAC = 2
LINUX = 3
def get_os_type_local(platform=None):
if not platform:
platform = sys.platform
if platform.startswith("win"):
platform = "windows"
if "darwin" in platform:
os_type = OSType.MAC
elif "windows" in platform:
os_type = OSType.WINDOWS
elif "linux" in platform:
os_type = OSType.LINUX
else:
raise Exception("Unsupported OS!")
return os_type
def _set_image_type_paths():
"""Creates up the image type paths map.
Returns:
(map[str, str]): Map from image type to its location in the standard structure.
The paths are relative to the respective root an image type is defined
for (e.g. "color" with respect to the input root and "disparity" to the output). For
a full list of the input types, see: source/util/ImageTypes.h.
"""
derp_util_h = os.path.join(facebook360_dep_root, "source", "util", "ImageTypes.h")
with open(derp_util_h) as f:
content = f.readlines()
content = [x.strip() for x in content if x.strip().startswith("X(")]
image_type_paths = {}
for line in content:
# ImageType definitions have format: X(type, value)
m = re.search(r"X\((.*), \"(.*)\"\)", line)
image_type_paths[m.group(1)] = m.group(2)
return image_type_paths
image_type_paths = _set_image_type_paths()
def get_flags(source):
"""Gets flags from a source file.
Args:
source (str): Path to the source file (could be any extension).
Returns:
list[dict[str, _]]: List of maps with keys "type", "name", "default", and "descr" for the
respective fields corresponding to the flag.
"""
flags = []
_, ext = os.path.splitext(source)
if ext == ".py":
delim = "flags.DEFINE_"
else:
delim = "DEFINE_"
split_comma_regex = r",(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
cppflag_to_pygflag = {"int32": "integer", "double": "float", "bool": "boolean"}
with open(source) as f:
lines = f.readlines()
flag_lines = "".join(lines).replace("\n", "").split(delim)[1:]
for flag_line in flag_lines:
flag_type, flag_def = flag_line.split("(", 1)
flag_contents = re.compile(split_comma_regex).split(flag_def)
NUM_FLAG_FIELDS = 3
if len(flag_contents) < NUM_FLAG_FIELDS:
continue
flag = {}
flag["type"] = flag_type
flag["name"] = flag_contents[0].strip().replace('"', "")
flag["default"] = flag_contents[1].strip()
flag["descr"] = flag_contents[2].rsplit(")", 1)[0].strip()
if flag["type"] in cppflag_to_pygflag:
flag["type"] = cppflag_to_pygflag[flag["type"]]
if flag["type"] == "boolean":
flag["default"] = False if (flag["default"] == "false") else True
elif flag["type"] == "integer":
try:
flag["default"] = int(flag["default"])
except Exception:
pass
elif flag["type"] == "float":
try:
flag["default"] = float(flag["default"])
except Exception:
pass
flags.append(flag)
return flags
def get_flags_from_flagfile(flagfile_fn):
flags = {}
for line in open(flagfile_fn, "r"):
m = re.findall("--(.*)=(.*)", line)
if len(m) == 1 and len(m[0]) == 2:
flags[m[0][0]] = m[0][1]
return flags
def gen_args_from_flags(flags):
"""Constructs CLI arguments from flags, assuming the format in res/test/.
Returns:
str: Space-separated string of CLI arguments (e.g.
"--example1 <X> --example2 <Y>")
"""
flags_string = []
for flag_name, flag_value in flags.items():
flags_string.append(f"--{flag_name}={flag_value}")
args_string = " ".join(flags_string)
return args_string
def list_only_visible_files(src_dir):
"""Gets the visible files in a directory.
Args:
src_dir (str): Path to the directory.
Returns:
list[str]: Names of visible files (not full paths).
"""
return [
f
for f in os.listdir(src_dir)
if not f.startswith(".") and os.path.isfile(src_dir + "/" + f)
]
def list_only_visible_dirs(src_dir):
"""Gets the visible directories in a directory.
Args:
src_dir (str): Path to the directory.
Returns:
list[str]: Names of visible directories (not full paths).
"""
return [
f
for f in os.listdir(src_dir)
if not f.startswith(".") and os.path.isdir(src_dir + "/" + f)
]
def start_subprocess(name, cmd):
"""Synchronously runs a named command.
Args:
name (str): Process name.
cmd (str): Command to execute.
Returns:
int: Return code of execution.
"""
global current_process
current_process = subprocess.Popen(cmd, shell=True)
current_process.name = name
current_process.communicate()
return current_process.returncode
def list_only_visible_files_recursive(src_dir):
"""Recursively gets the visible files in a directory.
Args:
src_dir (str): Path to the directory.
Returns:
list[str]: Names of visible files (not full paths).
"""
return reduce(
lambda x, y: x + y,
[
list(
map(
lambda x: root + "/" + x,
(f for f in files if not f.startswith(".")),
)
)
for root, dirs, files in os.walk(src_dir)
],
)
def intersect_lists(*args):
return set(args[0]).intersection(*args)
def merge_lists(*args):
return list(set(chain(*args)))
def extract_tar(filename, dst=None):
"""Extracts a tar file.
Args:
filename (str): Path to the tar file.
dst (str, optional): Directory where extraction should produce results.
Returns:
str: Path to the extracted directory.
"""
if dst is None:
dst = os.getcwd()
dir_name = os.path.splitext(filename)[0]
extract_directory = os.path.join(dst, dir_name)
tar_filename_path = os.path.join(os.getcwd(), filename)
t = tarfile.open(tar_filename_path)
t.extractall(dst)
return extract_directory
def run_command(
shell_string, run_silently=False, stream=True, run_async=False, file_fn=None
):
"""Run a shell command.
Args:
shell_string (str): Command to be run.
run_silently (bool, optional): Whether or not to show stdout.
stream (bool, optional): Whether or not to stream stdout. If run_silently is
set to True, if the file_fn is not empty, or if running on a Windows host,
            this argument cannot be overridden (will be False).
run_async (bool, optional): Whether or not to run command asynchronously. No output
is returned if running async.
file_fn (str, optional): Filename of where output should be saved.
Returns:
str: stdout from executing command.
"""
if run_async:
with open(os.devnull, "w") as FNULL:
subprocess.Popen(
shell_string, shell=True, stdout=FNULL, stderr=subprocess.STDOUT
)
return ""
if run_silently or file_fn is not None or os.name == "nt":
stream = False
try:
sh_fn = sh_stream if stream else sh_buffered
if run_silently:
with open(os.devnull, "w") as f:
output = sh_fn(shell_string, file=f)
else:
if file_fn is not None:
with open(file_fn, "w") as f:
output = sh_fn(shell_string, file=f)
else:
output = sh_fn(shell_string)
return output
except Exception:
if not run_silently:
print(f"Failed to run program: {shell_string}")
raise
def sh_buffered(arg, file=sys.stdout, use_shell=True):
"""Run a shell command with buffered output.
Args:
arg (str): Command to run.
file (file, optional): File handler to write stdout to.
use_shell (bool, optional): Whether or not to execute in a shell.
Returns:
str: stdout from executing command.
"""
if file:
print(f"$ {arg}", file=file)
try:
output = subprocess.check_output(arg, shell=use_shell, stderr=file)
except subprocess.CalledProcessError as e:
error = e.output.decode("utf-8")
if error:
print(error, file=sys.stderr)
raise
output = output.decode("utf-8")
if file:
print(output, file=file)
return output.rstrip()
def sh_stream(arg, file=sys.stdout, use_shell=None):
"""Run a shell command with streaming output.
Args:
arg (str): Command to run.
file (file, optional): File handler to write stdout to.
use_shell (bool, optional): Whether or not to execute in a shell.
Returns:
str: stdout from executing command.
"""
if file:
print(f"$ {arg}", file=file)
process = subprocess.Popen(
arg, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
outputs = []
for c in iter(lambda: process.stdout.read(1), b""):
decoded_c = c.decode("utf-8")
sys.stdout.write(decoded_c)
outputs.append(decoded_c)
exit_code = process.poll()
if exit_code != 0:
raise subprocess.CalledProcessError(exit_code, arg)
return "".join(outputs)
def get_catchable_signals():
"""Defines list of signals that can be caught (OS dependent).
Returns:
list[signal.signal]: Signals that can be caught.
"""
catchable_sigs = [
signal.SIGINT,
signal.SIGILL,
signal.SIGFPE,
signal.SIGSEGV,
signal.SIGTERM,
]
    # these additional POSIX-only signals are not available on Windows
    if get_os_type_local() != OSType.WINDOWS:
catchable_sigs.extend(
[
signal.SIGHUP,
signal.SIGQUIT,
signal.SIGTRAP,
signal.SIGKILL,
signal.SIGBUS,
signal.SIGSYS,
signal.SIGPIPE,
]
)
return catchable_sigs
| en | 0.764277 | #!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. General systems utility used across all scripts. Defines functions for abstracting away command line interfaces and for defining a globally accessible map of image paths. Any script that wishes to reference the path of an image type (e.g. color or disparity) should do so through this utility rather than hard-coding it. Example: To reference the image type path (as do many of the render scripts): >>> from system_util import image_type_paths >>> input_root = "/path/to/data" >>> color_path = os.path.join(input_root, image_type_paths["color"]) Attributes: image_type_paths (map[str, str]): Map from image type to its location in the standard structure. The paths are relative to the respective root an image type is defined for (e.g. "color" with respect to the input root and "disparity" to the output). For a full list of the input types, see: source/util/ImageTypes.h. Enum for referencing operating systems. Attributes: LINUX (int): Any Linux distro. MAC (int): Any version of Mac OS X. WINDOWS (int): Any version of Windows. Creates up the image type paths map. Returns: (map[str, str]): Map from image type to its location in the standard structure. The paths are relative to the respective root an image type is defined for (e.g. "color" with respect to the input root and "disparity" to the output). For a full list of the input types, see: source/util/ImageTypes.h. # ImageType definitions have format: X(type, value) Gets flags from a source file. Args: source (str): Path to the source file (could be any extension). Returns: list[dict[str, _]]: List of maps with keys "type", "name", "default", and "descr" for the respective fields corresponding to the flag. Constructs CLI arguments from flags, assuming the format in res/test/. Returns: str: Space-separated string of CLI arguments (e.g. "--example1 <X> --example2 <Y>") Gets the visible files in a directory. Args: src_dir (str): Path to the directory. Returns: list[str]: Names of visible files (not full paths). Gets the visible directories in a directory. Args: src_dir (str): Path to the directory. Returns: list[str]: Names of visible directories (not full paths). Synchronously runs a named command. Args: name (str): Process name. cmd (str): Command to execute. Returns: int: Return code of execution. Recursively gets the visible files in a directory. Args: src_dir (str): Path to the directory. Returns: list[str]: Names of visible files (not full paths). Extracts a tar file. Args: filename (str): Path to the tar file. dst (str, optional): Directory where extraction should produce results. Returns: str: Path to the extracted directory. Run a shell command. Args: shell_string (str): Command to be run. run_silently (bool, optional): Whether or not to show stdout. stream (bool, optional): Whether or not to stream stdout. If run_silently is set to True, if the file_fn is not empty, or if running on a Windows host, this argument cannot be overrided (will be False). run_async (bool, optional): Whether or not to run command asynchronously. No output is returned if running async. file_fn (str, optional): Filename of where output should be saved. Returns: str: stdout from executing command. Run a shell command with buffered output. Args: arg (str): Command to run. file (file, optional): File handler to write stdout to. 
use_shell (bool, optional): Whether or not to execute in a shell. Returns: str: stdout from executing command. Run a shell command with streaming output. Args: arg (str): Command to run. file (file, optional): File handler to write stdout to. use_shell (bool, optional): Whether or not to execute in a shell. Returns: str: stdout from executing command. Defines list of signals that can be caught (OS dependent). Returns: list[signal.signal]: Signals that can be caught. | 2.490849 | 2 |
blobxfer/models/metadata.py | amishra-dev/blobxfer | 147 | 6631090 | # Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# stdlib imports
import collections
import json
import logging
# non-stdlib imports
# local imports
import blobxfer.util
# create logger
logger = logging.getLogger(__name__)
# global defines
JSON_KEY_BLOBXFER_METADATA = 'blobxfer_metadata'
# file attributes
_JSON_KEY_FILE_ATTRIBUTES = 'FileAttributes'
_JSON_KEY_FILE_ATTRIBUTES_POSIX = 'POSIX'
_JSON_KEY_FILE_ATTRIBUTES_WINDOWS = 'Windows'
_JSON_KEY_FILE_ATTRIBUTES_MODE = 'mode'
_JSON_KEY_FILE_ATTRIBUTES_UID = 'uid'
_JSON_KEY_FILE_ATTRIBUTES_GID = 'gid'
# vectored io
_JSON_KEY_VECTORED_IO = 'VectoredIO'
_JSON_KEY_VECTORED_IO_MODE = 'Mode'
_JSON_KEY_VECTORED_IO_STRIPE = 'Stripe'
_JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SIZE = 'TotalSize'
_JSON_KEY_VECTORED_IO_STRIPE_OFFSET_START = 'OffsetStart'
_JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SLICES = 'TotalSlices'
_JSON_KEY_VECTORED_IO_STRIPE_SLICE_ID = 'SliceId'
_JSON_KEY_VECTORED_IO_STRIPE_NEXT = 'Next'
# named tuples
PosixFileAttr = collections.namedtuple(
'PosixFileAttr', [
'gid',
'mode',
'uid',
]
)
WindowsFileAttr = collections.namedtuple(
'WindowsFileAttr', [
]
)
VectoredStripe = collections.namedtuple(
'VectoredStripe', [
'next',
'offset_start',
'slice_id',
'total_size',
'total_slices',
]
)
VectoredNextEntry = collections.namedtuple(
'VectoredNextEntry', [
'storage_account_name',
'endpoint',
'container',
'name',
]
)
_FILEATTR_WARNED_ON_WINDOWS = False
def get_md5_from_metadata(ase):
# type: (blobxfer.models.azure.StorageEntity) -> str
"""Get MD5 from properties or metadata
:param blobxfer.models.azure.StorageEntity ase: Azure Storage Entity
:rtype: str or None
:return: md5
"""
# if encryption metadata is present, check for pre-encryption
# md5 in blobxfer extensions
md5 = None
if ase.is_encrypted:
try:
md5 = ase.encryption_metadata.blobxfer_extensions.\
pre_encrypted_content_md5
except AttributeError:
# this can happen if partial metadata is present
md5 = None
if blobxfer.util.is_none_or_empty(md5):
md5 = ase.md5
return md5
def generate_fileattr_metadata(local_path, metadata):
# type: (blobxfer.models.upload.LocalPath, dict) -> dict
"""Generate file attribute metadata dict
:param blobxfer.models.upload.LocalPath local_path: local path
:param dict metadata: existing metadata dict
:rtype: dict
:return: merged metadata dictionary
"""
if blobxfer.util.on_windows():
global _FILEATTR_WARNED_ON_WINDOWS
if not _FILEATTR_WARNED_ON_WINDOWS:
_FILEATTR_WARNED_ON_WINDOWS = True
logger.warning(
'file attributes store/restore on Windows is not '
'supported yet')
return None
else:
md = {
_JSON_KEY_FILE_ATTRIBUTES: {
_JSON_KEY_FILE_ATTRIBUTES_POSIX: {
_JSON_KEY_FILE_ATTRIBUTES_MODE: local_path.mode,
_JSON_KEY_FILE_ATTRIBUTES_UID: local_path.uid,
_JSON_KEY_FILE_ATTRIBUTES_GID: local_path.gid,
}
}
}
return blobxfer.util.merge_dict(metadata, md)
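# Shape of the metadata produced above on a POSIX host (editor's sketch of the key
# layout only; the actual values come from the LocalPath object):
#
# {
#     "FileAttributes": {
#         "POSIX": {"mode": <local_path.mode>, "uid": <local_path.uid>, "gid": <local_path.gid>}
#     }
# }
# merged into any key/value pairs already present in `metadata`.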
def fileattr_from_metadata(md):
# type: (dict) -> collections.namedtuple
"""Convert fileattr metadata in json metadata
:param dict md: metadata dictionary
:rtype: PosixFileAttr or WindowsFileAttr or None
:return: fileattr metadata
"""
try:
mdattr = json.loads(
md[JSON_KEY_BLOBXFER_METADATA])[_JSON_KEY_FILE_ATTRIBUTES]
except (KeyError, TypeError):
return None
else:
if blobxfer.util.on_windows():
global _FILEATTR_WARNED_ON_WINDOWS
if not _FILEATTR_WARNED_ON_WINDOWS:
_FILEATTR_WARNED_ON_WINDOWS = True
logger.warning(
'file attributes store/restore on Windows is not '
'supported yet')
fileattr = None
else:
try:
fileattr = PosixFileAttr(
mode=mdattr[_JSON_KEY_FILE_ATTRIBUTES_POSIX][
_JSON_KEY_FILE_ATTRIBUTES_MODE],
uid=mdattr[_JSON_KEY_FILE_ATTRIBUTES_POSIX][
_JSON_KEY_FILE_ATTRIBUTES_UID],
gid=mdattr[_JSON_KEY_FILE_ATTRIBUTES_POSIX][
_JSON_KEY_FILE_ATTRIBUTES_GID],
)
except KeyError:
fileattr = None
return fileattr
def create_vectored_io_next_entry(ase):
# type: (blobxfer.models.azure.StorageEntity) -> str
"""Create Vectored IO next entry id
:param blobxfer.models.azure.StorageEntity ase: Azure Storage Entity
:rtype: str
:return: vectored io next entry
"""
return ';'.join(
(ase.client.primary_endpoint, ase.container, ase.name)
)
def explode_vectored_io_next_entry(entry):
    # type: (str) -> VectoredNextEntry
"""Explode next vectored io entry
:param str entry: next entry
:rtype: VectoredNextEntry
:return: vectored next entry
"""
tmp = entry.split(';')
_sa = tmp[0].split('.')
return VectoredNextEntry(
storage_account_name=_sa[0],
endpoint='.'.join(_sa[2:]),
container=tmp[1],
name=tmp[2],
)
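# Worked example of the entry format handled above (editor's sketch; the account,
# container and blob names are hypothetical):
#
# >>> explode_vectored_io_next_entry(
# ...     "myaccount.blob.core.windows.net;mycontainer;dir/file.bin.bxslice-1")
# VectoredNextEntry(storage_account_name='myaccount', endpoint='core.windows.net',
#                   container='mycontainer', name='dir/file.bin.bxslice-1')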
def remove_vectored_io_slice_suffix_from_name(name, slice):
# type: (str, int) -> str
"""Remove vectored io (stripe) slice suffix from a given name
:param str name: entity name
:param int slice: slice num
:rtype: str
:return: name without suffix
"""
suffix = '.bxslice-{}'.format(slice)
if name.endswith(suffix):
return name[:-len(suffix)]
else:
return name
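# Example of the suffix stripping above (editor's sketch; the names are hypothetical):
#
# >>> remove_vectored_io_slice_suffix_from_name("dir/file.bin.bxslice-1", 1)
# 'dir/file.bin'
# >>> remove_vectored_io_slice_suffix_from_name("dir/file.bin", 1)
# 'dir/file.bin'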
def generate_vectored_io_stripe_metadata(local_path, metadata):
# type: (blobxfer.models.upload.LocalPath, dict) -> dict
"""Generate vectored io stripe metadata dict
:param blobxfer.models.upload.LocalPath local_path: local path
:param dict metadata: existing metadata dict
:rtype: dict
:return: merged metadata dictionary
"""
md = {
_JSON_KEY_VECTORED_IO: {
_JSON_KEY_VECTORED_IO_MODE: _JSON_KEY_VECTORED_IO_STRIPE,
_JSON_KEY_VECTORED_IO_STRIPE: {
_JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SIZE: local_path.total_size,
_JSON_KEY_VECTORED_IO_STRIPE_OFFSET_START:
local_path.view.fd_start,
_JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SLICES:
local_path.view.total_slices,
_JSON_KEY_VECTORED_IO_STRIPE_SLICE_ID:
local_path.view.slice_num,
_JSON_KEY_VECTORED_IO_STRIPE_NEXT: local_path.view.next,
}
}
}
return blobxfer.util.merge_dict(metadata, md)
def vectored_io_from_metadata(md):
# type: (dict) -> collections.namedtuple
"""Convert vectored io metadata in json metadata
:param dict md: metadata dictionary
:rtype: VectoredStripe or None
:return: vectored io metadata
"""
try:
mdattr = json.loads(
md[JSON_KEY_BLOBXFER_METADATA])[_JSON_KEY_VECTORED_IO]
except (KeyError, TypeError):
pass
else:
if mdattr[_JSON_KEY_VECTORED_IO_MODE] == _JSON_KEY_VECTORED_IO_STRIPE:
mdstripe = mdattr[_JSON_KEY_VECTORED_IO_STRIPE]
try:
nextptr = explode_vectored_io_next_entry(
mdstripe[_JSON_KEY_VECTORED_IO_STRIPE_NEXT])
except (KeyError, AttributeError):
nextptr = None
vio = VectoredStripe(
total_size=mdstripe[_JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SIZE],
offset_start=mdstripe[
_JSON_KEY_VECTORED_IO_STRIPE_OFFSET_START],
total_slices=mdstripe[
_JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SLICES],
slice_id=mdstripe[_JSON_KEY_VECTORED_IO_STRIPE_SLICE_ID],
next=nextptr,
)
return vio
else:
raise RuntimeError('Cannot handle Vectored IO mode: {}'.format(
mdattr[_JSON_KEY_VECTORED_IO_MODE]))
return None
| # Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# stdlib imports
import collections
import json
import logging
# non-stdlib imports
# local imports
import blobxfer.util
# create logger
logger = logging.getLogger(__name__)
# global defines
JSON_KEY_BLOBXFER_METADATA = 'blobxfer_metadata'
# file attributes
_JSON_KEY_FILE_ATTRIBUTES = 'FileAttributes'
_JSON_KEY_FILE_ATTRIBUTES_POSIX = 'POSIX'
_JSON_KEY_FILE_ATTRIBUTES_WINDOWS = 'Windows'
_JSON_KEY_FILE_ATTRIBUTES_MODE = 'mode'
_JSON_KEY_FILE_ATTRIBUTES_UID = 'uid'
_JSON_KEY_FILE_ATTRIBUTES_GID = 'gid'
# vectored io
_JSON_KEY_VECTORED_IO = 'VectoredIO'
_JSON_KEY_VECTORED_IO_MODE = 'Mode'
_JSON_KEY_VECTORED_IO_STRIPE = 'Stripe'
_JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SIZE = 'TotalSize'
_JSON_KEY_VECTORED_IO_STRIPE_OFFSET_START = 'OffsetStart'
_JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SLICES = 'TotalSlices'
_JSON_KEY_VECTORED_IO_STRIPE_SLICE_ID = 'SliceId'
_JSON_KEY_VECTORED_IO_STRIPE_NEXT = 'Next'
# named tuples
PosixFileAttr = collections.namedtuple(
'PosixFileAttr', [
'gid',
'mode',
'uid',
]
)
WindowsFileAttr = collections.namedtuple(
'WindowsFileAttr', [
]
)
VectoredStripe = collections.namedtuple(
'VectoredStripe', [
'next',
'offset_start',
'slice_id',
'total_size',
'total_slices',
]
)
VectoredNextEntry = collections.namedtuple(
'VectoredNextEntry', [
'storage_account_name',
'endpoint',
'container',
'name',
]
)
_FILEATTR_WARNED_ON_WINDOWS = False
def get_md5_from_metadata(ase):
# type: (blobxfer.models.azure.StorageEntity) -> str
"""Get MD5 from properties or metadata
:param blobxfer.models.azure.StorageEntity ase: Azure Storage Entity
:rtype: str or None
:return: md5
"""
# if encryption metadata is present, check for pre-encryption
# md5 in blobxfer extensions
md5 = None
if ase.is_encrypted:
try:
md5 = ase.encryption_metadata.blobxfer_extensions.\
pre_encrypted_content_md5
except AttributeError:
# this can happen if partial metadata is present
md5 = None
if blobxfer.util.is_none_or_empty(md5):
md5 = ase.md5
return md5
def generate_fileattr_metadata(local_path, metadata):
# type: (blobxfer.models.upload.LocalPath, dict) -> dict
"""Generate file attribute metadata dict
:param blobxfer.models.upload.LocalPath local_path: local path
:param dict metadata: existing metadata dict
:rtype: dict
:return: merged metadata dictionary
"""
if blobxfer.util.on_windows():
global _FILEATTR_WARNED_ON_WINDOWS
if not _FILEATTR_WARNED_ON_WINDOWS:
_FILEATTR_WARNED_ON_WINDOWS = True
logger.warning(
'file attributes store/restore on Windows is not '
'supported yet')
return None
else:
md = {
_JSON_KEY_FILE_ATTRIBUTES: {
_JSON_KEY_FILE_ATTRIBUTES_POSIX: {
_JSON_KEY_FILE_ATTRIBUTES_MODE: local_path.mode,
_JSON_KEY_FILE_ATTRIBUTES_UID: local_path.uid,
_JSON_KEY_FILE_ATTRIBUTES_GID: local_path.gid,
}
}
}
return blobxfer.util.merge_dict(metadata, md)
def fileattr_from_metadata(md):
# type: (dict) -> collections.namedtuple
"""Convert fileattr metadata in json metadata
:param dict md: metadata dictionary
:rtype: PosixFileAttr or WindowsFileAttr or None
:return: fileattr metadata
"""
try:
mdattr = json.loads(
md[JSON_KEY_BLOBXFER_METADATA])[_JSON_KEY_FILE_ATTRIBUTES]
except (KeyError, TypeError):
return None
else:
if blobxfer.util.on_windows():
global _FILEATTR_WARNED_ON_WINDOWS
if not _FILEATTR_WARNED_ON_WINDOWS:
_FILEATTR_WARNED_ON_WINDOWS = True
logger.warning(
'file attributes store/restore on Windows is not '
'supported yet')
fileattr = None
else:
try:
fileattr = PosixFileAttr(
mode=mdattr[_JSON_KEY_FILE_ATTRIBUTES_POSIX][
_JSON_KEY_FILE_ATTRIBUTES_MODE],
uid=mdattr[_JSON_KEY_FILE_ATTRIBUTES_POSIX][
_JSON_KEY_FILE_ATTRIBUTES_UID],
gid=mdattr[_JSON_KEY_FILE_ATTRIBUTES_POSIX][
_JSON_KEY_FILE_ATTRIBUTES_GID],
)
except KeyError:
fileattr = None
return fileattr
def create_vectored_io_next_entry(ase):
# type: (blobxfer.models.azure.StorageEntity) -> str
"""Create Vectored IO next entry id
:param blobxfer.models.azure.StorageEntity ase: Azure Storage Entity
:rtype: str
:return: vectored io next entry
"""
return ';'.join(
(ase.client.primary_endpoint, ase.container, ase.name)
)
def explode_vectored_io_next_entry(entry):
    # type: (str) -> VectoredNextEntry
"""Explode next vectored io entry
:param str entry: next entry
:rtype: VectoredNextEntry
:return: vectored next entry
"""
tmp = entry.split(';')
_sa = tmp[0].split('.')
return VectoredNextEntry(
storage_account_name=_sa[0],
endpoint='.'.join(_sa[2:]),
container=tmp[1],
name=tmp[2],
)
def remove_vectored_io_slice_suffix_from_name(name, slice):
# type: (str, int) -> str
"""Remove vectored io (stripe) slice suffix from a given name
:param str name: entity name
:param int slice: slice num
:rtype: str
:return: name without suffix
"""
suffix = '.bxslice-{}'.format(slice)
if name.endswith(suffix):
return name[:-len(suffix)]
else:
return name
def generate_vectored_io_stripe_metadata(local_path, metadata):
# type: (blobxfer.models.upload.LocalPath, dict) -> dict
"""Generate vectored io stripe metadata dict
:param blobxfer.models.upload.LocalPath local_path: local path
:param dict metadata: existing metadata dict
:rtype: dict
:return: merged metadata dictionary
"""
md = {
_JSON_KEY_VECTORED_IO: {
_JSON_KEY_VECTORED_IO_MODE: _JSON_KEY_VECTORED_IO_STRIPE,
_JSON_KEY_VECTORED_IO_STRIPE: {
_JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SIZE: local_path.total_size,
_JSON_KEY_VECTORED_IO_STRIPE_OFFSET_START:
local_path.view.fd_start,
_JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SLICES:
local_path.view.total_slices,
_JSON_KEY_VECTORED_IO_STRIPE_SLICE_ID:
local_path.view.slice_num,
_JSON_KEY_VECTORED_IO_STRIPE_NEXT: local_path.view.next,
}
}
}
return blobxfer.util.merge_dict(metadata, md)
def vectored_io_from_metadata(md):
# type: (dict) -> collections.namedtuple
"""Convert vectored io metadata in json metadata
:param dict md: metadata dictionary
:rtype: VectoredStripe or None
:return: vectored io metadata
"""
try:
mdattr = json.loads(
md[JSON_KEY_BLOBXFER_METADATA])[_JSON_KEY_VECTORED_IO]
except (KeyError, TypeError):
pass
else:
if mdattr[_JSON_KEY_VECTORED_IO_MODE] == _JSON_KEY_VECTORED_IO_STRIPE:
mdstripe = mdattr[_JSON_KEY_VECTORED_IO_STRIPE]
try:
nextptr = explode_vectored_io_next_entry(
mdstripe[_JSON_KEY_VECTORED_IO_STRIPE_NEXT])
except (KeyError, AttributeError):
nextptr = None
vio = VectoredStripe(
total_size=mdstripe[_JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SIZE],
offset_start=mdstripe[
_JSON_KEY_VECTORED_IO_STRIPE_OFFSET_START],
total_slices=mdstripe[
_JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SLICES],
slice_id=mdstripe[_JSON_KEY_VECTORED_IO_STRIPE_SLICE_ID],
next=nextptr,
)
return vio
else:
raise RuntimeError('Cannot handle Vectored IO mode: {}'.format(
mdattr[_JSON_KEY_VECTORED_IO_MODE]))
return None
| en | 0.571934 | # Copyright (c) Microsoft Corporation # # All rights reserved. # # MIT License # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # stdlib imports # non-stdlib imports # local imports # create logger # global defines # file attributes # vectored io # named tuples # type: (blobxfer.models.azure.StorageEntity) -> str Get MD5 from properties or metadata :param blobxfer.models.azure.StorageEntity ase: Azure Storage Entity :rtype: str or None :return: md5 # if encryption metadata is present, check for pre-encryption # md5 in blobxfer extensions # this can happen if partial metadata is present # type: (blobxfer.models.upload.LocalPath, dict) -> dict Generate file attribute metadata dict :param blobxfer.models.upload.LocalPath local_path: local path :param dict metadata: existing metadata dict :rtype: dict :return: merged metadata dictionary # type: (dict) -> collections.namedtuple Convert fileattr metadata in json metadata :param dict md: metadata dictionary :rtype: PosixFileAttr or WindowsFileAttr or None :return: fileattr metadata # type: (blobxfer.models.azure.StorageEntity) -> str Create Vectored IO next entry id :param blobxfer.models.azure.StorageEntity ase: Azure Storage Entity :rtype: str :return: vectored io next entry # type: (str, int) -> str Explode next vectored io entry :param str entry: next entry :rtype: VectoredNextEntry :return: vectored next entry # type: (str, int) -> str Remove vectored io (stripe) slice suffix from a given name :param str name: entity name :param int slice: slice num :rtype: str :return: name without suffix # type: (blobxfer.models.upload.LocalPath, dict) -> dict Generate vectored io stripe metadata dict :param blobxfer.models.upload.LocalPath local_path: local path :param dict metadata: existing metadata dict :rtype: dict :return: merged metadata dictionary # type: (dict) -> collections.namedtuple Convert vectored io metadata in json metadata :param dict md: metadata dictionary :rtype: VectoredStripe or None :return: vectored io metadata | 1.379315 | 1 |
splatter.py | anana10c/handtracking | 0 | 6631091 | import cv2
import random
# from threading import Timer
class Splatter:
def __init__(self, topleft, bottomright, color=None):
imgnum = str(random.randint(1,8))
self.outline = cv2.imread(str('splatter-'+imgnum+'.png'), -1)
self.outline = cv2.resize(self.outline, (bottomright[0]-topleft[0], bottomright[1]-topleft[1]), interpolation = cv2.INTER_AREA)
        # cv2.cvtColor returns a new array rather than converting in place, so keep the result
        self.outline = cv2.cvtColor(self.outline, cv2.COLOR_BGRA2RGBA) #remember to try to convert frame to RGBA also
        if color is None:
self.color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
else:
self.color = color
self.outline[:, :, 0:3][self.outline[:, :, 3] != 0] = self.color
self.outline[:, :, 0:3][self.outline[:, :, 3] == 0] = (0, 0, 0)
self.opacity = 1
self.topleft = topleft
self.bottomright = bottomright
def fade(self):
#self.outline[self.outline[:, :, 3] >= 4] -= 4
if self.opacity > 0:
self.opacity -= 0.1
if self.opacity < 0:
self.opacity = 0
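# Editor's sketch (not part of the original file): one plausible way to composite a
# Splatter onto a video frame, cross-fading by its current opacity. The helper
# name and blending scheme are assumptions for illustration only.
def overlay_splatter(frame, splatter):
    x0, y0 = splatter.topleft
    x1, y1 = splatter.bottomright
    roi = frame[y0:y1, x0:x1]
    # blend only where the splatter outline is opaque
    mask = splatter.outline[:, :, 3] != 0
    blended = cv2.addWeighted(roi, 1.0 - splatter.opacity,
                              splatter.outline[:, :, 0:3], splatter.opacity, 0)
    roi[mask] = blended[mask]
    return frame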
| import cv2
import random
# from threading import Timer
class Splatter:
def __init__(self, topleft, bottomright, color=None):
imgnum = str(random.randint(1,8))
self.outline = cv2.imread(str('splatter-'+imgnum+'.png'), -1)
self.outline = cv2.resize(self.outline, (bottomright[0]-topleft[0], bottomright[1]-topleft[1]), interpolation = cv2.INTER_AREA)
        # cv2.cvtColor returns a new array rather than converting in place, so keep the result
        self.outline = cv2.cvtColor(self.outline, cv2.COLOR_BGRA2RGBA) #remember to try to convert frame to RGBA also
        if color is None:
self.color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
else:
self.color = color
self.outline[:, :, 0:3][self.outline[:, :, 3] != 0] = self.color
self.outline[:, :, 0:3][self.outline[:, :, 3] == 0] = (0, 0, 0)
self.opacity = 1
self.topleft = topleft
self.bottomright = bottomright
def fade(self):
#self.outline[self.outline[:, :, 3] >= 4] -= 4
if self.opacity > 0:
self.opacity -= 0.1
if self.opacity < 0:
self.opacity = 0
| en | 0.748877 | # from threading import Timer #remember to try to convert frame to RGBA also #self.outline[self.outline[:, :, 3] >= 4] -= 4 | 2.804884 | 3 |
safedelete/tests/test_admin.py | makinacorpus/django-safedelete | 505 | 6631092 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.models import User
from django.db import models
from django.test import RequestFactory, TestCase
from ..admin import SafeDeleteAdmin, highlight_deleted
from ..config import FIELD_NAME
from ..models import SafeDeleteModel
from .models import Article, Author, Category
class Order(SafeDeleteModel):
articles = models.ManyToManyField(Article)
class CategoryAdmin(SafeDeleteAdmin):
list_display = (highlight_deleted,) + SafeDeleteAdmin.list_display
admin.site.register(Category, CategoryAdmin)
class AdminTestCase(TestCase):
urls = 'safedelete.tests.urls'
def setUp(self):
self.author = Author.objects.create()
self.categories = (
Category.objects.create(name='é'),
Category.objects.create(),
Category.objects.create(),
)
self.articles = (
Article(
author=self.author
),
Article(
author=self.author,
category=self.categories[1]
),
Article(
author=self.author,
category=self.categories[2]
),
)
self.categories[1].delete()
self.request_factory = RequestFactory()
self.request = self.request_factory.get('/', {})
self.modeladmin_default = admin.ModelAdmin(Category, AdminSite())
self.modeladmin = CategoryAdmin(Category, AdminSite())
user = User.objects.create_superuser('super', '<EMAIL>', 'secret')
self.client.login(username='super', password='<PASSWORD>')
self.request.user = user
def tearDown(self):
self.client.logout()
def get_changelist(self, request, model, modeladmin):
args = [
request, model, modeladmin.list_display,
modeladmin.list_display_links, modeladmin.list_filter,
modeladmin.date_hierarchy, modeladmin.search_fields,
modeladmin.list_select_related, modeladmin.list_per_page,
modeladmin.list_max_show_all, modeladmin.list_editable,
modeladmin
]
# New parameter in Django 2.1
if hasattr(modeladmin, 'sortable_by'):
args.append(modeladmin.sortable_by)
return ChangeList(*args)
def test_admin_model(self):
changelist_default = self.get_changelist(self.request, Category, self.modeladmin_default)
changelist = self.get_changelist(self.request, Category, self.modeladmin)
self.assertEqual(changelist.get_filters(self.request)[0][0].title, FIELD_NAME.replace('_', ' '))
self.assertEqual(changelist.queryset.count(), 3)
self.assertEqual(changelist_default.queryset.count(), 2)
def test_highlight_deleted(self):
"""Test deleted objects are in red in admin listing."""
resp = self.client.get('/admin/safedelete/category/')
line = '<span class="deleted">{0}</span>'.format(self.categories[1])
self.assertContains(resp, line)
def test_admin_xss(self):
"""Test whether admin XSS is blocked."""
Category.objects.create(name='<script>alert(42)</script>'),
resp = self.client.get('/admin/safedelete/category/')
# It should be escaped
self.assertNotContains(resp, '<script>alert(42)</script>')
def test_admin_undelete_action(self):
"""Test objects are undeleted and action is logged."""
resp = self.client.post('/admin/safedelete/category/', data={
'index': 0,
'action': ['undelete_selected'],
'_selected_action': [self.categories[1].pk],
})
self.assertTemplateUsed(resp, 'safedelete/undelete_selected_confirmation.html')
category = Category.all_objects.get(
pk=self.categories[1].pk
)
self.assertTrue(getattr(self.categories[1], FIELD_NAME))
resp = self.client.post('/admin/safedelete/category/', data={
'index': 0,
'action': ['undelete_selected'],
'post': True,
'_selected_action': [self.categories[1].pk],
})
category = Category.objects.get(
pk=self.categories[1].pk
)
self.assertFalse(getattr(category, FIELD_NAME))
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.models import User
from django.db import models
from django.test import RequestFactory, TestCase
from ..admin import SafeDeleteAdmin, highlight_deleted
from ..config import FIELD_NAME
from ..models import SafeDeleteModel
from .models import Article, Author, Category
class Order(SafeDeleteModel):
articles = models.ManyToManyField(Article)
class CategoryAdmin(SafeDeleteAdmin):
list_display = (highlight_deleted,) + SafeDeleteAdmin.list_display
admin.site.register(Category, CategoryAdmin)
class AdminTestCase(TestCase):
urls = 'safedelete.tests.urls'
def setUp(self):
self.author = Author.objects.create()
self.categories = (
Category.objects.create(name='é'),
Category.objects.create(),
Category.objects.create(),
)
self.articles = (
Article(
author=self.author
),
Article(
author=self.author,
category=self.categories[1]
),
Article(
author=self.author,
category=self.categories[2]
),
)
self.categories[1].delete()
self.request_factory = RequestFactory()
self.request = self.request_factory.get('/', {})
self.modeladmin_default = admin.ModelAdmin(Category, AdminSite())
self.modeladmin = CategoryAdmin(Category, AdminSite())
user = User.objects.create_superuser('super', '<EMAIL>', 'secret')
self.client.login(username='super', password='<PASSWORD>')
self.request.user = user
def tearDown(self):
self.client.logout()
def get_changelist(self, request, model, modeladmin):
args = [
request, model, modeladmin.list_display,
modeladmin.list_display_links, modeladmin.list_filter,
modeladmin.date_hierarchy, modeladmin.search_fields,
modeladmin.list_select_related, modeladmin.list_per_page,
modeladmin.list_max_show_all, modeladmin.list_editable,
modeladmin
]
# New parameter in Django 2.1
if hasattr(modeladmin, 'sortable_by'):
args.append(modeladmin.sortable_by)
return ChangeList(*args)
def test_admin_model(self):
changelist_default = self.get_changelist(self.request, Category, self.modeladmin_default)
changelist = self.get_changelist(self.request, Category, self.modeladmin)
self.assertEqual(changelist.get_filters(self.request)[0][0].title, FIELD_NAME.replace('_', ' '))
self.assertEqual(changelist.queryset.count(), 3)
self.assertEqual(changelist_default.queryset.count(), 2)
def test_highlight_deleted(self):
"""Test deleted objects are in red in admin listing."""
resp = self.client.get('/admin/safedelete/category/')
line = '<span class="deleted">{0}</span>'.format(self.categories[1])
self.assertContains(resp, line)
def test_admin_xss(self):
"""Test whether admin XSS is blocked."""
Category.objects.create(name='<script>alert(42)</script>'),
resp = self.client.get('/admin/safedelete/category/')
# It should be escaped
self.assertNotContains(resp, '<script>alert(42)</script>')
def test_admin_undelete_action(self):
"""Test objects are undeleted and action is logged."""
resp = self.client.post('/admin/safedelete/category/', data={
'index': 0,
'action': ['undelete_selected'],
'_selected_action': [self.categories[1].pk],
})
self.assertTemplateUsed(resp, 'safedelete/undelete_selected_confirmation.html')
category = Category.all_objects.get(
pk=self.categories[1].pk
)
self.assertTrue(getattr(self.categories[1], FIELD_NAME))
resp = self.client.post('/admin/safedelete/category/', data={
'index': 0,
'action': ['undelete_selected'],
'post': True,
'_selected_action': [self.categories[1].pk],
})
category = Category.objects.get(
pk=self.categories[1].pk
)
self.assertFalse(getattr(category, FIELD_NAME)) | en | 0.915921 | # -*- coding: utf-8 -*- # New parameter in Django 2.1 Test deleted objects are in red in admin listing. Test whether admin XSS is blocked. # It should be escaped Test objects are undeleted and action is logged. | 2.015605 | 2 |
addons/hr_payroll_community/models/res_config_settings.py | gleis44/stellwerk | 0 | 6631093 | # -*- coding: utf-8 -*-
from odoo import fields, models
class ResConfigSettings(models.TransientModel):
_inherit = 'res.config.settings'
module_account_accountant = fields.Boolean(string='Account Accountant')
module_l10n_fr_hr_payroll = fields.Boolean(string='French Payroll')
module_l10n_be_hr_payroll = fields.Boolean(string='Belgium Payroll')
module_l10n_in_hr_payroll = fields.Boolean(string='Indian Payroll')
| # -*- coding: utf-8 -*-
from odoo import fields, models
class ResConfigSettings(models.TransientModel):
_inherit = 'res.config.settings'
module_account_accountant = fields.Boolean(string='Account Accountant')
module_l10n_fr_hr_payroll = fields.Boolean(string='French Payroll')
module_l10n_be_hr_payroll = fields.Boolean(string='Belgium Payroll')
module_l10n_in_hr_payroll = fields.Boolean(string='Indian Payroll')
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.818124 | 2 |
src/three_pl/three_pl_utils.py | eribean/girth | 43 | 6631094 | import numpy as np
def _compute_partial_integral_3pl(theta, difficulty, discrimination, guessing, the_sign):
"""
Computes the partial integral for a set of item parameters
Args:
theta: (array) evaluation points
difficulty: (array) set of difficulty parameters
discrimination: (array | number) set of discrimination parameters
guessing: (array) set of guessing parameters
the_sign: (array) positive or negative sign
associated with response vector
Returns:
partial_integral: (2d array)
integration of items defined by "sign" parameters
axis 0: individual persons
axis 1: evaluation points (at theta)
Notes:
Implicitly multiplies the data by the gaussian distribution
"""
# This represents a 3-dimensional array
# [Response Set, Person, Theta]
# The integration happens over response set and the result is an
# array of [Person, Theta]
kernel = the_sign[:, :, None] * np.ones((1, 1, theta.size))
kernel *= discrimination[:, None, None]
kernel *= (theta[None, None, :] - difficulty[:, None, None])
otpt = (1.0 / (1.0 + np.exp(kernel))) * (1 - guessing[:, None, None])
otpt += 0.5 * (1 - the_sign[:, :, None]) * guessing[:, None, None]
return otpt.prod(axis=0).squeeze() | import numpy as np
def _compute_partial_integral_3pl(theta, difficulty, discrimination, guessing, the_sign):
"""
Computes the partial integral for a set of item parameters
Args:
theta: (array) evaluation points
difficulty: (array) set of difficulty parameters
discrimination: (array | number) set of discrimination parameters
guessing: (array) set of guessing parameters
the_sign: (array) positive or negative sign
associated with response vector
Returns:
partial_integral: (2d array)
integration of items defined by "sign" parameters
axis 0: individual persons
axis 1: evaluation points (at theta)
Notes:
Implicitly multiplies the data by the gaussian distribution
"""
# This represents a 3-dimensional array
# [Response Set, Person, Theta]
# The integration happens over response set and the result is an
# array of [Person, Theta]
kernel = the_sign[:, :, None] * np.ones((1, 1, theta.size))
kernel *= discrimination[:, None, None]
kernel *= (theta[None, None, :] - difficulty[:, None, None])
otpt = (1.0 / (1.0 + np.exp(kernel))) * (1 - guessing[:, None, None])
otpt += 0.5 * (1 - the_sign[:, :, None]) * guessing[:, None, None]
return otpt.prod(axis=0).squeeze() | en | 0.67136 | Computes the partial integral for a set of item parameters Args: theta: (array) evaluation points difficulty: (array) set of difficulty parameters discrimination: (array | number) set of discrimination parameters guessing: (array) set of guessing parameters the_sign: (array) positive or negative sign associated with response vector Returns: partial_integral: (2d array) integration of items defined by "sign" parameters axis 0: individual persons axis 1: evaluation points (at theta) Notes: Implicitly multiplies the data by the gaussian distribution # This represents a 3-dimensional array # [Response Set, Person, Theta] # The integration happens over response set and the result is an # array of [Person, Theta] | 3.627983 | 4 |
setup.py | google/weather-tools | 66 | 6631095 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
weather_dl_requirements = [
"cdsapi",
"ecmwf-api-client",
"apache-beam[gcp]",
"numpy>=1.19.1",
"pandas",
"xarray",
"requests>=2.24.0",
"firebase-admin>=5.0.0",
"google-cloud-datastore>=1.15.0,<2", # For compatability with apache-beam[gcp]
"google-cloud-firestore",
"urllib3==1.25.11",
"pyparsing==2.4.2", # Fix http2lib auth breakage
]
weather_mv_requirements = [
"apache-beam[gcp]",
"dataclasses",
"numpy",
"pandas",
"xarray",
"google-cloud-bigquery",
"google-cloud-storage==2.2.1",
"pyparsing==2.4.2", # Fix http2lib auth breakage
"cfgrib",
"netcdf4",
"geojson",
]
weather_sp_requirements = [
"apache-beam[gcp]",
"numpy>=1.20.3",
"pygrib",
"xarray",
"scipy",
]
test_requirements = [
"pytype==2021.11.29",
"flake8",
"pytest",
"pytest-subtests",
"netcdf4",
"numpy",
"xarray",
"xarray-beam",
"absl-py",
]
all_test_requirements = weather_dl_requirements + weather_mv_requirements + weather_sp_requirements + test_requirements
setup(
name='google-weather-tools',
packages=find_packages(),
author='Anthromets',
author_email='<EMAIL>',
url='https://weather-tools.readthedocs.io/',
description='Apache Beam pipelines to make weather data accessible and useful.',
long_description=open('README.md', 'r', encoding='utf-8').read(),
long_description_content_type='text/markdown',
platforms=['darwin', 'linux'],
license='License :: OSI Approved :: Apache Software License',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
# 'Operating System :: Microsoft :: Windows', # TODO(#64): Fully support Windows.
'Operating System :: POSIX',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Atmospheric Science',
],
# Apache Beam's Python SDK only supports up to 3.8
python_requires='>=3.7, <3.9',
install_requires=['apache-beam[gcp]'],
use_scm_version=True,
setup_requires=['setuptools_scm'],
scripts=['weather_dl/weather-dl', 'weather_dl/download-status',
'weather_mv/weather-mv', 'weather_sp/weather-sp'],
tests_require=test_requirements,
extras_require={
'dev': ['tox', 'sphinx>=2.1', 'myst-parser'] + all_test_requirements,
'test': all_test_requirements,
},
project_urls={
'Issue Tracking': 'http://github.com/google/weather-tools/issues',
},
)
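# How the extras defined above are typically installed (editor's note; the exact
# source depends on how the package is obtained):
#
#   pip install google-weather-tools           # runtime dependencies only
#   pip install "google-weather-tools[test]"   # adds the test requirements
#   pip install "google-weather-tools[dev]"    # adds docs/tooling for development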
 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
weather_dl_requirements = [
"cdsapi",
"ecmwf-api-client",
"apache-beam[gcp]",
"numpy>=1.19.1",
"pandas",
"xarray",
"requests>=2.24.0",
"firebase-admin>=5.0.0",
"google-cloud-datastore>=1.15.0,<2", # For compatability with apache-beam[gcp]
"google-cloud-firestore",
"urllib3==1.25.11",
"pyparsing==2.4.2", # Fix http2lib auth breakage
]
weather_mv_requirements = [
"apache-beam[gcp]",
"dataclasses",
"numpy",
"pandas",
"xarray",
"google-cloud-bigquery",
"google-cloud-storage==2.2.1",
"pyparsing==2.4.2", # Fix http2lib auth breakage
"cfgrib",
"netcdf4",
"geojson",
]
weather_sp_requirements = [
"apache-beam[gcp]",
"numpy>=1.20.3",
"pygrib",
"xarray",
"scipy",
]
test_requirements = [
"pytype==2021.11.29",
"flake8",
"pytest",
"pytest-subtests",
"netcdf4",
"numpy",
"xarray",
"xarray-beam",
"absl-py",
]
all_test_requirements = weather_dl_requirements + weather_mv_requirements + weather_sp_requirements + test_requirements
setup(
name='google-weather-tools',
packages=find_packages(),
author='Anthromets',
author_email='<EMAIL>',
url='https://weather-tools.readthedocs.io/',
description='Apache Beam pipelines to make weather data accessible and useful.',
long_description=open('README.md', 'r', encoding='utf-8').read(),
long_description_content_type='text/markdown',
platforms=['darwin', 'linux'],
license='License :: OSI Approved :: Apache Software License',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
# 'Operating System :: Microsoft :: Windows', # TODO(#64): Fully support Windows.
'Operating System :: POSIX',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Atmospheric Science',
],
# Apache Beam's Python SDK only supports up to 3.8
python_requires='>=3.7, <3.9',
install_requires=['apache-beam[gcp]'],
use_scm_version=True,
setup_requires=['setuptools_scm'],
scripts=['weather_dl/weather-dl', 'weather_dl/download-status',
'weather_mv/weather-mv', 'weather_sp/weather-sp'],
tests_require=test_requirements,
extras_require={
'dev': ['tox', 'sphinx>=2.1', 'myst-parser'] + all_test_requirements,
'test': all_test_requirements,
},
project_urls={
'Issue Tracking': 'http://github.com/google/weather-tools/issues',
},
)
| en | 0.807871 | # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # For compatability with apache-beam[gcp] # Fix http2lib auth breakage # Fix http2lib auth breakage # 'Operating System :: Microsoft :: Windows', # TODO(#64): Fully support Windows. # Apache Beam's Python SDK only supports up to 3.8 | 1.193037 | 1 |
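# Aside (not part of the original setup.py, requirement lists abbreviated here):
# all_test_requirements above is a plain concatenation, so shared pins such as
# apache-beam[gcp], numpy and xarray appear several times. pip tolerates that; an
# order-preserving de-duplication, if wanted, could look like this:
weather_dl_requirements = ["cdsapi", "apache-beam[gcp]", "numpy>=1.19.1", "xarray"]
weather_mv_requirements = ["apache-beam[gcp]", "numpy", "xarray", "cfgrib"]
test_requirements = ["pytest", "numpy", "xarray"]
combined = weather_dl_requirements + weather_mv_requirements + test_requirements
# dict.fromkeys keeps first-seen order, so exact duplicate strings collapse to one entry.
all_test_requirements = list(dict.fromkeys(combined))
print(all_test_requirements)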
lesson4/pract2.py | anhellina/lessons | 0 | 6631096 | """
Write a program that prints to the screen all numbers from 1 to 100 that are multiples of n (n is entered from the keyboard).
"""
n = int(input("Enter number"))
for x in range(1,101):
    if x % n == 0:
print(x)
| """
Write a program that prints to the screen all numbers from 1 to 100 that are multiples of n (n is entered from the keyboard).
"""
n = int(input("Enter number"))
for x in range(1,101):
    if x % n == 0:
print(x)
| ru | 0.997745 | Написать программу, которая выведет на экран все числа от 1 до 100 которые кратные n (n вводится с клавиатуры). | 3.777406 | 4 |
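# Aside: the loop above tests every value from 1 to 100 with a modulo check. An
# equivalent Python 3 variant steps straight through the multiples of n instead:
n = 7  # stand-in for int(input("Enter number"))
for x in range(n, 101, n):  # visits only the multiples of n
    print(x)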
Audio/StarGAN-VC/utils/preprocess.py | LiuHaolan/models | 43 | 6631097 | <filename>Audio/StarGAN-VC/utils/preprocess.py
import os
import glob
import shutil
import argparse
import zipfile
from datetime import datetime
import librosa
import numpy as np
import pyworld
FEATURE_DIM = 36
SAMPLE_RATE = 16000
FRAMES = 512
FFTSIZE = 1024
CHUNK_SIZE = 1
EPSILON = 1e-10
MODEL_NAME = "starganvc_model"
def unzip(zip_filepath, dest_dir="./data"):
with zipfile.ZipFile(zip_filepath) as zf:
zf.extractall(dest_dir)
print("Extraction complete!")
def copy_files(source_dir, target_dir, file_dir_name_list):
for file_dir_name in file_dir_name_list:
if os.path.exists(os.path.join(target_dir, file_dir_name)):
continue
shutil.copytree(
os.path.join(source_dir, file_dir_name),
os.path.join(target_dir, file_dir_name),
)
def load_wavs(dataset: str, sr):
"""
data dict contains all audios file path &
resdict contains all wav files
"""
data = {}
with os.scandir(dataset) as it:
for entry in it:
if entry.is_dir():
data[entry.name] = []
with os.scandir(entry.path) as it_f:
for onefile in it_f:
if onefile.is_file():
data[entry.name].append(onefile.path)
print(f"loaded keys: {data.keys()}")
resdict = {}
cnt = 0
for key, value in data.items():
resdict[key] = {}
for one_file in value:
filename = one_file.split("/")[-1].split(".")[0]
newkey = f"{filename}"
wav, _ = librosa.load(one_file, sr=sr, mono=True, dtype=np.float64)
y, _ = librosa.effects.trim(wav, top_db=15)
wav = np.append(y[0], y[1:] - 0.97 * y[:-1])
resdict[key][newkey] = wav
print(".", end="")
cnt += 1
print(f"\nTotal {cnt} aduio files!")
return resdict
def chunks(iterable, size):
"""Yield successive n-sized chunks from iterable."""
for i in range(0, len(iterable), size):
yield iterable[i : i + size]
def wav_to_mcep_file(
dataset: str, sr=SAMPLE_RATE, processed_filepath: str = "./data/processed"
):
"""convert wavs to mcep feature using image repr"""
shutil.rmtree(processed_filepath)
os.makedirs(processed_filepath, exist_ok=True)
allwavs_cnt = len(glob.glob(f"{dataset}/*/*.wav"))
print(f"Total {allwavs_cnt} audio files!")
d = load_wavs(dataset, sr)
for one_speaker in d.keys():
values_of_one_speaker = list(d[one_speaker].values())
for index, one_chunk in enumerate(chunks(values_of_one_speaker, CHUNK_SIZE)):
wav_concated = []
temp = one_chunk.copy()
            # concatenate wavs
for one in temp:
wav_concated.extend(one)
wav_concated = np.array(wav_concated)
# process one batch of wavs
f0, ap, sp, coded_sp = cal_mcep(wav_concated, sr=sr, dim=FEATURE_DIM)
newname = f"{one_speaker}_{index}"
file_path_z = os.path.join(processed_filepath, newname)
np.savez(file_path_z, f0=f0, coded_sp=coded_sp)
print(f"[save]: {file_path_z}")
            # split mcep into multiple files
for start_idx in range(0, coded_sp.shape[1] - FRAMES + 1, FRAMES):
one_audio_seg = coded_sp[:, start_idx : start_idx + FRAMES]
if one_audio_seg.shape[1] == FRAMES:
temp_name = f"{newname}_{start_idx}"
filePath = os.path.join(processed_filepath, temp_name)
np.save(filePath, one_audio_seg)
print(f"[save]: {filePath}.npy")
def world_features(wav, sr, fft_size, dim):
f0, timeaxis = pyworld.harvest(wav, sr)
sp = pyworld.cheaptrick(wav, f0, timeaxis, sr, fft_size=fft_size)
ap = pyworld.d4c(wav, f0, timeaxis, sr, fft_size=fft_size)
coded_sp = pyworld.code_spectral_envelope(sp, sr, dim)
return f0, timeaxis, sp, ap, coded_sp
def cal_mcep(wav, sr=SAMPLE_RATE, dim=FEATURE_DIM, fft_size=FFTSIZE):
"""cal mcep given wav singnal
the frame_period used only for pad_wav_to_get_fixed_frames
"""
f0, timeaxis, sp, ap, coded_sp = world_features(wav, sr, fft_size, dim)
coded_sp = coded_sp.T # dim x n
return f0, ap, sp, coded_sp
if __name__ == "__main__":
start = datetime.now()
parser = argparse.ArgumentParser(
description="Convert the wav waveform to mel-cepstral coefficients(MCCs)\
and calculate the speech statistical characteristics"
)
parser.add_argument(
"--data_files",
type=list,
help="original datasets",
default=["vcc2016_training.zip", "evaluation_all.zip"],
)
parser.add_argument(
"--train_dir", type=str, help="trainset directory", default="./data/speakers"
)
parser.add_argument(
"--test_dir", type=str, help="testset directory", default="./data/speakers_test"
)
parser.add_argument(
"--output_dir",
type=str,
help="the directory stores the processed data",
default="./data/processed",
)
parser.add_argument(
"--speaker_ids",
type=list,
default=["SF1", "SF2", "TM1", "TM2"],
help="Source speaker id from VCC2016.",
)
argv = parser.parse_args()
data_files = argv.data_files
train_dir = argv.train_dir
test_dir = argv.test_dir
output_dir = argv.output_dir
speaker_ids = argv.speaker_ids
os.makedirs(train_dir, exist_ok=True)
os.makedirs(test_dir, exist_ok=True)
os.makedirs(output_dir, exist_ok=True)
unzip(data_files[0])
unzip(data_files[1])
copy_files("./data/vcc2016_training", train_dir, speaker_ids)
copy_files("./data/evaluation_all", test_dir, speaker_ids)
wav_to_mcep_file(train_dir, SAMPLE_RATE, processed_filepath=output_dir)
# input_dir is train dataset. we need to calculate and save the speech statistical characteristics for each speaker.
from utility import *
generator = GenerateStatistics(output_dir)
generator.generate_stats()
generator.normalize_dataset()
end = datetime.now()
print(f"[Runing Time]: {end-start}")
| <filename>Audio/StarGAN-VC/utils/preprocess.py
import os
import glob
import shutil
import argparse
import zipfile
from datetime import datetime
import librosa
import numpy as np
import pyworld
FEATURE_DIM = 36
SAMPLE_RATE = 16000
FRAMES = 512
FFTSIZE = 1024
CHUNK_SIZE = 1
EPSILON = 1e-10
MODEL_NAME = "starganvc_model"
def unzip(zip_filepath, dest_dir="./data"):
with zipfile.ZipFile(zip_filepath) as zf:
zf.extractall(dest_dir)
print("Extraction complete!")
def copy_files(source_dir, target_dir, file_dir_name_list):
for file_dir_name in file_dir_name_list:
if os.path.exists(os.path.join(target_dir, file_dir_name)):
continue
shutil.copytree(
os.path.join(source_dir, file_dir_name),
os.path.join(target_dir, file_dir_name),
)
def load_wavs(dataset: str, sr):
"""
data dict contains all audios file path &
resdict contains all wav files
"""
data = {}
with os.scandir(dataset) as it:
for entry in it:
if entry.is_dir():
data[entry.name] = []
with os.scandir(entry.path) as it_f:
for onefile in it_f:
if onefile.is_file():
data[entry.name].append(onefile.path)
print(f"loaded keys: {data.keys()}")
resdict = {}
cnt = 0
for key, value in data.items():
resdict[key] = {}
for one_file in value:
filename = one_file.split("/")[-1].split(".")[0]
newkey = f"{filename}"
wav, _ = librosa.load(one_file, sr=sr, mono=True, dtype=np.float64)
y, _ = librosa.effects.trim(wav, top_db=15)
wav = np.append(y[0], y[1:] - 0.97 * y[:-1])
resdict[key][newkey] = wav
print(".", end="")
cnt += 1
print(f"\nTotal {cnt} aduio files!")
return resdict
def chunks(iterable, size):
"""Yield successive n-sized chunks from iterable."""
for i in range(0, len(iterable), size):
yield iterable[i : i + size]
def wav_to_mcep_file(
dataset: str, sr=SAMPLE_RATE, processed_filepath: str = "./data/processed"
):
"""convert wavs to mcep feature using image repr"""
shutil.rmtree(processed_filepath)
os.makedirs(processed_filepath, exist_ok=True)
allwavs_cnt = len(glob.glob(f"{dataset}/*/*.wav"))
print(f"Total {allwavs_cnt} audio files!")
d = load_wavs(dataset, sr)
for one_speaker in d.keys():
values_of_one_speaker = list(d[one_speaker].values())
for index, one_chunk in enumerate(chunks(values_of_one_speaker, CHUNK_SIZE)):
wav_concated = []
temp = one_chunk.copy()
            # concatenate wavs
for one in temp:
wav_concated.extend(one)
wav_concated = np.array(wav_concated)
# process one batch of wavs
f0, ap, sp, coded_sp = cal_mcep(wav_concated, sr=sr, dim=FEATURE_DIM)
newname = f"{one_speaker}_{index}"
file_path_z = os.path.join(processed_filepath, newname)
np.savez(file_path_z, f0=f0, coded_sp=coded_sp)
print(f"[save]: {file_path_z}")
            # split mcep into multiple files
for start_idx in range(0, coded_sp.shape[1] - FRAMES + 1, FRAMES):
one_audio_seg = coded_sp[:, start_idx : start_idx + FRAMES]
if one_audio_seg.shape[1] == FRAMES:
temp_name = f"{newname}_{start_idx}"
filePath = os.path.join(processed_filepath, temp_name)
np.save(filePath, one_audio_seg)
print(f"[save]: {filePath}.npy")
def world_features(wav, sr, fft_size, dim):
f0, timeaxis = pyworld.harvest(wav, sr)
sp = pyworld.cheaptrick(wav, f0, timeaxis, sr, fft_size=fft_size)
ap = pyworld.d4c(wav, f0, timeaxis, sr, fft_size=fft_size)
coded_sp = pyworld.code_spectral_envelope(sp, sr, dim)
return f0, timeaxis, sp, ap, coded_sp
def cal_mcep(wav, sr=SAMPLE_RATE, dim=FEATURE_DIM, fft_size=FFTSIZE):
"""cal mcep given wav singnal
the frame_period used only for pad_wav_to_get_fixed_frames
"""
f0, timeaxis, sp, ap, coded_sp = world_features(wav, sr, fft_size, dim)
coded_sp = coded_sp.T # dim x n
return f0, ap, sp, coded_sp
if __name__ == "__main__":
start = datetime.now()
parser = argparse.ArgumentParser(
description="Convert the wav waveform to mel-cepstral coefficients(MCCs)\
and calculate the speech statistical characteristics"
)
parser.add_argument(
"--data_files",
type=list,
help="original datasets",
default=["vcc2016_training.zip", "evaluation_all.zip"],
)
parser.add_argument(
"--train_dir", type=str, help="trainset directory", default="./data/speakers"
)
parser.add_argument(
"--test_dir", type=str, help="testset directory", default="./data/speakers_test"
)
parser.add_argument(
"--output_dir",
type=str,
help="the directory stores the processed data",
default="./data/processed",
)
parser.add_argument(
"--speaker_ids",
type=list,
default=["SF1", "SF2", "TM1", "TM2"],
help="Source speaker id from VCC2016.",
)
argv = parser.parse_args()
data_files = argv.data_files
train_dir = argv.train_dir
test_dir = argv.test_dir
output_dir = argv.output_dir
speaker_ids = argv.speaker_ids
os.makedirs(train_dir, exist_ok=True)
os.makedirs(test_dir, exist_ok=True)
os.makedirs(output_dir, exist_ok=True)
unzip(data_files[0])
unzip(data_files[1])
copy_files("./data/vcc2016_training", train_dir, speaker_ids)
copy_files("./data/evaluation_all", test_dir, speaker_ids)
wav_to_mcep_file(train_dir, SAMPLE_RATE, processed_filepath=output_dir)
# input_dir is train dataset. we need to calculate and save the speech statistical characteristics for each speaker.
from utility import *
generator = GenerateStatistics(output_dir)
generator.generate_stats()
generator.normalize_dataset()
end = datetime.now()
print(f"[Runing Time]: {end-start}")
| en | 0.733613 | data dict contains all audios file path & resdict contains all wav files Yield successive n-sized chunks from iterable. convert wavs to mcep feature using image repr # concate wavs # process one batch of wavs # split mcep t0 muliti files cal mcep given wav singnal the frame_period used only for pad_wav_to_get_fixed_frames # dim x n # input_dir is train dataset. we need to calculate and save the speech statistical characteristics for each speaker. | 2.360999 | 2 |
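# Aside: chunks() above controls how utterances are grouped before feature extraction
# (with CHUNK_SIZE = 1 every chunk holds a single wav). Its slicing behaviour can be
# checked on its own, without librosa or pyworld installed:
def chunks(iterable, size):
    for i in range(0, len(iterable), size):
        yield iterable[i : i + size]
fake_wavs = [[0.1], [0.2], [0.3], [0.4], [0.5], [0.6], [0.7]]
print(list(chunks(fake_wavs, 3)))  # three chunks, the last one shorter
print(list(chunks(fake_wavs, 1)))  # CHUNK_SIZE = 1 -> one wav per chunk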
nyuki/workflow/db/metadata.py | surycat/nyuki-legacy | 8 | 6631098 | <reponame>surycat/nyuki-legacy
import logging
from pymongo import ReturnDocument
from .utils.indexes import check_index_names
log = logging.getLogger(__name__)
class MetadataCollection:
"""
{
"workflow_template_id": <uuid4>,
"title": <str>,
"tags": [<str>]
}
"""
def __init__(self, db):
self._metadata = db['workflow_metadata']
async def index(self):
await check_index_names(self._metadata, ['unique_workflow_template_id'])
await self._metadata.create_index(
'workflow_template_id',
unique=True,
name='unique_workflow_template_id',
)
async def get_one(self, tid):
"""
Return metadata for one template.
"""
return await self._metadata.find_one(
{'workflow_template_id': tid},
{'_id': 0, 'workflow_template_id': 0},
)
async def insert(self, metadata):
"""
Insert new metadata for a template.
"""
await self._metadata.insert_one(metadata)
del metadata['_id']
return metadata
async def update(self, tid, metadata):
"""
Update and return the updated metadata.
"""
return await self._metadata.find_one_and_update(
{'workflow_template_id': tid},
{'$set': {
key: value
for key, value in metadata.items()
if value is not None
}},
projection={'_id': 0},
return_document=ReturnDocument.AFTER,
)
async def delete(self, tid):
"""
Delete metadata for one template.
"""
await self._metadata.delete_one({'workflow_template_id': tid})
| import logging
from pymongo import ReturnDocument
from .utils.indexes import check_index_names
log = logging.getLogger(__name__)
class MetadataCollection:
"""
{
"workflow_template_id": <uuid4>,
"title": <str>,
"tags": [<str>]
}
"""
def __init__(self, db):
self._metadata = db['workflow_metadata']
async def index(self):
await check_index_names(self._metadata, ['unique_workflow_template_id'])
await self._metadata.create_index(
'workflow_template_id',
unique=True,
name='unique_workflow_template_id',
)
async def get_one(self, tid):
"""
Return metadata for one template.
"""
return await self._metadata.find_one(
{'workflow_template_id': tid},
{'_id': 0, 'workflow_template_id': 0},
)
async def insert(self, metadata):
"""
Insert new metadata for a template.
"""
await self._metadata.insert_one(metadata)
del metadata['_id']
return metadata
async def update(self, tid, metadata):
"""
Update and return the updated metadata.
"""
return await self._metadata.find_one_and_update(
{'workflow_template_id': tid},
{'$set': {
key: value
for key, value in metadata.items()
if value is not None
}},
projection={'_id': 0},
return_document=ReturnDocument.AFTER,
)
async def delete(self, tid):
"""
Delete metadata for one template.
"""
await self._metadata.delete_one({'workflow_template_id': tid}) | en | 0.134314 | { "workflow_template_id": <uuid4>, "title": <str>, "tags": [<str>] } Return metadata for one template. Insert new metadata for a template. Update and return the updated metadata. Delete metadata for one template. | 2.431939 | 2 |
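# Aside: MetadataCollection.update above drops None values so that omitted fields are
# not overwritten in Mongo. The filtering step itself works without a database:
metadata = {"title": "New title", "tags": None}  # 'tags' not supplied by the caller
set_document = {"$set": {k: v for k, v in metadata.items() if v is not None}}
print(set_document)  # {'$set': {'title': 'New title'}} -- 'tags' stays untouched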
codes_/0101_Symmetric_Tree.py | SaitoTsutomu/leetcode | 0 | 6631099 | <gh_stars>0
# %% [101. Symmetric Tree](https://leetcode.com/problems/symmetric-tree/)
class Solution:
def isSymmetric(self, root: TreeNode) -> bool:
return isSymmetric(root, root)
def isSymmetric(left, right):
if left and right:
return (
left.val == right.val
and isSymmetric(left.left, right.right)
and isSymmetric(left.right, right.left)
)
return left is right
| # %% [101. Symmetric Tree](https://leetcode.com/problems/symmetric-tree/)
class Solution:
def isSymmetric(self, root: TreeNode) -> bool:
return isSymmetric(root, root)
def isSymmetric(left, right):
if left and right:
return (
left.val == right.val
and isSymmetric(left.left, right.right)
and isSymmetric(left.right, right.left)
)
return left is right | en | 0.707448 | # %% [101. Symmetric Tree](https://leetcode.com/problems/symmetric-tree/) | 3.775927 | 4 |
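# Aside: LeetCode supplies TreeNode, which is not shown above; the stand-in below is
# only for exercising the same mirror check on a symmetric and an asymmetric tree.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
def is_mirror(left, right):
    if left and right:
        return (left.val == right.val
                and is_mirror(left.left, right.right)
                and is_mirror(left.right, right.left))
    return left is right
symmetric = TreeNode(1, TreeNode(2, TreeNode(3), TreeNode(4)), TreeNode(2, TreeNode(4), TreeNode(3)))
lopsided = TreeNode(1, TreeNode(2, None, TreeNode(3)), TreeNode(2, None, TreeNode(3)))
print(is_mirror(symmetric, symmetric))  # True
print(is_mirror(lopsided, lopsided))    # False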
test/functional/s3api/__init__.py | fossabot/swift-1 | 1 | 6631100 | <gh_stars>1-10
# Copyright (c) 2011-2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import traceback
import test.functional as tf
from test.functional.s3api.s3_test_client import (
Connection, get_boto3_conn, tear_down_s3)
def setUpModule():
tf.setup_package()
def tearDownModule():
tf.teardown_package()
class S3ApiBase(unittest.TestCase):
def __init__(self, method_name):
super(S3ApiBase, self).__init__(method_name)
self.method_name = method_name
def setUp(self):
if 's3api' not in tf.cluster_info:
raise tf.SkipTest('s3api middleware is not enabled')
try:
self.conn = Connection()
self.conn.reset()
except Exception:
message = '%s got an error during initialize process.\n\n%s' % \
(self.method_name, traceback.format_exc())
# TODO: Find a way to make this go to FAIL instead of Error
self.fail(message)
def assertCommonResponseHeaders(self, headers, etag=None):
"""
asserting common response headers with args
:param headers: a dict of response headers
:param etag: a string of md5(content).hexdigest() if not given,
this won't assert anything about etag. (e.g. DELETE obj)
"""
self.assertTrue(headers['x-amz-id-2'] is not None)
self.assertTrue(headers['x-amz-request-id'] is not None)
self.assertTrue(headers['date'] is not None)
# TODO; requires consideration
# self.assertTrue(headers['server'] is not None)
if etag is not None:
self.assertTrue('etag' in headers) # sanity
self.assertEqual(etag, headers['etag'].strip('"'))
class S3ApiBaseBoto3(S3ApiBase):
def setUp(self):
if 's3api' not in tf.cluster_info:
raise tf.SkipTest('s3api middleware is not enabled')
try:
self.conn = get_boto3_conn()
self.endpoint_url = self.conn._endpoint.host
self.access_key = self.conn._request_signer._credentials.access_key
self.region = self.conn._client_config.region_name
tear_down_s3(self.conn)
except Exception:
message = '%s got an error during initialize process.\n\n%s' % \
(self.method_name, traceback.format_exc())
# TODO: Find a way to make this go to FAIL instead of Error
self.fail(message)
def tearDown(self):
tear_down_s3(self.conn)
| # Copyright (c) 2011-2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import traceback
import test.functional as tf
from test.functional.s3api.s3_test_client import (
Connection, get_boto3_conn, tear_down_s3)
def setUpModule():
tf.setup_package()
def tearDownModule():
tf.teardown_package()
class S3ApiBase(unittest.TestCase):
def __init__(self, method_name):
super(S3ApiBase, self).__init__(method_name)
self.method_name = method_name
def setUp(self):
if 's3api' not in tf.cluster_info:
raise tf.SkipTest('s3api middleware is not enabled')
try:
self.conn = Connection()
self.conn.reset()
except Exception:
message = '%s got an error during initialize process.\n\n%s' % \
(self.method_name, traceback.format_exc())
# TODO: Find a way to make this go to FAIL instead of Error
self.fail(message)
def assertCommonResponseHeaders(self, headers, etag=None):
"""
asserting common response headers with args
:param headers: a dict of response headers
:param etag: a string of md5(content).hexdigest() if not given,
this won't assert anything about etag. (e.g. DELETE obj)
"""
self.assertTrue(headers['x-amz-id-2'] is not None)
self.assertTrue(headers['x-amz-request-id'] is not None)
self.assertTrue(headers['date'] is not None)
# TODO; requires consideration
# self.assertTrue(headers['server'] is not None)
if etag is not None:
self.assertTrue('etag' in headers) # sanity
self.assertEqual(etag, headers['etag'].strip('"'))
class S3ApiBaseBoto3(S3ApiBase):
def setUp(self):
if 's3api' not in tf.cluster_info:
raise tf.SkipTest('s3api middleware is not enabled')
try:
self.conn = get_boto3_conn()
self.endpoint_url = self.conn._endpoint.host
self.access_key = self.conn._request_signer._credentials.access_key
self.region = self.conn._client_config.region_name
tear_down_s3(self.conn)
except Exception:
message = '%s got an error during initialize process.\n\n%s' % \
(self.method_name, traceback.format_exc())
# TODO: Find a way to make this go to FAIL instead of Error
self.fail(message)
def tearDown(self):
tear_down_s3(self.conn) | en | 0.782809 | # Copyright (c) 2011-2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # TODO: Find a way to make this go to FAIL instead of Error asserting common response headers with args :param headers: a dict of response headers :param etag: a string of md5(content).hexdigest() if not given, this won't assert anything about etag. (e.g. DELETE obj) # TODO; requires consideration # self.assertTrue(headers['server'] is not None) # sanity # TODO: Find a way to make this go to FAIL instead of Error | 1.883843 | 2 |
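# Aside: the etag argument to assertCommonResponseHeaders above is documented as
# md5(content).hexdigest(); the convention on its own, with made-up header values
# standing in for a real S3-compatible response:
import hashlib
content = b"hello swift"
expected_etag = hashlib.md5(content).hexdigest()
headers = {
    "x-amz-id-2": "some-opaque-id",
    "x-amz-request-id": "tx0000-example",
    "date": "Mon, 01 Jan 2024 00:00:00 GMT",
    "etag": '"%s"' % expected_etag,
}
assert headers["etag"].strip('"') == expected_etag
print(expected_etag)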
format.py | cobhan/python-problems | 0 | 6631101 | <reponame>cobhan/python-problems
for i in range(1,11):
print('{:2d} {:3d} {:4d} {:5d}'.format(i, i**2, i**3, i**4))
a = 5
print(f"The value of a is {a} and a+1 is {a+1}.")
for i in range(1,11):
print(f'{i} {i**2} {i**3} {i**4}') | for i in range(1,11):
print('{:2d} {:3d} {:4d} {:5d}'.format(i, i**2, i**3, i**4))
a = 5
print(f"The value of a is {a} and a+1 is {a+1}.")
for i in range(1,11):
print(f'{i} {i**2} {i**3} {i**4}') | none | 1 | 4.020288 | 4 |
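# Aside: the two loops above show width specifiers with str.format and plain f-strings
# separately; f-strings accept the same format spec after a colon, combining the two:
for i in range(1, 11):
    print(f"{i:2d} {i**2:3d} {i**3:4d} {i**4:5d}")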
|
apps/admin/urls.py | x-utest/server-api | 20 | 6631102 | <filename>apps/admin/urls.py
"""
Operations and super-admin backend
Main features:
1. Viewing registered users
2. Various activity logs
"""
from apps.admin.handlers import *
from dtlib import filetool
app_path = filetool.get_parent_folder_name(__file__) # set the relative path in one place
url = [
    # User information
    # (r"/%s/get-all-user-info/" % app_path, ListUserInfo),  # get basic info of all registered users
    # (r"/%s/delete-user/" % app_path, DeleteUser),  # delete a user
    # (r"/%s/online-user/" % app_path, OnlineUser),  # online users
    # (r"/%s/get-user-counts/" % app_path, GetUserCounts),  # get statistics about registered users
    # Organization information
    # (r"/%s/get-all-org-info/" % app_path,),  # get info on all organizations
    # (r"/%s/audit-org/" % app_path,),  # audit an organization
    # (r"/%s/freeze-org/" % app_path,),  # freeze an organization
    # (r"/%s/set-user-active-status/" % app_path, SetUserActiveStatus),  # set a user's active status
    #
    # # API calls
    # (r"/%s/read-api-call-counts/" % app_path, ListApiCallCount),  # number of API calls
    #
    # # User activity logs
    # (r"/%s/get-log-history/" % app_path, GetLogHistory),
    #
    # # Logs the super admin uses to monitor the whole site
    # (r"/%s/all-login-his/" % app_path, LoginHistory),  # user logins
    # (r"/%s/all-api-calls-his/" % app_path, ApiCallsHistory),  # API calls (non-automated ones only)
    # Automated API calls are high-frequency and need a separate statistics deployment
    (r"/%s/list-organization/" % app_path, ListOrganization),  # todo paginated list of all organizations; an admin-level endpoint
    # Three-level permission control
    # TODO log data for admins to monitor their own organization
    # todo log data for users to monitor their own activity
    (r"/%s/reset-admin-password/" % app_path, ResetAdminPassword),  # reset the admin user's password using super_token
    (r"/%s/get-user-list/" % app_path, GetUserList),  # get the user list
    (r"/%s/add-user/" % app_path, AddUser),  # add a user
    (r"/%s/delete-user/" % app_path, DeleteUser),  # delete a user
    (r"/%s/lock-user/" % app_path, LockUser),  # lock/unlock a user
]
| <filename>apps/admin/urls.py
"""
Operations and super-admin backend
Main features:
1. Viewing registered users
2. Various activity logs
"""
from apps.admin.handlers import *
from dtlib import filetool
app_path = filetool.get_parent_folder_name(__file__) # set the relative path in one place
url = [
    # User information
    # (r"/%s/get-all-user-info/" % app_path, ListUserInfo),  # get basic info of all registered users
    # (r"/%s/delete-user/" % app_path, DeleteUser),  # delete a user
    # (r"/%s/online-user/" % app_path, OnlineUser),  # online users
    # (r"/%s/get-user-counts/" % app_path, GetUserCounts),  # get statistics about registered users
    # Organization information
    # (r"/%s/get-all-org-info/" % app_path,),  # get info on all organizations
    # (r"/%s/audit-org/" % app_path,),  # audit an organization
    # (r"/%s/freeze-org/" % app_path,),  # freeze an organization
    # (r"/%s/set-user-active-status/" % app_path, SetUserActiveStatus),  # set a user's active status
    #
    # # API calls
    # (r"/%s/read-api-call-counts/" % app_path, ListApiCallCount),  # number of API calls
    #
    # # User activity logs
    # (r"/%s/get-log-history/" % app_path, GetLogHistory),
    #
    # # Logs the super admin uses to monitor the whole site
    # (r"/%s/all-login-his/" % app_path, LoginHistory),  # user logins
    # (r"/%s/all-api-calls-his/" % app_path, ApiCallsHistory),  # API calls (non-automated ones only)
    # Automated API calls are high-frequency and need a separate statistics deployment
    (r"/%s/list-organization/" % app_path, ListOrganization),  # todo paginated list of all organizations; an admin-level endpoint
    # Three-level permission control
    # TODO log data for admins to monitor their own organization
    # todo log data for users to monitor their own activity
    (r"/%s/reset-admin-password/" % app_path, ResetAdminPassword),  # reset the admin user's password using super_token
    (r"/%s/get-user-list/" % app_path, GetUserList),  # get the user list
    (r"/%s/add-user/" % app_path, AddUser),  # add a user
    (r"/%s/delete-user/" % app_path, DeleteUser),  # delete a user
    (r"/%s/lock-user/" % app_path, LockUser),  # lock/unlock a user
]
| zh | 0.651349 | 运管和超管后台 主要包含的功能: 1. 注册用户的查看 2. 各种活动日志 # set the relative path in one place # 用户信息 # (r"/%s/get-all-user-info/" % app_path, ListUserInfo), # 获取所有的注册用户的基本信息 # (r"/%s/delete-user/" % app_path, DeleteUser), # 删除用户 # (r"/%s/online-user/" % app_path, OnlineUser), # 在线用户 # (r"/%s/get-user-counts/" % app_path, GetUserCounts), # 获取注册用户的统计信息 # 组织信息 # (r"/%s/get-all-org-info/" % app_path,), # 获取所有组织信息 # (r"/%s/audit-org/" % app_path,), # 审核组织 # (r"/%s/freeze-org/" % app_path,),#冻结组织 # (r"/%s/set-user-active-status/" % app_path, SetUserActiveStatus), # 设置用户的激活状态 # # # 接口调用 # (r"/%s/read-api-call-counts/" % app_path, ListApiCallCount), # api调用次数 # # # 用户活动日志 # (r"/%s/get-log-history/" % app_path, GetLogHistory), # # # 超级管理员全网用于监控的日志 # (r"/%s/all-login-his/" % app_path, LoginHistory), # 用户登录 # (r"/%s/all-api-calls-his/" % app_path, ApiCallsHistory), # 接口调用。非自动化的接口调用 # 自动化的接口调用是高频的,要用单独的统计系统来部署 # todo 分页列表,这个是所有的组织,属于管理人员的接口 # 分三层权限控制 # TODO 管理员用于监控自己企业的日志数据 # todo 用户监控自己的日志数据 # 使用 super_token 重置 admin 用户的密码 # 获取用户列表 # 添加用户 # 删除用户 # 锁定/解锁用户 | 2.213818 | 2 |
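# Aside: dtlib's filetool.get_parent_folder_name is not shown in this record. Assuming
# it returns the name of the file's parent directory (so app_path becomes "admin"), the
# route templates expand as below; the helper here is a guess, not the dtlib code.
from pathlib import Path
def get_parent_folder_name(file_path):
    return Path(file_path).resolve().parent.name
app_path = get_parent_folder_name("/project/apps/admin/urls.py")
print(r"/%s/add-user/" % app_path)   # -> /admin/add-user/
print(r"/%s/lock-user/" % app_path)  # -> /admin/lock-user/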
code/python_call_screening/python_call_screening.py | niravcodes/signalwire-guides | 10 | 6631103 | from flask import Flask, request
from signalwire.voice_response import VoiceResponse
import os
app = Flask(__name__)
def get_blocklist():
# there is a default here you can change if you don't want to use the environment variable
return os.getenv('BLOCKLIST', '+1555778899').split(',')
@app.route('/check', methods=['POST'])
def check_number():
response = VoiceResponse()
from_number = request.form.get('From')
if from_number not in get_blocklist():
response.redirect(os.environ.get('REDIRECT_PATH', 'https://example.signalwire.com/laml-bins/55ab7685-e9c3-4449-b1f0-07ff083d041e'))
else:
response.hangup()
return response.to_xml()
if __name__ == "__main__":
app.run()
| from flask import Flask, request
from signalwire.voice_response import VoiceResponse
import os
app = Flask(__name__)
def get_blocklist():
# there is a default here you can change if you don't want to use the environment variable
return os.getenv('BLOCKLIST', '+1555778899').split(',')
@app.route('/check', methods=['POST'])
def check_number():
response = VoiceResponse()
from_number = request.form.get('From')
if from_number not in get_blocklist():
response.redirect(os.environ.get('REDIRECT_PATH', 'https://example.signalwire.com/laml-bins/55ab7685-e9c3-4449-b1f0-07ff083d041e'))
else:
response.hangup()
return response.to_xml()
if __name__ == "__main__":
app.run()
| en | 0.930185 | # there is a default here you can change if you don't want to use the environment variable | 2.356086 | 2 |
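# Aside: get_blocklist() above splits the BLOCKLIST environment variable on commas, so
# it must hold bare numbers separated by commas without spaces. The parsing alone runs
# without Flask or SignalWire installed:
import os
os.environ["BLOCKLIST"] = "+15551112222,+15553334444"
blocklist = os.getenv("BLOCKLIST", "+1555778899").split(",")
print(blocklist)                    # ['+15551112222', '+15553334444']
print("+15551112222" in blocklist)  # True  -> the call would be hung up
print("+15559990000" in blocklist)  # False -> redirected to the LaML bin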
libqtile/backend/wayland/wlrq.py | duxedo/qtile | 1 | 6631104 | <filename>libqtile/backend/wayland/wlrq.py<gh_stars>1-10
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
import functools
import operator
from typing import TYPE_CHECKING
import cairocffi
from pywayland.server import Listener
from wlroots.wlr_types import Texture
from wlroots.wlr_types.keyboard import KeyboardModifier
from wlroots.wlr_types.pointer_constraints_v1 import (
PointerConstraintV1,
PointerConstraintV1StateField,
)
from wlroots.wlr_types.xdg_shell import XdgSurface
from libqtile.backend.base import Internal
from libqtile.log_utils import logger
from libqtile.utils import QtileError
if TYPE_CHECKING:
from typing import Callable, List, Optional, Set
from pywayland.server import Signal
from wlroots.wlr_types import Box, data_device_manager
from libqtile.backend.wayland.core import Core
from libqtile.backend.wayland.output import Output
from libqtile.backend.wayland.window import WindowType
class WlrQError(QtileError):
pass
ModMasks = {
"shift": KeyboardModifier.SHIFT,
"lock": KeyboardModifier.CAPS,
"control": KeyboardModifier.CTRL,
"mod1": KeyboardModifier.ALT,
"mod2": KeyboardModifier.MOD2,
"mod3": KeyboardModifier.MOD3,
"mod4": KeyboardModifier.LOGO,
"mod5": KeyboardModifier.MOD5,
}
# from linux/input-event-codes.h
_KEY_MAX = 0x2FF
# These are mouse buttons 1-9
BTN_LEFT = 0x110
BTN_MIDDLE = 0x112
BTN_RIGHT = 0x111
SCROLL_UP = _KEY_MAX + 1
SCROLL_DOWN = _KEY_MAX + 2
SCROLL_LEFT = _KEY_MAX + 3
SCROLL_RIGHT = _KEY_MAX + 4
BTN_SIDE = 0x113
BTN_EXTRA = 0x114
buttons = [
BTN_LEFT,
BTN_MIDDLE,
BTN_RIGHT,
SCROLL_UP,
SCROLL_DOWN,
SCROLL_LEFT,
SCROLL_RIGHT,
BTN_SIDE,
BTN_EXTRA,
]
# from drm_fourcc.h
DRM_FORMAT_ARGB8888 = 875713089
def translate_masks(modifiers: List[str]) -> int:
"""
Translate a modifier mask specified as a list of strings into an or-ed
bit representation.
"""
masks = []
for i in modifiers:
try:
masks.append(ModMasks[i])
except KeyError as e:
raise WlrQError("Unknown modifier: %s" % i) from e
if masks:
return functools.reduce(operator.or_, masks)
else:
return 0
class Painter:
def __init__(self, core):
self.core = core
def paint(self, screen, image_path, mode=None):
try:
with open(image_path, "rb") as f:
image, _ = cairocffi.pixbuf.decode_to_image_surface(f.read())
except IOError as e:
logger.error("Wallpaper: %s" % e)
return
surface = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, screen.width, screen.height)
with cairocffi.Context(surface) as context:
if mode == "fill":
context.rectangle(0, 0, screen.width, screen.height)
context.clip()
image_w = image.get_width()
image_h = image.get_height()
width_ratio = screen.width / image_w
if width_ratio * image_h >= screen.height:
context.scale(width_ratio)
else:
height_ratio = screen.height / image_h
context.translate(-(image_w * height_ratio - screen.width) // 2, 0)
context.scale(height_ratio)
elif mode == "stretch":
context.scale(
sx=screen.width / image.get_width(),
sy=screen.height / image.get_height(),
)
context.set_source_surface(image)
context.paint()
stride = surface.format_stride_for_width(cairocffi.FORMAT_ARGB32, screen.width)
surface.flush()
texture = Texture.from_pixels(
self.core.renderer,
DRM_FORMAT_ARGB8888,
stride,
screen.width,
screen.height,
cairocffi.cairo.cairo_image_surface_get_data(surface._pointer),
)
outputs = [output for output in self.core.outputs if output.wlr_output.enabled]
outputs[screen.index].wallpaper = texture
class HasListeners:
"""
Classes can subclass this to get some convenience handlers around
`pywayland.server.Listener`.
This guarantees that all listeners that set up and then removed in reverse order.
"""
def add_listener(self, event: Signal, callback: Callable):
if not hasattr(self, "_listeners"):
self._listeners = []
listener = Listener(callback)
event.add(listener)
self._listeners.append(listener)
def finalize_listeners(self):
for listener in reversed(self._listeners):
listener.remove()
class PointerConstraint(HasListeners):
"""
A small object to listen to signals on `struct wlr_pointer_constraint_v1` instances.
"""
rect: Box
def __init__(self, core: Core, wlr_constraint: PointerConstraintV1):
self.core = core
self.wlr_constraint = wlr_constraint
self.window: Optional[WindowType] = None
self._warp_target = (0, 0)
self._needs_warp = False
self.add_listener(wlr_constraint.set_region_event, self._on_set_region)
self.add_listener(wlr_constraint.destroy_event, self._on_destroy)
self._get_window()
def _get_window(self):
for win in self.core.qtile.windows_map.values():
if not isinstance(win, Internal) and isinstance(win.surface, XdgSurface):
if win.surface.surface == self.wlr_constraint.surface:
break
else:
self.finalize()
self.window = win
def finalize(self):
if self.core.active_pointer_constraint is self:
self.disable()
self.finalize_listeners()
self.core.pointer_constraints.remove(self)
def _on_set_region(self, _listener, _data):
logger.debug("Signal: wlr_pointer_constraint_v1 set_region")
self._get_region()
def _on_destroy(self, _listener, wlr_constraint: PointerConstraintV1):
logger.debug("Signal: wlr_pointer_constraint_v1 destroy")
self.finalize()
def _on_commit(self, _listener, _data):
if self._needs_warp:
# Warp in case the pointer is not inside the rect
if not self.rect.contains_point(self.cursor.x, self.cursor.y):
self.core.warp_pointer(*self._warp_target)
self._needs_warp = False
def _get_region(self):
rect = self.wlr_constraint.region.rectangles_as_boxes()[0]
rect.x += self.window.x + self.window.borderwidth
rect.y += self.window.y + self.window.borderwidth
self._warp_target = (rect.x + rect.width / 2, rect.y + rect.height / 2)
self.rect = rect
self._needs_warp = True
def enable(self):
logger.debug("Enabling pointer constraints.")
self.core.active_pointer_constraint = self
self._get_region()
self.add_listener(self.wlr_constraint.surface.commit_event, self._on_commit)
self.wlr_constraint.send_activated()
def disable(self):
logger.debug("Disabling pointer constraints.")
if self.wlr_constraint.current.committed & PointerConstraintV1StateField.CURSOR_HINT:
x, y = self.wlr_constraint.current.cursor_hint
self.core.warp_pointer(x + self.window.x, y + self.window.y)
self.core.active_pointer_constraint = None
self.wlr_constraint.send_deactivated()
class Dnd(HasListeners):
"""A helper for drag and drop functionality."""
def __init__(self, core: Core, wlr_drag: data_device_manager.Drag):
self.core = core
self.wlr_drag = wlr_drag
self._outputs: Set[Output] = set()
self.x: float = core.cursor.x
self.y: float = core.cursor.y
self.width: int = 0 # Set upon surface commit
self.height: int = 0
self.add_listener(wlr_drag.destroy_event, self._on_destroy)
self.add_listener(wlr_drag.icon.map_event, self._on_icon_map)
self.add_listener(wlr_drag.icon.unmap_event, self._on_icon_unmap)
self.add_listener(wlr_drag.icon.destroy_event, self._on_icon_destroy)
self.add_listener(wlr_drag.icon.surface.commit_event, self._on_icon_commit)
def finalize(self) -> None:
self.finalize_listeners()
self.core.live_dnd = None
def _on_destroy(self, _listener, _event) -> None:
logger.debug("Signal: wlr_drag destroy")
self.finalize()
def _on_icon_map(self, _listener, _event) -> None:
logger.debug("Signal: wlr_drag_icon map")
for output in self._outputs:
output.damage()
def _on_icon_unmap(self, _listener, _event) -> None:
logger.debug("Signal: wlr_drag_icon unmap")
for output in self._outputs:
output.damage()
def _on_icon_destroy(self, _listener, _event) -> None:
logger.debug("Signal: wlr_drag_icon destroy")
def _on_icon_commit(self, _listener, _event) -> None:
self.width = self.wlr_drag.icon.surface.current.width
self.height = self.wlr_drag.icon.surface.current.height
self.position(self.core.cursor.x, self.core.cursor.y)
def position(self, cx: float, cy: float) -> None:
self.x = cx
self.y = cy
self._outputs = {o for o in self.core.outputs if o.contains(self)}
for output in self._outputs:
output.damage()
| <filename>libqtile/backend/wayland/wlrq.py<gh_stars>1-10
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
import functools
import operator
from typing import TYPE_CHECKING
import cairocffi
from pywayland.server import Listener
from wlroots.wlr_types import Texture
from wlroots.wlr_types.keyboard import KeyboardModifier
from wlroots.wlr_types.pointer_constraints_v1 import (
PointerConstraintV1,
PointerConstraintV1StateField,
)
from wlroots.wlr_types.xdg_shell import XdgSurface
from libqtile.backend.base import Internal
from libqtile.log_utils import logger
from libqtile.utils import QtileError
if TYPE_CHECKING:
from typing import Callable, List, Optional, Set
from pywayland.server import Signal
from wlroots.wlr_types import Box, data_device_manager
from libqtile.backend.wayland.core import Core
from libqtile.backend.wayland.output import Output
from libqtile.backend.wayland.window import WindowType
class WlrQError(QtileError):
pass
ModMasks = {
"shift": KeyboardModifier.SHIFT,
"lock": KeyboardModifier.CAPS,
"control": KeyboardModifier.CTRL,
"mod1": KeyboardModifier.ALT,
"mod2": KeyboardModifier.MOD2,
"mod3": KeyboardModifier.MOD3,
"mod4": KeyboardModifier.LOGO,
"mod5": KeyboardModifier.MOD5,
}
# from linux/input-event-codes.h
_KEY_MAX = 0x2FF
# These are mouse buttons 1-9
BTN_LEFT = 0x110
BTN_MIDDLE = 0x112
BTN_RIGHT = 0x111
SCROLL_UP = _KEY_MAX + 1
SCROLL_DOWN = _KEY_MAX + 2
SCROLL_LEFT = _KEY_MAX + 3
SCROLL_RIGHT = _KEY_MAX + 4
BTN_SIDE = 0x113
BTN_EXTRA = 0x114
buttons = [
BTN_LEFT,
BTN_MIDDLE,
BTN_RIGHT,
SCROLL_UP,
SCROLL_DOWN,
SCROLL_LEFT,
SCROLL_RIGHT,
BTN_SIDE,
BTN_EXTRA,
]
# from drm_fourcc.h
DRM_FORMAT_ARGB8888 = 875713089
def translate_masks(modifiers: List[str]) -> int:
"""
Translate a modifier mask specified as a list of strings into an or-ed
bit representation.
"""
masks = []
for i in modifiers:
try:
masks.append(ModMasks[i])
except KeyError as e:
raise WlrQError("Unknown modifier: %s" % i) from e
if masks:
return functools.reduce(operator.or_, masks)
else:
return 0
class Painter:
def __init__(self, core):
self.core = core
def paint(self, screen, image_path, mode=None):
try:
with open(image_path, "rb") as f:
image, _ = cairocffi.pixbuf.decode_to_image_surface(f.read())
except IOError as e:
logger.error("Wallpaper: %s" % e)
return
surface = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, screen.width, screen.height)
with cairocffi.Context(surface) as context:
if mode == "fill":
context.rectangle(0, 0, screen.width, screen.height)
context.clip()
image_w = image.get_width()
image_h = image.get_height()
width_ratio = screen.width / image_w
if width_ratio * image_h >= screen.height:
context.scale(width_ratio)
else:
height_ratio = screen.height / image_h
context.translate(-(image_w * height_ratio - screen.width) // 2, 0)
context.scale(height_ratio)
elif mode == "stretch":
context.scale(
sx=screen.width / image.get_width(),
sy=screen.height / image.get_height(),
)
context.set_source_surface(image)
context.paint()
stride = surface.format_stride_for_width(cairocffi.FORMAT_ARGB32, screen.width)
surface.flush()
texture = Texture.from_pixels(
self.core.renderer,
DRM_FORMAT_ARGB8888,
stride,
screen.width,
screen.height,
cairocffi.cairo.cairo_image_surface_get_data(surface._pointer),
)
outputs = [output for output in self.core.outputs if output.wlr_output.enabled]
outputs[screen.index].wallpaper = texture
class HasListeners:
"""
Classes can subclass this to get some convenience handlers around
`pywayland.server.Listener`.
This guarantees that all listeners that set up and then removed in reverse order.
"""
def add_listener(self, event: Signal, callback: Callable):
if not hasattr(self, "_listeners"):
self._listeners = []
listener = Listener(callback)
event.add(listener)
self._listeners.append(listener)
def finalize_listeners(self):
for listener in reversed(self._listeners):
listener.remove()
class PointerConstraint(HasListeners):
"""
A small object to listen to signals on `struct wlr_pointer_constraint_v1` instances.
"""
rect: Box
def __init__(self, core: Core, wlr_constraint: PointerConstraintV1):
self.core = core
self.wlr_constraint = wlr_constraint
self.window: Optional[WindowType] = None
self._warp_target = (0, 0)
self._needs_warp = False
self.add_listener(wlr_constraint.set_region_event, self._on_set_region)
self.add_listener(wlr_constraint.destroy_event, self._on_destroy)
self._get_window()
def _get_window(self):
for win in self.core.qtile.windows_map.values():
if not isinstance(win, Internal) and isinstance(win.surface, XdgSurface):
if win.surface.surface == self.wlr_constraint.surface:
break
else:
self.finalize()
self.window = win
def finalize(self):
if self.core.active_pointer_constraint is self:
self.disable()
self.finalize_listeners()
self.core.pointer_constraints.remove(self)
def _on_set_region(self, _listener, _data):
logger.debug("Signal: wlr_pointer_constraint_v1 set_region")
self._get_region()
def _on_destroy(self, _listener, wlr_constraint: PointerConstraintV1):
logger.debug("Signal: wlr_pointer_constraint_v1 destroy")
self.finalize()
def _on_commit(self, _listener, _data):
if self._needs_warp:
# Warp in case the pointer is not inside the rect
if not self.rect.contains_point(self.cursor.x, self.cursor.y):
self.core.warp_pointer(*self._warp_target)
self._needs_warp = False
def _get_region(self):
rect = self.wlr_constraint.region.rectangles_as_boxes()[0]
rect.x += self.window.x + self.window.borderwidth
rect.y += self.window.y + self.window.borderwidth
self._warp_target = (rect.x + rect.width / 2, rect.y + rect.height / 2)
self.rect = rect
self._needs_warp = True
def enable(self):
logger.debug("Enabling pointer constraints.")
self.core.active_pointer_constraint = self
self._get_region()
self.add_listener(self.wlr_constraint.surface.commit_event, self._on_commit)
self.wlr_constraint.send_activated()
def disable(self):
logger.debug("Disabling pointer constraints.")
if self.wlr_constraint.current.committed & PointerConstraintV1StateField.CURSOR_HINT:
x, y = self.wlr_constraint.current.cursor_hint
self.core.warp_pointer(x + self.window.x, y + self.window.y)
self.core.active_pointer_constraint = None
self.wlr_constraint.send_deactivated()
class Dnd(HasListeners):
"""A helper for drag and drop functionality."""
def __init__(self, core: Core, wlr_drag: data_device_manager.Drag):
self.core = core
self.wlr_drag = wlr_drag
self._outputs: Set[Output] = set()
self.x: float = core.cursor.x
self.y: float = core.cursor.y
self.width: int = 0 # Set upon surface commit
self.height: int = 0
self.add_listener(wlr_drag.destroy_event, self._on_destroy)
self.add_listener(wlr_drag.icon.map_event, self._on_icon_map)
self.add_listener(wlr_drag.icon.unmap_event, self._on_icon_unmap)
self.add_listener(wlr_drag.icon.destroy_event, self._on_icon_destroy)
self.add_listener(wlr_drag.icon.surface.commit_event, self._on_icon_commit)
def finalize(self) -> None:
self.finalize_listeners()
self.core.live_dnd = None
def _on_destroy(self, _listener, _event) -> None:
logger.debug("Signal: wlr_drag destroy")
self.finalize()
def _on_icon_map(self, _listener, _event) -> None:
logger.debug("Signal: wlr_drag_icon map")
for output in self._outputs:
output.damage()
def _on_icon_unmap(self, _listener, _event) -> None:
logger.debug("Signal: wlr_drag_icon unmap")
for output in self._outputs:
output.damage()
def _on_icon_destroy(self, _listener, _event) -> None:
logger.debug("Signal: wlr_drag_icon destroy")
def _on_icon_commit(self, _listener, _event) -> None:
self.width = self.wlr_drag.icon.surface.current.width
self.height = self.wlr_drag.icon.surface.current.height
self.position(self.core.cursor.x, self.core.cursor.y)
def position(self, cx: float, cy: float) -> None:
self.x = cx
self.y = cy
self._outputs = {o for o in self.core.outputs if o.contains(self)}
for output in self._outputs:
output.damage()
| en | 0.788455 | # Copyright (c) 2021 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # from linux/input-event-codes.h # These are mouse buttons 1-9 # from drm_fourcc.h Translate a modifier mask specified as a list of strings into an or-ed bit representation. Classes can subclass this to get some convenience handlers around `pywayland.server.Listener`. This guarantees that all listeners that set up and then removed in reverse order. A small object to listen to signals on `struct wlr_pointer_constraint_v1` instances. # Warp in case the pointer is not inside the rect A helper for drag and drop functionality. # Set upon surface commit | 1.389336 | 1 |
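# Aside: translate_masks above folds modifier flags together with
# functools.reduce(operator.or_, ...). The same OR-fold with plain integers standing in
# for wlroots' KeyboardModifier values (so it runs without wlroots installed):
import functools
import operator
MOD_MASKS = {"shift": 0x1, "control": 0x4, "mod4": 0x40}  # stand-in bit flags
def translate_masks(modifiers):
    masks = [MOD_MASKS[m] for m in modifiers]
    return functools.reduce(operator.or_, masks) if masks else 0
print(bin(translate_masks(["mod4", "shift"])))  # 0b1000001
print(translate_masks([]))                      # 0 when no modifiers are given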
research_mnist/mnist_visualization_pca.py | Gaon-Choi/CSE4007 | 0 | 6631105 | <filename>research_mnist/mnist_visualization_pca.py
import matplotlib.pyplot as plt
import sklearn.linear_model
import sklearn.discriminant_analysis
import sklearn.svm
import sklearn.neighbors
import sklearn.neural_network
from sklearn import datasets
from sklearn.model_selection import train_test_split
import numpy as np
import time
from sklearn.decomposition import PCA
mnist = datasets.load_digits()
print(mnist.data.shape)
print(mnist['images'].shape)
# flatten the images
n_samples = len(mnist.images)
data = mnist.images.reshape((n_samples, -1))
X_train, X_test, y_train, y_test = train_test_split(data, mnist.target, test_size = 0.3, shuffle=False)
pca = PCA(n_components=2)
X2D = pca.fit_transform(mnist.data)
X2D = pca.transform(mnist.data)
print(type(X2D))
print(X2D.shape)
colors = 'r', 'g', 'b', 'c', 'm', 'y', 'k', 'yellow', 'orange', 'purple'
for i, c, label in zip(range(len(mnist.target_names)), colors, mnist.target_names):
plt.scatter(X2D[mnist.target == i, 0], X2D[mnist.target == i, 1], c=c, label=label, s=10)
plt.legend()
plt.title("PCA Results : MNIST")
plt.show() | <filename>research_mnist/mnist_visualization_pca.py
import matplotlib.pyplot as plt
import sklearn.linear_model
import sklearn.discriminant_analysis
import sklearn.svm
import sklearn.neighbors
import sklearn.neural_network
from sklearn import datasets
from sklearn.model_selection import train_test_split
import numpy as np
import time
from sklearn.decomposition import PCA
mnist = datasets.load_digits()
print(mnist.data.shape)
print(mnist['images'].shape)
# flatten the images
n_samples = len(mnist.images)
data = mnist.images.reshape((n_samples, -1))
X_train, X_test, y_train, y_test = train_test_split(data, mnist.target, test_size = 0.3, shuffle=False)
pca = PCA(n_components=2)
X2D = pca.fit_transform(mnist.data)
X2D = pca.transform(mnist.data)
print(type(X2D))
print(X2D.shape)
colors = 'r', 'g', 'b', 'c', 'm', 'y', 'k', 'yellow', 'orange', 'purple'
for i, c, label in zip(range(len(mnist.target_names)), colors, mnist.target_names):
plt.scatter(X2D[mnist.target == i, 0], X2D[mnist.target == i, 1], c=c, label=label, s=10)
plt.legend()
plt.title("PCA Results : MNIST")
plt.show() | en | 0.472941 | # flatten the images | 3.140311 | 3 |
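# Aside: the scatter plot above keeps only the first two principal components; how much
# of the variance they retain can be checked on the same digits data:
from sklearn import datasets
from sklearn.decomposition import PCA
digits = datasets.load_digits()
pca = PCA(n_components=2)
pca.fit(digits.data)
print(pca.explained_variance_ratio_)        # per-component share of the variance
print(pca.explained_variance_ratio_.sum())  # roughly 0.2-0.3 for the digits set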
table2json/management/commands/csv.py | amoghmadan/Table2JSON | 4 | 6631106 | <reponame>amoghmadan/Table2JSON
from pandas import read_csv
from table2json.management import Table2JSONBaseCommand
class Command(Table2JSONBaseCommand):
"""CSV Command"""
help = "Read from XSV write to JSON"
def add_command_arguments(self, parser):
"""Add Arguments for CSV Command"""
parser.add_argument("path", type=str, help="Path to the XSV file")
def process_to_df(self, *args, **options):
"""CSV Handling Logic"""
return read_csv(options["path"])
| from pandas import read_csv
from table2json.management import Table2JSONBaseCommand
class Command(Table2JSONBaseCommand):
"""CSV Command"""
help = "Read from XSV write to JSON"
def add_command_arguments(self, parser):
"""Add Arguments for CSV Command"""
parser.add_argument("path", type=str, help="Path to the XSV file")
def process_to_df(self, *args, **options):
"""CSV Handling Logic"""
return read_csv(options["path"]) | en | 0.457989 | CSV Command Add Arguments for CSV Command CSV Handling Logic | 3.238709 | 3 |
initiative/ui/stat_display/stat_display_form.py | nicjohnson145/initiative | 0 | 6631107 | import curses
import npyscreen
from initiative.ui.stat_display.grid_box import GridBox
BUTTON_COLUMNS = 4
BOX_HEIGHT = 6
ENTRY_HEIGHT = BOX_HEIGHT - 2
class StatDisplay(npyscreen.ActionFormMinimal):
def create(self):
self.add_handlers({
'q': lambda *args: self.parentApp.switchFormPrevious(),
})
def populate_form(self):
self._clear_all_widgets()
self.name = self.value.name
self.add(npyscreen.TitleFixedText, name='Armor Class', value=self.value.armor_class)
self.add(npyscreen.TitleFixedText, name='Hit Points', value=self.value.hit_points)
self.add(npyscreen.TitleFixedText, name='Speed', value=self.value.speed)
self.nextrely += 1
stat_grid_values = [[
self.value.strength,
self.value.dexterity,
self.value.constitution,
self.value.intelligence,
self.value.wisdom,
self.value.charisma
]]
self.add(npyscreen.GridColTitles, columns=6, values=stat_grid_values, max_height=3,
col_titles=['STR', 'DEX', 'CON', 'INT', 'WIS', 'CHA'])
self.nextrely += 1
self.conditional_single_line_display('saving_throws', 'Saving Throws')
self.conditional_single_line_display('damage_vulnerabilities', 'Damage Vulnerabilities')
self.conditional_single_line_display('damage_resistances', 'Damage Resistances')
self.conditional_single_line_display('damage_immunities', 'Damage Immunities')
self.conditional_single_line_display('condition_immunities', 'Condition Immunities')
self.conditional_single_line_display('senses', 'Senses')
self.conditional_single_line_display('languages', 'Languages')
if len(self.value.abilities) > 0:
self._create_grid_box('Abilities', 'abilities')
self._create_grid_box('Actions', 'actions')
if len(self.value.legendary_actions) > 0:
self._create_grid_box('Legendary Actions', 'legendary_actions')
def _create_grid_box(self, name, attribute):
box = self.add(GridBox, name=name, max_height=BOX_HEIGHT)
setattr(self, attribute, box.entry_widget)
entry = box.entry_widget
entry.columns = BUTTON_COLUMNS
entry.max_height = ENTRY_HEIGHT
entry.set_grid_values_from_flat_list(getattr(self.value, attribute))
entry.add_handlers({
curses.ascii.NL: lambda *args: self._display_popup(attribute)
})
def _display_popup(self, attr):
widget = getattr(self, attr)
row, column = widget.edit_cell
msg = widget.values[row][column].as_popup()
npyscreen.notify_confirm(msg, wide=True)
def conditional_single_line_display(self, attr, title):
longKwargs = {
'begin_entry_at': 23,
'use_two_lines': False
}
if len(getattr(self.value, attr)) > 0:
self.add(
npyscreen.TitleFixedText,
name=title,
value=getattr(self.value, attr),
**longKwargs
)
def beforeEditing(self):
self.populate_form()
def on_ok(self):
self.parentApp.switchFormPrevious()
| import curses
import npyscreen
from initiative.ui.stat_display.grid_box import GridBox
BUTTON_COLUMNS = 4
BOX_HEIGHT = 6
ENTRY_HEIGHT = BOX_HEIGHT - 2
class StatDisplay(npyscreen.ActionFormMinimal):
def create(self):
self.add_handlers({
'q': lambda *args: self.parentApp.switchFormPrevious(),
})
def populate_form(self):
self._clear_all_widgets()
self.name = self.value.name
self.add(npyscreen.TitleFixedText, name='Armor Class', value=self.value.armor_class)
self.add(npyscreen.TitleFixedText, name='Hit Points', value=self.value.hit_points)
self.add(npyscreen.TitleFixedText, name='Speed', value=self.value.speed)
self.nextrely += 1
stat_grid_values = [[
self.value.strength,
self.value.dexterity,
self.value.constitution,
self.value.intelligence,
self.value.wisdom,
self.value.charisma
]]
self.add(npyscreen.GridColTitles, columns=6, values=stat_grid_values, max_height=3,
col_titles=['STR', 'DEX', 'CON', 'INT', 'WIS', 'CHA'])
self.nextrely += 1
self.conditional_single_line_display('saving_throws', 'Saving Throws')
self.conditional_single_line_display('damage_vulnerabilities', 'Damage Vulnerabilities')
self.conditional_single_line_display('damage_resistances', 'Damage Resistances')
self.conditional_single_line_display('damage_immunities', 'Damage Immunities')
self.conditional_single_line_display('condition_immunities', 'Condition Immunities')
self.conditional_single_line_display('senses', 'Senses')
self.conditional_single_line_display('languages', 'Languages')
if len(self.value.abilities) > 0:
self._create_grid_box('Abilities', 'abilities')
self._create_grid_box('Actions', 'actions')
if len(self.value.legendary_actions) > 0:
self._create_grid_box('Legendary Actions', 'legendary_actions')
def _create_grid_box(self, name, attribute):
box = self.add(GridBox, name=name, max_height=BOX_HEIGHT)
setattr(self, attribute, box.entry_widget)
entry = box.entry_widget
entry.columns = BUTTON_COLUMNS
entry.max_height = ENTRY_HEIGHT
entry.set_grid_values_from_flat_list(getattr(self.value, attribute))
entry.add_handlers({
curses.ascii.NL: lambda *args: self._display_popup(attribute)
})
def _display_popup(self, attr):
widget = getattr(self, attr)
row, column = widget.edit_cell
msg = widget.values[row][column].as_popup()
npyscreen.notify_confirm(msg, wide=True)
def conditional_single_line_display(self, attr, title):
longKwargs = {
'begin_entry_at': 23,
'use_two_lines': False
}
if len(getattr(self.value, attr)) > 0:
self.add(
npyscreen.TitleFixedText,
name=title,
value=getattr(self.value, attr),
**longKwargs
)
def beforeEditing(self):
self.populate_form()
def on_ok(self):
self.parentApp.switchFormPrevious()
| none | 1 | 2.453535 | 2 |
|
Day 21 Classes and Objects/classes.py | Sumanth-Talluri/30-Days-Of-Python | 3 | 6631108 | class Statistics:
def __init__(self, lst):
self.lst = lst
def count(self):
return len(self.lst)
def sum(self):
total = 0
for item in self.lst:
total += item
return total
def min(self):
minn = self.lst[0]
for item in self.lst:
if item < minn:
minn = item
return minn
def max(self):
        maxx = self.lst[0]
for item in self.lst:
if item > maxx:
maxx = item
return maxx
def range(self):
minn = self.min()
maxx = self.max()
return maxx-minn
def mean(self):
val = float(self.sum()) / self.count()
return val
def median(self):
new_lst = self.lst
new_lst.sort()
length = self.count()
if length % 2 == 0:
            output = [new_lst[(length // 2) - 1], new_lst[length // 2]]
else:
output = [new_lst[length//2]]
return output
def mode(self):
count_dic = {}
for item in self.lst:
if item not in count_dic:
count_dic[item] = 1
else:
count_dic[item] += 1
output = []
for k, v in count_dic.items():
tup = (k, v)
output.append(tup)
output.sort(key=lambda x: x[1], reverse=True)
res = {}
res['mode'] = output[0][0]
res['count'] = output[0][1]
return res
def var(self):
avg = self.mean()
n = self.count()
sq_diff_lst = []
for item in self.lst:
diff = item - avg
sq_diff_lst.append(diff**2)
total = 0
for item in sq_diff_lst:
total += item
var = float(total)/n
return var
def std(self):
avg = self.mean()
n = self.count()
sq_diff_lst = []
for item in self.lst:
diff = item - avg
sq_diff_lst.append(diff**2)
total = 0
for item in sq_diff_lst:
total += item
var = total/n
sd = var**0.5
return sd
def freq_dist(self):
count_dic = {}
for item in self.lst:
if item not in count_dic:
count_dic[item] = 1
else:
count_dic[item] += 1
output = []
for k, v in count_dic.items():
tup = (k, v)
output.append(tup)
output.sort(key=lambda x: x[1], reverse=True)
return output
def describe(self):
print('Count:', self.count())
print('Sum: ', self.sum())
print('Min: ', self.min())
print('Max: ', self.max())
print('Range: ', self.range())
print('Mean: ', self.mean())
print('Median: ', self.median())
print('Mode: ', self.mode())
print('Variance: ', self.var())
print('Standard Deviation: ', self.std())
print('Frequency Distribution: ', self.freq_dist())
ages = [31, 26, 34, 37, 27, 26, 32, 32, 26, 27, 27, 24,
32, 33, 27, 25, 26, 38, 37, 31, 34, 24, 33, 29, 26]
data = Statistics(ages)
data.describe()
class PersonalAccount:
def __init__(self, fname, lname, incomes, expenses, properties):
self.fname = fname
self.lname = lname
self.incomes = incomes
self.expenses = expenses
self.properties = properties
def total_income(self):
total = 0
for income in self.incomes:
total += income[0]
return total
def total_expense(self):
total = 0
for expense in self.expenses:
total += expense[0]
return total
def account_info(self):
print("Firstname:", self.fname)
print("Lastname:", self.lname)
print("Income:", self.incomes)
print("Expenses:", self.expenses)
print("Properties:", self.properties)
def add_income(self, income, desc):
self.incomes.append((income, desc))
return self.incomes
def add_expense(self, expense, desc):
self.expenses.append((expense, desc))
return self.expenses
def account_balance(self):
bal = self.total_income() - self.total_expense()
return bal
ram = PersonalAccount("Ram", "Binod", [(10000, 'salary'), (5000, 'pension')], [
(1000, 'Rent'), (1000, 'Food'), (800, 'Clothes')], "No Properties")
ram.account_info()
print("Account Balance:", ram.account_balance())
| class Statistics:
def __init__(self, lst):
self.lst = lst
def count(self):
return len(self.lst)
def sum(self):
total = 0
for item in self.lst:
total += item
return total
def min(self):
minn = self.lst[0]
for item in self.lst:
if item < minn:
minn = item
return minn
def max(self):
        maxx = self.lst[0]
for item in self.lst:
if item > maxx:
maxx = item
return maxx
def range(self):
minn = self.min()
maxx = self.max()
return maxx-minn
def mean(self):
val = float(self.sum()) / self.count()
return val
def median(self):
new_lst = self.lst
new_lst.sort()
length = self.count()
if length % 2 == 0:
            output = [new_lst[(length // 2) - 1], new_lst[length // 2]]
else:
output = [new_lst[length//2]]
return output
def mode(self):
count_dic = {}
for item in self.lst:
if item not in count_dic:
count_dic[item] = 1
else:
count_dic[item] += 1
output = []
for k, v in count_dic.items():
tup = (k, v)
output.append(tup)
output.sort(key=lambda x: x[1], reverse=True)
res = {}
res['mode'] = output[0][0]
res['count'] = output[0][1]
return res
def var(self):
avg = self.mean()
n = self.count()
sq_diff_lst = []
for item in self.lst:
diff = item - avg
sq_diff_lst.append(diff**2)
total = 0
for item in sq_diff_lst:
total += item
var = float(total)/n
return var
def std(self):
avg = self.mean()
n = self.count()
sq_diff_lst = []
for item in self.lst:
diff = item - avg
sq_diff_lst.append(diff**2)
total = 0
for item in sq_diff_lst:
total += item
var = total/n
sd = var**0.5
return sd
def freq_dist(self):
count_dic = {}
for item in self.lst:
if item not in count_dic:
count_dic[item] = 1
else:
count_dic[item] += 1
output = []
for k, v in count_dic.items():
tup = (k, v)
output.append(tup)
output.sort(key=lambda x: x[1], reverse=True)
return output
def describe(self):
print('Count:', self.count())
print('Sum: ', self.sum())
print('Min: ', self.min())
print('Max: ', self.max())
print('Range: ', self.range())
print('Mean: ', self.mean())
print('Median: ', self.median())
print('Mode: ', self.mode())
print('Variance: ', self.var())
print('Standard Deviation: ', self.std())
print('Frequency Distribution: ', self.freq_dist())
ages = [31, 26, 34, 37, 27, 26, 32, 32, 26, 27, 27, 24,
32, 33, 27, 25, 26, 38, 37, 31, 34, 24, 33, 29, 26]
data = Statistics(ages)
data.describe()
class PersonalAccount:
def __init__(self, fname, lname, incomes, expenses, properties):
self.fname = fname
self.lname = lname
self.incomes = incomes
self.expenses = expenses
self.properties = properties
def total_income(self):
total = 0
for income in self.incomes:
total += income[0]
return total
def total_expense(self):
total = 0
for expense in self.expenses:
total += expense[0]
return total
def account_info(self):
print("Firstname:", self.fname)
print("Lastname:", self.lname)
print("Income:", self.incomes)
print("Expenses:", self.expenses)
print("Properties:", self.properties)
def add_income(self, income, desc):
self.incomes.append((income, desc))
return self.incomes
def add_expense(self, expense, desc):
self.expenses.append((expense, desc))
return self.expenses
def account_balance(self):
bal = self.total_income() - self.total_expense()
return bal
ram = PersonalAccount("Ram", "Binod", [(10000, 'salary'), (5000, 'pension')], [
(1000, 'Rent'), (1000, 'Food'), (800, 'Clothes')], "No Properties")
ram.account_info()
print("Account Balance:", ram.account_balance()) | none | 1 | 3.325228 | 3 |
|
bin/Packager/DesktopEntry.py | C-EO/craft | 55 | 6631109 | # -*- coding: utf-8 -*-
# Copyright <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import io
from pathlib import Path
from Packager.PackagerBase import *
from shells import Powershell
class DesktopEntry(PackagerBase):
def createPackage(self):
defines = self.setDefaults(self.defines)
craftName = CraftCore.standardDirs.craftRoot().name
if CraftCore.compiler.isMacOS:
root = CraftCore.standardDirs.craftRoot()
targetBundle = Path(self.getMacAppPath(defines, root / "Applications"))
targetPlist = targetBundle / "Contents/Info.plist"
with io.StringIO() as binaryLog:
utils.system(["defaults", "read", targetPlist, "CFBundleExecutable"], stdout=binaryLog)
targetBinary = binaryLog.getvalue().strip()
with io.StringIO() as iconLog:
utils.system(["defaults", "read", targetPlist, "CFBundleIconFile"], stdout=iconLog)
targetIcon = iconLog.getvalue().strip()
if not targetBinary:
return False
            # install shim to allow invocation of the bundle from the craft env
if not utils.createShim(root / "bin" / defines["display_name"],
targetBundle / "Contents/MacOS" / targetBinary , useAbsolutePath=True):
return False
targetShimBundle = Path("/Applications/Craft", f"{craftName} {defines['display_name']}.app")
shim = targetShimBundle / "Contents/MacOS" / targetBinary
if not utils.createDir(targetShimBundle/ "Contents/MacOS"):
return False
if not utils.createDir(targetShimBundle / "Contents/Resources"):
return False
if not utils.createShim(shim, sys.executable, [CraftCore.standardDirs.craftBin()/ "craft.py", "--run-detached", targetBundle / "Contents/MacOS" / targetBinary ], useAbsolutePath=True):
return False
if not utils.copyFile(targetPlist, targetShimBundle /"Contents/Info.plist", linkOnly=False):
return False
if targetIcon and not utils.copyFile(targetBundle / "Contents/Resources" / targetIcon, targetShimBundle / "Contents/Resources" / targetIcon, linkOnly=False):
return False
elif CraftCore.compiler.isWindows:
shortcuts = defines["shortcuts"] or []
if "executable" in defines:
shortcuts.append({"name": defines["productname"], "target": defines["executable"]})
del defines["executable"]
for shortcut in shortcuts:
shim = CraftCore.standardDirs.craftRoot() / "wrapper" / shortcut["name"]
target = CraftCore.standardDirs.craftRoot() / shortcut["target"]
if not utils.createShim(shim, sys.executable, [CraftCore.standardDirs.craftBin() / "craft.py", "--run-detached", target], guiApp=True):
return False
if not utils.installShortcut(f"{craftName}/{shortcut['name']} {craftName}", shim, target.parent,
os.path.join(CraftCore.standardDirs.craftRoot(), shortcut["target"]),
shortcut.get("desciption", f"{shortcut['name']} from {CraftCore.standardDirs.craftRoot()}")):
return False
return True
| # -*- coding: utf-8 -*-
# Copyright <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import io
from pathlib import Path
from Packager.PackagerBase import *
from shells import Powershell
class DesktopEntry(PackagerBase):
def createPackage(self):
defines = self.setDefaults(self.defines)
craftName = CraftCore.standardDirs.craftRoot().name
if CraftCore.compiler.isMacOS:
root = CraftCore.standardDirs.craftRoot()
targetBundle = Path(self.getMacAppPath(defines, root / "Applications"))
targetPlist = targetBundle / "Contents/Info.plist"
with io.StringIO() as binaryLog:
utils.system(["defaults", "read", targetPlist, "CFBundleExecutable"], stdout=binaryLog)
targetBinary = binaryLog.getvalue().strip()
with io.StringIO() as iconLog:
utils.system(["defaults", "read", targetPlist, "CFBundleIconFile"], stdout=iconLog)
targetIcon = iconLog.getvalue().strip()
if not targetBinary:
return False
            # install shim to allow invocation of the bundle from the craft env
if not utils.createShim(root / "bin" / defines["display_name"],
targetBundle / "Contents/MacOS" / targetBinary , useAbsolutePath=True):
return False
targetShimBundle = Path("/Applications/Craft", f"{craftName} {defines['display_name']}.app")
shim = targetShimBundle / "Contents/MacOS" / targetBinary
if not utils.createDir(targetShimBundle/ "Contents/MacOS"):
return False
if not utils.createDir(targetShimBundle / "Contents/Resources"):
return False
if not utils.createShim(shim, sys.executable, [CraftCore.standardDirs.craftBin()/ "craft.py", "--run-detached", targetBundle / "Contents/MacOS" / targetBinary ], useAbsolutePath=True):
return False
if not utils.copyFile(targetPlist, targetShimBundle /"Contents/Info.plist", linkOnly=False):
return False
if targetIcon and not utils.copyFile(targetBundle / "Contents/Resources" / targetIcon, targetShimBundle / "Contents/Resources" / targetIcon, linkOnly=False):
return False
elif CraftCore.compiler.isWindows:
shortcuts = defines["shortcuts"] or []
if "executable" in defines:
shortcuts.append({"name": defines["productname"], "target": defines["executable"]})
del defines["executable"]
for shortcut in shortcuts:
shim = CraftCore.standardDirs.craftRoot() / "wrapper" / shortcut["name"]
target = CraftCore.standardDirs.craftRoot() / shortcut["target"]
if not utils.createShim(shim, sys.executable, [CraftCore.standardDirs.craftBin() / "craft.py", "--run-detached", target], guiApp=True):
return False
if not utils.installShortcut(f"{craftName}/{shortcut['name']} {craftName}", shim, target.parent,
os.path.join(CraftCore.standardDirs.craftRoot(), shortcut["target"]),
shortcut.get("desciption", f"{shortcut['name']} from {CraftCore.standardDirs.craftRoot()}")):
return False
return True
| en | 0.674979 | # -*- coding: utf-8 -*- # Copyright <NAME> <<EMAIL>> # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # install shim to allow invocation of the bunde from the craft env | 1.309969 | 1 |
core/domain/wipeout_domain_test.py | jlau323/oppia | 2 | 6631110 | # coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for topic domain objects."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import user_services
from core.domain import wipeout_domain
from core.tests import test_utils
import utils
class PendingDeletionRequestUnitTests(test_utils.GenericTestBase):
"""Tests for topic domain objects."""
def setUp(self):
super(PendingDeletionRequestUnitTests, self).setUp()
self.signup('<EMAIL>', 'A')
self.signup('<EMAIL>', 'B')
self.user_id_a = self.get_user_id_from_email('<EMAIL>')
self.role = user_services.get_user_settings(self.user_id_a).role
def test_create_default_pending_deletion_request(self):
"""Tests the create_default_topic() function."""
default_pending_deletion = (
wipeout_domain.PendingDeletionRequest.create_default(
self.user_id_a, '<EMAIL>', self.role))
self.assertEqual(default_pending_deletion.user_id, self.user_id_a)
self.assertEqual(default_pending_deletion.email, '<EMAIL>')
self.assertEqual(default_pending_deletion.role, self.role)
self.assertEqual(default_pending_deletion.deletion_complete, False)
self.assertEqual(
default_pending_deletion.pseudonymizable_entity_mappings, {})
def test_validate_fails_for_wrong_key_in_activity_mappings(self):
"""Tests the create_default_topic() function."""
pending_deletion_request = (
wipeout_domain.PendingDeletionRequest.create_default(
self.user_id_a, '<EMAIL>', self.role))
pending_deletion_request.pseudonymizable_entity_mappings = {
'wrong_key': {}
}
with self.assertRaisesRegexp(
utils.ValidationError,
'pseudonymizable_entity_mappings contain wrong key'
):
pending_deletion_request.validate()
| # coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for topic domain objects."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import user_services
from core.domain import wipeout_domain
from core.tests import test_utils
import utils
class PendingDeletionRequestUnitTests(test_utils.GenericTestBase):
"""Tests for topic domain objects."""
def setUp(self):
super(PendingDeletionRequestUnitTests, self).setUp()
self.signup('<EMAIL>', 'A')
self.signup('<EMAIL>', 'B')
self.user_id_a = self.get_user_id_from_email('<EMAIL>')
self.role = user_services.get_user_settings(self.user_id_a).role
def test_create_default_pending_deletion_request(self):
"""Tests the create_default_topic() function."""
default_pending_deletion = (
wipeout_domain.PendingDeletionRequest.create_default(
self.user_id_a, '<EMAIL>', self.role))
self.assertEqual(default_pending_deletion.user_id, self.user_id_a)
self.assertEqual(default_pending_deletion.email, '<EMAIL>')
self.assertEqual(default_pending_deletion.role, self.role)
self.assertEqual(default_pending_deletion.deletion_complete, False)
self.assertEqual(
default_pending_deletion.pseudonymizable_entity_mappings, {})
def test_validate_fails_for_wrong_key_in_activity_mappings(self):
"""Tests the create_default_topic() function."""
pending_deletion_request = (
wipeout_domain.PendingDeletionRequest.create_default(
self.user_id_a, '<EMAIL>', self.role))
pending_deletion_request.pseudonymizable_entity_mappings = {
'wrong_key': {}
}
with self.assertRaisesRegexp(
utils.ValidationError,
'pseudonymizable_entity_mappings contain wrong key'
):
pending_deletion_request.validate() | en | 0.773291 | # coding: utf-8 # # Copyright 2018 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for topic domain objects. # pylint: disable=import-only-modules # pylint: disable=import-only-modules Tests for topic domain objects. Tests the create_default_topic() function. Tests the create_default_topic() function. | 1.866051 | 2 |
base/src/shallowflow/base/help/_Markdown.py | waikato-datamining/shallow-flow | 0 | 6631111 | from shallowflow.api.config import Option
from shallowflow.api.help import AbstractHelpGenerator
class Markdown(AbstractHelpGenerator):
"""
Generates help in plain text format.
"""
def file_extension(self):
"""
Returns the preferred file extension.
:return: the file extension (incl dot)
:rtype: str
"""
return ".md"
def _indent(self, s, num):
"""
Indents the lines in the string.
:param s: the string to indent
:type s: str
:param num: the number of spaces to use for indentation
:type num: int
:return: the indented string
:rtype: str
"""
if num == 0:
return s
indent = " " * num
parts = s.split("\n")
result = ""
for part in parts:
if len(result) > 0:
result += "\n"
result += indent + part
return result
def _do_generate(self, handler):
"""
Performs the actual generation.
:param handler: the option handler to generate the help for
:type handler: AbstractOptionHandler
        :return: the generated string
:rtype: str
"""
result = "# " + type(handler).__name__ + "\n"
result += "\n"
result += "## Description\n"
result += handler.description() + "\n"
result += "\n"
result += "## Options\n"
for item in handler.option_manager.options():
result += "* " + item.name + " (" + str(item.value_type.__name__) + ")\n"
result += "\n"
result += " * " + item.help + "\n"
result += " * default: " + repr(item.def_value) + "\n"
result += "\n"
return result
 | from shallowflow.api.config import Option
from shallowflow.api.help import AbstractHelpGenerator
class Markdown(AbstractHelpGenerator):
"""
Generates help in plain text format.
"""
def file_extension(self):
"""
Returns the preferred file extension.
:return: the file extension (incl dot)
:rtype: str
"""
return ".md"
def _indent(self, s, num):
"""
Indents the lines in the string.
:param s: the string to indent
:type s: str
:param num: the number of spaces to use for indentation
:type num: int
:return: the indented string
:rtype: str
"""
if num == 0:
return s
indent = " " * num
parts = s.split("\n")
result = ""
for part in parts:
if len(result) > 0:
result += "\n"
result += indent + part
return result
def _do_generate(self, handler):
"""
Performs the actual generation.
:param handler: the option handler to generate the help for
:type handler: AbstractOptionHandler
        :return: the generated string
:rtype: str
"""
result = "# " + type(handler).__name__ + "\n"
result += "\n"
result += "## Description\n"
result += handler.description() + "\n"
result += "\n"
result += "## Options\n"
for item in handler.option_manager.options():
result += "* " + item.name + " (" + str(item.value_type.__name__) + ")\n"
result += "\n"
result += " * " + item.help + "\n"
result += " * default: " + repr(item.def_value) + "\n"
result += "\n"
return result
| en | 0.485185 | Generates help in plain text format. Returns the preferred file extension. :return: the file extension (incl dot) :rtype: str Indents the lines in the string. :param s: the string to indent :type s: str :param num: the number of spaces to use for indentation :type num: int :return: the indented string :rtype: str Performs the actual generation. :param handler: the option handler to generate the help for :type handler: AbstractOptionHandler :return: the generate string :rtype: str # Description\n" # Options\n" | 2.818697 | 3 |
backend/api/db/models/__init__.py | kkevinn114/Yacht | 0 | 6631112 | from .containers import *
| from .containers import * | none | 1 | 1.194881 | 1 |
|
fd.py | buhuhaha/python | 0 | 6631113 | import cv2
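# Load OpenCV's pre-trained Haar cascade for frontal face detection (expects the XML file in the working directory)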
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
img = cv2.imread('IMG_0711.JPG')
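# Detect faces; 1.1 is the image-pyramid scale factor and 4 the minimum neighbours per detection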
faces = face_cascade.detectMultiScale(img, 1.1, 4)
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
cv2.imwrite("face_detected.png", img)
print('Successfully saved') | import cv2
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
img = cv2.imread('IMG_0711.JPG')
faces = face_cascade.detectMultiScale(img, 1.1, 4)
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
cv2.imwrite("face_detected.png", img)
print('Successfully saved') | none | 1 | 2.884437 | 3 |
|
openstackclient/volume/v3/volume_group.py | mydevice/python-openstackclient | 262 | 6631114 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from cinderclient import api_versions
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
LOG = logging.getLogger(__name__)
def _format_group(group):
columns = (
'id',
'status',
'name',
'description',
'group_type',
'volume_types',
'availability_zone',
'created_at',
'volumes',
'group_snapshot_id',
'source_group_id',
)
column_headers = (
'ID',
'Status',
'Name',
'Description',
'Group Type',
'Volume Types',
'Availability Zone',
'Created At',
'Volumes',
'Group Snapshot ID',
'Source Group ID',
)
# TODO(stephenfin): Consider using a formatter for volume_types since it's
# a list
return (
column_headers,
utils.get_item_properties(
group,
columns,
),
)
class CreateVolumeGroup(command.ShowOne):
"""Create a volume group.
Generic volume groups enable you to create a group of volumes and manage
them together.
Generic volume groups are more flexible than consistency groups. Currently
volume consistency groups only support consistent group snapshot. It
cannot be extended easily to serve other purposes. A project may want to
put volumes used in the same application together in a group so that it is
easier to manage them together, and this group of volumes may or may not
    support consistent group snapshot. Generic volume groups solve this problem.
By decoupling the tight relationship between the group construct and the
consistency concept, generic volume groups can be extended to support other
features in the future.
This command requires ``--os-volume-api-version`` 3.13 or greater.
"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'volume_group_type',
metavar='<volume_group_type>',
help=_('Name or ID of volume group type to use.'),
)
parser.add_argument(
'volume_types',
metavar='<volume_type>',
nargs='+',
default=[],
help=_('Name or ID of volume type(s) to use.'),
)
parser.add_argument(
'--name',
metavar='<name>',
help=_('Name of the volume group.'),
)
parser.add_argument(
'--description',
metavar='<description>',
help=_('Description of a volume group.')
)
parser.add_argument(
'--availability-zone',
metavar='<availability-zone>',
help=_('Availability zone for volume group.'),
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
if volume_client.api_version < api_versions.APIVersion('3.13'):
msg = _(
"--os-volume-api-version 3.13 or greater is required to "
"support the 'volume group create' command"
)
raise exceptions.CommandError(msg)
volume_group_type = utils.find_resource(
volume_client.group_types,
parsed_args.volume_group_type,
)
volume_types = []
for volume_type in parsed_args.volume_types:
volume_types.append(
utils.find_resource(
volume_client.volume_types,
volume_type,
)
)
group = volume_client.groups.create(
volume_group_type.id,
','.join(x.id for x in volume_types),
parsed_args.name,
parsed_args.description,
availability_zone=parsed_args.availability_zone)
group = volume_client.groups.get(group.id)
return _format_group(group)
class DeleteVolumeGroup(command.Command):
"""Delete a volume group.
This command requires ``--os-volume-api-version`` 3.13 or greater.
"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'group',
metavar='<group>',
help=_('Name or ID of volume group to delete'),
)
parser.add_argument(
'--force',
action='store_true',
default=False,
help=_(
'Delete the volume group even if it contains volumes. '
'This will delete any remaining volumes in the group.',
)
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
if volume_client.api_version < api_versions.APIVersion('3.13'):
msg = _(
"--os-volume-api-version 3.13 or greater is required to "
"support the 'volume group delete' command"
)
raise exceptions.CommandError(msg)
group = utils.find_resource(
volume_client.groups,
parsed_args.group,
)
volume_client.groups.delete(
group.id, delete_volumes=parsed_args.force)
class SetVolumeGroup(command.ShowOne):
"""Update a volume group.
This command requires ``--os-volume-api-version`` 3.13 or greater.
"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'group',
metavar='<group>',
help=_('Name or ID of volume group.'),
)
parser.add_argument(
'--name',
metavar='<name>',
help=_('New name for group.'),
)
parser.add_argument(
'--description',
metavar='<description>',
help=_('New description for group.'),
)
parser.add_argument(
'--enable-replication',
action='store_true',
dest='enable_replication',
default=None,
help=_(
'Enable replication for group. '
'(supported by --os-volume-api-version 3.38 or above)'
),
)
parser.add_argument(
'--disable-replication',
action='store_false',
dest='enable_replication',
help=_(
'Disable replication for group. '
'(supported by --os-volume-api-version 3.38 or above)'
),
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
if volume_client.api_version < api_versions.APIVersion('3.13'):
msg = _(
"--os-volume-api-version 3.13 or greater is required to "
"support the 'volume group set' command"
)
raise exceptions.CommandError(msg)
group = utils.find_resource(
volume_client.groups,
parsed_args.group,
)
if parsed_args.enable_replication is not None:
if volume_client.api_version < api_versions.APIVersion('3.38'):
msg = _(
"--os-volume-api-version 3.38 or greater is required to "
"support the '--enable-replication' or "
"'--disable-replication' options"
)
raise exceptions.CommandError(msg)
if parsed_args.enable_replication:
volume_client.groups.enable_replication(group.id)
else:
volume_client.groups.disable_replication(group.id)
kwargs = {}
if parsed_args.name is not None:
kwargs['name'] = parsed_args.name
if parsed_args.description is not None:
kwargs['description'] = parsed_args.description
if kwargs:
group = volume_client.groups.update(group.id, **kwargs)
return _format_group(group)
class ListVolumeGroup(command.Lister):
"""Lists all volume groups.
This command requires ``--os-volume-api-version`` 3.13 or greater.
"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'--all-projects',
dest='all_projects',
action='store_true',
default=utils.env('ALL_PROJECTS', default=False),
help=_('Shows details for all projects (admin only).'),
)
# TODO(stephenfin): Add once we have an equivalent command for
# 'cinder list-filters'
# parser.add_argument(
# '--filter',
# metavar='<key=value>',
# action=parseractions.KeyValueAction,
# dest='filters',
# help=_(
# "Filter key and value pairs. Use 'foo' to "
# "check enabled filters from server. Use 'key~=value' for "
# "inexact filtering if the key supports "
# "(supported by --os-volume-api-version 3.33 or above)"
# ),
# )
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
if volume_client.api_version < api_versions.APIVersion('3.13'):
msg = _(
"--os-volume-api-version 3.13 or greater is required to "
"support the 'volume group list' command"
)
raise exceptions.CommandError(msg)
search_opts = {
'all_tenants': parsed_args.all_projects,
}
groups = volume_client.groups.list(
search_opts=search_opts)
column_headers = (
'ID',
'Status',
'Name',
)
columns = (
'id',
'status',
'name',
)
return (
column_headers,
(
utils.get_item_properties(a, columns)
for a in groups
),
)
class ShowVolumeGroup(command.ShowOne):
"""Show detailed information for a volume group.
This command requires ``--os-volume-api-version`` 3.13 or greater.
"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'group',
metavar='<group>',
help=_('Name or ID of volume group.'),
)
parser.add_argument(
'--volumes',
action='store_true',
dest='show_volumes',
default=None,
help=_(
'Show volumes included in the group. '
'(supported by --os-volume-api-version 3.25 or above)'
),
)
parser.add_argument(
'--no-volumes',
action='store_false',
dest='show_volumes',
help=_(
'Do not show volumes included in the group. '
'(supported by --os-volume-api-version 3.25 or above)'
),
)
parser.add_argument(
'--replication-targets',
action='store_true',
dest='show_replication_targets',
default=None,
help=_(
'Show replication targets for the group. '
'(supported by --os-volume-api-version 3.38 or above)'
),
)
parser.add_argument(
'--no-replication-targets',
action='store_false',
dest='show_replication_targets',
help=_(
'Do not show replication targets for the group. '
'(supported by --os-volume-api-version 3.38 or above)'
),
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
if volume_client.api_version < api_versions.APIVersion('3.13'):
msg = _(
"--os-volume-api-version 3.13 or greater is required to "
"support the 'volume group show' command"
)
raise exceptions.CommandError(msg)
kwargs = {}
if parsed_args.show_volumes is not None:
if volume_client.api_version < api_versions.APIVersion('3.25'):
msg = _(
"--os-volume-api-version 3.25 or greater is required to "
"support the '--(no-)volumes' option"
)
raise exceptions.CommandError(msg)
kwargs['list_volume'] = parsed_args.show_volumes
if parsed_args.show_replication_targets is not None:
if volume_client.api_version < api_versions.APIVersion('3.38'):
msg = _(
"--os-volume-api-version 3.38 or greater is required to "
"support the '--(no-)replication-targets' option"
)
raise exceptions.CommandError(msg)
group = utils.find_resource(
volume_client.groups,
parsed_args.group,
)
group = volume_client.groups.show(group.id, **kwargs)
if parsed_args.show_replication_targets:
replication_targets = \
volume_client.groups.list_replication_targets(group.id)
group.replication_targets = replication_targets
# TODO(stephenfin): Show replication targets
return _format_group(group)
class FailoverVolumeGroup(command.Command):
"""Failover replication for a volume group.
This command requires ``--os-volume-api-version`` 3.38 or greater.
"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'group',
metavar='<group>',
help=_('Name or ID of volume group to failover replication for.'),
)
parser.add_argument(
'--allow-attached-volume',
action='store_true',
dest='allow_attached_volume',
default=False,
help=_(
'Allow group with attached volumes to be failed over.',
)
)
parser.add_argument(
'--disallow-attached-volume',
action='store_false',
dest='allow_attached_volume',
default=False,
help=_(
'Disallow group with attached volumes to be failed over.',
)
)
parser.add_argument(
'--secondary-backend-id',
metavar='<backend_id>',
help=_('Secondary backend ID.'),
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
if volume_client.api_version < api_versions.APIVersion('3.38'):
msg = _(
"--os-volume-api-version 3.38 or greater is required to "
"support the 'volume group failover' command"
)
raise exceptions.CommandError(msg)
group = utils.find_resource(
volume_client.groups,
parsed_args.group,
)
volume_client.groups.failover_replication(
group.id,
allow_attached_volume=parsed_args.allow_attached_volume,
secondary_backend_id=parsed_args.secondary_backend_id,
)
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from cinderclient import api_versions
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
LOG = logging.getLogger(__name__)
def _format_group(group):
columns = (
'id',
'status',
'name',
'description',
'group_type',
'volume_types',
'availability_zone',
'created_at',
'volumes',
'group_snapshot_id',
'source_group_id',
)
column_headers = (
'ID',
'Status',
'Name',
'Description',
'Group Type',
'Volume Types',
'Availability Zone',
'Created At',
'Volumes',
'Group Snapshot ID',
'Source Group ID',
)
# TODO(stephenfin): Consider using a formatter for volume_types since it's
# a list
return (
column_headers,
utils.get_item_properties(
group,
columns,
),
)
class CreateVolumeGroup(command.ShowOne):
"""Create a volume group.
Generic volume groups enable you to create a group of volumes and manage
them together.
Generic volume groups are more flexible than consistency groups. Currently
volume consistency groups only support consistent group snapshot. It
cannot be extended easily to serve other purposes. A project may want to
put volumes used in the same application together in a group so that it is
easier to manage them together, and this group of volumes may or may not
    support consistent group snapshot. Generic volume groups solve this problem.
By decoupling the tight relationship between the group construct and the
consistency concept, generic volume groups can be extended to support other
features in the future.
This command requires ``--os-volume-api-version`` 3.13 or greater.
"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'volume_group_type',
metavar='<volume_group_type>',
help=_('Name or ID of volume group type to use.'),
)
parser.add_argument(
'volume_types',
metavar='<volume_type>',
nargs='+',
default=[],
help=_('Name or ID of volume type(s) to use.'),
)
parser.add_argument(
'--name',
metavar='<name>',
help=_('Name of the volume group.'),
)
parser.add_argument(
'--description',
metavar='<description>',
help=_('Description of a volume group.')
)
parser.add_argument(
'--availability-zone',
metavar='<availability-zone>',
help=_('Availability zone for volume group.'),
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
if volume_client.api_version < api_versions.APIVersion('3.13'):
msg = _(
"--os-volume-api-version 3.13 or greater is required to "
"support the 'volume group create' command"
)
raise exceptions.CommandError(msg)
volume_group_type = utils.find_resource(
volume_client.group_types,
parsed_args.volume_group_type,
)
volume_types = []
for volume_type in parsed_args.volume_types:
volume_types.append(
utils.find_resource(
volume_client.volume_types,
volume_type,
)
)
group = volume_client.groups.create(
volume_group_type.id,
','.join(x.id for x in volume_types),
parsed_args.name,
parsed_args.description,
availability_zone=parsed_args.availability_zone)
group = volume_client.groups.get(group.id)
return _format_group(group)
class DeleteVolumeGroup(command.Command):
"""Delete a volume group.
This command requires ``--os-volume-api-version`` 3.13 or greater.
"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'group',
metavar='<group>',
help=_('Name or ID of volume group to delete'),
)
parser.add_argument(
'--force',
action='store_true',
default=False,
help=_(
'Delete the volume group even if it contains volumes. '
'This will delete any remaining volumes in the group.',
)
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
if volume_client.api_version < api_versions.APIVersion('3.13'):
msg = _(
"--os-volume-api-version 3.13 or greater is required to "
"support the 'volume group delete' command"
)
raise exceptions.CommandError(msg)
group = utils.find_resource(
volume_client.groups,
parsed_args.group,
)
volume_client.groups.delete(
group.id, delete_volumes=parsed_args.force)
class SetVolumeGroup(command.ShowOne):
"""Update a volume group.
This command requires ``--os-volume-api-version`` 3.13 or greater.
"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'group',
metavar='<group>',
help=_('Name or ID of volume group.'),
)
parser.add_argument(
'--name',
metavar='<name>',
help=_('New name for group.'),
)
parser.add_argument(
'--description',
metavar='<description>',
help=_('New description for group.'),
)
parser.add_argument(
'--enable-replication',
action='store_true',
dest='enable_replication',
default=None,
help=_(
'Enable replication for group. '
'(supported by --os-volume-api-version 3.38 or above)'
),
)
parser.add_argument(
'--disable-replication',
action='store_false',
dest='enable_replication',
help=_(
'Disable replication for group. '
'(supported by --os-volume-api-version 3.38 or above)'
),
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
if volume_client.api_version < api_versions.APIVersion('3.13'):
msg = _(
"--os-volume-api-version 3.13 or greater is required to "
"support the 'volume group set' command"
)
raise exceptions.CommandError(msg)
group = utils.find_resource(
volume_client.groups,
parsed_args.group,
)
if parsed_args.enable_replication is not None:
if volume_client.api_version < api_versions.APIVersion('3.38'):
msg = _(
"--os-volume-api-version 3.38 or greater is required to "
"support the '--enable-replication' or "
"'--disable-replication' options"
)
raise exceptions.CommandError(msg)
if parsed_args.enable_replication:
volume_client.groups.enable_replication(group.id)
else:
volume_client.groups.disable_replication(group.id)
kwargs = {}
if parsed_args.name is not None:
kwargs['name'] = parsed_args.name
if parsed_args.description is not None:
kwargs['description'] = parsed_args.description
if kwargs:
group = volume_client.groups.update(group.id, **kwargs)
return _format_group(group)
class ListVolumeGroup(command.Lister):
"""Lists all volume groups.
This command requires ``--os-volume-api-version`` 3.13 or greater.
"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'--all-projects',
dest='all_projects',
action='store_true',
default=utils.env('ALL_PROJECTS', default=False),
help=_('Shows details for all projects (admin only).'),
)
# TODO(stephenfin): Add once we have an equivalent command for
# 'cinder list-filters'
# parser.add_argument(
# '--filter',
# metavar='<key=value>',
# action=parseractions.KeyValueAction,
# dest='filters',
# help=_(
# "Filter key and value pairs. Use 'foo' to "
# "check enabled filters from server. Use 'key~=value' for "
# "inexact filtering if the key supports "
# "(supported by --os-volume-api-version 3.33 or above)"
# ),
# )
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
if volume_client.api_version < api_versions.APIVersion('3.13'):
msg = _(
"--os-volume-api-version 3.13 or greater is required to "
"support the 'volume group list' command"
)
raise exceptions.CommandError(msg)
search_opts = {
'all_tenants': parsed_args.all_projects,
}
groups = volume_client.groups.list(
search_opts=search_opts)
column_headers = (
'ID',
'Status',
'Name',
)
columns = (
'id',
'status',
'name',
)
return (
column_headers,
(
utils.get_item_properties(a, columns)
for a in groups
),
)
class ShowVolumeGroup(command.ShowOne):
"""Show detailed information for a volume group.
This command requires ``--os-volume-api-version`` 3.13 or greater.
"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'group',
metavar='<group>',
help=_('Name or ID of volume group.'),
)
parser.add_argument(
'--volumes',
action='store_true',
dest='show_volumes',
default=None,
help=_(
'Show volumes included in the group. '
'(supported by --os-volume-api-version 3.25 or above)'
),
)
parser.add_argument(
'--no-volumes',
action='store_false',
dest='show_volumes',
help=_(
'Do not show volumes included in the group. '
'(supported by --os-volume-api-version 3.25 or above)'
),
)
parser.add_argument(
'--replication-targets',
action='store_true',
dest='show_replication_targets',
default=None,
help=_(
'Show replication targets for the group. '
'(supported by --os-volume-api-version 3.38 or above)'
),
)
parser.add_argument(
'--no-replication-targets',
action='store_false',
dest='show_replication_targets',
help=_(
'Do not show replication targets for the group. '
'(supported by --os-volume-api-version 3.38 or above)'
),
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
if volume_client.api_version < api_versions.APIVersion('3.13'):
msg = _(
"--os-volume-api-version 3.13 or greater is required to "
"support the 'volume group show' command"
)
raise exceptions.CommandError(msg)
kwargs = {}
if parsed_args.show_volumes is not None:
if volume_client.api_version < api_versions.APIVersion('3.25'):
msg = _(
"--os-volume-api-version 3.25 or greater is required to "
"support the '--(no-)volumes' option"
)
raise exceptions.CommandError(msg)
kwargs['list_volume'] = parsed_args.show_volumes
if parsed_args.show_replication_targets is not None:
if volume_client.api_version < api_versions.APIVersion('3.38'):
msg = _(
"--os-volume-api-version 3.38 or greater is required to "
"support the '--(no-)replication-targets' option"
)
raise exceptions.CommandError(msg)
group = utils.find_resource(
volume_client.groups,
parsed_args.group,
)
group = volume_client.groups.show(group.id, **kwargs)
if parsed_args.show_replication_targets:
replication_targets = \
volume_client.groups.list_replication_targets(group.id)
group.replication_targets = replication_targets
# TODO(stephenfin): Show replication targets
return _format_group(group)
class FailoverVolumeGroup(command.Command):
"""Failover replication for a volume group.
This command requires ``--os-volume-api-version`` 3.38 or greater.
"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'group',
metavar='<group>',
help=_('Name or ID of volume group to failover replication for.'),
)
parser.add_argument(
'--allow-attached-volume',
action='store_true',
dest='allow_attached_volume',
default=False,
help=_(
'Allow group with attached volumes to be failed over.',
)
)
parser.add_argument(
'--disallow-attached-volume',
action='store_false',
dest='allow_attached_volume',
default=False,
help=_(
'Disallow group with attached volumes to be failed over.',
)
)
parser.add_argument(
'--secondary-backend-id',
metavar='<backend_id>',
help=_('Secondary backend ID.'),
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
if volume_client.api_version < api_versions.APIVersion('3.38'):
msg = _(
"--os-volume-api-version 3.38 or greater is required to "
"support the 'volume group failover' command"
)
raise exceptions.CommandError(msg)
group = utils.find_resource(
volume_client.groups,
parsed_args.group,
)
volume_client.groups.failover_replication(
group.id,
allow_attached_volume=parsed_args.allow_attached_volume,
secondary_backend_id=parsed_args.secondary_backend_id,
)
| en | 0.738849 | # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # TODO(stephenfin): Consider using a formatter for volume_types since it's # a list Create a volume group. Generic volume groups enable you to create a group of volumes and manage them together. Generic volume groups are more flexible than consistency groups. Currently volume consistency groups only support consistent group snapshot. It cannot be extended easily to serve other purposes. A project may want to put volumes used in the same application together in a group so that it is easier to manage them together, and this group of volumes may or may not support consistent group snapshot. Generic volume group solve this problem. By decoupling the tight relationship between the group construct and the consistency concept, generic volume groups can be extended to support other features in the future. This command requires ``--os-volume-api-version`` 3.13 or greater. Delete a volume group. This command requires ``--os-volume-api-version`` 3.13 or greater. Update a volume group. This command requires ``--os-volume-api-version`` 3.13 or greater. Lists all volume groups. This command requires ``--os-volume-api-version`` 3.13 or greater. # TODO(stephenfin): Add once we have an equivalent command for # 'cinder list-filters' # parser.add_argument( # '--filter', # metavar='<key=value>', # action=parseractions.KeyValueAction, # dest='filters', # help=_( # "Filter key and value pairs. Use 'foo' to " # "check enabled filters from server. Use 'key~=value' for " # "inexact filtering if the key supports " # "(supported by --os-volume-api-version 3.33 or above)" # ), # ) Show detailed information for a volume group. This command requires ``--os-volume-api-version`` 3.13 or greater. # TODO(stephenfin): Show replication targets Failover replication for a volume group. This command requires ``--os-volume-api-version`` 3.38 or greater. | 1.949013 | 2 |
healpy/tutorial/pbpbUCC/rapidity08_uccData_power_spectrum.py | tuos/FlowAndCorrelations | 0 | 6631115 | import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import LogNorm
# Set the number of sources and the coordinates for the input
#nsources = int(2664001)
#nsources = int(3149)
nsources = int(1268174)
nside = 64
npix = hp.nside2npix(nside)
# npix= 12*nside^2
# Coordinates and the density field f
#thetas = np.random.random(nsources) * np.pi
#phis = np.random.random(nsources) * np.pi * 2.
fs = np.random.randn(nsources)
with open("./eventFile_data_noHead_eta08.txt") as inputFile:
lines = inputFile.readlines()
#print (lines[1].split()[1])
thetas=[]
phis=[]
for i in range(nsources):
thetas.append(float(lines[i+1].split()[1]))
phis.append(float(lines[i+1].split()[2]))
#if(thetas[0]<0 or thetas[0]>3.14):
# print("theta out of range")
#print(thetas)
# Go from HEALPix coordinates to indices
indices = hp.ang2pix(nside, thetas, phis)
# Initialize the map and fill it with the values
hpxmap = np.zeros(npix, dtype=float)  # builtin float (the np.float alias was removed in NumPy >= 1.24)
for i in range(nsources):
#hpxmap[indices[i]] += fs[i]
#hpxmap[indices[i]] += 1.0
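# each source adds npix/nsources counts (12*nside^2 == npix), so the filled map averages to 1 per pixel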
hpxmap[indices[i]] += 12.0*nside*nside/nsources
#for j in range(npix):
# if hpxmap[j] < 0.000001:
# hpxmap[j] = 0.000001
DPI = 100
SIZE = 800
# Inspect the map
#plt.figure(1)
#hp.mollview(hpxmap, xsize = SIZE)
#map_ring = hp.pixelfunc.reorder(hpxmap, inp='NEST', out='RING')
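# Smooth the map with a Gaussian beam of 5-degree FWHM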
hp_smoothed = hp.sphtfunc.smoothing(hpxmap, fwhm=np.radians(5))
#hp_smoothed = hp.sphtfunc.smoothing(hpxmap, fwhm=np.radians(5.0*3.14/180))
#hp_smoothed = hp.smoothing(hpxmap, fwhm=np.radians(5.0*3.14/180))
#hp_smoothed = hp.sphtfunc.smoothing(hpxmap, sigma=np.radians(5.0*3.14/180))
#hp_smoothed = hp.smoothing(hpxmap, fwhm=60, arcmin=True)
#hp.mollview(hp_smoothed, nest=False, xsize = SIZE, cmap=cm.jet, norm = 'hist', title='CMS UCC smoothed')
#cmap_tmp = cm.jet
#cmap_tmp.set_bad("gray")
#cmap_tmp.set_under("white")
hp.mollview(hp_smoothed, nest=False, xsize = SIZE, cmap=cm.jet, norm="hist", title='CMS UCC smoothed') #jet/rainbow/seismic/bwr; norm="hist"/LogNorm()
hp.graticule()
#plt.savefig("plot_uccData_3149m_smoothed.png", dpi = DPI)
#plt.savefig("plot_rapidity08_uccData_allEvents_smoothed.png", dpi = DPI)
plt.savefig("plot_rapidity08_uccData_allEvents_withnormhist_smoothed.png", dpi = DPI)
#plt.savefig("plot_uccData_oneEvents_smoothed.png", dpi = DPI)
'''
#plt.figure(2)
# Get the power spectrum
Cl = hp.anafast(hpxmap)
#print(Cl)
plt.plot(Cl)
plt.ylabel('C_{l}')
plt.savefig('plot_uccData_power_spectrum.png')
'''
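# --- Hedged sketch (not part of the original script): angular power spectrum ---
# Mirrors the commented-out anafast block above. It assumes `hpxmap`, `nside`,
# `np`, `hp` and `plt` from this script; lmax = 3*nside - 1 is healpy's usual
# band-limit choice. Flip the flag to actually run it.
COMPUTE_POWER_SPECTRUM = False
if COMPUTE_POWER_SPECTRUM:
    Cl = hp.anafast(hpxmap, lmax=3 * nside - 1)  # angular power spectrum C_l
    ell = np.arange(len(Cl))
    plt.figure()
    plt.plot(ell, Cl)
    plt.xlabel("multipole l")
    plt.ylabel("C_l")
    plt.savefig("plot_uccData_power_spectrum_sketch.png")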
| import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import LogNorm
# Set the number of sources and the coordinates for the input
#nsources = int(2664001)
#nsources = int(3149)
nsources = int(1268174)
nside = 64
npix = hp.nside2npix(nside)
# npix= 12*nside^2
# Coordinates and the density field f
#thetas = np.random.random(nsources) * np.pi
#phis = np.random.random(nsources) * np.pi * 2.
fs = np.random.randn(nsources)
with open("./eventFile_data_noHead_eta08.txt") as inputFile:
lines = inputFile.readlines()
#print (lines[1].split()[1])
thetas=[]
phis=[]
for i in range(nsources):
thetas.append(float(lines[i+1].split()[1]))
phis.append(float(lines[i+1].split()[2]))
#if(thetas[0]<0 or thetas[0]>3.14):
# print("theta out of range")
#print(thetas)
# Go from HEALPix coordinates to indices
indices = hp.ang2pix(nside, thetas, phis)
# Initate the map and fill it with the values
hpxmap = np.zeros(npix, dtype=float) # np.float was removed from NumPy; plain float gives the same float64 dtype
for i in range(nsources):
#hpxmap[indices[i]] += fs[i]
#hpxmap[indices[i]] += 1.0
hpxmap[indices[i]] += 12.0*nside*nside/nsources
#for j in range(npix):
# if hpxmap[j] < 0.000001:
# hpxmap[j] = 0.000001
DPI = 100
SIZE = 800
# Inspect the map
#plt.figure(1)
#hp.mollview(hpxmap, xsize = SIZE)
#map_ring = hp.pixelfunc.reorder(hpxmap, inp='NEST', out='RING')
hp_smoothed = hp.sphtfunc.smoothing(hpxmap, fwhm=np.radians(5))
#hp_smoothed = hp.sphtfunc.smoothing(hpxmap, fwhm=np.radians(5.0*3.14/180))
#hp_smoothed = hp.smoothing(hpxmap, fwhm=np.radians(5.0*3.14/180))
#hp_smoothed = hp.sphtfunc.smoothing(hpxmap, sigma=np.radians(5.0*3.14/180))
#hp_smoothed = hp.smoothing(hpxmap, fwhm=60, arcmin=True)
#hp.mollview(hp_smoothed, nest=False, xsize = SIZE, cmap=cm.jet, norm = 'hist', title='CMS UCC smoothed')
#cmap_tmp = cm.jet
#cmap_tmp.set_bad("gray")
#cmap_tmp.set_under("white")
hp.mollview(hp_smoothed, nest=False, xsize = SIZE, cmap=cm.jet, norm="hist", title='CMS UCC smoothed') #jet/rainbow/seismic/bwr; norm="hist"/LogNorm()
hp.graticule()
#plt.savefig("plot_uccData_3149m_smoothed.png", dpi = DPI)
#plt.savefig("plot_rapidity08_uccData_allEvents_smoothed.png", dpi = DPI)
plt.savefig("plot_rapidity08_uccData_allEvents_withnormhist_smoothed.png", dpi = DPI)
#plt.savefig("plot_uccData_oneEvents_smoothed.png", dpi = DPI)
'''
#plt.figure(2)
# Get the power spectrum
Cl = hp.anafast(hpxmap)
#print(Cl)
plt.plot(Cl)
plt.ylabel('C_{l}')
plt.savefig('plot_uccData_power_spectrum.png')
''' | en | 0.365461 | # Set the number of sources and the coordinates for the input #nsources = int(2664001) #nsources = int(3149) # npix= 12*nside^2 # Coordinates and the density field f #thetas = np.random.random(nsources) * np.pi #phis = np.random.random(nsources) * np.pi * 2. #print (lines[1].split()[1]) #if(thetas[0]<0 or thetas[0]>3.14): # print("theta out of range") #print(thetas) # Go from HEALPix coordinates to indices # Initate the map and fill it with the values #hpxmap[indices[i]] += fs[i] #hpxmap[indices[i]] += 1.0 #for j in range(npix): # if hpxmap[j] < 0.000001: # hpxmap[j] = 0.000001 # Inspect the map #plt.figure(1) #hp.mollview(hpxmap, xsize = SIZE) #map_ring = hp.pixelfunc.reorder(hpxmap, inp='NEST', out='RING') #hp_smoothed = hp.sphtfunc.smoothing(hpxmap, fwhm=np.radians(5.0*3.14/180)) #hp_smoothed = hp.smoothing(hpxmap, fwhm=np.radians(5.0*3.14/180)) #hp_smoothed = hp.sphtfunc.smoothing(hpxmap, sigma=np.radians(5.0*3.14/180)) #hp_smoothed = hp.smoothing(hpxmap, fwhm=60, arcmin=True) #hp.mollview(hp_smoothed, nest=False, xsize = SIZE, cmap=cm.jet, norm = 'hist', title='CMS UCC smoothed') #cmap_tmp = cm.jet #cmap_tmp.set_bad("gray") #cmap_tmp.set_under("white") #jet/rainbow/seismic/bwr; norm="hist"/LogNorm() #plt.savefig("plot_uccData_3149m_smoothed.png", dpi = DPI) #plt.savefig("plot_rapidity08_uccData_allEvents_smoothed.png", dpi = DPI) #plt.savefig("plot_uccData_oneEvents_smoothed.png", dpi = DPI) #plt.figure(2) # Get the power spectrum Cl = hp.anafast(hpxmap) #print(Cl) plt.plot(Cl) plt.ylabel('C_{l}') plt.savefig('plot_uccData_power_spectrum.png') | 2.19469 | 2 |
pyearcal/l10n/italian.py | janpipek/pyearcal | 3 | 6631116 | # -*- coding: utf-8 -*-
from .default import DefaultLocale
from dateutil.easter import easter
from datetime import date, timedelta
import calendar
class ItalianLocale(DefaultLocale):
"""Italian variant of the calendar.
    Includes both national and local holidays. Specify
province or city (by its Italian name) in the constructor
if needed.
Local holidays are included for the following:
**Cities:** Bari, Bologna, Cagliari, Firenze, Genova, Milano,
Napoli, Palermo, Roma, Torino, Trieste
**Provinces:** Bolzano (Alto Adige)
"""
def __init__(self, city=None, province=None):
"""
:type province: str
"""
super(ItalianLocale, self).__init__()
if city:
self.city = city.lower()
else:
self.city = None
if province:
self.province = province.lower()
else:
self.province = None
@property
def month_names(self):
return (
"Gennaio",
"Febbraio",
"Marzo",
"Aprile",
"Maggio",
"Giugno",
"Luglio",
"Agosto",
"Settembre",
"Ottobre",
"Novembre",
"Dicembre",
)
@property
def first_day_of_week(self):
return calendar.MONDAY
def get_holidays(self, year):
"""Italian holidays for a selected year.
Info taken from:
- http://www.timeanddate.com/holidays/italy/
- http://www.qppstudio.net/publicholidays2015/italy.htm
- https://it.wikipedia.org/wiki/Pentecoste
"""
hols = super().get_holidays(year)
hols.append(date(year, 1, 1)) # New Year
hols.append(date(year, 1, 6)) # Epiphany
hols.append(date(year, 4, 25)) # Liberation Day (St. Mark)
hols.append(date(year, 5, 1)) # Labour Day
hols.append(date(year, 6, 2)) # Republic Day
if self.city in ["firenze", "genova", "torino"]:
hols.append(date(year, 6, 24)) # St. Giovanni
if self.city == "roma":
hols.append(date(year, 6, 29)) # St. Peter & Paul
if self.city == "palermo":
hols.append(date(year, 7, 15)) # St. Rosalia
hols.append(date(year, 8, 15)) # Assumption of Mary
if self.city == "napoli":
hols.append(date(year, 9, 19)) # St. Gennaro
if self.city == "bologna":
hols.append(date(year, 10, 4)) # St. Petronio
if self.city == "cagliari":
hols.append(date(year, 10, 30)) # St. Saturnio
hols.append(date(year, 11, 1)) # All Saints' Day
if self.city == "trieste":
hols.append(date(year, 11, 3)) # St. Giusto
if self.city == "bari":
hols.append(date(year, 12, 6)) # St. Nicola
if self.city == "milano":
hols.append(date(year, 12, 7)) # St. Ambrose
hols.append(date(year, 12, 8)) # Immaculate Conception
hols.append(date(year, 12, 25)) # Christmas Day
hols.append(date(year, 12, 26)) # St. Stefano
# Easter (Sunday + Monday)
hols.append(easter(year))
hols.append(easter(year) + timedelta(days=1))
if self.province == "bolzano":
# Pentecoste (in Alto Adige / Südtirol)
hols.append(easter(year) + timedelta(days=50))
return hols
@property
def calendar_name(self):
return "Calendario"
| # -*- coding: utf-8 -*-
from .default import DefaultLocale
from dateutil.easter import easter
from datetime import date, timedelta
import calendar
class ItalianLocale(DefaultLocale):
"""Italian variant of the calendar.
    Includes both national and local holidays. Specify
province or city (by its Italian name) in the constructor
if needed.
Local holidays are included for the following:
**Cities:** Bari, Bologna, Cagliari, Firenze, Genova, Milano,
Napoli, Palermo, Roma, Torino, Trieste
**Provinces:** Bolzano (Alto Adige)
"""
def __init__(self, city=None, province=None):
"""
:type province: str
"""
super(ItalianLocale, self).__init__()
if city:
self.city = city.lower()
else:
self.city = None
if province:
self.province = province.lower()
else:
self.province = None
@property
def month_names(self):
return (
"Gennaio",
"Febbraio",
"Marzo",
"Aprile",
"Maggio",
"Giugno",
"Luglio",
"Agosto",
"Settembre",
"Ottobre",
"Novembre",
"Dicembre",
)
@property
def first_day_of_week(self):
return calendar.MONDAY
def get_holidays(self, year):
"""Italian holidays for a selected year.
Info taken from:
- http://www.timeanddate.com/holidays/italy/
- http://www.qppstudio.net/publicholidays2015/italy.htm
- https://it.wikipedia.org/wiki/Pentecoste
"""
hols = super().get_holidays(year)
hols.append(date(year, 1, 1)) # New Year
hols.append(date(year, 1, 6)) # Epiphany
hols.append(date(year, 4, 25)) # Liberation Day (St. Mark)
hols.append(date(year, 5, 1)) # Labour Day
hols.append(date(year, 6, 2)) # Republic Day
if self.city in ["firenze", "genova", "torino"]:
hols.append(date(year, 6, 24)) # St. Giovanni
if self.city == "roma":
hols.append(date(year, 6, 29)) # St. Peter & Paul
if self.city == "palermo":
hols.append(date(year, 7, 15)) # St. Rosalia
hols.append(date(year, 8, 15)) # Assumption of Mary
if self.city == "napoli":
hols.append(date(year, 9, 19)) # St. Gennaro
if self.city == "bologna":
hols.append(date(year, 10, 4)) # St. Petronio
if self.city == "cagliari":
hols.append(date(year, 10, 30)) # St. Saturnio
hols.append(date(year, 11, 1)) # All Saints' Day
if self.city == "trieste":
hols.append(date(year, 11, 3)) # St. Giusto
if self.city == "bari":
hols.append(date(year, 12, 6)) # St. Nicola
if self.city == "milano":
hols.append(date(year, 12, 7)) # St. Ambrose
hols.append(date(year, 12, 8)) # Immaculate Conception
hols.append(date(year, 12, 25)) # Christmas Day
hols.append(date(year, 12, 26)) # St. Stefano
# Easter (Sunday + Monday)
hols.append(easter(year))
hols.append(easter(year) + timedelta(days=1))
if self.province == "bolzano":
# Pentecoste (in Alto Adige / Südtirol)
hols.append(easter(year) + timedelta(days=50))
return hols
@property
def calendar_name(self):
return "Calendario"
| en | 0.683224 | # -*- coding: utf-8 -*- Italian variant of the calendar. Includes holiday both national and local. Specify province or city (by its Italian name) in the constructor if needed. Local holidays are included for the following: **Cities:** Bari, Bologna, Cagliari, Firenze, Genova, Milano, Napoli, Palermo, Roma, Torino, Trieste **Provinces:** Bolzano (Alto Adige) :type province: str Italian holidays for a selected year. Info taken from: - http://www.timeanddate.com/holidays/italy/ - http://www.qppstudio.net/publicholidays2015/italy.htm - https://it.wikipedia.org/wiki/Pentecoste # New Year # Epiphany # Liberation Day (St. Mark) # Labour Day # Republic Day # St. Giovanni # St. Peter & Paul # St. Rosalia # Assumption of Mary # St. Gennaro # St. Petronio # St. Saturnio # All Saints' Day # St. Giusto # St. Nicola # St. Ambrose # Immaculate Conception # Christmas Day # St. Stefano # Easter (Sunday + Monday) # Pentecoste (in Alto Adige / Südtirol) | 3.612832 | 4 |
height_map/cci_water_bodies_v4.py | jaluebbe/HeightMap | 5 | 6631117 | import os
from height_map.geotiff_handler import GeoTiffHandler
class WaterBodies:
attribution_url = 'https://www.mdpi.com/2072-4292/9/1/36'
attribution_name = 'CCI Water Bodies v4.0'
attribution = '© <a href="{}">{}</a>'.format(attribution_url,
attribution_name)
def __init__(self, path=None, file_name=None):
if path is None:
path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'maps/cci_wb4')
if file_name is None:
file_name = 'ESACCI-LC-L4-WB-Ocean-Land-Map-150m-P13Y-2000-v4.0.tif'
self.gth = GeoTiffHandler(os.path.join(path, file_name))
self.legend = {'0': 'Ocean', '1': 'Land', '2': 'Water'}
def get_value_at_position(self, lat, lon):
return self.gth.get_value_at_position(lat, lon)
def get_data_at_position(self, lat, lon):
if not (-90 <= lat <= 90 and -180 <= lon <= 180):
raise ValueError('invalid coordinates ({}, {})'.format(lat, lon))
value = self.get_value_at_position(lat, lon)
return {
'value': value, 'label': self.legend.get(str(value)),
'source': self.attribution_name, 'attributions': [self.attribution]}
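# --- Hedged usage sketch (not part of the original module) ---
# Assumes the CCI water-bodies GeoTIFF referenced above is present under
# maps/cci_wb4; the coordinates are an arbitrary example (roughly the North Sea).
if __name__ == "__main__":
    wb = WaterBodies()
    print(wb.get_data_at_position(54.0, 7.5))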
| import os
from height_map.geotiff_handler import GeoTiffHandler
class WaterBodies:
attribution_url = 'https://www.mdpi.com/2072-4292/9/1/36'
attribution_name = 'CCI Water Bodies v4.0'
attribution = '© <a href="{}">{}</a>'.format(attribution_url,
attribution_name)
def __init__(self, path=None, file_name=None):
if path is None:
path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'maps/cci_wb4')
if file_name is None:
file_name = 'ESACCI-LC-L4-WB-Ocean-Land-Map-150m-P13Y-2000-v4.0.tif'
self.gth = GeoTiffHandler(os.path.join(path, file_name))
self.legend = {'0': 'Ocean', '1': 'Land', '2': 'Water'}
def get_value_at_position(self, lat, lon):
return self.gth.get_value_at_position(lat, lon)
def get_data_at_position(self, lat, lon):
if not (-90 <= lat <= 90 and -180 <= lon <= 180):
raise ValueError('invalid coordinates ({}, {})'.format(lat, lon))
value = self.get_value_at_position(lat, lon)
return {
'value': value, 'label': self.legend.get(str(value)),
'source': self.attribution_name, 'attributions': [self.attribution]}
| none | 1 | 2.814025 | 3 |
|
bashhub/model/command_form.py | rationalthinker1/bashhub-client | 1 | 6631118 | from time import *
import uuid
from serializable import Serializable
class CommandForm(Serializable):
def __init__(self, command, path, exit_status, process_id,
process_start_time):
self.uuid = uuid.uuid4().__str__()
self.command = command
self.path = path
self.exit_status = exit_status
        self.process_id = int(process_id)  # Python 3 has no long(); int covers arbitrary precision
self.process_start_time = process_start_time
self.created = int(round(time() * 1000))
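# --- Hedged usage sketch (not part of the original module) ---
# Builds a CommandForm the way the bashhub client would; the command, path,
# exit status and process id below are made-up example values. Only attributes
# defined above are printed, since the Serializable API is not shown here.
if __name__ == "__main__":
    form = CommandForm("ls -la", "/home/user", 0, 12345, int(round(time() * 1000)))
    print(form.uuid, form.command, form.exit_status, form.created)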
| from time import *
import uuid
from serializable import Serializable
class CommandForm(Serializable):
def __init__(self, command, path, exit_status, process_id,
process_start_time):
self.uuid = uuid.uuid4().__str__()
self.command = command
self.path = path
self.exit_status = exit_status
        self.process_id = int(process_id)  # Python 3 has no long(); int covers arbitrary precision
self.process_start_time = process_start_time
self.created = int(round(time() * 1000))
| none | 1 | 2.653652 | 3 |
|
iucn_sim/transition_rates.py | tobiashofmann88/iucn_extinction_simulator | 11 | 6631119 | <reponame>tobiashofmann88/iucn_extinction_simulator
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MCMC-estimation of status transition rates from IUCN record
Created on Mon Oct 28 14:43:44 2019
@author: <NAME> (<EMAIL>)
"""
import iucn_sim.iucn_sim as iucn_sim
def add_arguments(parser):
parser.add_argument(
'--species_data',
required=True,
metavar='<path>',
help="File containing species list and current IUCN status of species, as well as generation length (GL) data estimates if available. GL data is only used for '--extinction_probs_mode 0' ('species_data.txt' output from get_iucn_data function).",
)
parser.add_argument(
'--iucn_history',
required=True,
metavar='<path>',
help="File containing IUCN history of the reference group for transition rate estimation ('*_iucn_history.txt' output of get_iucn_data function)."
)
parser.add_argument(
'--outdir',
required=True,
metavar='<path>',
help="Provide path to outdir where results will be saved."
)
parser.add_argument(
'--extinction_probs_mode',
default=0,
metavar='N',
help="Set to '0' to use the critE EX mode to determine extinction probabilities for each status (e.g. Mooers et al, 2008 approach). Set to '1' to use empirical EX mode, based on the recorded extinction in the IUCN history of the reference group (e.g. Monroe et al, 2019 approach). GL data can only be used in the critE EX mode ('0')."
)
parser.add_argument(
'--possibly_extinct_list',
default=0,
metavar='<path>',
help="File containing list of taxa that are likely extinct, but that are listed as extant in IUCN, including the year of their assessment as possibly extinct ('possibly_extinct_reference_taxa.txt' output from get_iucn_data function). These species will then be modeled as extinct by the esimate_rates function, which will effect the estimated extinction probabilities when chosing `--extinction_probs_mode 1`",
)
parser.add_argument(
'--species_specific_regression',
action='store_true',
help='Enables species-specific regression fitting to model LC, NT, and VU extinction probabilities. Only applicable with --extinction_probs_mode 0 (critE mode) and if GL is provided.',
default=False
)
parser.add_argument(
'--rate_samples',
default=100,
metavar='N',
help="How many rates to sample from the posterior transition rate estimates. These rates will be used to populate transition rate q-matrices for downstream simulations. Later on you can still chose to run more simulation replicates than the here specified number of produced transition rate q-matrices, in which case the `run_sim` function will randomely resample from the available q-matrices (default=100, this is ususally sufficient, larger numbers can lead to very high output file size volumes)."
)
parser.add_argument(
'--n_gen',
default=100000,
metavar='N',
help="Number of generations for MCMC for transition rate estimation (default=100000)."
)
parser.add_argument(
'--burnin',
default=1000,
metavar='N',
help="Burn-in for MCMC for transition rate estimation (default=1000)."
)
parser.add_argument(
'--seed',
default=None,
help="Set starting seed for the MCMC."
)
def main(args):
tr_rates = iucn_sim.transition_rates(
species_iucn_status = args.species_data,
iucn_history_file = args.iucn_history,
outdir = args.outdir,
extinction_probs_mode = args.extinction_probs_mode,
possibly_extinct_list = args.possibly_extinct_list,
species_specific_regression = args.species_specific_regression,
rate_samples = args.rate_samples,
n_gen = args.n_gen,
burnin = args.burnin,
seed = args.seed,
load_from_file = True # the load_from_file option should always be true when run from command line, but can be turned off when running from R or python
)
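# --- Hedged sketch (not part of the original module): standalone CLI wiring ---
# iucn_sim normally registers this sub-command itself; this only illustrates how
# add_arguments() and main() above fit together if the module were run directly.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Estimate IUCN status transition rates")
    add_arguments(parser)
    main(parser.parse_args())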
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MCMC-estimation of status transition rates from IUCN record
Created on Mon Oct 28 14:43:44 2019
@author: <NAME> (<EMAIL>)
"""
import iucn_sim.iucn_sim as iucn_sim
def add_arguments(parser):
parser.add_argument(
'--species_data',
required=True,
metavar='<path>',
help="File containing species list and current IUCN status of species, as well as generation length (GL) data estimates if available. GL data is only used for '--extinction_probs_mode 0' ('species_data.txt' output from get_iucn_data function).",
)
parser.add_argument(
'--iucn_history',
required=True,
metavar='<path>',
help="File containing IUCN history of the reference group for transition rate estimation ('*_iucn_history.txt' output of get_iucn_data function)."
)
parser.add_argument(
'--outdir',
required=True,
metavar='<path>',
help="Provide path to outdir where results will be saved."
)
parser.add_argument(
'--extinction_probs_mode',
default=0,
metavar='N',
help="Set to '0' to use the critE EX mode to determine extinction probabilities for each status (e.g. Mooers et al, 2008 approach). Set to '1' to use empirical EX mode, based on the recorded extinction in the IUCN history of the reference group (e.g. Monroe et al, 2019 approach). GL data can only be used in the critE EX mode ('0')."
)
parser.add_argument(
'--possibly_extinct_list',
default=0,
metavar='<path>',
help="File containing list of taxa that are likely extinct, but that are listed as extant in IUCN, including the year of their assessment as possibly extinct ('possibly_extinct_reference_taxa.txt' output from get_iucn_data function). These species will then be modeled as extinct by the esimate_rates function, which will effect the estimated extinction probabilities when chosing `--extinction_probs_mode 1`",
)
parser.add_argument(
'--species_specific_regression',
action='store_true',
help='Enables species-specific regression fitting to model LC, NT, and VU extinction probabilities. Only applicable with --extinction_probs_mode 0 (critE mode) and if GL is provided.',
default=False
)
parser.add_argument(
'--rate_samples',
default=100,
metavar='N',
help="How many rates to sample from the posterior transition rate estimates. These rates will be used to populate transition rate q-matrices for downstream simulations. Later on you can still chose to run more simulation replicates than the here specified number of produced transition rate q-matrices, in which case the `run_sim` function will randomely resample from the available q-matrices (default=100, this is ususally sufficient, larger numbers can lead to very high output file size volumes)."
)
parser.add_argument(
'--n_gen',
default=100000,
metavar='N',
help="Number of generations for MCMC for transition rate estimation (default=100000)."
)
parser.add_argument(
'--burnin',
default=1000,
metavar='N',
help="Burn-in for MCMC for transition rate estimation (default=1000)."
)
parser.add_argument(
'--seed',
default=None,
help="Set starting seed for the MCMC."
)
def main(args):
tr_rates = iucn_sim.transition_rates(
species_iucn_status = args.species_data,
iucn_history_file = args.iucn_history,
outdir = args.outdir,
extinction_probs_mode = args.extinction_probs_mode,
possibly_extinct_list = args.possibly_extinct_list,
species_specific_regression = args.species_specific_regression,
rate_samples = args.rate_samples,
n_gen = args.n_gen,
burnin = args.burnin,
seed = args.seed,
load_from_file = True # the load_from_file option should always be true when run from command line, but can be turned off when running from R or python
) | en | 0.704743 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- MCMC-estimation of status transition rates from IUCN record Created on Mon Oct 28 14:43:44 2019 @author: <NAME> (<EMAIL>) # the load_from_file option should always be true when run from command line, but can be turned off when running from R or python | 2.648878 | 3 |
0x04-python-more_data_structures/3-common_elements.py | Nahi-Terefe/alx-higher_level_programming | 0 | 6631120 | #!/usr/bin/python3
def common_elements(set_1, set_2):
first_list = set(set_1)
sec_list = set(set_2)
final_list = first_list & sec_list
return final_list
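# --- Hedged usage example (not part of the original exercise file) ---
# The sets below are arbitrary sample data; the common elements are {3, 4}.
if __name__ == "__main__":
    print(common_elements({1, 2, 3, 4}, {3, 4, 5}))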
| #!/usr/bin/python3
def common_elements(set_1, set_2):
first_list = set(set_1)
sec_list = set(set_2)
final_list = first_list & sec_list
return final_list
| fr | 0.386793 | #!/usr/bin/python3 | 2.967803 | 3 |
npz2npz4custom_load.py | k5iogura/YOLOv2-chainer | 0 | 6631121 | #! /usr/bin/env python3
# encoding: utf-8
import sys, os
import argparse
import numpy as np
import chainer
import chainer.links as L
from chainer import serializers
from yolov2 import YOLOv2, load_npz
def statistics(ndarray):
ndarray_ = ndarray.reshape(-1)
return np.std(ndarray_), np.mean(ndarray_), np.max(ndarray_), np.min(ndarray_)
parser = argparse.ArgumentParser(description="npz1 to npz transform")
parser.add_argument('npz1', help="input npz format")
parser.add_argument('npz2', help="output npz format")
parser.add_argument('--model','-m', type=str, default="yolov2",help="target model file")
args = parser.parse_args()
model_prefix,_ = os.path.splitext(os.path.basename(args.model))
print("from",model_prefix,"import YOLOv2")
exec("from "+model_prefix+" import YOLOv2")
print("define model (80,5)")
model = YOLOv2(80,5)
if os.path.exists(args.npz1):
print("custom load npz1 model",args.npz1)
load_npz(args.npz1, model)
else:
print(args.npz1,"not found")
sys.exit(-1)
#x = np.zeros((1, 3, 416, 416), dtype=np.float32)
x = np.random.rand(1, 3, 416, 416).astype(np.float32)
chainer.config.train = False
with chainer.using_config('train',False):
print("infer for dummy data", x.shape)
result = model(x)
print("result",result.shape, type(result))
print("save as npz model", args.npz2)
serializers.save_npz(args.npz2, model)
print("serializers.load_npz model",args.npz2)
serializers.load_npz(args.npz2, model)
print("infer No.2 for dummy data", x.shape)
result2 = model(x)
print("result No.2",result2.shape, type(result2))
print(" std/means/max/min")
print("original: %11.7f %11.7f %11.7f %11.7f"%statistics(result.data[0]))
print("for onnx: %11.7f %11.7f %11.7f %11.7f"%statistics(result2.data[0]))
print("finished")
| #! /usr/bin/env python3
# encoding: utf-8
import sys, os
import argparse
import numpy as np
import chainer
import chainer.links as L
from chainer import serializers
from yolov2 import YOLOv2, load_npz
def statistics(ndarray):
ndarray_ = ndarray.reshape(-1)
return np.std(ndarray_), np.mean(ndarray_), np.max(ndarray_), np.min(ndarray_)
parser = argparse.ArgumentParser(description="npz1 to npz transform")
parser.add_argument('npz1', help="input npz format")
parser.add_argument('npz2', help="output npz format")
parser.add_argument('--model','-m', type=str, default="yolov2",help="target model file")
args = parser.parse_args()
model_prefix,_ = os.path.splitext(os.path.basename(args.model))
print("from",model_prefix,"import YOLOv2")
exec("from "+model_prefix+" import YOLOv2")
print("define model (80,5)")
model = YOLOv2(80,5)
if os.path.exists(args.npz1):
print("custom load npz1 model",args.npz1)
load_npz(args.npz1, model)
else:
print(args.npz1,"not found")
sys.exit(-1)
#x = np.zeros((1, 3, 416, 416), dtype=np.float32)
x = np.random.rand(1, 3, 416, 416).astype(np.float32)
chainer.config.train = False
with chainer.using_config('train',False):
print("infer for dummy data", x.shape)
result = model(x)
print("result",result.shape, type(result))
print("save as npz model", args.npz2)
serializers.save_npz(args.npz2, model)
print("serializers.load_npz model",args.npz2)
serializers.load_npz(args.npz2, model)
print("infer No.2 for dummy data", x.shape)
result2 = model(x)
print("result No.2",result2.shape, type(result2))
print(" std/means/max/min")
print("original: %11.7f %11.7f %11.7f %11.7f"%statistics(result.data[0]))
print("for onnx: %11.7f %11.7f %11.7f %11.7f"%statistics(result2.data[0]))
print("finished")
| en | 0.183633 | #! /usr/bin/env python3 # encoding: utf-8 #x = np.zeros((1, 3, 416, 416), dtype=np.float32) | 2.67378 | 3 |
src/assets/data_example/sync.py | studiorvandco/Website | 0 | 6631122 | <filename>src/assets/data_example/sync.py<gh_stars>0
import os
import requests
import json
# Crontab command
# */10 * * * * python3 /path/to/sync.py >/dev/null 2>&1
# Script path
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
# --- Get statistics ---
statistics = requests.get("https://youtube.googleapis.com/youtube/v3/channels" +
"?part=statistics&id=########################" +
"&alt=json&fields=items&prettyPrint=true" +
"&key=#######################################").json()
statistics = statistics["items"][0]["statistics"]
# --- Get content creators ---
sections = requests.get("https://youtube.googleapis.com/youtube/v3/channelSections" +
"?part=contentDetails&channelId=########################" +
"&alt=json&fields=items&prettyPrint=true" +
"&key=#######################################").json()
sections = sections["items"][len(sections["items"]) - 1]["contentDetails"]["channels"]
content_creators_ids = ''
for channelId in sections:
content_creators_ids += "&id=" + channelId
content_creators_request = requests.get("https://youtube.googleapis.com/youtube/v3/channels" +
"?part=snippet" + content_creators_ids +
"&alt=json&fields=items&prettyPrint=true" +
"&key=#######################################").json()
content_creators = []
for i in range(len(content_creators_request["items"])):
content_creators.append({
"id": content_creators_request["items"][i]["id"],
"name": content_creators_request["items"][i]["snippet"]["title"],
"picture": content_creators_request["items"][i]["snippet"]["thumbnails"]["default"]["url"]
})
# --- Get playlists ---
# playlists_requests = requests.get("https://youtube.googleapis.com/youtube/v3/playlists" +
# "?part=snippet&part=contentDetails&channelId=########################" +
# "&alt=json&fields=items&prettyPrint=true" +
# "&key=#######################################").json()
#
# playlists = []
#
# for playlistId in playlists_requests["items"]:
# playlists_items_requests = requests.get("https://youtube.googleapis.com/youtube/v3/playlistItems" +
# "?part=snippet&part=contentDetails&playlistId=" + playlistId["id"] +
# "&alt=json&fields=items&prettyPrint=true&maxResults=25" +
# "&key=#######################################").json()
#
# playlists_items = []
# items_ids = ''
#
# for playlistItem in playlists_items_requests["items"]:
# playlists_items.append({
# "id": playlistItem["snippet"]["resourceId"]["videoId"],
# "thumbnails": playlistItem["snippet"]["thumbnails"]
# })
# items_ids += "&id=" + playlistItem["snippet"]["resourceId"]["videoId"]
#
# del items_ids
# infos = ''
#
# if playlistId["id"] == "##################################":
# infos = {
# "title": "example_title",
# "description": "example_description",
# "banner": "example_banner.jpg"
# }
# elif playlistId["id"] == "##################################":
# infos = {
# "title": "example_title",
# "description": "example_description",
# "banner": "example_banner.jpg"
# }
#
# playlists.append({
# "playlistId": playlistId["id"],
# "infos": infos,
# "thumbnail": playlistId["snippet"]["thumbnails"],
# "videos": playlists_items
# })
# --- Get Instagram posts ---
posts = requests.get("https://graph.instagram.com/v11.0/me/media" +
"?fields=id,caption,media_type,media_url,permalink,thumbnail_url,username,timestamp" +
"&access_token=####################################################################" +
"###########################################################################").json()
posts = posts["data"]
# --- Write statistics to file ---
file = open(os.path.join(__location__, "statistics.json"), 'w')
file.write(json.dumps(statistics, indent=2))
file.close()
# --- Write content creators to file ---
file = open(os.path.join(__location__, "content_creators.json"), 'w')
file.write(json.dumps(content_creators, indent=2))
file.close()
# --- Write playlists to file ---
# file = open(os.path.join(__location__, "playlists.json"), 'w')
# file.write(json.dumps(playlists, indent=2))
# file.close()
# # --- Write posts to file ---
file = open(os.path.join(__location__, "posts.json"), 'w')
file.write(json.dumps(posts, indent=2))
file.close()
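# --- Hedged refactor sketch (not part of the original script) ---
# The write blocks above repeat the open/write/close pattern; a small helper
# with a context manager closes the file even if serialization fails.
def write_json(filename, data):
    with open(os.path.join(__location__, filename), 'w') as f:
        json.dump(data, f, indent=2)
# Example: write_json("statistics.json", statistics)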
| <filename>src/assets/data_example/sync.py<gh_stars>0
import os
import requests
import json
# Crontab command
# */10 * * * * python3 /path/to/sync.py >/dev/null 2>&1
# Script path
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
# --- Get statistics ---
statistics = requests.get("https://youtube.googleapis.com/youtube/v3/channels" +
"?part=statistics&id=########################" +
"&alt=json&fields=items&prettyPrint=true" +
"&key=#######################################").json()
statistics = statistics["items"][0]["statistics"]
# --- Get content creators ---
sections = requests.get("https://youtube.googleapis.com/youtube/v3/channelSections" +
"?part=contentDetails&channelId=########################" +
"&alt=json&fields=items&prettyPrint=true" +
"&key=#######################################").json()
sections = sections["items"][len(sections["items"]) - 1]["contentDetails"]["channels"]
content_creators_ids = ''
for channelId in sections:
content_creators_ids += "&id=" + channelId
content_creators_request = requests.get("https://youtube.googleapis.com/youtube/v3/channels" +
"?part=snippet" + content_creators_ids +
"&alt=json&fields=items&prettyPrint=true" +
"&key=#######################################").json()
content_creators = []
for i in range(len(content_creators_request["items"])):
content_creators.append({
"id": content_creators_request["items"][i]["id"],
"name": content_creators_request["items"][i]["snippet"]["title"],
"picture": content_creators_request["items"][i]["snippet"]["thumbnails"]["default"]["url"]
})
# --- Get playlists ---
# playlists_requests = requests.get("https://youtube.googleapis.com/youtube/v3/playlists" +
# "?part=snippet&part=contentDetails&channelId=########################" +
# "&alt=json&fields=items&prettyPrint=true" +
# "&key=#######################################").json()
#
# playlists = []
#
# for playlistId in playlists_requests["items"]:
# playlists_items_requests = requests.get("https://youtube.googleapis.com/youtube/v3/playlistItems" +
# "?part=snippet&part=contentDetails&playlistId=" + playlistId["id"] +
# "&alt=json&fields=items&prettyPrint=true&maxResults=25" +
# "&key=#######################################").json()
#
# playlists_items = []
# items_ids = ''
#
# for playlistItem in playlists_items_requests["items"]:
# playlists_items.append({
# "id": playlistItem["snippet"]["resourceId"]["videoId"],
# "thumbnails": playlistItem["snippet"]["thumbnails"]
# })
# items_ids += "&id=" + playlistItem["snippet"]["resourceId"]["videoId"]
#
# del items_ids
# infos = ''
#
# if playlistId["id"] == "##################################":
# infos = {
# "title": "example_title",
# "description": "example_description",
# "banner": "example_banner.jpg"
# }
# elif playlistId["id"] == "##################################":
# infos = {
# "title": "example_title",
# "description": "example_description",
# "banner": "example_banner.jpg"
# }
#
# playlists.append({
# "playlistId": playlistId["id"],
# "infos": infos,
# "thumbnail": playlistId["snippet"]["thumbnails"],
# "videos": playlists_items
# })
# --- Get Instagram posts ---
posts = requests.get("https://graph.instagram.com/v11.0/me/media" +
"?fields=id,caption,media_type,media_url,permalink,thumbnail_url,username,timestamp" +
"&access_token=####################################################################" +
"###########################################################################").json()
posts = posts["data"]
# --- Write statistics to file ---
file = open(os.path.join(__location__, "statistics.json"), 'w')
file.write(json.dumps(statistics, indent=2))
file.close()
# --- Write content creators to file ---
file = open(os.path.join(__location__, "content_creators.json"), 'w')
file.write(json.dumps(content_creators, indent=2))
file.close()
# --- Write playlists to file ---
# file = open(os.path.join(__location__, "playlists.json"), 'w')
# file.write(json.dumps(playlists, indent=2))
# file.close()
# # --- Write posts to file ---
file = open(os.path.join(__location__, "posts.json"), 'w')
file.write(json.dumps(posts, indent=2))
file.close()
| en | 0.380445 | # Crontab command # */10 * * * * python3 /path/to/sync.py >/dev/null 2>&1 # Script path # --- Get statistics --- ########################" + #######################################").json() # --- Get content creators --- ########################" + #######################################").json() #######################################").json() # --- Get playlists --- # playlists_requests = requests.get("https://youtube.googleapis.com/youtube/v3/playlists" + # "?part=snippet&part=contentDetails&channelId=########################" + # "&alt=json&fields=items&prettyPrint=true" + # "&key=#######################################").json() # # playlists = [] # # for playlistId in playlists_requests["items"]: # playlists_items_requests = requests.get("https://youtube.googleapis.com/youtube/v3/playlistItems" + # "?part=snippet&part=contentDetails&playlistId=" + playlistId["id"] + # "&alt=json&fields=items&prettyPrint=true&maxResults=25" + # "&key=#######################################").json() # # playlists_items = [] # items_ids = '' # # for playlistItem in playlists_items_requests["items"]: # playlists_items.append({ # "id": playlistItem["snippet"]["resourceId"]["videoId"], # "thumbnails": playlistItem["snippet"]["thumbnails"] # }) # items_ids += "&id=" + playlistItem["snippet"]["resourceId"]["videoId"] # # del items_ids # infos = '' # # if playlistId["id"] == "##################################": # infos = { # "title": "example_title", # "description": "example_description", # "banner": "example_banner.jpg" # } # elif playlistId["id"] == "##################################": # infos = { # "title": "example_title", # "description": "example_description", # "banner": "example_banner.jpg" # } # # playlists.append({ # "playlistId": playlistId["id"], # "infos": infos, # "thumbnail": playlistId["snippet"]["thumbnails"], # "videos": playlists_items # }) # --- Get Instagram posts --- ####################################################################" + ##########################################################################").json() # --- Write statistics to file --- # --- Write content creators to file --- # --- Write playlists to file --- # file = open(os.path.join(__location__, "playlists.json"), 'w') # file.write(json.dumps(playlists, indent=2)) # file.close() # # --- Write posts to file --- | 2.550011 | 3 |
3d/upload.py | selipe/Flask-Miguel-Safari | 151 | 6631123 | <reponame>selipe/Flask-Miguel-Safari
import os
import imghdr
from flask import Flask, render_template
from flask.ext.bootstrap import Bootstrap
from flask.ext.wtf import Form
from wtforms import FileField, SubmitField, ValidationError
app = Flask(__name__)
app.config['SECRET_KEY'] = 'top secret!'
bootstrap = Bootstrap(app)
class UploadForm(Form):
image_file = FileField('Image file')
submit = SubmitField('Submit')
def validate_image_file(self, field):
if field.data.filename[-4:].lower() != '.jpg':
raise ValidationError('Invalid file extension')
if imghdr.what(field.data) != 'jpeg':
raise ValidationError('Invalid image format')
@app.route('/', methods=['GET', 'POST'])
def index():
image = None
form = UploadForm()
if form.validate_on_submit():
image = 'uploads/' + form.image_file.data.filename
form.image_file.data.save(os.path.join(app.static_folder, image))
return render_template('index.html', form=form, image=image)
if __name__ == '__main__':
app.run(debug=True)
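# --- Hedged modernisation note (not part of the original example) ---
# flask.ext.* was removed in Flask 1.0; with current packages (assumed names:
# flask-bootstrap, flask-wtf) the imports at the top of this file would read:
#
#   from flask_bootstrap import Bootstrap
#   from flask_wtf import FlaskForm as Form
#
# and the uploaded filename should be sanitised before saving, e.g.:
#
#   from werkzeug.utils import secure_filename
#   image = 'uploads/' + secure_filename(form.image_file.data.filename)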
| import os
import imghdr
from flask import Flask, render_template
from flask.ext.bootstrap import Bootstrap
from flask.ext.wtf import Form
from wtforms import FileField, SubmitField, ValidationError
app = Flask(__name__)
app.config['SECRET_KEY'] = 'top secret!'
bootstrap = Bootstrap(app)
class UploadForm(Form):
image_file = FileField('Image file')
submit = SubmitField('Submit')
def validate_image_file(self, field):
if field.data.filename[-4:].lower() != '.jpg':
raise ValidationError('Invalid file extension')
if imghdr.what(field.data) != 'jpeg':
raise ValidationError('Invalid image format')
@app.route('/', methods=['GET', 'POST'])
def index():
image = None
form = UploadForm()
if form.validate_on_submit():
image = 'uploads/' + form.image_file.data.filename
form.image_file.data.save(os.path.join(app.static_folder, image))
return render_template('index.html', form=form, image=image)
if __name__ == '__main__':
app.run(debug=True) | none | 1 | 2.658594 | 3 |
|
src/python/pants/init/target_roots.py | lahosken/pants | 1 | 6631124 | <filename>src/python/pants/init/target_roots.py<gh_stars>1-10
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from twitter.common.collections import OrderedSet
from pants.base.build_environment import get_buildroot
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.base.specs import SingleAddress
from pants.init.options_initializer import OptionsInitializer
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.scm.subsystems.changed import ChangedRequest
logger = logging.getLogger(__name__)
class InvalidSpecConstraint(Exception):
"""Raised when invalid constraints are given via target specs and arguments like --changed*."""
class TargetRoots(object):
"""Determines the target roots for a given pants run."""
@classmethod
def parse_specs(cls, target_specs, build_root=None):
"""Parse string specs into unique `Spec` objects.
:param iterable target_specs: An iterable of string specs.
:param string build_root: The path to the build root.
:returns: An `OrderedSet` of `Spec` objects.
"""
build_root = build_root or get_buildroot()
spec_parser = CmdLineSpecParser(build_root)
return OrderedSet(spec_parser.parse_spec(spec_str) for spec_str in target_specs)
@classmethod
def create(cls, options=None, args=None, build_root=None, change_calculator=None):
"""
:param Options options: An `Options` instance to use, if available.
:param string args: Raw cli args to use for parsing if an `Options` instance isn't available.
:param string build_root: The build root.
:param ChangeCalculator change_calculator: A `ChangeCalculator` for calculating changes.
"""
if not options:
assert args is not None, 'must pass `args` if not passing `options`'
options, _ = OptionsInitializer(OptionsBootstrapper(args=args)).setup(init_logging=False)
# Determine the literal target roots.
spec_roots = cls.parse_specs(options.target_specs, build_root)
# Determine `Changed` arguments directly from options to support pre-`Subsystem`
# initialization paths.
changed_options = options.for_scope('changed')
changed_request = ChangedRequest.from_options(changed_options)
logger.debug('args are: %s', args)
logger.debug('spec_roots are: %s', spec_roots)
logger.debug('changed_request is: %s', changed_request)
if change_calculator and changed_request.is_actionable():
if spec_roots:
# We've been provided spec roots (e.g. `./pants list ::`) AND a changed request. Error out.
raise InvalidSpecConstraint('cannot provide changed parameters and target specs!')
# We've been provided no spec roots (e.g. `./pants list`) AND a changed request. Compute
# alternate target roots.
changed_addresses = change_calculator.changed_target_addresses(changed_request)
logger.debug('changed addresses: %s', changed_addresses)
return ChangedTargetRoots(tuple(SingleAddress(a.spec_path, a.target_name)
for a in changed_addresses))
return LiteralTargetRoots(spec_roots)
def __init__(self, spec_roots):
self._spec_roots = spec_roots
def as_specs(self):
"""Returns the current target roots as Specs."""
return self._spec_roots
def __repr__(self):
return '{}({!r})'.format(self.__class__.__name__, self.as_specs())
class ChangedTargetRoots(TargetRoots):
"""Target roots that have been altered by `--changed` functionality."""
class LiteralTargetRoots(TargetRoots):
"""User defined target roots."""
| <filename>src/python/pants/init/target_roots.py<gh_stars>1-10
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from twitter.common.collections import OrderedSet
from pants.base.build_environment import get_buildroot
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.base.specs import SingleAddress
from pants.init.options_initializer import OptionsInitializer
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.scm.subsystems.changed import ChangedRequest
logger = logging.getLogger(__name__)
class InvalidSpecConstraint(Exception):
"""Raised when invalid constraints are given via target specs and arguments like --changed*."""
class TargetRoots(object):
"""Determines the target roots for a given pants run."""
@classmethod
def parse_specs(cls, target_specs, build_root=None):
"""Parse string specs into unique `Spec` objects.
:param iterable target_specs: An iterable of string specs.
:param string build_root: The path to the build root.
:returns: An `OrderedSet` of `Spec` objects.
"""
build_root = build_root or get_buildroot()
spec_parser = CmdLineSpecParser(build_root)
return OrderedSet(spec_parser.parse_spec(spec_str) for spec_str in target_specs)
@classmethod
def create(cls, options=None, args=None, build_root=None, change_calculator=None):
"""
:param Options options: An `Options` instance to use, if available.
:param string args: Raw cli args to use for parsing if an `Options` instance isn't available.
:param string build_root: The build root.
:param ChangeCalculator change_calculator: A `ChangeCalculator` for calculating changes.
"""
if not options:
assert args is not None, 'must pass `args` if not passing `options`'
options, _ = OptionsInitializer(OptionsBootstrapper(args=args)).setup(init_logging=False)
# Determine the literal target roots.
spec_roots = cls.parse_specs(options.target_specs, build_root)
# Determine `Changed` arguments directly from options to support pre-`Subsystem`
# initialization paths.
changed_options = options.for_scope('changed')
changed_request = ChangedRequest.from_options(changed_options)
logger.debug('args are: %s', args)
logger.debug('spec_roots are: %s', spec_roots)
logger.debug('changed_request is: %s', changed_request)
if change_calculator and changed_request.is_actionable():
if spec_roots:
# We've been provided spec roots (e.g. `./pants list ::`) AND a changed request. Error out.
raise InvalidSpecConstraint('cannot provide changed parameters and target specs!')
# We've been provided no spec roots (e.g. `./pants list`) AND a changed request. Compute
# alternate target roots.
changed_addresses = change_calculator.changed_target_addresses(changed_request)
logger.debug('changed addresses: %s', changed_addresses)
return ChangedTargetRoots(tuple(SingleAddress(a.spec_path, a.target_name)
for a in changed_addresses))
return LiteralTargetRoots(spec_roots)
def __init__(self, spec_roots):
self._spec_roots = spec_roots
def as_specs(self):
"""Returns the current target roots as Specs."""
return self._spec_roots
def __repr__(self):
return '{}({!r})'.format(self.__class__.__name__, self.as_specs())
class ChangedTargetRoots(TargetRoots):
"""Target roots that have been altered by `--changed` functionality."""
class LiteralTargetRoots(TargetRoots):
"""User defined target roots."""
| en | 0.654548 | # coding=utf-8 # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). Raised when invalid constraints are given via target specs and arguments like --changed*. Determines the target roots for a given pants run. Parse string specs into unique `Spec` objects. :param iterable target_specs: An iterable of string specs. :param string build_root: The path to the build root. :returns: An `OrderedSet` of `Spec` objects. :param Options options: An `Options` instance to use, if available. :param string args: Raw cli args to use for parsing if an `Options` instance isn't available. :param string build_root: The build root. :param ChangeCalculator change_calculator: A `ChangeCalculator` for calculating changes. # Determine the literal target roots. # Determine `Changed` arguments directly from options to support pre-`Subsystem` # initialization paths. # We've been provided spec roots (e.g. `./pants list ::`) AND a changed request. Error out. # We've been provided no spec roots (e.g. `./pants list`) AND a changed request. Compute # alternate target roots. Returns the current target roots as Specs. Target roots that have been altered by `--changed` functionality. User defined target roots. | 2.274674 | 2 |
adafruit_as7341.py | jposada202020/Adafruit_CircuitPython_AS7341 | 0 | 6631125 | # SPDX-FileCopyrightText: 2017 <NAME>, written for Adafruit Industries
# SPDX-FileCopyrightText: Copyright (c) 2020 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_as7341`
================================================================================
CircuitPython library for use with the Adafruit AS7341 breakout
* Author(s): <NAME>
Implementation Notes
--------------------
**Hardware:**
* `Adafruit AS7341 Breakout
<https://www.adafruit.com/product/4698>`_ (Product ID: 4698)
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://circuitpython.org/downloads
* Adafruit's Bus Device library:
https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
* Adafruit's Register library:
https://github.com/adafruit/Adafruit_CircuitPython_Register
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_AS7341.git"
from time import sleep, monotonic
from micropython import const
import adafruit_bus_device.i2c_device as i2c_device
from adafruit_register.i2c_struct import UnaryStruct, Struct # , ROUnaryStruct
from adafruit_register.i2c_bit import RWBit
from adafruit_register.i2c_bits import ROBits, RWBits
_AS7341_DEVICE_ID = const(0b001001) # Correct content of WHO_AM_I register
_AS7341_I2CADDR_DEFAULT = const(0x39) # AS7341 default i2c address
_AS7341_CHIP_ID = const(0x09) # AS7341 default device id from WHOAMI
_AS7341_WHOAMI = const(0x92) # Chip ID register
_AS7341_CONFIG = const(0x70) # Enables LED control and sets light sensing mode
_AS7341_GPIO = const(0x73) # Connects photo diode to GPIO or INT pins
_AS7341_LED = const(0x74) # LED Register; Enables and sets current limit
_AS7341_ENABLE = const(
0x80
) # Main enable register. Controls SMUX, Flicker Detection,Spectral and Power
_AS7341_ATIME = const(0x81) # Sets ADC integration step count
_AS7341_SP_LOW_TH_L = const(0x84) # Spectral measurement Low Threshold low byte
_AS7341_SP_LOW_TH_H = const(0x85) # 0 Spectral measurement Low Threshold high byte
_AS7341_SP_HIGH_TH_L = const(0x86) # Spectral measurement High Threshold low byte
_AS7341_SP_HIGH_TH_H = const(0x87) # Spectral measurement High Threshold low byte
_AS7341_STATUS = const(
0x93
) # Interrupt status registers. Indicates the occurrence of an interrupt
_AS7341_ASTATUS = const(
0x94
) # Spectral Saturation and Gain status. Reading from here latches the data
_AS7341_CH0_DATA_L = const(0x95) # ADC Channel 0 Data
_AS7341_CH0_DATA_H = const(0x96) # ADC Channel 0 Data
_AS7341_CH1_DATA_L = const(0x97) # ADC Channel 1 Data
_AS7341_CH1_DATA_H = const(0x98) # ADC Channel 1 Data
_AS7341_CH2_DATA_L = const(0x99) # ADC Channel 2 Data
_AS7341_CH2_DATA_H = const(0x9A) # ADC Channel 2 Data
_AS7341_CH3_DATA_L = const(0x9B) # ADC Channel 3 Data
_AS7341_CH3_DATA_H = const(0x9C) # ADC Channel 3 Data
_AS7341_CH4_DATA_L = const(0x9D) # ADC Channel 4 Data
_AS7341_CH4_DATA_H = const(0x9E) # ADC Channel 4 Data
_AS7341_CH5_DATA_L = const(0x9F) # ADC Channel 5 Data
_AS7341_CH5_DATA_H = const(0xA0) # ADC Channel 5 Data
_AS7341_STATUS2 = const(0xA3) # Measurement status flags; saturation, validity
_AS7341_STATUS3 = const(0xA4) # Spectral interrupt source, high or low threshold
_AS7341_CFG0 = const(
0xA9
) # Sets Low power mode, Register bank, and Trigger lengthening
_AS7341_CFG1 = const(0xAA) # Controls ADC Gain
_AS7341_CFG6 = const(0xAF) # Used to configure Smux
_AS7341_CFG9 = const(0xB2) # flicker detect and SMUX command system ints
_AS7341_CFG12 = const(0xB5) # ADC channel for interrupts, persistence and auto-gain
_AS7341_PERS = const(
0xBD
) # number of measurements outside thresholds to trigger an interrupt
_AS7341_GPIO2 = const(
0xBE
) # GPIO Settings and status: polarity, direction, sets output, reads
_AS7341_ASTEP_L = const(0xCA) # Integration step size low byte
_AS7341_ASTEP_H = const(0xCB) # Integration step size high byte
_AS7341_FD_TIME1 = const(0xD8) # Flicker detection integration time low byte
_AS7341_FD_TIME2 = const(0xDA) # Flicker detection gain and high nibble
_AS7341_FD_STATUS = const(
0xDB
) # Flicker detection status; measurement valid, saturation, flicker
_AS7341_INTENAB = const(0xF9) # Enables individual interrupt types
_AS7341_CONTROL = const(0xFA) # Auto-zero, fifo clear, clear SAI active
_AS7341_FD_CFG0 = const(0xD7) # Enables FIFO for flicker detection
def _low_bank(func):
# pylint:disable=protected-access
def _decorator(self, *args, **kwargs):
self._low_bank_active = True
retval = func(self, *args, **kwargs)
self._low_bank_active = False
return retval
return _decorator
class CV:
"""struct helper"""
@classmethod
def add_values(cls, value_tuples):
"""Add CV values to the class"""
cls.string = {}
cls.lsb = {}
for value_tuple in value_tuples:
name, value, string, lsb = value_tuple
setattr(cls, name, value)
cls.string[value] = string
cls.lsb[value] = lsb
@classmethod
def is_valid(cls, value):
"""Validate that a given value is a member"""
return value in cls.string
# class Flicker(CV):
# """Options for ``flicker_detection_type``"""
# pass # pylint: disable=unnecessary-pass
# Flicker.add_values((("FLICKER_100HZ", 0, 100, None), ("FLICKER_1000HZ", 1, 1000, None)))
class Gain(CV):
"""Options for ``accelerometer_range``"""
pass # pylint: disable=unnecessary-pass
Gain.add_values(
(
("GAIN_0_5X", 0, 0.5, None),
("GAIN_1X", 1, 1, None),
("GAIN_2X", 2, 2, None),
("GAIN_4X", 3, 4, None),
("GAIN_8X", 4, 8, None),
("GAIN_16X", 5, 16, None),
("GAIN_32X", 6, 32, None),
("GAIN_64X", 7, 64, None),
("GAIN_128X", 8, 128, None),
("GAIN_256X", 9, 256, None),
("GAIN_512X", 10, 512, None),
)
)
class SMUX_OUT(CV):
"""Options for ``smux_out``"""
pass # pylint: disable=unnecessary-pass
SMUX_OUT.add_values(
(
("DISABLED", 0, 0, None),
("ADC0", 1, 1, None),
("ADC1", 2, 2, None),
("ADC2", 3, 3, None),
("ADC3", 4, 4, None),
("ADC4", 5, 5, None),
("ADC5", 6, 6, None),
)
)
class SMUX_IN(CV):
"""Options for ``smux_in``"""
pass # pylint: disable=unnecessary-pass
SMUX_IN.add_values(
(
("NC_F3L", 0, 0, None),
("F1L_NC", 1, 1, None),
("NC_NC0", 2, 2, None),
("NC_F8L", 3, 3, None),
("F6L_NC", 4, 4, None),
("F2L_F4L", 5, 5, None),
("NC_F5L", 6, 6, None),
("F7L_NC", 7, 7, None),
("NC_CL", 8, 8, None),
("NC_F5R", 9, 9, None),
("F7R_NC", 10, 10, None),
("NC_NC1", 11, 11, None),
("NC_F2R", 12, 12, None),
("F4R_NC", 13, 13, None),
("F8R_F6R", 14, 14, None),
("NC_F3R", 15, 15, None),
("F1R_EXT_GPIO", 16, 16, None),
("EXT_INT_CR", 17, 17, None),
("NC_DARK", 18, 18, None),
("NIR_F", 19, 19, None),
)
)
class AS7341: # pylint:disable=too-many-instance-attributes, no-member
"""Library for the AS7341 Sensor
:param ~busio.I2C i2c_bus: The I2C bus the device is connected to
:param int address: The I2C device address. Defaults to :const:`0x39`
**Quickstart: Importing and using the device**
Here is an example of using the :class:`AS7341`.
First you will need to import the libraries to use the sensor
.. code-block:: python
import board
from adafruit_as7341 import AS7341
Once this is done you can define your `board.I2C` object and define your sensor object
.. code-block:: python
i2c = board.I2C() # uses board.SCL and board.SDA
sensor = AS7341(i2c)
Now you have access to the different channels
.. code-block:: python
        channel_415nm = sensor.channel_415nm
        channel_445nm = sensor.channel_445nm
        channel_480nm = sensor.channel_480nm
        channel_515nm = sensor.channel_515nm
        channel_555nm = sensor.channel_555nm
        channel_590nm = sensor.channel_590nm
        channel_630nm = sensor.channel_630nm
        channel_680nm = sensor.channel_680nm
"""
_device_id = ROBits(6, _AS7341_WHOAMI, 2)
_smux_enable_bit = RWBit(_AS7341_ENABLE, 4)
_led_control_enable_bit = RWBit(_AS7341_CONFIG, 3)
_color_meas_enabled = RWBit(_AS7341_ENABLE, 1)
_power_enabled = RWBit(_AS7341_ENABLE, 0)
_low_bank_active = RWBit(_AS7341_CFG0, 4)
_smux_command = RWBits(2, _AS7341_CFG6, 3)
_fd_status = UnaryStruct(_AS7341_FD_STATUS, "<B")
_channel_0_data = UnaryStruct(_AS7341_CH0_DATA_L, "<H")
_channel_1_data = UnaryStruct(_AS7341_CH1_DATA_L, "<H")
_channel_2_data = UnaryStruct(_AS7341_CH2_DATA_L, "<H")
_channel_3_data = UnaryStruct(_AS7341_CH3_DATA_L, "<H")
_channel_4_data = UnaryStruct(_AS7341_CH4_DATA_L, "<H")
_channel_5_data = UnaryStruct(_AS7341_CH5_DATA_L, "<H")
# "Reading the ASTATUS register (0x60 or 0x94) latches
# all 12 spectral data bytes to that status read." Datasheet Sec. 10.2.7
_all_channels = Struct(_AS7341_ASTATUS, "<BHHHHHH")
_led_current_bits = RWBits(7, _AS7341_LED, 0)
_led_enabled = RWBit(_AS7341_LED, 7)
atime = UnaryStruct(_AS7341_ATIME, "<B")
"""The integration time step count.
Total integration time will be ``(ATIME + 1) * (ASTEP + 1) * 2.78µS``
"""
astep = UnaryStruct(_AS7341_ASTEP_L, "<H")
""" The integration time step size in 2.78 microsecond increments"""
_gain = UnaryStruct(_AS7341_CFG1, "<B")
_data_ready_bit = RWBit(_AS7341_STATUS2, 6)
"""
* @brief
*
* @return true: success false: failure
"""
def __init__(self, i2c_bus, address=_AS7341_I2CADDR_DEFAULT):
self.i2c_device = i2c_device.I2CDevice(i2c_bus, address)
if not self._device_id in [_AS7341_DEVICE_ID]:
raise RuntimeError("Failed to find an AS7341 sensor - check your wiring!")
self.initialize()
self._buffer = bytearray(2)
self._low_channels_configured = False
self._high_channels_configured = False
self._flicker_detection_1k_configured = False
def initialize(self):
"""Configure the sensors with the default settings"""
self._power_enabled = True
self._led_control_enabled = True
self.atime = 100
self.astep = 999
self.gain = Gain.GAIN_128X # pylint:disable=no-member
@property
def all_channels(self):
"""The current readings for all six ADC channels"""
self._configure_f1_f4()
adc_reads_f1_f4 = self._all_channels
reads = adc_reads_f1_f4[1:-2]
self._configure_f5_f8()
adc_reads_f5_f8 = self._all_channels
reads += adc_reads_f5_f8[1:-2]
return reads
@property
def channel_415nm(self):
"""The current reading for the 415nm band"""
self._configure_f1_f4()
return self._channel_0_data
@property
def channel_445nm(self):
"""The current reading for the 445nm band"""
self._configure_f1_f4()
return self._channel_1_data
@property
def channel_480nm(self):
"""The current reading for the 480nm band"""
self._configure_f1_f4()
return self._channel_2_data
@property
def channel_515nm(self):
"""The current reading for the 515nm band"""
self._configure_f1_f4()
return self._channel_3_data
@property
def channel_555nm(self):
"""The current reading for the 555nm band"""
self._configure_f5_f8()
return self._channel_0_data
@property
def channel_590nm(self):
"""The current reading for the 590nm band"""
self._configure_f5_f8()
return self._channel_1_data
@property
def channel_630nm(self):
"""The current reading for the 630nm band"""
self._configure_f5_f8()
return self._channel_2_data
@property
def channel_680nm(self):
"""The current reading for the 680nm band"""
self._configure_f5_f8()
return self._channel_3_data
# TODO: Add clear and NIR accessors
def _wait_for_data(self, timeout=1.0):
"""Wait for sensor data to be ready"""
start = monotonic()
while not self._data_ready_bit:
if monotonic() - start > timeout:
raise RuntimeError("Timeout occurred waiting for sensor data")
sleep(0.001)
def _write_register(self, addr, data):
self._buffer[0] = addr
self._buffer[1] = data
with self.i2c_device as i2c:
i2c.write(self._buffer)
def _configure_f1_f4(self):
"""Configure the sensor to read from elements F1-F4, Clear, and NIR"""
# disable SP_EN bit while making config changes
if self._low_channels_configured:
return
self._high_channels_configured = False
self._flicker_detection_1k_configured = False
self._color_meas_enabled = False
# ENUM-ify
self._smux_command = 2
# Write new configuration to all the 20 registers
self._f1f4_clear_nir()
# Start SMUX command
self._smux_enabled = True
# Enable SP_EN bit
self._color_meas_enabled = True
self._low_channels_configured = True
self._wait_for_data()
def _configure_f5_f8(self):
"""Configure the sensor to read from elements F5-F8, Clear, and NIR"""
# disable SP_EN bit while making config changes
if self._high_channels_configured:
return
self._low_channels_configured = False
self._flicker_detection_1k_configured = False
self._color_meas_enabled = False
# ENUM-ify
self._smux_command = 2
# Write new configuration to all the 20 registers
self._f5f8_clear_nir()
# Start SMUX command
self._smux_enabled = True
# Enable SP_EN bit
self._color_meas_enabled = True
self._high_channels_configured = True
self._wait_for_data()
@property
def flicker_detected(self):
"""The flicker frequency detected in Hertz"""
if not self._flicker_detection_1k_configured:
raise AttributeError(
"Flicker detection must be enabled to access `flicker_detected`"
)
flicker_status = self._fd_status
if flicker_status == 45:
return 1000
if flicker_status == 46:
return 1200
return None
# if we haven't returned yet either there was an error or an unknown frequency was detected
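# The 45/46 comparisons above are raw FD_STATUS byte values; they appear to combine
# the "measurement valid" flags with the flicker bit for the custom 1000 Hz / 1200 Hz
# coefficient sets loaded by _configure_1k_flicker_detection. Treat them as
# empirically chosen constants rather than documented bit fields.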
@property
def flicker_detection_enabled(self):
"""The flicker detection status of the sensor. True if the sensor is configured\
to detect flickers. Currently only 1000Hz and 1200Hz flicker detection is supported
"""
return self._flicker_detection_1k_configured
@flicker_detection_enabled.setter
def flicker_detection_enabled(self, flicker_enable):
if flicker_enable:
self._configure_1k_flicker_detection()
else:
self._configure_f1_f4() # sane default
def _f1f4_clear_nir(self):
"""Configure SMUX for sensors F1-F4, Clear and NIR"""
self._set_smux(SMUX_IN.NC_F3L, SMUX_OUT.DISABLED, SMUX_OUT.ADC2)
self._set_smux(SMUX_IN.F1L_NC, SMUX_OUT.ADC0, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC0, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F8L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F6L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F2L_F4L, SMUX_OUT.ADC1, SMUX_OUT.ADC3)
self._set_smux(SMUX_IN.NC_F5L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F7L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_CL, SMUX_OUT.DISABLED, SMUX_OUT.ADC4)
self._set_smux(SMUX_IN.NC_F5R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F7R_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC1, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F2R, SMUX_OUT.DISABLED, SMUX_OUT.ADC1)
self._set_smux(SMUX_IN.F4R_NC, SMUX_OUT.ADC3, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F8R_F6R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F3R, SMUX_OUT.DISABLED, SMUX_OUT.ADC2)
self._set_smux(SMUX_IN.F1R_EXT_GPIO, SMUX_OUT.ADC0, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.EXT_INT_CR, SMUX_OUT.DISABLED, SMUX_OUT.ADC4)
self._set_smux(SMUX_IN.NC_DARK, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NIR_F, SMUX_OUT.ADC5, SMUX_OUT.DISABLED)
def _f5f8_clear_nir(self):
# SMUX Config for F5,F6,F7,F8,NIR,Clear
self._set_smux(SMUX_IN.NC_F3L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F1L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC0, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F8L, SMUX_OUT.DISABLED, SMUX_OUT.ADC3)
self._set_smux(SMUX_IN.F6L_NC, SMUX_OUT.ADC1, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F2L_F4L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F5L, SMUX_OUT.DISABLED, SMUX_OUT.ADC0)
self._set_smux(SMUX_IN.F7L_NC, SMUX_OUT.ADC2, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_CL, SMUX_OUT.DISABLED, SMUX_OUT.ADC4)
self._set_smux(SMUX_IN.NC_F5R, SMUX_OUT.DISABLED, SMUX_OUT.ADC0)
self._set_smux(SMUX_IN.F7R_NC, SMUX_OUT.ADC2, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC1, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F2R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F4R_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F8R_F6R, SMUX_OUT.ADC3, SMUX_OUT.ADC1)
self._set_smux(SMUX_IN.NC_F3R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F1R_EXT_GPIO, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.EXT_INT_CR, SMUX_OUT.DISABLED, SMUX_OUT.ADC4)
self._set_smux(SMUX_IN.NC_DARK, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NIR_F, SMUX_OUT.ADC5, SMUX_OUT.DISABLED)
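# Net effect of the two SMUX maps above (a summary derived from the _set_smux calls):
# _f1f4_clear_nir routes F1->ADC0, F2->ADC1, F3->ADC2, F4->ADC3, while
# _f5f8_clear_nir routes F5->ADC0, F6->ADC1, F7->ADC2, F8->ADC3; both maps put
# Clear->ADC4 and NIR->ADC5, which is why the channel_* properties can reuse
# _channel_0_data.._channel_3_data once the right configuration is selected.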
# TODO: Convert as much of this as possible to properties or named attributes
def _configure_1k_flicker_detection(self):
self._low_channels_configured = False
self._high_channels_configured = False
# RAM_BANK 0 select which RAM bank to access in register addresses 0x00-0x7f
self._write_register(_AS7341_CFG0, 0x00)
# The calculated coefficients are stored into RAM bank 0 and RAM bank 1;
# they are used instead of the default 100Hz and 120Hz flicker detection
# coefficients
# write new coefficients to detect the 1000Hz and 1200Hz - part 1
self._write_register(0x04, 0x9E)
self._write_register(0x05, 0x36)
self._write_register(0x0E, 0x2E)
self._write_register(0x0F, 0x1B)
self._write_register(0x18, 0x7D)
self._write_register(0x19, 0x36)
self._write_register(0x22, 0x09)
self._write_register(0x23, 0x1B)
self._write_register(0x2C, 0x5B)
self._write_register(0x2D, 0x36)
self._write_register(0x36, 0xE5)
self._write_register(0x37, 0x1A)
self._write_register(0x40, 0x3A)
self._write_register(0x41, 0x36)
self._write_register(0x4A, 0xC1)
self._write_register(0x4B, 0x1A)
self._write_register(0x54, 0x18)
self._write_register(0x55, 0x36)
self._write_register(0x5E, 0x9C)
self._write_register(0x5F, 0x1A)
self._write_register(0x68, 0xF6)
self._write_register(0x69, 0x35)
self._write_register(0x72, 0x78)
self._write_register(0x73, 0x1A)
self._write_register(0x7C, 0x4D)
self._write_register(0x7D, 0x35)
# RAM_BANK 1 select which RAM bank to access in register addresses 0x00-0x7f
self._write_register(_AS7341_CFG0, 0x01)
# write new coefficients to detect the 1000Hz and 1200Hz - part 2
self._write_register(0x06, 0x54)
self._write_register(0x07, 0x1A)
self._write_register(0x10, 0xB3)
self._write_register(0x11, 0x35)
self._write_register(0x1A, 0x2F)
self._write_register(0x1B, 0x1A)
self._write_register(_AS7341_CFG0, 0x01)
# select RAM coefficients for flicker detection by setting
# fd_disable_constant_init to „1“ (FD_CFG0 register) in FD_CFG0 register -
# 0xD7
# fd_disable_constant_init=1
# fd_samples=4
self._write_register(_AS7341_FD_CFG0, 0x60)
# in FD_CFG1 register - 0xd8 fd_time(7:0) = 0x40
self._write_register(_AS7341_FD_TIME1, 0x40)
# in FD_CFG2 register - 0xd9 fd_dcr_filter_size=1 fd_nr_data_sets(2:0)=5
self._write_register(0xD9, 0x25)
# in FD_CFG3 register - 0xda fd_gain=9
self._write_register(_AS7341_FD_TIME2, 0x48)
# in CFG9 register - 0xb2 sien_fd=1
self._write_register(_AS7341_CFG9, 0x40)
# in ENABLE - 0x80 fden=1 and pon=1 are enabled
self._write_register(_AS7341_ENABLE, 0x41)
self._flicker_detection_1k_configured = True
def _smux_template(self):
# SMUX_OUT.DISABLED
# SMUX_OUT.ADC0
# SMUX_OUT.ADC1
# SMUX_OUT.ADC2
# SMUX_OUT.ADC3
# SMUX_OUT.ADC4
# SMUX_OUT.ADC5
self._set_smux(SMUX_IN.NC_F3L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F1L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC0, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F8L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F6L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F2L_F4L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F5L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F7L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_CL, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F5R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F7R_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC1, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F2R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F4R_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F8R_F6R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F3R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F1R_EXT_GPIO, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.EXT_INT_CR, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_DARK, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NIR_F, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
def _set_smux(self, smux_addr, smux_out1, smux_out2):
"""Connect a pair of sensors to an ADC channel"""
low_nibble = smux_out1
high_nibble = smux_out2 << 4
smux_byte = high_nibble | low_nibble
self._write_register(smux_addr, smux_byte)
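# Worked example (illustrative): _set_smux(SMUX_IN.NC_F3L, SMUX_OUT.DISABLED,
# SMUX_OUT.ADC2) packs DISABLED (0) into the low nibble and ADC2 (3) into the
# high nibble, so the byte 0x30 is written to SMUX register address 0.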
@property
def gain(self):
"""The ADC gain multiplier. Must be a valid :meth:`adafruit_as7341.Gain`"""
return self._gain
@gain.setter
def gain(self, gain_value):
if not Gain.is_valid(gain_value):
raise AttributeError("`gain` must be a valid `adafruit_as7341.Gain`")
self._gain = gain_value
@property
def _smux_enabled(self):
return self._smux_enable_bit
@_smux_enabled.setter
def _smux_enabled(self, enable_smux):
self._low_bank_active = False
self._smux_enable_bit = enable_smux
while self._smux_enable_bit is True:
sleep(0.001)
@property
@_low_bank
def led_current(self):
"""The maximum allowed current through the attached LED in milliamps.
Odd numbered values will be rounded down to the next lowest even number due
to the internal configuration restrictions"""
current_val = self._led_current_bits
return (current_val * 2) + 4
@led_current.setter
@_low_bank
def led_current(self, led_current):
new_current = int((min(258, max(4, led_current)) - 4) / 2)
self._led_current_bits = new_current
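# Worked example (illustrative): setting led_current = 11 clamps to the 4-258 mA
# range, maps to int((11 - 4) / 2) = 3 in the register, and reads back as
# (3 * 2) + 4 = 10 mA - the "odd values round down" behaviour described above.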
@property
@_low_bank
def led(self):
"""The attached LED. Set to True to turn on, False to turn off"""
return self._led_enabled
@led.setter
@_low_bank
def led(self, led_on):
self._led_enabled = led_on
@property
@_low_bank
def _led_control_enabled(self):
return self._led_control_enable_bit
@_led_control_enabled.setter
@_low_bank
def _led_control_enabled(self, enabled):
self._led_control_enable_bit = enabled
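# Minimal usage sketch (illustrative only; assumes a CircuitPython board with an
# AS7341 wired to the default I2C pins - the ``board`` module only exists on
# CircuitPython builds, so this block is guarded and never runs on import):
if __name__ == "__main__":
    import board  # CircuitPython-only module

    demo_sensor = AS7341(board.I2C())
    demo_sensor.led_current = 10  # milliamps; odd values round down to even
    demo_sensor.led = True
    print("415nm:", demo_sensor.channel_415nm)
    print("680nm:", demo_sensor.channel_680nm)
    print("all channels:", demo_sensor.all_channels)
    demo_sensor.led = False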
| # SPDX-FileCopyrightText: 2017 <NAME>, written for Adafruit Industries
# SPDX-FileCopyrightText: Copyright (c) 2020 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_as7341`
================================================================================
CircuitPython library for use with the Adafruit AS7341 breakout
* Author(s): <NAME>
Implementation Notes
--------------------
**Hardware:**
* `Adafruit AS7341 Breakout
<https://www.adafruit.com/product/4698>`_ (Product ID: 4698)
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://circuitpython.org/downloads
* Adafruit's Bus Device library:
https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
* Adafruit's Register library:
https://github.com/adafruit/Adafruit_CircuitPython_Register
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_AS7341.git"
from time import sleep, monotonic
from micropython import const
import adafruit_bus_device.i2c_device as i2c_device
from adafruit_register.i2c_struct import UnaryStruct, Struct # , ROUnaryStruct
from adafruit_register.i2c_bit import RWBit
from adafruit_register.i2c_bits import ROBits, RWBits
_AS7341_DEVICE_ID = const(0b001001) # Correct content of WHO_AM_I register
_AS7341_I2CADDR_DEFAULT = const(0x39) # AS7341 default i2c address
_AS7341_CHIP_ID = const(0x09) # AS7341 default device id from WHOAMI
_AS7341_WHOAMI = const(0x92) # Chip ID register
_AS7341_CONFIG = const(0x70) # Enables LED control and sets light sensing mode
_AS7341_GPIO = const(0x73) # Connects photo diode to GPIO or INT pins
_AS7341_LED = const(0x74) # LED Register; Enables and sets current limit
_AS7341_ENABLE = const(
0x80
) # Main enable register. Controls SMUX, Flicker Detection,Spectral and Power
_AS7341_ATIME = const(0x81) # Sets ADC integration step count
_AS7341_SP_LOW_TH_L = const(0x84) # Spectral measurement Low Threshold low byte
_AS7341_SP_LOW_TH_H = const(0x85) # 0 Spectral measurement Low Threshold high byte
_AS7341_SP_HIGH_TH_L = const(0x86) # Spectral measurement High Threshold low byte
_AS7341_SP_HIGH_TH_H = const(0x87) # Spectral measurement High Threshold high byte
_AS7341_STATUS = const(
0x93
) # Interrupt status registers. Indicates the occurrence of an interrupt
_AS7341_ASTATUS = const(
0x94
) # Spectral Saturation and Gain status. Reading from here latches the data
_AS7341_CH0_DATA_L = const(0x95) # ADC Channel 0 Data
_AS7341_CH0_DATA_H = const(0x96) # ADC Channel 0 Data
_AS7341_CH1_DATA_L = const(0x97) # ADC Channel 1 Data
_AS7341_CH1_DATA_H = const(0x98) # ADC Channel 1 Data
_AS7341_CH2_DATA_L = const(0x99) # ADC Channel 2 Data
_AS7341_CH2_DATA_H = const(0x9A) # ADC Channel 2 Data
_AS7341_CH3_DATA_L = const(0x9B) # ADC Channel 3 Data
_AS7341_CH3_DATA_H = const(0x9C) # ADC Channel 3 Data
_AS7341_CH4_DATA_L = const(0x9D) # ADC Channel 4 Data
_AS7341_CH4_DATA_H = const(0x9E) # ADC Channel 4 Data
_AS7341_CH5_DATA_L = const(0x9F) # ADC Channel 5 Data
_AS7341_CH5_DATA_H = const(0xA0) # ADC Channel 5 Data
_AS7341_STATUS2 = const(0xA3) # Measurement status flags; saturation, validity
_AS7341_STATUS3 = const(0xA4) # Spectral interrupt source, high or low threshold
_AS7341_CFG0 = const(
0xA9
) # Sets Low power mode, Register bank, and Trigger lengthening
_AS7341_CFG1 = const(0xAA) # Controls ADC Gain
_AS7341_CFG6 = const(0xAF) # Used to configure Smux
_AS7341_CFG9 = const(0xB2) # flicker detect and SMUX command system ints
_AS7341_CFG12 = const(0xB5) # ADC channel for interrupts, persistence and auto-gain
_AS7341_PERS = const(
0xBD
) # number of measurements outside thresholds to trigger an interrupt
_AS7341_GPIO2 = const(
0xBE
) # GPIO Settings and status: polarity, direction, sets output, reads
_AS7341_ASTEP_L = const(0xCA) # Integration step size low byte
_AS7341_ASTEP_H = const(0xCB) # Integration step size high byte
_AS7341_FD_TIME1 = const(0xD8) # Flicker detection integration time low byte
_AS7341_FD_TIME2 = const(0xDA) # Flicker detection gain and high nibble
_AS7341_FD_STATUS = const(
0xDB
) # Flicker detection status; measurement valid, saturation, flicker
_AS7341_INTENAB = const(0xF9) # Enables individual interrupt types
_AS7341_CONTROL = const(0xFA) # Auto-zero, fifo clear, clear SAI active
_AS7341_FD_CFG0 = const(0xD7) # Enables FIFO for flicker detection
def _low_bank(func):
# pylint:disable=protected-access
def _decorator(self, *args, **kwargs):
self._low_bank_active = True
retval = func(self, *args, **kwargs)
self._low_bank_active = False
return retval
return _decorator
class CV:
"""struct helper"""
@classmethod
def add_values(cls, value_tuples):
"""Add CV values to the class"""
cls.string = {}
cls.lsb = {}
for value_tuple in value_tuples:
name, value, string, lsb = value_tuple
setattr(cls, name, value)
cls.string[value] = string
cls.lsb[value] = lsb
@classmethod
def is_valid(cls, value):
"""Validate that a given value is a member"""
return value in cls.string
# class Flicker(CV):
# """Options for ``flicker_detection_type``"""
# pass # pylint: disable=unnecessary-pass
# Flicker.add_values((("FLICKER_100HZ", 0, 100, None), ("FLICKER_1000HZ", 1, 1000, None)))
class Gain(CV):
"""Options for ``accelerometer_range``"""
pass # pylint: disable=unnecessary-pass
Gain.add_values(
(
("GAIN_0_5X", 0, 0.5, None),
("GAIN_1X", 1, 1, None),
("GAIN_2X", 2, 2, None),
("GAIN_4X", 3, 4, None),
("GAIN_8X", 4, 8, None),
("GAIN_16X", 5, 16, None),
("GAIN_32X", 6, 32, None),
("GAIN_64X", 7, 64, None),
("GAIN_128X", 8, 128, None),
("GAIN_256X", 9, 256, None),
("GAIN_512X", 10, 512, None),
)
)
class SMUX_OUT(CV):
"""Options for ``smux_out``"""
pass # pylint: disable=unnecessary-pass
SMUX_OUT.add_values(
(
("DISABLED", 0, 0, None),
("ADC0", 1, 1, None),
("ADC1", 2, 2, None),
("ADC2", 3, 3, None),
("ADC3", 4, 4, None),
("ADC4", 5, 5, None),
("ADC5", 6, 6, None),
)
)
class SMUX_IN(CV):
"""Options for ``smux_in``"""
pass # pylint: disable=unnecessary-pass
SMUX_IN.add_values(
(
("NC_F3L", 0, 0, None),
("F1L_NC", 1, 1, None),
("NC_NC0", 2, 2, None),
("NC_F8L", 3, 3, None),
("F6L_NC", 4, 4, None),
("F2L_F4L", 5, 5, None),
("NC_F5L", 6, 6, None),
("F7L_NC", 7, 7, None),
("NC_CL", 8, 8, None),
("NC_F5R", 9, 9, None),
("F7R_NC", 10, 10, None),
("NC_NC1", 11, 11, None),
("NC_F2R", 12, 12, None),
("F4R_NC", 13, 13, None),
("F8R_F6R", 14, 14, None),
("NC_F3R", 15, 15, None),
("F1R_EXT_GPIO", 16, 16, None),
("EXT_INT_CR", 17, 17, None),
("NC_DARK", 18, 18, None),
("NIR_F", 19, 19, None),
)
)
class AS7341: # pylint:disable=too-many-instance-attributes, no-member
"""Library for the AS7341 Sensor
:param ~busio.I2C i2c_bus: The I2C bus the device is connected to
:param int address: The I2C device address. Defaults to :const:`0x39`
**Quickstart: Importing and using the device**
Here is an example of using the :class:`AS7341`.
First you will need to import the libraries to use the sensor
.. code-block:: python
import board
from adafruit_as7341 import AS7341
Once this is done you can define your `board.I2C` object and define your sensor object
.. code-block:: python
i2c = board.I2C() # uses board.SCL and board.SDA
sensor = AS7341(i2c)
Now you have access to the different channels
.. code-block:: python
channel_415nm = sensor.channel_415nm
channel_445nm = sensor.channel_445nm
channel_480nm = sensor.channel_480nm
channel_515nm = sensor.channel_515nm
channel_555nm = sensor.channel_555nm
channel_590nm = sensor.channel_590nm
channel_630nm = sensor.channel_630nm
channel_680nm = sensor.channel_680nm
"""
_device_id = ROBits(6, _AS7341_WHOAMI, 2)
_smux_enable_bit = RWBit(_AS7341_ENABLE, 4)
_led_control_enable_bit = RWBit(_AS7341_CONFIG, 3)
_color_meas_enabled = RWBit(_AS7341_ENABLE, 1)
_power_enabled = RWBit(_AS7341_ENABLE, 0)
_low_bank_active = RWBit(_AS7341_CFG0, 4)
_smux_command = RWBits(2, _AS7341_CFG6, 3)
_fd_status = UnaryStruct(_AS7341_FD_STATUS, "<B")
_channel_0_data = UnaryStruct(_AS7341_CH0_DATA_L, "<H")
_channel_1_data = UnaryStruct(_AS7341_CH1_DATA_L, "<H")
_channel_2_data = UnaryStruct(_AS7341_CH2_DATA_L, "<H")
_channel_3_data = UnaryStruct(_AS7341_CH3_DATA_L, "<H")
_channel_4_data = UnaryStruct(_AS7341_CH4_DATA_L, "<H")
_channel_5_data = UnaryStruct(_AS7341_CH5_DATA_L, "<H")
# "Reading the ASTATUS register (0x60 or 0x94) latches
# all 12 spectral data bytes to that status read." Datasheet Sec. 10.2.7
_all_channels = Struct(_AS7341_ASTATUS, "<BHHHHHH")
_led_current_bits = RWBits(7, _AS7341_LED, 0)
_led_enabled = RWBit(_AS7341_LED, 7)
atime = UnaryStruct(_AS7341_ATIME, "<B")
"""The integration time step count.
Total integration time will be ``(ATIME + 1) * (ASTEP + 1) * 2.78µS``
"""
astep = UnaryStruct(_AS7341_ASTEP_L, "<H")
""" The integration time step size in 2.78 microsecond increments"""
_gain = UnaryStruct(_AS7341_CFG1, "<B")
_data_ready_bit = RWBit(_AS7341_STATUS2, 6)
"""
* @brief
*
* @return true: success false: failure
"""
def __init__(self, i2c_bus, address=_AS7341_I2CADDR_DEFAULT):
self.i2c_device = i2c_device.I2CDevice(i2c_bus, address)
if not self._device_id in [_AS7341_DEVICE_ID]:
raise RuntimeError("Failed to find an AS7341 sensor - check your wiring!")
self.initialize()
self._buffer = bytearray(2)
self._low_channels_configured = False
self._high_channels_configured = False
self._flicker_detection_1k_configured = False
def initialize(self):
"""Configure the sensors with the default settings"""
self._power_enabled = True
self._led_control_enabled = True
self.atime = 100
self.astep = 999
self.gain = Gain.GAIN_128X # pylint:disable=no-member
@property
def all_channels(self):
"""The current readings for all six ADC channels"""
self._configure_f1_f4()
adc_reads_f1_f4 = self._all_channels
reads = adc_reads_f1_f4[1:-2]
self._configure_f5_f8()
adc_reads_f5_f8 = self._all_channels
reads += adc_reads_f5_f8[1:-2]
return reads
@property
def channel_415nm(self):
"""The current reading for the 415nm band"""
self._configure_f1_f4()
return self._channel_0_data
@property
def channel_445nm(self):
"""The current reading for the 445nm band"""
self._configure_f1_f4()
return self._channel_1_data
@property
def channel_480nm(self):
"""The current reading for the 480nm band"""
self._configure_f1_f4()
return self._channel_2_data
@property
def channel_515nm(self):
"""The current reading for the 515nm band"""
self._configure_f1_f4()
return self._channel_3_data
@property
def channel_555nm(self):
"""The current reading for the 555nm band"""
self._configure_f5_f8()
return self._channel_0_data
@property
def channel_590nm(self):
"""The current reading for the 590nm band"""
self._configure_f5_f8()
return self._channel_1_data
@property
def channel_630nm(self):
"""The current reading for the 630nm band"""
self._configure_f5_f8()
return self._channel_2_data
@property
def channel_680nm(self):
"""The current reading for the 680nm band"""
self._configure_f5_f8()
return self._channel_3_data
# TODO: Add clear and NIR accessors
def _wait_for_data(self, timeout=1.0):
"""Wait for sensor data to be ready"""
start = monotonic()
while not self._data_ready_bit:
if monotonic() - start > timeout:
raise RuntimeError("Timeout occurred waiting for sensor data")
sleep(0.001)
def _write_register(self, addr, data):
self._buffer[0] = addr
self._buffer[1] = data
with self.i2c_device as i2c:
i2c.write(self._buffer)
def _configure_f1_f4(self):
"""Configure the sensor to read from elements F1-F4, Clear, and NIR"""
# disable SP_EN bit while making config changes
if self._low_channels_configured:
return
self._high_channels_configured = False
self._flicker_detection_1k_configured = False
self._color_meas_enabled = False
# ENUM-ify
self._smux_command = 2
# Write new configuration to all the 20 registers
self._f1f4_clear_nir()
# Start SMUX command
self._smux_enabled = True
# Enable SP_EN bit
self._color_meas_enabled = True
self._low_channels_configured = True
self._wait_for_data()
def _configure_f5_f8(self):
"""Configure the sensor to read from elements F5-F8, Clear, and NIR"""
# disable SP_EN bit while making config changes
if self._high_channels_configured:
return
self._low_channels_configured = False
self._flicker_detection_1k_configured = False
self._color_meas_enabled = False
# ENUM-ify
self._smux_command = 2
# Write new configuration to all the 20 registers
self._f5f8_clear_nir()
# Start SMUX command
self._smux_enabled = True
# Enable SP_EN bit
self._color_meas_enabled = True
self._high_channels_configured = True
self._wait_for_data()
@property
def flicker_detected(self):
"""The flicker frequency detected in Hertz"""
if not self._flicker_detection_1k_configured:
raise AttributeError(
"Flicker detection must be enabled to access `flicker_detected`"
)
flicker_status = self._fd_status
if flicker_status == 45:
return 1000
if flicker_status == 46:
return 1200
return None
# if we haven't returned yet either there was an error or an unknown frequency was detected
@property
def flicker_detection_enabled(self):
"""The flicker detection status of the sensor. True if the sensor is configured\
to detect flickers. Currently only 1000Hz and 1200Hz flicker detection is supported
"""
return self._flicker_detection_1k_configured
@flicker_detection_enabled.setter
def flicker_detection_enabled(self, flicker_enable):
if flicker_enable:
self._configure_1k_flicker_detection()
else:
self._configure_f1_f4() # sane default
def _f1f4_clear_nir(self):
"""Configure SMUX for sensors F1-F4, Clear and NIR"""
self._set_smux(SMUX_IN.NC_F3L, SMUX_OUT.DISABLED, SMUX_OUT.ADC2)
self._set_smux(SMUX_IN.F1L_NC, SMUX_OUT.ADC0, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC0, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F8L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F6L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F2L_F4L, SMUX_OUT.ADC1, SMUX_OUT.ADC3)
self._set_smux(SMUX_IN.NC_F5L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F7L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_CL, SMUX_OUT.DISABLED, SMUX_OUT.ADC4)
self._set_smux(SMUX_IN.NC_F5R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F7R_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC1, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F2R, SMUX_OUT.DISABLED, SMUX_OUT.ADC1)
self._set_smux(SMUX_IN.F4R_NC, SMUX_OUT.ADC3, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F8R_F6R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F3R, SMUX_OUT.DISABLED, SMUX_OUT.ADC2)
self._set_smux(SMUX_IN.F1R_EXT_GPIO, SMUX_OUT.ADC0, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.EXT_INT_CR, SMUX_OUT.DISABLED, SMUX_OUT.ADC4)
self._set_smux(SMUX_IN.NC_DARK, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NIR_F, SMUX_OUT.ADC5, SMUX_OUT.DISABLED)
def _f5f8_clear_nir(self):
# SMUX Config for F5,F6,F7,F8,NIR,Clear
self._set_smux(SMUX_IN.NC_F3L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F1L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC0, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F8L, SMUX_OUT.DISABLED, SMUX_OUT.ADC3)
self._set_smux(SMUX_IN.F6L_NC, SMUX_OUT.ADC1, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F2L_F4L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F5L, SMUX_OUT.DISABLED, SMUX_OUT.ADC0)
self._set_smux(SMUX_IN.F7L_NC, SMUX_OUT.ADC2, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_CL, SMUX_OUT.DISABLED, SMUX_OUT.ADC4)
self._set_smux(SMUX_IN.NC_F5R, SMUX_OUT.DISABLED, SMUX_OUT.ADC0)
self._set_smux(SMUX_IN.F7R_NC, SMUX_OUT.ADC2, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC1, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F2R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F4R_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F8R_F6R, SMUX_OUT.ADC3, SMUX_OUT.ADC1)
self._set_smux(SMUX_IN.NC_F3R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F1R_EXT_GPIO, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.EXT_INT_CR, SMUX_OUT.DISABLED, SMUX_OUT.ADC4)
self._set_smux(SMUX_IN.NC_DARK, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NIR_F, SMUX_OUT.ADC5, SMUX_OUT.DISABLED)
# TODO: Convert as much of this as possible to properties or named attributes
def _configure_1k_flicker_detection(self):
self._low_channels_configured = False
self._high_channels_configured = False
# RAM_BANK 0 select which RAM bank to access in register addresses 0x00-0x7f
self._write_register(_AS7341_CFG0, 0x00)
# The coefficient calculated are stored into the RAM bank 0 and RAM bank 1,
# they are used instead of 100Hz and 120Hz coefficients which are the default
# flicker detection coefficients
# write new coefficients to detect the 1000Hz and 1200Hz - part 1
self._write_register(0x04, 0x9E)
self._write_register(0x05, 0x36)
self._write_register(0x0E, 0x2E)
self._write_register(0x0F, 0x1B)
self._write_register(0x18, 0x7D)
self._write_register(0x19, 0x36)
self._write_register(0x22, 0x09)
self._write_register(0x23, 0x1B)
self._write_register(0x2C, 0x5B)
self._write_register(0x2D, 0x36)
self._write_register(0x36, 0xE5)
self._write_register(0x37, 0x1A)
self._write_register(0x40, 0x3A)
self._write_register(0x41, 0x36)
self._write_register(0x4A, 0xC1)
self._write_register(0x4B, 0x1A)
self._write_register(0x54, 0x18)
self._write_register(0x55, 0x36)
self._write_register(0x5E, 0x9C)
self._write_register(0x5F, 0x1A)
self._write_register(0x68, 0xF6)
self._write_register(0x69, 0x35)
self._write_register(0x72, 0x78)
self._write_register(0x73, 0x1A)
self._write_register(0x7C, 0x4D)
self._write_register(0x7D, 0x35)
# RAM_BANK 1 select which RAM bank to access in register addresses 0x00-0x7f
self._write_register(_AS7341_CFG0, 0x01)
# write new coefficients to detect the 1000Hz and 1200Hz - part 2
self._write_register(0x06, 0x54)
self._write_register(0x07, 0x1A)
self._write_register(0x10, 0xB3)
self._write_register(0x11, 0x35)
self._write_register(0x1A, 0x2F)
self._write_register(0x1B, 0x1A)
self._write_register(_AS7341_CFG0, 0x01)
# select RAM coefficients for flicker detection by setting
# fd_disable_constant_init to „1“ (FD_CFG0 register) in FD_CFG0 register -
# 0xD7
# fd_disable_constant_init=1
# fd_samples=4
self._write_register(_AS7341_FD_CFG0, 0x60)
# in FD_CFG1 register - 0xd8 fd_time(7:0) = 0x40
self._write_register(_AS7341_FD_TIME1, 0x40)
# in FD_CFG2 register - 0xd9 fd_dcr_filter_size=1 fd_nr_data_sets(2:0)=5
self._write_register(0xD9, 0x25)
# in FD_CFG3 register - 0xda fd_gain=9
self._write_register(_AS7341_FD_TIME2, 0x48)
# in CFG9 register - 0xb2 sien_fd=1
self._write_register(_AS7341_CFG9, 0x40)
# in ENABLE - 0x80 fden=1 and pon=1 are enabled
self._write_register(_AS7341_ENABLE, 0x41)
self._flicker_detection_1k_configured = True
def _smux_template(self):
# SMUX_OUT.DISABLED
# SMUX_OUT.ADC0
# SMUX_OUT.ADC1
# SMUX_OUT.ADC2
# SMUX_OUT.ADC3
# SMUX_OUT.ADC4
# SMUX_OUT.ADC5
self._set_smux(SMUX_IN.NC_F3L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F1L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC0, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F8L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F6L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F2L_F4L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F5L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F7L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_CL, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F5R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F7R_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC1, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F2R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F4R_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F8R_F6R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F3R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F1R_EXT_GPIO, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.EXT_INT_CR, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_DARK, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NIR_F, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
def _set_smux(self, smux_addr, smux_out1, smux_out2):
"""Connect a pair of sensors to an ADC channel"""
low_nibble = smux_out1
high_nibble = smux_out2 << 4
smux_byte = high_nibble | low_nibble
self._write_register(smux_addr, smux_byte)
@property
def gain(self):
"""The ADC gain multiplier. Must be a valid :meth:`adafruit_as7341.Gain`"""
return self._gain
@gain.setter
def gain(self, gain_value):
if not Gain.is_valid(gain_value):
raise AttributeError("`gain` must be a valid `adafruit_as7341.Gain`")
self._gain = gain_value
@property
def _smux_enabled(self):
return self._smux_enable_bit
@_smux_enabled.setter
def _smux_enabled(self, enable_smux):
self._low_bank_active = False
self._smux_enable_bit = enable_smux
while self._smux_enable_bit is True:
sleep(0.001)
@property
@_low_bank
def led_current(self):
"""The maximum allowed current through the attached LED in milliamps.
Odd numbered values will be rounded down to the next lowest even number due
to the internal configuration restrictions"""
current_val = self._led_current_bits
return (current_val * 2) + 4
@led_current.setter
@_low_bank
def led_current(self, led_current):
new_current = int((min(258, max(4, led_current)) - 4) / 2)
self._led_current_bits = new_current
@property
@_low_bank
def led(self):
"""The attached LED. Set to True to turn on, False to turn off"""
return self._led_enabled
@led.setter
@_low_bank
def led(self, led_on):
self._led_enabled = led_on
@property
@_low_bank
def _led_control_enabled(self):
return self._led_control_enable_bit
@_led_control_enabled.setter
@_low_bank
def _led_control_enabled(self, enabled):
self._led_control_enable_bit = enabled
| en | 0.705871 | # SPDX-FileCopyrightText: 2017 <NAME>, written for Adafruit Industries # SPDX-FileCopyrightText: Copyright (c) 2020 <NAME> for Adafruit Industries # # SPDX-License-Identifier: MIT `adafruit_as7341` ================================================================================ CircuitPython library for use with the Adafruit AS7341 breakout * Author(s): <NAME> Implementation Notes -------------------- **Hardware:** * `Adafruit AS7341 Breakout <https://www.adafruit.com/product/4698>`_ (Product ID: 4698) **Software and Dependencies:** * Adafruit CircuitPython firmware for the supported boards: https://circuitpython.org/downloads * Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice * Adafruit's Register library: https://github.com/adafruit/Adafruit_CircuitPython_Register # , ROUnaryStruct # Correct content of WHO_AM_I register # AS7341 default i2c address # AS7341 default device id from WHOAMI # Chip ID register # Enables LED control and sets light sensing mode # Connects photo diode to GPIO or INT pins # LED Register; Enables and sets current limit # Main enable register. Controls SMUX, Flicker Detection,Spectral and Power # Sets ADC integration step count # Spectral measurement Low Threshold low byte # 0 Spectral measurement Low Threshold high byte # Spectral measurement High Threshold low byte # Spectral measurement High Threshold low byte # Interrupt status registers. Indicates the occourance of an interrupt # Spectral Saturation and Gain status. Reading from here latches the data # ADC Channel 0 Data # ADC Channel 0 Data # ADC Channel 1 Data # ADC Channel 1 Data # ADC Channel 2 Data # ADC Channel 2 Data # ADC Channel 3 Data # ADC Channel 3 Data # ADC Channel 4 Data # ADC Channel 4 Data # ADC Channel 5 Data # ADC Channel 5 Data # Measurement status flags; saturation, validity # Spectral interrupt source, high or low threshold # Sets Low power mode, Register bank, and Trigger lengthening # Controls ADC Gain # Used to configure Smux # flicker detect and SMUX command system ints # ADC channel for interrupts, persistence and auto-gain # number of measurements outside thresholds to trigger an interrupt # GPIO Settings and status: polarity, direction, sets output, reads # Integration step size ow byte # Integration step size high byte # Flicker detection integration time low byte # Flicker detection gain and high nibble # Flicker detection status; measurement valid, saturation, flicker # Enables individual interrupt types # Auto-zero, fifo clear, clear SAI active # Enables FIFO for flicker detection # pylint:disable=protected-access struct helper Add CV values to the class Validate that a given value is a member # class Flicker(CV): # """Options for ``flicker_detection_type``""" # pass # pylint: disable=unnecessary-pass # Flicker.add_values((("FLICKER_100HZ", 0, 100, None), ("FLICKER_1000HZ", 1, 1000, None))) Options for ``accelerometer_range`` # pylint: disable=unnecessary-pass Options for ``smux_out`` # pylint: disable=unnecessary-pass Options for ``smux_in`` # pylint: disable=unnecessary-pass # pylint:disable=too-many-instance-attributes, no-member Library for the AS7341 Sensor :param ~busio.I2C i2c_bus: The I2C bus the device is connected to :param int address: The I2C device address. Defaults to :const:`0x39` **Quickstart: Importing and using the device** Here is an example of using the :class:`AS7341`. First you will need to import the libraries to use the sensor .. 
code-block:: python import board from adafruit_as7341 import AS7341 Once this is done you can define your `board.I2C` object and define your sensor object .. code-block:: python i2c = board.I2C() # uses board.SCL and board.SDA sensor = AS7341(i2c) Now you have access to the different channels .. code-block:: python channel_415nm = channel_415nm channel_445nm = channel_445nm channel_480nm = channel_480nm channel_515nm = channel_515nm channel_555nm = channel_555nm channel_590nm = channel_590nm channel_630nm = channel_630nm channel_680nm = channel_680nm # "Reading the ASTATUS register (0x60 or 0x94) latches # all 12 spectral data bytes to that status read." Datasheet Sec. 10.2.7 The integration time step count. Total integration time will be ``(ATIME + 1) * (ASTEP + 1) * 2.78µS`` The integration time step size in 2.78 microsecond increments * @brief * * @return true: success false: failure Configure the sensors with the default settings # pylint:disable=no-member The current readings for all six ADC channels The current reading for the 415nm band The current reading for the 445nm band The current reading for the 480nm band The current reading for the 515nm band The current reading for the 555nm band The current reading for the 590nm band The current reading for the 630nm band The current reading for the 680nm band # TODO: Add clear and NIR accessors Wait for sensor data to be ready Configure the sensor to read from elements F1-F4, Clear, and NIR # disable SP_EN bit while making config changes # ENUM-ify # Write new configuration to all the 20 registers # Start SMUX command # Enable SP_EN bit Configure the sensor to read from elements F5-F8, Clear, and NIR # disable SP_EN bit while making config changes # ENUM-ify # Write new configuration to all the 20 registers # Start SMUX command # Enable SP_EN bit The flicker frequency detected in Hertz # if we haven't returned yet either there was an error or an unknown frequency was detected The flicker detection status of the sensor. True if the sensor is configured\ to detect flickers. Currently only 1000Hz and 1200Hz flicker detection is supported # sane default Configure SMUX for sensors F1-F4, Clear and NIR # SMUX Config for F5,F6,F7,F8,NIR,Clear # TODO: Convert as much of this as possible to properties or named attributes # RAM_BANK 0 select which RAM bank to access in register addresses 0x00-0x7f # The coefficient calculated are stored into the RAM bank 0 and RAM bank 1, # they are used instead of 100Hz and 120Hz coefficients which are the default # flicker detection coefficients # write new coefficients to detect the 1000Hz and 1200Hz - part 1 # RAM_BANK 1 select which RAM bank to access in register addresses 0x00-0x7f # write new coefficients to detect the 1000Hz and 1200Hz - part 1 # select RAM coefficients for flicker detection by setting # fd_disable_constant_init to „1“ (FD_CFG0 register) in FD_CFG0 register - # 0xD7 # fd_disable_constant_init=1 # fd_samples=4 # in FD_CFG1 register - 0xd8 fd_time(7:0) = 0x40 # in FD_CFG2 register - 0xd9 fd_dcr_filter_size=1 fd_nr_data_sets(2:0)=5 # in FD_CFG3 register - 0xda fd_gain=9 # in CFG9 register - 0xb2 sien_fd=1 # in ENABLE - 0x80 fden=1 and pon=1 are enabled # SMUX_OUT.DISABLED # SMUX_OUT.ADC0 # SMUX_OUT.ADC1 # SMUX_OUT.ADC2 # SMUX_OUT.ADC3 # SMUX_OUT.ADC4 # SMUX_OUT.ADC5 Connect a pair of sensors to an ADC channel The ADC gain multiplier. Must be a valid :meth:`adafruit_as7341.Gain` The maximum allowed current through the attached LED in milliamps. 
Odd numbered values will be rounded down to the next lowest even number due to the internal configuration restrictions The attached LED. Set to True to turn on, False to turn off | 1.807199 | 2 |
test/selenium/src/lib/page/modal/create_new_object.py | Smotko/ggrc-core | 0 | 6631126 | <reponame>Smotko/ggrc-core<filename>test/selenium/src/lib/page/modal/create_new_object.py
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: <EMAIL>
# Maintained By: <EMAIL>
"""Models for LHN modals when creating new objects"""
from lib.page.modal import base
class Programs(base.ProgramsModal, base.CreateNewObjectModal):
"""Class representing a program modal"""
class Controls(base.ControlsModal, base.CreateNewObjectModal):
"""Class representing a control modal"""
class OrgGroups(base.OrgGroupsModal, base.CreateNewObjectModal):
"""Class representing an org group modal"""
class Risks(base.RisksModal, base.CreateNewObjectModal):
"""Class representing a risk modal"""
class Requests(base.RequestsModal, base.CreateNewObjectModal):
"""Class representing an request modal"""
class Issues(base.IssuesModal, base.CreateNewObjectModal):
"""Class representing an issue modal"""
class Processes(base.ProcessesModal, base.CreateNewObjectModal):
"""Class representing a process modal"""
class DataAssets(base.DataAssetsModal, base.CreateNewObjectModal):
"""Class representing a Data Assets modal"""
class Systems(base.SystemsModal, base.CreateNewObjectModal):
"""Class representing a system modal"""
class Products(base.ProductsModal, base.CreateNewObjectModal):
"""Class representing a product modal"""
class Projects(base.ProjectsModal, base.CreateNewObjectModal):
"""Class representing a process modal"""
| # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: <EMAIL>
# Maintained By: <EMAIL>
"""Models for LHN modals when creating new objects"""
from lib.page.modal import base
class Programs(base.ProgramsModal, base.CreateNewObjectModal):
"""Class representing a program modal"""
class Controls(base.ControlsModal, base.CreateNewObjectModal):
"""Class representing a control modal"""
class OrgGroups(base.OrgGroupsModal, base.CreateNewObjectModal):
"""Class representing an org group modal"""
class Risks(base.RisksModal, base.CreateNewObjectModal):
"""Class representing a risk modal"""
class Requests(base.RequestsModal, base.CreateNewObjectModal):
"""Class representing an request modal"""
class Issues(base.IssuesModal, base.CreateNewObjectModal):
"""Class representing an issue modal"""
class Processes(base.ProcessesModal, base.CreateNewObjectModal):
"""Class representing a process modal"""
class DataAssets(base.DataAssetsModal, base.CreateNewObjectModal):
"""Class representing a Data Assets modal"""
class Systems(base.SystemsModal, base.CreateNewObjectModal):
"""Class representing a system modal"""
class Products(base.ProductsModal, base.CreateNewObjectModal):
"""Class representing a product modal"""
class Projects(base.ProjectsModal, base.CreateNewObjectModal):
"""Class representing a process modal""" | en | 0.833313 | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file> # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> # Created By: <EMAIL> # Maintained By: <EMAIL> Models for LHN modals when creating new objects Class representing a program modal Class representing a control modal Class representing an org group modal Class representing a risk modal Class representing an request modal Class representing an issue modal Class representing a process modal Class representing a Data Assets modal Class representing a system modal Class representing a product modal Class representing a process modal | 2.166251 | 2 |
tests/swagger20_validator/type_validator_test.py | Timothyyung/bravado-opt-usfca | 0 | 6631127 | <reponame>Timothyyung/bravado-opt-usfca
# -*- coding: utf-8 -*-
from mock import patch
from bravado_core.swagger20_validator import type_validator
@patch('jsonschema._validators.type_draft4')
def test_skip_when_validating_a_parameter_schema_and_parameter_value_is_None(
m_draft4_type_validator, minimal_swagger_spec):
param_schema = {'name': 'foo', 'in': 'query', 'type': 'string'}
list(type_validator(
minimal_swagger_spec,
validator=None,
types=param_schema['type'],
instance=None, # parameter value
schema=param_schema))
assert m_draft4_type_validator.call_count == 0
@patch('jsonschema._validators.type_draft4')
def test_validate_when_parameter_schema_and_parameter_value_is_not_None(
m_draft4_type_validator, minimal_swagger_spec):
param_schema = {'name': 'foo', 'in': 'query', 'type': 'string'}
args = (None, param_schema['type'], 'foo',
param_schema)
list(type_validator(minimal_swagger_spec, *args))
m_draft4_type_validator.assert_called_once_with(*args)
@patch('jsonschema._validators.type_draft4')
def test_validate_when_not_a_parameter_schema(m_draft4_type_validator,
minimal_swagger_spec):
string_schema = {'name': 'foo', 'type': 'string'}
args = (None, string_schema['type'], 'foo',
string_schema)
list(type_validator(minimal_swagger_spec, *args))
m_draft4_type_validator.assert_called_once_with(*args)
@patch('jsonschema._validators.type_draft4')
def test_skip_when_nullable_property_schema_and_value_is_None(
m_draft4_type_validator, minimal_swagger_spec):
prop_schema = {'x-nullable': True, 'type': 'string'}
list(type_validator(
minimal_swagger_spec,
validator=None,
types=prop_schema['type'],
instance=None, # property value
schema=prop_schema))
assert m_draft4_type_validator.call_count == 0
@patch('jsonschema._validators.type_draft4')
def test_validate_when_not_nullable_property_schema_and_value_is_None(
m_draft4_type_validator, minimal_swagger_spec):
prop_schema = {'x-nullable': False, 'type': 'string'}
args = (None, prop_schema['type'], None, prop_schema)
list(type_validator(minimal_swagger_spec, *args))
m_draft4_type_validator.assert_called_once_with(*args)
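# Illustrative companion test (a sketch, not part of the original suite): with a
# nullable property schema and a non-None value, validation should still be
# delegated to the draft4 type check. It reuses only fixtures and helpers that
# are already imported above.
@patch('jsonschema._validators.type_draft4')
def test_validate_when_nullable_property_schema_and_value_is_not_None(
        m_draft4_type_validator, minimal_swagger_spec):
    prop_schema = {'x-nullable': True, 'type': 'string'}
    args = (None, prop_schema['type'], 'foo', prop_schema)
    list(type_validator(minimal_swagger_spec, *args))
    m_draft4_type_validator.assert_called_once_with(*args)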
| # -*- coding: utf-8 -*-
from mock import patch
from bravado_core.swagger20_validator import type_validator
@patch('jsonschema._validators.type_draft4')
def test_skip_when_validating_a_parameter_schema_and_parameter_value_is_None(
m_draft4_type_validator, minimal_swagger_spec):
param_schema = {'name': 'foo', 'in': 'query', 'type': 'string'}
list(type_validator(
minimal_swagger_spec,
validator=None,
types=param_schema['type'],
instance=None, # parameter value
schema=param_schema))
assert m_draft4_type_validator.call_count == 0
@patch('jsonschema._validators.type_draft4')
def test_validate_when_parameter_schema_and_parameter_value_is_not_None(
m_draft4_type_validator, minimal_swagger_spec):
param_schema = {'name': 'foo', 'in': 'query', 'type': 'string'}
args = (None, param_schema['type'], 'foo',
param_schema)
list(type_validator(minimal_swagger_spec, *args))
m_draft4_type_validator.assert_called_once_with(*args)
@patch('jsonschema._validators.type_draft4')
def test_validate_when_not_a_parameter_schema(m_draft4_type_validator,
minimal_swagger_spec):
string_schema = {'name': 'foo', 'type': 'string'}
args = (None, string_schema['type'], 'foo',
string_schema)
list(type_validator(minimal_swagger_spec, *args))
m_draft4_type_validator.assert_called_once_with(*args)
@patch('jsonschema._validators.type_draft4')
def test_skip_when_nullable_property_schema_and_value_is_None(
m_draft4_type_validator, minimal_swagger_spec):
prop_schema = {'x-nullable': True, 'type': 'string'}
list(type_validator(
minimal_swagger_spec,
validator=None,
types=prop_schema['type'],
instance=None, # property value
schema=prop_schema))
assert m_draft4_type_validator.call_count == 0
@patch('jsonschema._validators.type_draft4')
def test_validate_when_not_nullable_property_schema_and_value_is_None(
m_draft4_type_validator, minimal_swagger_spec):
prop_schema = {'x-nullable': False, 'type': 'string'}
args = (None, prop_schema['type'], None, prop_schema)
list(type_validator(minimal_swagger_spec, *args))
m_draft4_type_validator.assert_called_once_with(*args) | en | 0.087398 | # -*- coding: utf-8 -*- # parameter value # property value | 2.064494 | 2 |
CTFd/plugins/ctfd-owl/frp_utils.py | Lz1y/H1ve | 1 | 6631128 | import requests
from .db_utils import DBUtils
from .models import DynamicCheckChallenge
class FrpUtils:
@staticmethod
def update_frp_redirect():
configs = DBUtils.get_all_configs()
containers = DBUtils.get_all_alive_container()
# frpc config
output = configs.get("frpc_config_template")
http_template = "\n\n[http_%s]\n" + \
"type = http\n" + \
"local_ip = %s\n" + \
"local_port = %s\n" + \
"subdomain = %s\n" + \
"use_compression = true"
direct_template = "\n\n[direct_%s_tcp]\n" + \
"type = tcp\n" + \
"local_ip = %s\n" + \
"local_port = %s\n" + \
"remote_port = %s\n" + \
"use_compression = true" + \
"\n\n[direct_%s_udp]\n" + \
"type = udp\n" + \
"local_ip = %s\n" + \
"local_port = %s\n" + \
"remote_port = %s\n" + \
"use_compression = true"
for c in containers:
dynamic_docker_challenge = DynamicCheckChallenge.query \
.filter(DynamicCheckChallenge.id == c.challenge_id) \
.first_or_404()
if dynamic_docker_challenge.redirect_type.upper() == 'HTTP':
output += http_template % (
"user{}_{}_service_1".format(c.user_id, dynamic_docker_challenge.dirname.split("/")[1])
, "user{}_{}_service_1".format(c.user_id, dynamic_docker_challenge.dirname.split("/")[1])
, dynamic_docker_challenge.redirect_port
, c.docker_id)
else:
output += direct_template % (
"user{}_{}_service_1".format(c.user_id, dynamic_docker_challenge.dirname.split("/")[1])
, "user{}_{}_service_1".format(c.user_id, dynamic_docker_challenge.dirname.split("/")[1])
, dynamic_docker_challenge.redirect_port
, c.port
,"user{}_{}_service_1".format(c.user_id, dynamic_docker_challenge.dirname.split("/")[1])
, "user{}_{}_service_1".format(c.user_id, dynamic_docker_challenge.dirname.split("/")[1])
, dynamic_docker_challenge.redirect_port
, c.port)
frp_api_ip = "frpc"
frp_api_port = "7400"
# print(output)
requests.put("http://" + frp_api_ip + ":" + frp_api_port + "/api/config", output,
timeout=5)
requests.get("http://" + frp_api_ip + ":" + frp_api_port + "/api/reload", timeout=5)
| import requests
from .db_utils import DBUtils
from .models import DynamicCheckChallenge
class FrpUtils:
@staticmethod
def update_frp_redirect():
configs = DBUtils.get_all_configs()
containers = DBUtils.get_all_alive_container()
# frps config
output = configs.get("frpc_config_template")
http_template = "\n\n[http_%s]\n" + \
"type = http\n" + \
"local_ip = %s\n" + \
"local_port = %s\n" + \
"subdomain = %s\n" + \
"use_compression = true"
direct_template = "\n\n[direct_%s_tcp]\n" + \
"type = tcp\n" + \
"local_ip = %s\n" + \
"local_port = %s\n" + \
"remote_port = %s\n" + \
"use_compression = true" + \
"\n\n[direct_%s_udp]\n" + \
"type = udp\n" + \
"local_ip = %s\n" + \
"local_port = %s\n" + \
"remote_port = %s\n" + \
"use_compression = true"
for c in containers:
dynamic_docker_challenge = DynamicCheckChallenge.query \
.filter(DynamicCheckChallenge.id == c.challenge_id) \
.first_or_404()
if dynamic_docker_challenge.redirect_type.upper() == 'HTTP':
output += http_template % (
"user{}_{}_service_1".format(c.user_id, dynamic_docker_challenge.dirname.split("/")[1])
, "user{}_{}_service_1".format(c.user_id, dynamic_docker_challenge.dirname.split("/")[1])
, dynamic_docker_challenge.redirect_port
, c.docker_id)
else:
output += direct_template % (
"user{}_{}_service_1".format(c.user_id, dynamic_docker_challenge.dirname.split("/")[1])
, "user{}_{}_service_1".format(c.user_id, dynamic_docker_challenge.dirname.split("/")[1])
, dynamic_docker_challenge.redirect_port
, c.port
,"user{}_{}_service_1".format(c.user_id, dynamic_docker_challenge.dirname.split("/")[1])
, "user{}_{}_service_1".format(c.user_id, dynamic_docker_challenge.dirname.split("/")[1])
, dynamic_docker_challenge.redirect_port
, c.port)
frp_api_ip = "frpc"
frp_api_port = "7400"
# print(output)
requests.put("http://" + frp_api_ip + ":" + frp_api_port + "/api/config", output,
timeout=5)
requests.get("http://" + frp_api_ip + ":" + frp_api_port + "/api/reload", timeout=5)
| en | 0.131014 | # frps config # print(output) | 2.1675 | 2 |
tests/test_core.py | abersheeran/asgi-ratelim | 1 | 6631129 | import httpx
import pytest
from ratelimit import RateLimitMiddleware, Rule
from ratelimit.auths import EmptyInformation
from ratelimit.backends.redis import RedisBackend
from ratelimit.types import Receive, Scope, Send
async def hello_world(scope, receive, send):
assert scope["type"] == "http"
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello world!"})
async def auth_func(scope):
headers = scope["headers"]
user, group = None, None
for name, value in headers: # type: bytes, bytes
if name == b"user":
user = value.decode("utf8")
if name == b"group":
group = value.decode("utf8")
if user is None:
raise EmptyInformation(scope)
assert group in ["default", "admin"], "Invalid group"
group = group or "default"
return user, group
async def handle_auth_error(exc):
async def send_response(scope, receive, send):
await send({"type": "http.response.start", "status": 401})
await send({"type": "http.response.body", "body": b"", "more_body": False})
return send_response
@pytest.mark.asyncio
async def test_on_auth_error_default():
rate_limit = RateLimitMiddleware(
hello_world,
auth_func,
RedisBackend(),
{
r"/": [Rule(group="admin")],
},
)
async with httpx.AsyncClient(
app=rate_limit, base_url="http://testserver"
) as client: # type: httpx.AsyncClient
response = await client.get("/", headers={"user": "test", "group": "default"})
assert response.status_code == 200
assert response.text == "Hello world!"
# No headers result in EmptyInformation
with pytest.raises(EmptyInformation):
await client.get("/", headers=None)
# Raise the right exception
with pytest.raises(AssertionError):
await client.get("/", headers={"user": "test", "group": "-"})
@pytest.mark.asyncio
async def test_on_auth_error_with_handler():
rate_limit = RateLimitMiddleware(
hello_world,
auth_func,
RedisBackend(),
{
r"/": [Rule(group="admin")],
},
on_auth_error=handle_auth_error,
)
async with httpx.AsyncClient(
app=rate_limit, base_url="http://testserver"
) as client: # type: httpx.AsyncClient
response = await client.get("/", headers={"user": "test", "group": "default"})
assert response.status_code == 200
assert response.text == "Hello world!"
response = await client.get("/", headers=None)
assert response.status_code == 401
assert response.text == ""
def yourself_429(retry_after: int):
async def inside_yourself_429(scope: Scope, receive: Receive, send: Send) -> None:
await send({"type": "http.response.start", "status": 429})
await send(
{
"type": "http.response.body",
"body": b"custom 429 page",
"more_body": False,
}
)
return inside_yourself_429
@pytest.mark.asyncio
async def test_custom_blocked():
rate_limit = RateLimitMiddleware(
hello_world,
authenticate=auth_func,
backend=RedisBackend(),
config={r"/": [Rule(second=1), Rule(group="admin")]},
on_blocked=yourself_429,
)
async with httpx.AsyncClient(
app=rate_limit, base_url="http://testserver"
) as client: # type: httpx.AsyncClient
response = await client.get("/", headers={"user": "user", "group": "default"})
assert response.status_code == 200
response = await client.get("/", headers={"user": "user", "group": "default"})
assert response.status_code == 429
assert response.content == b"custom 429 page"
@pytest.mark.asyncio
async def test_rule_zone():
rate_limit = RateLimitMiddleware(
hello_world,
auth_func,
RedisBackend(),
{
r"/message": [Rule(second=1, zone="commom")],
r"/\d+": [Rule(second=1, zone="commom")],
},
)
async with httpx.AsyncClient(
app=rate_limit, base_url="http://testserver"
) as client: # type: httpx.AsyncClient
response = await client.get("/10", headers={"user": "user", "group": "default"})
assert response.status_code == 200
response = await client.get(
"/message", headers={"user": "user", "group": "default"}
)
assert response.status_code == 429
| import httpx
import pytest
from ratelimit import RateLimitMiddleware, Rule
from ratelimit.auths import EmptyInformation
from ratelimit.backends.redis import RedisBackend
from ratelimit.types import Receive, Scope, Send
async def hello_world(scope, receive, send):
assert scope["type"] == "http"
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello world!"})
async def auth_func(scope):
headers = scope["headers"]
user, group = None, None
for name, value in headers: # type: bytes, bytes
if name == b"user":
user = value.decode("utf8")
if name == b"group":
group = value.decode("utf8")
if user is None:
raise EmptyInformation(scope)
assert group in ["default", "admin"], "Invalid group"
group = group or "default"
return user, group
async def handle_auth_error(exc):
async def send_response(scope, receive, send):
await send({"type": "http.response.start", "status": 401})
await send({"type": "http.response.body", "body": b"", "more_body": False})
return send_response
@pytest.mark.asyncio
async def test_on_auth_error_default():
rate_limit = RateLimitMiddleware(
hello_world,
auth_func,
RedisBackend(),
{
r"/": [Rule(group="admin")],
},
)
async with httpx.AsyncClient(
app=rate_limit, base_url="http://testserver"
) as client: # type: httpx.AsyncClient
response = await client.get("/", headers={"user": "test", "group": "default"})
assert response.status_code == 200
assert response.text == "Hello world!"
# No headers result in EmptyInformation
with pytest.raises(EmptyInformation):
await client.get("/", headers=None)
# Raise the right exception
with pytest.raises(AssertionError):
await client.get("/", headers={"user": "test", "group": "-"})
@pytest.mark.asyncio
async def test_on_auth_error_with_handler():
rate_limit = RateLimitMiddleware(
hello_world,
auth_func,
RedisBackend(),
{
r"/": [Rule(group="admin")],
},
on_auth_error=handle_auth_error,
)
async with httpx.AsyncClient(
app=rate_limit, base_url="http://testserver"
) as client: # type: httpx.AsyncClient
response = await client.get("/", headers={"user": "test", "group": "default"})
assert response.status_code == 200
assert response.text == "Hello world!"
response = await client.get("/", headers=None)
assert response.status_code == 401
assert response.text == ""
def yourself_429(retry_after: int):
async def inside_yourself_429(scope: Scope, receive: Receive, send: Send) -> None:
await send({"type": "http.response.start", "status": 429})
await send(
{
"type": "http.response.body",
"body": b"custom 429 page",
"more_body": False,
}
)
return inside_yourself_429
@pytest.mark.asyncio
async def test_custom_blocked():
rate_limit = RateLimitMiddleware(
hello_world,
authenticate=auth_func,
backend=RedisBackend(),
config={r"/": [Rule(second=1), Rule(group="admin")]},
on_blocked=yourself_429,
)
async with httpx.AsyncClient(
app=rate_limit, base_url="http://testserver"
) as client: # type: httpx.AsyncClient
response = await client.get("/", headers={"user": "user", "group": "default"})
assert response.status_code == 200
response = await client.get("/", headers={"user": "user", "group": "default"})
assert response.status_code == 429
assert response.content == b"custom 429 page"
@pytest.mark.asyncio
async def test_rule_zone():
rate_limit = RateLimitMiddleware(
hello_world,
auth_func,
RedisBackend(),
{
r"/message": [Rule(second=1, zone="commom")],
r"/\d+": [Rule(second=1, zone="commom")],
},
)
async with httpx.AsyncClient(
app=rate_limit, base_url="http://testserver"
) as client: # type: httpx.AsyncClient
response = await client.get("/10", headers={"user": "user", "group": "default"})
assert response.status_code == 200
response = await client.get(
"/message", headers={"user": "user", "group": "default"}
)
assert response.status_code == 429
| en | 0.356766 | # type: bytes, bytes # type: httpx.AsyncClient # No headers result in EmptyInformation # Raise the right exception # type: httpx.AsyncClient # type: httpx.AsyncClient # type: httpx.AsyncClient | 2.321676 | 2 |
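The tests above drive RateLimitMiddleware entirely through httpx; the sketch below wires the same constructor around a bare ASGI app outside a test, using only the arguments the tests demonstrate (Rule, RedisBackend, an authenticate callable that raises EmptyInformation). The route pattern and the assumption of a locally reachable Redis instance are illustrative, not taken from the source.

from ratelimit import RateLimitMiddleware, Rule
from ratelimit.auths import EmptyInformation
from ratelimit.backends.redis import RedisBackend


async def app(scope, receive, send):
    assert scope["type"] == "http"
    await send({"type": "http.response.start", "status": 200,
                "headers": [[b"content-type", b"text/plain"]]})
    await send({"type": "http.response.body", "body": b"OK"})


async def authenticate(scope):
    # Same contract as auth_func above: return (user, group) or raise
    # EmptyInformation when the request carries no usable identity.
    for name, value in scope["headers"]:
        if name == b"user":
            return value.decode("utf8"), "default"
    raise EmptyInformation(scope)


rate_limited_app = RateLimitMiddleware(
    app,
    authenticate=authenticate,
    backend=RedisBackend(),          # assumes a reachable local Redis
    config={r"^/api": [Rule(second=5), Rule(group="admin")]},
)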
prometheus.py | M4RC02U1F4A4/LOG-Discord | 0 | 6631130 | <gh_stars>0
import os
import discord
from dotenv import load_dotenv
import datetime
from prometheus_client import Gauge
from prometheus_client import Counter
from prometheus_client import start_http_server
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD_NAME')
bot = discord.Client()
@bot.event
async def on_ready():
for guild in bot.guilds:
if guild.name == GUILD:
break
nOnline = 0
nNotOffline = 0
nOffline = 0
nDnd = 0
nIdle = 0
online = 0
for user in bot.guilds[0].members:
if(str(user.status) != "offline" and user.bot == False):
nNotOffline +=1
if(str(user.status) == "online" and user.bot == False):
nOnline +=1
if(str(user.status) == "offline" and user.bot == False):
nOffline += 1
if(str(user.status) == "dnd" and user.bot == False):
nDnd += 1
if(str(user.status) == "idle" and user.bot == False):
nIdle +=1
if(str(user.voice) != "None"):
online += 1
discord_user_not_offline.set(nNotOffline)
discord_user_online.set(nOnline)
discord_user_offline.set(nOffline)
discord_user_dnd.set(nDnd)
discord_user_idle.set(nIdle)
discord_user_vocal.set(online)
print("OK PROMETHEUS")
@bot.event
async def on_voice_state_update(member, before, after):
if(before.channel == None):
discord_user_connect.inc()
online = 0
for user in bot.guilds[0].members:
if(str(user.voice) != "None"):
online += 1
discord_user_vocal.set(online)
elif(after.channel == None):
discord_user_disconnect.inc()
online = 0
for user in bot.guilds[0].members:
if(str(user.voice) != "None"):
online += 1
discord_user_vocal.set(online)
@bot.event
async def on_member_update(before, after):
if(before.status != after.status):
nOnline = 0
nNotOffline = 0
nOffline = 0
nDnd = 0
nIdle = 0
for user in bot.guilds[0].members:
if(str(user.status) != "offline" and user.bot == False):
nNotOffline +=1
if(str(user.status) == "online" and user.bot == False):
nOnline +=1
if(str(user.status) == "offline" and user.bot == False):
nOffline += 1
if(str(user.status) == "dnd" and user.bot == False):
nDnd += 1
if(str(user.status) == "idle" and user.bot == False):
nIdle +=1
discord_user_not_offline.set(nNotOffline)
discord_user_online.set(nOnline)
discord_user_offline.set(nOffline)
discord_user_dnd.set(nDnd)
discord_user_idle.set(nIdle)
if(any(after.activities)):
controllo = True
for role in after.roles:
if str(role.name) == "BOT":
controllo = False
if(controllo):
discord_games_activities.inc()
@bot.event
async def on_message(message):
discord_message.inc()
print("Starting...")
start_http_server(9324)
discord_user_online = Gauge('discord_user_online', 'Numero di utenti online')
discord_user_offline = Gauge('discord_user_offline', 'Numero di utenti offline')
discord_user_not_offline = Gauge('discord_user_not_offline', 'Numero di utenti non offline')
discord_user_dnd = Gauge('discord_user_dnd', 'Numero di utenti in dnd')
discord_user_idle = Gauge('discord_user_idle', 'Numero di utenti in idle')
discord_message = Counter('discord_message', 'Numero di utenti online')
discord_user_connect = Counter('discord_user_connect', 'Numero di connessioni')
discord_user_disconnect = Counter('discord_user_disconnect', 'Numero di disconnessioni')
discord_games_activities = Counter('discord_games_activities', 'Numero di attività nei giochi')
discord_user_vocal = Gauge('discord_user_vocal', 'Numero di utenti connessi ai canali vocali')
bot.run(TOKEN) | import os
import discord
from dotenv import load_dotenv
import datetime
from prometheus_client import Gauge
from prometheus_client import Counter
from prometheus_client import start_http_server
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD_NAME')
bot = discord.Client()
@bot.event
async def on_ready():
for guild in bot.guilds:
if guild.name == GUILD:
break
nOnline = 0
nNotOffline = 0
nOffline = 0
nDnd = 0
nIdle = 0
online = 0
for user in bot.guilds[0].members:
if(str(user.status) != "offline" and user.bot == False):
nNotOffline +=1
if(str(user.status) == "online" and user.bot == False):
nOnline +=1
if(str(user.status) == "offline" and user.bot == False):
nOffline += 1
if(str(user.status) == "dnd" and user.bot == False):
nDnd += 1
if(str(user.status) == "idle" and user.bot == False):
nIdle +=1
if(str(user.voice) != "None"):
online += 1
discord_user_not_offline.set(nNotOffline)
discord_user_online.set(nOnline)
discord_user_offline.set(nOffline)
discord_user_dnd.set(nDnd)
discord_user_idle.set(nIdle)
discord_user_vocal.set(online)
print("OK PROMETHEUS")
@bot.event
async def on_voice_state_update(member, before, after):
if(before.channel == None):
discord_user_connect.inc()
online = 0
for user in bot.guilds[0].members:
if(str(user.voice) != "None"):
online += 1
discord_user_vocal.set(online)
elif(after.channel == None):
discord_user_disconnect.inc()
online = 0
for user in bot.guilds[0].members:
if(str(user.voice) != "None"):
online += 1
discord_user_vocal.set(online)
@bot.event
async def on_member_update(before, after):
if(before.status != after.status):
nOnline = 0
nNotOffline = 0
nOffline = 0
nDnd = 0
nIdle = 0
for user in bot.guilds[0].members:
if(str(user.status) != "offline" and user.bot == False):
nNotOffline +=1
if(str(user.status) == "online" and user.bot == False):
nOnline +=1
if(str(user.status) == "offline" and user.bot == False):
nOffline += 1
if(str(user.status) == "dnd" and user.bot == False):
nDnd += 1
if(str(user.status) == "idle" and user.bot == False):
nIdle +=1
discord_user_not_offline.set(nNotOffline)
discord_user_online.set(nOnline)
discord_user_offline.set(nOffline)
discord_user_dnd.set(nDnd)
discord_user_idle.set(nIdle)
if(any(after.activities)):
controllo = True
for role in after.roles:
if str(role.name) == "BOT":
controllo = False
if(controllo):
discord_games_activities.inc()
@bot.event
async def on_message(message):
discord_message.inc()
print("Starting...")
start_http_server(9324)
discord_user_online = Gauge('discord_user_online', 'Numero di utenti online')
discord_user_offline = Gauge('discord_user_offline', 'Numero di utenti offline')
discord_user_not_offline = Gauge('discord_user_not_offline', 'Numero di utenti non offline')
discord_user_dnd = Gauge('discord_user_dnd', 'Numero di utenti in dnd')
discord_user_idle = Gauge('discord_user_idle', 'Numero di utenti in idle')
discord_message = Counter('discord_message', 'Numero di utenti online')
discord_user_connect = Counter('discord_user_connect', 'Numero di connessioni')
discord_user_disconnect = Counter('discord_user_disconnect', 'Numero di disconnessioni')
discord_games_activities = Counter('discord_games_activities', 'Numero di attività nei giochi')
discord_user_vocal = Gauge('discord_user_vocal', 'Numero di utenti connessi ai canali vocali')
bot.run(TOKEN) | none | 1 | 2.3791 | 2 |
|
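A stripped-down sketch of the prometheus_client pattern the bot above relies on: declare Gauge/Counter objects once, call start_http_server, then set()/inc() as events arrive. The port, metric names and the fake event loop below are illustrative, not taken from the bot.

import random
import time

from prometheus_client import Counter, Gauge, start_http_server

events_total = Counter("events_total", "Number of events handled")
users_online = Gauge("users_online", "Number of users currently online")

if __name__ == "__main__":
    start_http_server(8000)  # metrics exposed at http://localhost:8000/metrics
    while True:
        events_total.inc()                       # counters only go up
        users_online.set(random.randint(0, 50))  # gauges can go up or down
        time.sleep(5)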
muranodashboard/common/fields.py | sbrf-clouddev/murano-dashboard | 37 | 6631131 | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from muranodashboard.common import widgets
from django.core.exceptions import ValidationError
from django.core import validators
from django import forms
from django.utils.translation import ugettext_lazy as _
class TriStateMultipleChoiceField(forms.ChoiceField):
"""A multiple choice checkbox field where checkboxes has three states.
States are:
- Checked
- Unchecked
- Indeterminate
It takes a ``dict`` instance as a value,
where keys are internal values from `choices`
and values are ones from following (in order respectively to states):
- True
- False
- None
"""
widget = widgets.TriStateCheckboxSelectMultiple
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one '
'of the available choices.'),
'invalid_value': _('Enter a dict with choices and values. '
'Got %(value)s.'),
}
def to_python(self, value):
"""Checks if value, that comes from widget, is a dict."""
if value in validators.EMPTY_VALUES:
return {}
elif not isinstance(value, dict):
raise ValidationError(self.error_messages['invalid_value'],
code='invalid_value')
return value
def validate(self, value):
"""Ensures that value has only allowed values."""
if not set(value.keys()) <= {k for k, _ in self.choices}:
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
elif not (set(value.values()) <=
set(widgets.TriStateCheckboxSelectMultiple
.VALUES_MAP.values())):
raise ValidationError(
self.error_messages['invalid_value'],
code='invalid_value',
params={'value': value},
)
| # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from muranodashboard.common import widgets
from django.core.exceptions import ValidationError
from django.core import validators
from django import forms
from django.utils.translation import ugettext_lazy as _
class TriStateMultipleChoiceField(forms.ChoiceField):
"""A multiple choice checkbox field where checkboxes has three states.
States are:
- Checked
- Unchecked
- Indeterminate
It takes a ``dict`` instance as a value,
where keys are internal values from `choices`
and values are ones from following (in order respectively to states):
- True
- False
- None
"""
widget = widgets.TriStateCheckboxSelectMultiple
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one '
'of the available choices.'),
'invalid_value': _('Enter a dict with choices and values. '
'Got %(value)s.'),
}
def to_python(self, value):
"""Checks if value, that comes from widget, is a dict."""
if value in validators.EMPTY_VALUES:
return {}
elif not isinstance(value, dict):
raise ValidationError(self.error_messages['invalid_value'],
code='invalid_value')
return value
def validate(self, value):
"""Ensures that value has only allowed values."""
if not set(value.keys()) <= {k for k, _ in self.choices}:
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
elif not (set(value.values()) <=
set(widgets.TriStateCheckboxSelectMultiple
.VALUES_MAP.values())):
raise ValidationError(
self.error_messages['invalid_value'],
code='invalid_value',
params={'value': value},
)
| en | 0.872656 | # Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. A multiple choice checkbox field where checkboxes has three states. States are: - Checked - Unchecked - Indeterminate It takes a ``dict`` instance as a value, where keys are internal values from `choices` and values are ones from following (in order respectively to states): - True - False - None Checks if value, that comes from widget, is a dict. Ensures that value has only allowed values. | 2.32647 | 2 |
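A minimal sketch of how a dict value flows through TriStateMultipleChoiceField.clean(). It assumes the murano-dashboard package and its Horizon/Django dependencies are importable, that a bare settings.configure() is enough for a standalone run, and that the widget's VALUES_MAP accepts True/False/None as the docstring describes; the choice names are made up.

import django
from django.conf import settings

settings.configure(USE_I18N=False)
django.setup()

from muranodashboard.common.fields import TriStateMultipleChoiceField

field = TriStateMultipleChoiceField(
    choices=[("cpu", "CPU"), ("ram", "RAM"), ("disk", "Disk")],
    required=False,
)

# Keys must be a subset of the configured choices; values follow the
# docstring above: True (checked), False (unchecked), None (indeterminate).
cleaned = field.clean({"cpu": True, "ram": False, "disk": None})
print(cleaned)  # {'cpu': True, 'ram': False, 'disk': None}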
pypy/module/pypyjit/test/test_jit_not_in_trace.py | nanjekyejoannah/pypy | 381 | 6631132 |
class AppTestJitNotInTrace(object):
spaceconfig = dict(usemodules=('pypyjit',))
def test_not_from_assembler(self):
import pypyjit
@pypyjit.not_from_assembler
def f(x, y):
return 42
r = f(3, 4)
assert r is f
def test_not_from_assembler_exception(self):
import pypyjit
@pypyjit.not_from_assembler
def f(x, y):
raise ValueError(y, x)
e = raises(ValueError, f, 3, 4)
assert e.value.args == (4, 3)
|
class AppTestJitNotInTrace(object):
spaceconfig = dict(usemodules=('pypyjit',))
def test_not_from_assembler(self):
import pypyjit
@pypyjit.not_from_assembler
def f(x, y):
return 42
r = f(3, 4)
assert r is f
def test_not_from_assembler_exception(self):
import pypyjit
@pypyjit.not_from_assembler
def f(x, y):
raise ValueError(y, x)
e = raises(ValueError, f, 3, 4)
assert e.value.args == (4, 3)
| none | 1 | 2.367831 | 2 |
|
source/core-api/custom_resources/initialize_state.py | aws-solutions/aws-virtual-waiting-room | 30 | 6631133 | <filename>source/core-api/custom_resources/initialize_state.py<gh_stars>10-100
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
This module is the custom resource used to initialize the various counters used by the core API.
It also redeploys the public and private APIs whenever there's an update.
"""
import os
import time
from urllib.parse import urlparse
from crhelper import CfnResource
import requests
from aws_requests_auth.boto_utils import BotoAWSRequestsAuth
import boto3
from botocore import config
# connection info and other globals
helper = CfnResource()
EVENT_ID = os.environ.get("EVENT_ID")
CORE_API_ENDPOINT = os.environ.get("CORE_API_ENDPOINT")
PUBLIC_API_ID = os.environ.get("PUBLIC_API_ID")
PRIVATE_API_ID = os.environ.get("PRIVATE_API_ID")
API_STAGE = os.environ.get("API_STAGE")
boto_session = boto3.session.Session()
region = boto_session.region_name
SOLUTION_ID = os.environ['SOLUTION_ID']
user_agent_extra = {"user_agent_extra": SOLUTION_ID}
user_config = config.Config(**user_agent_extra)
api_client = boto3.client("apigateway", config=user_config)
@helper.create
def create(event, _):
"""
This function makes an authenticated call to the private API endpoint
    to initialize the various counters used by the core API.
"""
print(event)
core_api = f'{CORE_API_ENDPOINT}/reset_initial_state'
body = {
"event_id": EVENT_ID
}
parsed = urlparse(CORE_API_ENDPOINT)
# create an authentication signer for AWS
auth = BotoAWSRequestsAuth(aws_host=parsed.netloc,
aws_region=region,
aws_service='execute-api')
response = requests.post(core_api, json=body, auth=auth)
print(response.status_code)
@helper.update
def update(event, _):
"""
Counters and DynamoDB table are not reset during update.
Both public and private APIs are redeployed since that doesn't happen automatically.
"""
print(event)
print("Not resetting counters on update.")
print("Redeploying APIs.")
api_client.create_deployment(
restApiId=PUBLIC_API_ID,
stageName=API_STAGE,
description="Automated deployment through waiting room core API update.")
# avoid throttling
time.sleep(5)
api_client.create_deployment(
restApiId=PRIVATE_API_ID,
stageName=API_STAGE,
description="Automated deployment through waiting room core API update.")
@helper.delete
def delete(event, _):
"""
Counters and DynamoDB table are untouched during delete.
"""
print(event)
print("Not deleting counters on delete.")
def handler(event, context):
"""
This function is the entry point for the Lambda-backed custom resource.
"""
helper(event, context)
| <filename>source/core-api/custom_resources/initialize_state.py<gh_stars>10-100
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
This module is the custom resource used to initialize the various counters used by the core API.
It also redeploys the public and private APIs whenever there's an update.
"""
import os
import time
from urllib.parse import urlparse
from crhelper import CfnResource
import requests
from aws_requests_auth.boto_utils import BotoAWSRequestsAuth
import boto3
from botocore import config
# connection info and other globals
helper = CfnResource()
EVENT_ID = os.environ.get("EVENT_ID")
CORE_API_ENDPOINT = os.environ.get("CORE_API_ENDPOINT")
PUBLIC_API_ID = os.environ.get("PUBLIC_API_ID")
PRIVATE_API_ID = os.environ.get("PRIVATE_API_ID")
API_STAGE = os.environ.get("API_STAGE")
boto_session = boto3.session.Session()
region = boto_session.region_name
SOLUTION_ID = os.environ['SOLUTION_ID']
user_agent_extra = {"user_agent_extra": SOLUTION_ID}
user_config = config.Config(**user_agent_extra)
api_client = boto3.client("apigateway", config=user_config)
@helper.create
def create(event, _):
"""
This function makes an authenticated call to the private API endpoint
    to initialize the various counters used by the core API.
"""
print(event)
core_api = f'{CORE_API_ENDPOINT}/reset_initial_state'
body = {
"event_id": EVENT_ID
}
parsed = urlparse(CORE_API_ENDPOINT)
# create an authentication signer for AWS
auth = BotoAWSRequestsAuth(aws_host=parsed.netloc,
aws_region=region,
aws_service='execute-api')
response = requests.post(core_api, json=body, auth=auth)
print(response.status_code)
@helper.update
def update(event, _):
"""
Counters and DynamoDB table are not reset during update.
Both public and private APIs are redeployed since that doesn't happen automatically.
"""
print(event)
print("Not resetting counters on update.")
print("Redeploying APIs.")
api_client.create_deployment(
restApiId=PUBLIC_API_ID,
stageName=API_STAGE,
description="Automated deployment through waiting room core API update.")
# avoid throttling
time.sleep(5)
api_client.create_deployment(
restApiId=PRIVATE_API_ID,
stageName=API_STAGE,
description="Automated deployment through waiting room core API update.")
@helper.delete
def delete(event, _):
"""
Counters and DynamoDB table are untouched during delete.
"""
print(event)
print("Not deleting counters on delete.")
def handler(event, context):
"""
This function is the entry point for the Lambda-backed custom resource.
"""
helper(event, context)
| en | 0.903725 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 This module is the custom resource used to intialize the various counters used by the core API. It also redeploys the public and private APIs whenever there's an update. # connection info and other globals This function makes an authenticated call to the private API endpoint to intialize the various counters used by the core API. # create an authentication signer for AWS Counters and DynamoDB table are not reset during update. Both public and private APIs are redeployed since that doesn't happen automatically. # avoid throttling Counters and DynamoDB table are untouched during delete. This function is the entry point for the Lambda-backed custom resource. | 2.335431 | 2 |
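The create() handler above signs its call with BotoAWSRequestsAuth; the same SigV4 pattern can be exercised on its own against an IAM-protected API Gateway endpoint. The endpoint URL, region and event id below are placeholders.

from urllib.parse import urlparse

import requests
from aws_requests_auth.boto_utils import BotoAWSRequestsAuth

endpoint = "https://abc123.execute-api.us-east-1.amazonaws.com/api/reset_initial_state"
parsed = urlparse(endpoint)

auth = BotoAWSRequestsAuth(
    aws_host=parsed.netloc,          # host the signature is computed for
    aws_region="us-east-1",
    aws_service="execute-api",
)

response = requests.post(endpoint, json={"event_id": "Sample"}, auth=auth)
print(response.status_code)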
setup.py | docloud/luna.sso | 0 | 6631134 | #!/usr/bin/env python
from setuptools import setup, find_packages
entry_points = [
]
setup(
name='luna.sso',
version='0.0.1',
description='Luna Project',
url='http://github.com/luna/luna',
include_package_data=True,
packages=find_packages(),
entry_points={"console_scripts": entry_points},
# package_data={'folder': ['']},
install_requires=open('requirements.txt').readlines(),
)
| #!/usr/bin/env python
from setuptools import setup, find_packages
entry_points = [
]
setup(
name='luna.sso',
version='0.0.1',
description='Luna Project',
url='http://github.com/luna/luna',
include_package_data=True,
packages=find_packages(),
entry_points={"console_scripts": entry_points},
# package_data={'folder': ['']},
install_requires=open('requirements.txt').readlines(),
)
| en | 0.201281 | #!/usr/bin/env python # package_data={'folder': ['']}, | 1.111567 | 1 |
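The entry_points list above is left empty; if the project later exposes a command-line script, the conventional setuptools form is "<command> = <module>:<callable>". The command name and module path below are hypothetical.

from setuptools import find_packages, setup

setup(
    name="luna.sso",
    version="0.0.1",
    packages=find_packages(),
    entry_points={
        "console_scripts": [
            # hypothetical console script: command name = module path : callable
            "luna-sso = luna.sso.cli:main",
        ]
    },
)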
tests/common/bridgecrew/vulnerability_scanning/integrations/test_package_scanning.py | pmalkki/checkov | 1 | 6631135 | <gh_stars>1-10
import os
from pathlib import Path
import pytest
from aioresponses import aioresponses
from pytest_mock import MockerFixture
from checkov.common.bridgecrew.vulnerability_scanning.integrations.package_scanning import package_scanning_integration
def get_report_url() -> str:
base_url = "https://www.bridgecrew.cloud/api/v1/vulnerabilities"
return f"{base_url}/results"
@pytest.mark.asyncio
async def test_report_results(mocker: MockerFixture, mock_bc_integration, package_scan_result):
# given
bc_api_key = "<KEY>"
report_url = get_report_url()
mocker.patch.dict(os.environ, {"BC_ROOT_DIR": "app"})
# when
with aioresponses() as m:
m.post(report_url, status=200)
result = await package_scanning_integration.report_results_async(
twistcli_scan_result=package_scan_result,
bc_platform_integration=mock_bc_integration,
bc_api_key=bc_api_key,
file_path=Path("app/requirements.txt"),
)
# then
assert result == 0
assert next(iter(m.requests.values()))[0].kwargs["json"] == {
"packageName": "requirements.txt",
"packageFilePath": "/requirements.txt",
"type": "Package",
"sourceId": "bridgecrewio/checkov",
"branch": "master",
"sourceType": "Github",
"vulnerabilities": [
{
"cveId": "CVE-2019-19844",
"status": "fixed in 3.0.1, 2.2.9, 1.11.27",
"severity": "critical",
"packageName": "django",
"packageVersion": "1.2",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2019-19844",
"cvss": 9.8,
"vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
"description": "Django before 1.11.27, 2.x before 2.2.9, and 3.x before 3.0.1 allows account takeover.",
"riskFactors": ["Critical severity", "Has fix", "Attack complexity: low", "Attack vector: network"],
"publishedDate": "2019-12-18T20:15:00+01:00",
},
],
"packages": [
{
"type": "python",
"name": "django",
"version": "1.2",
"licenses": []
}
]
}
@pytest.mark.asyncio
async def test_report_results_with_cicd(mocker: MockerFixture, mock_bc_integration, package_scan_result):
# given
bc_api_key = "<KEY>"
report_url = get_report_url()
cicd_details = {
"runId": 123,
"pr": "patch-1",
"commit": "qwerty1234",
}
mock_bc_integration.cicd_details = cicd_details
mocker.patch.dict(os.environ, {"BC_ROOT_DIR": "app"})
# when
with aioresponses() as m:
m.post(report_url, status=200)
result = await package_scanning_integration.report_results_async(
twistcli_scan_result=package_scan_result,
bc_platform_integration=mock_bc_integration,
bc_api_key=bc_api_key,
file_path=Path("app/requirements.txt"),
)
# then
assert result == 0
assert next(iter(m.requests.values()))[0].kwargs["json"]["cicdDetails"] == cicd_details
@pytest.mark.asyncio
async def test_report_results_fail(mocker: MockerFixture, mock_bc_integration, package_scan_result):
# given
bc_api_key = "<KEY>"
report_url = get_report_url()
mocker.patch.dict(os.environ, {"BC_ROOT_DIR": "app"})
# when
with aioresponses() as m:
m.post(report_url, status=403)
result = await package_scanning_integration.report_results_async(
twistcli_scan_result=package_scan_result,
bc_platform_integration=mock_bc_integration,
bc_api_key=bc_api_key,
file_path=Path("app/requirements.txt"),
)
# then
assert result == 1
| import os
from pathlib import Path
import pytest
from aioresponses import aioresponses
from pytest_mock import MockerFixture
from checkov.common.bridgecrew.vulnerability_scanning.integrations.package_scanning import package_scanning_integration
def get_report_url() -> str:
base_url = "https://www.bridgecrew.cloud/api/v1/vulnerabilities"
return f"{base_url}/results"
@pytest.mark.asyncio
async def test_report_results(mocker: MockerFixture, mock_bc_integration, package_scan_result):
# given
bc_api_key = "<KEY>"
report_url = get_report_url()
mocker.patch.dict(os.environ, {"BC_ROOT_DIR": "app"})
# when
with aioresponses() as m:
m.post(report_url, status=200)
result = await package_scanning_integration.report_results_async(
twistcli_scan_result=package_scan_result,
bc_platform_integration=mock_bc_integration,
bc_api_key=bc_api_key,
file_path=Path("app/requirements.txt"),
)
# then
assert result == 0
assert next(iter(m.requests.values()))[0].kwargs["json"] == {
"packageName": "requirements.txt",
"packageFilePath": "/requirements.txt",
"type": "Package",
"sourceId": "bridgecrewio/checkov",
"branch": "master",
"sourceType": "Github",
"vulnerabilities": [
{
"cveId": "CVE-2019-19844",
"status": "fixed in 3.0.1, 2.2.9, 1.11.27",
"severity": "critical",
"packageName": "django",
"packageVersion": "1.2",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2019-19844",
"cvss": 9.8,
"vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
"description": "Django before 1.11.27, 2.x before 2.2.9, and 3.x before 3.0.1 allows account takeover.",
"riskFactors": ["Critical severity", "Has fix", "Attack complexity: low", "Attack vector: network"],
"publishedDate": "2019-12-18T20:15:00+01:00",
},
],
"packages": [
{
"type": "python",
"name": "django",
"version": "1.2",
"licenses": []
}
]
}
@pytest.mark.asyncio
async def test_report_results_with_cicd(mocker: MockerFixture, mock_bc_integration, package_scan_result):
# given
bc_api_key = "<KEY>"
report_url = get_report_url()
cicd_details = {
"runId": 123,
"pr": "patch-1",
"commit": "qwerty1234",
}
mock_bc_integration.cicd_details = cicd_details
mocker.patch.dict(os.environ, {"BC_ROOT_DIR": "app"})
# when
with aioresponses() as m:
m.post(report_url, status=200)
result = await package_scanning_integration.report_results_async(
twistcli_scan_result=package_scan_result,
bc_platform_integration=mock_bc_integration,
bc_api_key=bc_api_key,
file_path=Path("app/requirements.txt"),
)
# then
assert result == 0
assert next(iter(m.requests.values()))[0].kwargs["json"]["cicdDetails"] == cicd_details
@pytest.mark.asyncio
async def test_report_results_fail(mocker: MockerFixture, mock_bc_integration, package_scan_result):
# given
bc_api_key = "<KEY>"
report_url = get_report_url()
mocker.patch.dict(os.environ, {"BC_ROOT_DIR": "app"})
# when
with aioresponses() as m:
m.post(report_url, status=403)
result = await package_scanning_integration.report_results_async(
twistcli_scan_result=package_scan_result,
bc_platform_integration=mock_bc_integration,
bc_api_key=bc_api_key,
file_path=Path("app/requirements.txt"),
)
# then
assert result == 1 | en | 0.245449 | # given # when # then # given # when # then # given # when # then | 2.189933 | 2 |
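A self-contained sketch of the aioresponses pattern these tests depend on: register a fake response, let real aiohttp client code run against it, then inspect the recorded request the same way the assertions above do. The URL and payloads are illustrative.

import aiohttp
import pytest
from aioresponses import aioresponses


@pytest.mark.asyncio
async def test_mocked_post():
    url = "https://example.com/api/v1/results"
    with aioresponses() as m:
        m.post(url, status=200, payload={"ok": True})

        async with aiohttp.ClientSession() as session:
            async with session.post(url, json={"value": 42}) as resp:
                assert resp.status == 200
                assert await resp.json() == {"ok": True}

        # aioresponses records every intercepted call for later assertions
        sent = next(iter(m.requests.values()))[0].kwargs["json"]
        assert sent == {"value": 42}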
openprocurement/auction/texas/event_source.py | OrysiaDrabych/openprocurement.auction.texas | 0 | 6631136 | <gh_stars>0
from sse import Sse as PySse
from gevent.queue import Queue
from flask import (
current_app, Blueprint, request,
session, Response, jsonify, abort
)
from openprocurement.auction.event_source import (
send_event_to_client, send_event, SseStream
)
from openprocurement.auction.utils import (
prepare_extra_journal_fields, get_bidder_id
)
sse = Blueprint('sse', __name__)
@sse.route("/set_sse_timeout", methods=['POST'])
def set_sse_timeout():
current_app.logger.info(
'Handle set_sse_timeout request with session {}'.format(repr(dict(session))),
extra=prepare_extra_journal_fields(request.headers)
)
if 'remote_oauth' in session and 'client_id' in session:
bidder_data = get_bidder_id(current_app, session)
if bidder_data:
current_app.logger.info("Bidder {} with client_id {} set sse_timeout".format(
bidder_data['bidder_id'], session['client_id'],
), extra=prepare_extra_journal_fields(request.headers))
bidder = bidder_data['bidder_id']
if 'timeout' in request.json:
session["sse_timeout"] = int(request.json['timeout'])
send_event_to_client(
bidder, session['client_id'], '',
event='StopSSE'
)
return jsonify({'timeout': session["sse_timeout"]})
return abort(401)
@sse.route("/event_source")
def event_source():
current_app.logger.debug(
'Handle event_source request with session {}'.format(repr(dict(session))),
extra=prepare_extra_journal_fields(request.headers)
)
if 'remote_oauth' in session and 'client_id' in session:
bidder_data = get_bidder_id(current_app, session)
if bidder_data:
valid_bidder = False
client_hash = session['client_id']
bidder = bidder_data['bidder_id']
for bidder_info in current_app.context['bidders_data']:
if bidder_info['id'] == bidder:
valid_bidder = True
break
if valid_bidder:
if bidder not in current_app.auction_bidders:
current_app.auction_bidders[bidder] = {
"clients": {},
"channels": {}
}
if client_hash not in current_app.auction_bidders[bidder]:
real_ip = request.environ.get('HTTP_X_REAL_IP', '')
if real_ip.startswith('172.'):
real_ip = ''
current_app.auction_bidders[bidder]["clients"][client_hash] = {
'ip': ','.join(
[request.headers.get('X-Forwarded-For', ''), real_ip]
),
'User-Agent': request.headers.get('User-Agent'),
}
current_app.auction_bidders[bidder]["channels"][client_hash] = Queue()
current_app.logger.info(
'Send identification for bidder: {} with client_hash {}'.format(bidder, client_hash),
extra=prepare_extra_journal_fields(request.headers)
)
identification_data = {"bidder_id": bidder,
"client_id": client_hash,
"return_url": session.get('return_url', '')}
send_event_to_client(bidder, client_hash, identification_data,
"Identification")
if not session.get("sse_timeout", 0):
current_app.logger.debug('Send ClientsList')
send_event(
bidder,
current_app.auction_bidders[bidder]["clients"],
"ClientsList"
)
response = Response(
SseStream(
current_app.auction_bidders[bidder]["channels"][client_hash],
bidder_id=bidder,
client_id=client_hash,
timeout=session.get("sse_timeout", 0)
),
direct_passthrough=True,
mimetype='text/event-stream',
content_type='text/event-stream'
)
response.headers['Cache-Control'] = 'no-cache'
response.headers['X-Accel-Buffering'] = 'no'
return response
else:
current_app.logger.info(
'Not valid bidder: bidder_id {} with client_hash {}'.format(bidder, client_hash),
extra=prepare_extra_journal_fields(request.headers)
)
current_app.logger.debug(
'Disable event_source for unauthorized user.',
extra=prepare_extra_journal_fields(request.headers)
)
events_close = PySse()
events_close.add_message("Close", "Disable")
response = Response(
iter([bytearray(''.join([x for x in events_close]), 'UTF-8')]),
direct_passthrough=True,
mimetype='text/event-stream',
content_type='text/event-stream'
)
response.headers['Cache-Control'] = 'no-cache'
response.headers['X-Accel-Buffering'] = 'no'
return response
| from sse import Sse as PySse
from gevent.queue import Queue
from flask import (
current_app, Blueprint, request,
session, Response, jsonify, abort
)
from openprocurement.auction.event_source import (
send_event_to_client, send_event, SseStream
)
from openprocurement.auction.utils import (
prepare_extra_journal_fields, get_bidder_id
)
sse = Blueprint('sse', __name__)
@sse.route("/set_sse_timeout", methods=['POST'])
def set_sse_timeout():
current_app.logger.info(
'Handle set_sse_timeout request with session {}'.format(repr(dict(session))),
extra=prepare_extra_journal_fields(request.headers)
)
if 'remote_oauth' in session and 'client_id' in session:
bidder_data = get_bidder_id(current_app, session)
if bidder_data:
current_app.logger.info("Bidder {} with client_id {} set sse_timeout".format(
bidder_data['bidder_id'], session['client_id'],
), extra=prepare_extra_journal_fields(request.headers))
bidder = bidder_data['bidder_id']
if 'timeout' in request.json:
session["sse_timeout"] = int(request.json['timeout'])
send_event_to_client(
bidder, session['client_id'], '',
event='StopSSE'
)
return jsonify({'timeout': session["sse_timeout"]})
return abort(401)
@sse.route("/event_source")
def event_source():
current_app.logger.debug(
'Handle event_source request with session {}'.format(repr(dict(session))),
extra=prepare_extra_journal_fields(request.headers)
)
if 'remote_oauth' in session and 'client_id' in session:
bidder_data = get_bidder_id(current_app, session)
if bidder_data:
valid_bidder = False
client_hash = session['client_id']
bidder = bidder_data['bidder_id']
for bidder_info in current_app.context['bidders_data']:
if bidder_info['id'] == bidder:
valid_bidder = True
break
if valid_bidder:
if bidder not in current_app.auction_bidders:
current_app.auction_bidders[bidder] = {
"clients": {},
"channels": {}
}
if client_hash not in current_app.auction_bidders[bidder]:
real_ip = request.environ.get('HTTP_X_REAL_IP', '')
if real_ip.startswith('172.'):
real_ip = ''
current_app.auction_bidders[bidder]["clients"][client_hash] = {
'ip': ','.join(
[request.headers.get('X-Forwarded-For', ''), real_ip]
),
'User-Agent': request.headers.get('User-Agent'),
}
current_app.auction_bidders[bidder]["channels"][client_hash] = Queue()
current_app.logger.info(
'Send identification for bidder: {} with client_hash {}'.format(bidder, client_hash),
extra=prepare_extra_journal_fields(request.headers)
)
identification_data = {"bidder_id": bidder,
"client_id": client_hash,
"return_url": session.get('return_url', '')}
send_event_to_client(bidder, client_hash, identification_data,
"Identification")
if not session.get("sse_timeout", 0):
current_app.logger.debug('Send ClientsList')
send_event(
bidder,
current_app.auction_bidders[bidder]["clients"],
"ClientsList"
)
response = Response(
SseStream(
current_app.auction_bidders[bidder]["channels"][client_hash],
bidder_id=bidder,
client_id=client_hash,
timeout=session.get("sse_timeout", 0)
),
direct_passthrough=True,
mimetype='text/event-stream',
content_type='text/event-stream'
)
response.headers['Cache-Control'] = 'no-cache'
response.headers['X-Accel-Buffering'] = 'no'
return response
else:
current_app.logger.info(
'Not valid bidder: bidder_id {} with client_hash {}'.format(bidder, client_hash),
extra=prepare_extra_journal_fields(request.headers)
)
current_app.logger.debug(
'Disable event_source for unauthorized user.',
extra=prepare_extra_journal_fields(request.headers)
)
events_close = PySse()
events_close.add_message("Close", "Disable")
response = Response(
iter([bytearray(''.join([x for x in events_close]), 'UTF-8')]),
direct_passthrough=True,
mimetype='text/event-stream',
content_type='text/event-stream'
)
response.headers['Cache-Control'] = 'no-cache'
response.headers['X-Accel-Buffering'] = 'no'
return response | none | 1 | 2.111104 | 2 |
|
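A bare-bones Flask sketch of the same text/event-stream response pattern the blueprint above uses, with the auction, session and queue machinery stripped away. The event name, payload and one-second cadence are illustrative.

import json
import time

from flask import Flask, Response

app = Flask(__name__)


def event_stream():
    for n in range(5):
        payload = json.dumps({"tick": n})
        # SSE wire format: optional "event:" line, a "data:" line, blank line.
        yield "event: Tick\ndata: {}\n\n".format(payload)
        time.sleep(1)


@app.route("/event_source")
def event_source():
    response = Response(event_stream(), mimetype="text/event-stream")
    response.headers["Cache-Control"] = "no-cache"
    response.headers["X-Accel-Buffering"] = "no"
    return response


if __name__ == "__main__":
    app.run(threaded=True)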
tfx/tools/cli/container_builder/builder.py | Anon-Artist/tfx | 3 | 6631137 | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ContainerBuilder builds the container image."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Optional, Text
import click
from tfx.tools.cli.container_builder import buildspec
from tfx.tools.cli.container_builder import labels
from tfx.tools.cli.container_builder.dockerfile import Dockerfile
from tfx.tools.cli.container_builder.skaffold_cli import SkaffoldCli
# TODO(b/142357382): add e2e tests.
class ContainerBuilder(object):
"""Build containers.
ContainerBuilder prepares the build files and run Skaffold to build the
containers.
Attributes:
_buildspec: BuildSpec instance.
_skaffold_cmd: Skaffold command.
"""
def __init__(self,
target_image: Optional[Text] = None,
base_image: Optional[Text] = None,
skaffold_cmd: Optional[Text] = None,
buildspec_filename: Optional[Text] = None,
dockerfile_name: Optional[Text] = None,
setup_py_filename: Optional[Text] = None):
"""Initialization.
Args:
target_image: the target image path to be built.
base_image: the image path to use as the base image.
skaffold_cmd: skaffold command.
buildspec_filename: the buildspec file path that is accessible to the
current execution environment. It could be either absolute path or
relative path.
dockerfile_name: the dockerfile name, which is stored in the workspace
directory. The workspace directory is specified in the build spec and
the default workspace directory is '.'.
setup_py_filename: the setup.py file name, which is used to build a
python package for the workspace directory. If not specified, the
whole directory is copied and PYTHONPATH is configured.
"""
self._skaffold_cmd = skaffold_cmd or labels.SKAFFOLD_COMMAND
buildspec_filename = buildspec_filename or labels.BUILD_SPEC_FILENAME
dockerfile_name = dockerfile_name or labels.DOCKERFILE_NAME
if os.path.exists(buildspec_filename):
self._buildspec = buildspec.BuildSpec(filename=buildspec_filename)
if target_image is not None:
click.echo(
'Target image %s is not used. If the build spec is '
'provided, update the target image in the build spec '
'file %s.' % (target_image, buildspec_filename))
else:
self._buildspec = buildspec.BuildSpec.load_default(
filename=buildspec_filename,
target_image=target_image,
dockerfile_name=dockerfile_name)
Dockerfile(
filename=os.path.join(self._buildspec.build_context, dockerfile_name),
setup_py_filename=setup_py_filename,
base_image=base_image)
def build(self):
"""Build the container and return the built image path with SHA."""
skaffold_cli = SkaffoldCli(cmd=self._skaffold_cmd)
image_sha = skaffold_cli.build(self._buildspec)
target_image = self._buildspec.target_image
return target_image + '@' + image_sha
| # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ContainerBuilder builds the container image."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Optional, Text
import click
from tfx.tools.cli.container_builder import buildspec
from tfx.tools.cli.container_builder import labels
from tfx.tools.cli.container_builder.dockerfile import Dockerfile
from tfx.tools.cli.container_builder.skaffold_cli import SkaffoldCli
# TODO(b/142357382): add e2e tests.
class ContainerBuilder(object):
"""Build containers.
  ContainerBuilder prepares the build files and runs Skaffold to build the
containers.
Attributes:
_buildspec: BuildSpec instance.
_skaffold_cmd: Skaffold command.
"""
def __init__(self,
target_image: Optional[Text] = None,
base_image: Optional[Text] = None,
skaffold_cmd: Optional[Text] = None,
buildspec_filename: Optional[Text] = None,
dockerfile_name: Optional[Text] = None,
setup_py_filename: Optional[Text] = None):
"""Initialization.
Args:
target_image: the target image path to be built.
base_image: the image path to use as the base image.
skaffold_cmd: skaffold command.
buildspec_filename: the buildspec file path that is accessible to the
current execution environment. It could be either absolute path or
relative path.
dockerfile_name: the dockerfile name, which is stored in the workspace
directory. The workspace directory is specified in the build spec and
the default workspace directory is '.'.
setup_py_filename: the setup.py file name, which is used to build a
python package for the workspace directory. If not specified, the
whole directory is copied and PYTHONPATH is configured.
"""
self._skaffold_cmd = skaffold_cmd or labels.SKAFFOLD_COMMAND
buildspec_filename = buildspec_filename or labels.BUILD_SPEC_FILENAME
dockerfile_name = dockerfile_name or labels.DOCKERFILE_NAME
if os.path.exists(buildspec_filename):
self._buildspec = buildspec.BuildSpec(filename=buildspec_filename)
if target_image is not None:
click.echo(
'Target image %s is not used. If the build spec is '
'provided, update the target image in the build spec '
'file %s.' % (target_image, buildspec_filename))
else:
self._buildspec = buildspec.BuildSpec.load_default(
filename=buildspec_filename,
target_image=target_image,
dockerfile_name=dockerfile_name)
Dockerfile(
filename=os.path.join(self._buildspec.build_context, dockerfile_name),
setup_py_filename=setup_py_filename,
base_image=base_image)
def build(self):
"""Build the container and return the built image path with SHA."""
skaffold_cli = SkaffoldCli(cmd=self._skaffold_cmd)
image_sha = skaffold_cli.build(self._buildspec)
target_image = self._buildspec.target_image
return target_image + '@' + image_sha
| en | 0.806617 | # Lint as: python2, python3 # Copyright 2019 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ContainerBuilder builds the container image. # TODO(b/142357382): add e2e tests. Build containers. ContainerBuilder prepares the build files and run Skaffold to build the containers. Attributes: _buildspec: BuildSpec instance. _skaffold_cmd: Skaffold command. Initialization. Args: target_image: the target image path to be built. base_image: the image path to use as the base image. skaffold_cmd: skaffold command. buildspec_filename: the buildspec file path that is accessible to the current execution environment. It could be either absolute path or relative path. dockerfile_name: the dockerfile name, which is stored in the workspace directory. The workspace directory is specified in the build spec and the default workspace directory is '.'. setup_py_filename: the setup.py file name, which is used to build a python package for the workspace directory. If not specified, the whole directory is copied and PYTHONPATH is configured. Build the container and return the built image path with SHA. | 1.946285 | 2 |
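A hypothetical driver for the ContainerBuilder above. The image names are placeholders, and a working skaffold binary plus a Docker build context are assumed to be available.

from tfx.tools.cli.container_builder.builder import ContainerBuilder

builder = ContainerBuilder(
    target_image="gcr.io/my-project/my-tfx-image",  # placeholder registry path
    base_image="tensorflow/tfx:latest",             # placeholder base image
)

# build() runs Skaffold and returns the target image path with its digest appended.
image_with_sha = builder.build()
print(image_with_sha)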
4_test_tpe.py | g00nsquad/face-identification-tpe | 245 | 6631138 | import json
import matplotlib.pyplot as plt
import numpy as np
from bottleneck import Bottleneck
from cnn import build_cnn
from identification import get_scores, calc_metrics
from tpe import build_tpe
n_in = 256
n_out = 256
with open('data/meta.json', 'r') as f:
meta = json.load(f)
cnn = build_cnn(227, meta['n_subjects'])
cnn.load_weights('data/weights/weights.best.h5')
bottleneck = Bottleneck(cnn, ~1)
train_x, train_y = np.load('data/train_x.npy'), np.load('data/train_y.npy')
dev_x = np.load('data/dev_x.npy')
dev_protocol = np.load('data/dev_protocol.npy')
train_emb = bottleneck.predict(train_x, batch_size=256)
dev_emb = bottleneck.predict(dev_x, batch_size=256)
del train_x
W_pca = np.load('data/w_pca.npy')
tpe, tpe_pred = build_tpe(n_in, n_out, W_pca.T)
train_y = np.array(train_y)
subjects = list(set(train_y))
tpe.load_weights('data/weights/weights.tpe.mineer.h5')
dev_emb2 = tpe_pred.predict(dev_emb)
protocol = np.load('data/dev_protocol.npy')
tsc, isc = get_scores(dev_emb2, protocol)
eer, fars, frrs, dists = calc_metrics(tsc, isc)
print('EER: {}'.format(eer * 100))
plt.figure()
plt.hist(tsc, 20, color='g', normed=True, alpha=0.3)
plt.hist(isc, 20, color='r', normed=True, alpha=0.3)
plt.figure()
plt.loglog(fars, frrs)
plt.show()
for a, b, c in zip(fars, frrs, dists):
print('a: {:.2f} | r: {:.2f} | d: {:.2f}'.format(a, b, c))
| import json
import matplotlib.pyplot as plt
import numpy as np
from bottleneck import Bottleneck
from cnn import build_cnn
from identification import get_scores, calc_metrics
from tpe import build_tpe
n_in = 256
n_out = 256
with open('data/meta.json', 'r') as f:
meta = json.load(f)
cnn = build_cnn(227, meta['n_subjects'])
cnn.load_weights('data/weights/weights.best.h5')
bottleneck = Bottleneck(cnn, ~1)
train_x, train_y = np.load('data/train_x.npy'), np.load('data/train_y.npy')
dev_x = np.load('data/dev_x.npy')
dev_protocol = np.load('data/dev_protocol.npy')
train_emb = bottleneck.predict(train_x, batch_size=256)
dev_emb = bottleneck.predict(dev_x, batch_size=256)
del train_x
W_pca = np.load('data/w_pca.npy')
tpe, tpe_pred = build_tpe(n_in, n_out, W_pca.T)
train_y = np.array(train_y)
subjects = list(set(train_y))
tpe.load_weights('data/weights/weights.tpe.mineer.h5')
dev_emb2 = tpe_pred.predict(dev_emb)
protocol = np.load('data/dev_protocol.npy')
tsc, isc = get_scores(dev_emb2, protocol)
eer, fars, frrs, dists = calc_metrics(tsc, isc)
print('EER: {}'.format(eer * 100))
plt.figure()
plt.hist(tsc, 20, color='g', normed=True, alpha=0.3)
plt.hist(isc, 20, color='r', normed=True, alpha=0.3)
plt.figure()
plt.loglog(fars, frrs)
plt.show()
for a, b, c in zip(fars, frrs, dists):
print('a: {:.2f} | r: {:.2f} | d: {:.2f}'.format(a, b, c))
| none | 1 | 2.309342 | 2 |
|
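The EER reported above comes from the project's calc_metrics; as a point of reference, the sketch below shows one generic way to compute an equal error rate from genuine (tsc) and impostor (isc) score arrays with plain numpy. It assumes higher scores mean "same identity" and is not the repository's implementation.

import numpy as np


def equal_error_rate(genuine, impostor, n_thresholds=1000):
    thresholds = np.linspace(min(genuine.min(), impostor.min()),
                             max(genuine.max(), impostor.max()),
                             n_thresholds)
    # FRR: genuine pairs rejected; FAR: impostor pairs accepted.
    frr = np.array([(genuine < t).mean() for t in thresholds])
    far = np.array([(impostor >= t).mean() for t in thresholds])
    idx = np.argmin(np.abs(far - frr))
    return (far[idx] + frr[idx]) / 2.0, thresholds[idx]


if __name__ == "__main__":
    rng = np.random.RandomState(0)
    tsc = rng.normal(0.7, 0.1, 1000)   # toy genuine scores
    isc = rng.normal(0.3, 0.1, 1000)   # toy impostor scores
    eer, threshold = equal_error_rate(tsc, isc)
    print("EER: {:.2f}% at threshold {:.3f}".format(eer * 100, threshold))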
tf_external/__init__.py | allez-allez-allez/terraform-external-file-cache | 0 | 6631139 | # -*- coding: utf-8 -*-
"""Provides wrapper implementing the terraform external interface."""
from __future__ import (absolute_import, division, print_function,
unicode_literals, with_statement)
import json
class TfExternal(object):
"""Wrap Terraform External provider."""
@staticmethod
def query_args(obj):
"""Load json object from stdin."""
return {} if obj.isatty() else json.load(obj)
@staticmethod
def out_json(result):
"""Print result to stdout."""
print(json.dumps(result))
| # -*- coding: utf-8 -*-
"""Provides wrapper implementing the terraform external interface."""
from __future__ import (absolute_import, division, print_function,
unicode_literals, with_statement)
import json
class TfExternal(object):
"""Wrap Terraform External provider."""
@staticmethod
def query_args(obj):
"""Load json object from stdin."""
return {} if obj.isatty() else json.load(obj)
@staticmethod
def out_json(result):
"""Print result to stdout."""
print(json.dumps(result))
| en | 0.522363 | # -*- coding: utf-8 -*- Provides wrapper implementing the terraform external interface. Wrap Terraform External provider. Load json object from stdin. Print result to stdout. | 2.162975 | 2 |
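TfExternal follows Terraform's external data source protocol: a JSON query arrives on stdin and a JSON object must be printed to stdout. A minimal script using the wrapper above might look like this; the "greeting" key is illustrative (Terraform expects string values).

import sys

from tf_external import TfExternal


def main():
    external = TfExternal()
    query = external.query_args(sys.stdin)   # returns {} when stdin is a TTY
    name = query.get("name", "world")
    external.out_json({"greeting": "hello " + name})


if __name__ == "__main__":
    main()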
mvpd/MVPD_L2_LR.py | sccnlab/PyMVPD | 6 | 6631140 | # MVPD - Regularized (L2) Linear Regression Model
import os
import numpy as np
import nibabel as nib
import itertools as it
from mvpd.dataloader.loader_regression import ROI_Dataset
from mvpd.func_regression.L2_LR import L2_LR
from mvpd.evaluation import var_expl
from mvpd.viz import viz_map
def run_L2_LR(model_type, sub, total_run, alpha, roidata_save_dir, roi_1_name, roi_2_name, filepath_func, filepath_mask1, filepath_mask2, results_save_dir, save_prediction):
# create output folder if not exists
if not os.path.exists(results_save_dir):
os.mkdir(results_save_dir)
for this_run in range(1, total_run+1):
print("test run:", this_run)
        # Load functional data and ROI masks
# Training
roi_train = ROI_Dataset()
roi_train.get_train(roidata_save_dir, roi_1_name, roi_2_name, this_run, total_run)
ROI_1_train = roi_train[:]['ROI_1']
ROI_2_train = roi_train[:]['ROI_2']
# Testing
roi_test = ROI_Dataset()
roi_test.get_test(roidata_save_dir, roi_1_name, roi_2_name, this_run, total_run)
ROI_1_test = roi_test[:]['ROI_1']
ROI_2_test = roi_test[:]['ROI_2']
# L2 Regularized Linear Regression Model
predict_ROI_2_test, err_LR = L2_LR(ROI_1_train, ROI_2_train, ROI_1_test, ROI_2_test, alpha)
if save_prediction:
np.save(results_save_dir+sub+'_predict_ROI_2_'+model_type+'_testrun'+str(this_run)+'.npy', predict_ROI_2_test)
# Evaluation: variance explained
varexpl = var_expl.eval_var_expl(err_LR, ROI_2_test)
# Visualization
var_expl_map, var_expl_img = viz_map.cmetric_to_map(filepath_mask2, varexpl)
nib.save(var_expl_img, results_save_dir+sub+'_var_expl_map_'+model_type+'_testrun'+str(this_run)+'.nii.gz')
| # MVPD - Regularized (L2) Linear Regression Model
import os
import numpy as np
import nibabel as nib
import itertools as it
from mvpd.dataloader.loader_regression import ROI_Dataset
from mvpd.func_regression.L2_LR import L2_LR
from mvpd.evaluation import var_expl
from mvpd.viz import viz_map
def run_L2_LR(model_type, sub, total_run, alpha, roidata_save_dir, roi_1_name, roi_2_name, filepath_func, filepath_mask1, filepath_mask2, results_save_dir, save_prediction):
# create output folder if not exists
if not os.path.exists(results_save_dir):
os.mkdir(results_save_dir)
for this_run in range(1, total_run+1):
print("test run:", this_run)
        # Load functional data and ROI masks
# Training
roi_train = ROI_Dataset()
roi_train.get_train(roidata_save_dir, roi_1_name, roi_2_name, this_run, total_run)
ROI_1_train = roi_train[:]['ROI_1']
ROI_2_train = roi_train[:]['ROI_2']
# Testing
roi_test = ROI_Dataset()
roi_test.get_test(roidata_save_dir, roi_1_name, roi_2_name, this_run, total_run)
ROI_1_test = roi_test[:]['ROI_1']
ROI_2_test = roi_test[:]['ROI_2']
# L2 Regularized Linear Regression Model
predict_ROI_2_test, err_LR = L2_LR(ROI_1_train, ROI_2_train, ROI_1_test, ROI_2_test, alpha)
if save_prediction:
np.save(results_save_dir+sub+'_predict_ROI_2_'+model_type+'_testrun'+str(this_run)+'.npy', predict_ROI_2_test)
# Evaluation: variance explained
varexpl = var_expl.eval_var_expl(err_LR, ROI_2_test)
# Visualization
var_expl_map, var_expl_img = viz_map.cmetric_to_map(filepath_mask2, varexpl)
nib.save(var_expl_img, results_save_dir+sub+'_var_expl_map_'+model_type+'_testrun'+str(this_run)+'.nii.gz')
| en | 0.64219 | # MVPD - Regularized (L2) Linear Regression Model # create output folder if not exists # Load functioanl data and ROI masks # Training # Testing # L2 Regularized Linear Regression Model # Evaluation: variance explained # Visualization | 2.423099 | 2 |
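run_L2_LR delegates the actual fit to the project's L2_LR; the sketch below shows a generic L2-regularized (ridge) mapping between two ROIs' time courses with scikit-learn, plus per-voxel variance explained, to illustrate the idea. It is not the repository's implementation, and the toy data shapes are arbitrary.

import numpy as np
from sklearn.linear_model import Ridge


def ridge_mvpd(roi1_train, roi2_train, roi1_test, roi2_test, alpha=1.0):
    model = Ridge(alpha=alpha)
    model.fit(roi1_train, roi2_train)        # rows = timepoints, cols = voxels
    prediction = model.predict(roi1_test)
    err = roi2_test - prediction
    # Variance explained per predicted voxel, floored at zero.
    var_expl = 1.0 - err.var(axis=0) / roi2_test.var(axis=0)
    return prediction, np.clip(var_expl, 0.0, None)


if __name__ == "__main__":
    rng = np.random.RandomState(0)
    roi1 = rng.randn(200, 50)                # toy ROI_1: 200 TRs x 50 voxels
    roi2 = roi1 @ rng.randn(50, 30) + 0.1 * rng.randn(200, 30)
    pred, ve = ridge_mvpd(roi1[:150], roi2[:150], roi1[150:], roi2[150:])
    print("mean variance explained:", ve.mean())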
src/openpersonen/api/tests/test_data/__init__.py | maykinmedia/open-personen | 2 | 6631141 | from .ingeschreven_persoon import * # noqa
from .kind import * # noqa
from .nationaliteit_historie import * # noqa
from .ouder import * # noqa
from .partner import * # noqa
from .partner_historie import * # noqa
from .verblijf_plaats_historie import * # noqa
from .verblijfs_titel_historie import * # noqa
| from .ingeschreven_persoon import * # noqa
from .kind import * # noqa
from .nationaliteit_historie import * # noqa
from .ouder import * # noqa
from .partner import * # noqa
from .partner_historie import * # noqa
from .verblijf_plaats_historie import * # noqa
from .verblijfs_titel_historie import * # noqa
| uz | 0.449824 | # noqa # noqa # noqa # noqa # noqa # noqa # noqa # noqa | 1.039865 | 1 |
mailqueue/tests/utils.py | winfieldco/django-mail-queue | 0 | 6631142 | <reponame>winfieldco/django-mail-queue
from ..models import MailerMessage
def create_email(**kwargs):
"""
Utility function to make creating MailerMessage instances easier when testing.
You can create a MailerMessage with default options in the database::
message = create_email()
You can override default options::
message = create_email(subject="My subject", content="My content")
You can also avoid saving to the database::
message = create_email(do_not_save=True)
"""
defaults = {
"subject": "Test email",
"to_address": "<EMAIL>",
"from_address": "<EMAIL>",
"content": "We are the Knights who say... NI.",
"app": "Test suite"
}
defaults.update(**kwargs)
if defaults.pop("do_not_save", False):
return MailerMessage(**defaults)
return MailerMessage.objects.create(**defaults) | from ..models import MailerMessage
def create_email(**kwargs):
"""
Utility function to make creating MailerMessage instances easier when testing.
You can create a MailerMessage with default options in the database::
message = create_email()
You can override default options::
message = create_email(subject="My subject", content="My content")
You can also avoid saving to the database::
message = create_email(do_not_save=True)
"""
defaults = {
"subject": "Test email",
"to_address": "<EMAIL>",
"from_address": "<EMAIL>",
"content": "We are the Knights who say... NI.",
"app": "Test suite"
}
defaults.update(**kwargs)
if defaults.pop("do_not_save", False):
return MailerMessage(**defaults)
return MailerMessage.objects.create(**defaults) | en | 0.39645 | Utility function to make creating MailerMessage instances easier when testing. You can create a MailerMessage with default options in the database:: message = create_email() You can override default options:: message = create_email(subject="My subject", content="My content") You can also avoid saving to the database:: message = create_email(do_not_save=True) | 2.832484 | 3 |
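A hypothetical pytest-style use of the create_email() helper above; it assumes django-mail-queue is installed with Django settings configured and a test database available (for example via pytest-django's django_db marker).

import pytest

from mailqueue.tests.utils import create_email


@pytest.mark.django_db
def test_create_email_defaults():
    message = create_email()
    assert message.subject == "Test email"   # default from the helper
    assert message.pk is not None            # saved to the database


def test_create_email_unsaved():
    message = create_email(do_not_save=True)
    assert message.pk is None                # instance only, nothing persisted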
install/app_store/tk-multi-shotgunpanel/v1.4.9/python/app/action_manager.py | JoanAzpeitia/lp_sg | 0 | 6631143 | # Copyright (c) 2015 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sgtk
import datetime
import os
import sys
from sgtk.platform.qt import QtCore, QtGui
from tank_vendor import shotgun_api3
from sgtk import TankError
class ActionManager(QtCore.QObject):
"""
Manager class that is used to generate action menus and dispatch action
execution into the various action hooks. This provides an interface between
the action hooks, action defs in the config, and the rest of the app.
"""
# emitted when the user requests a refresh via the actions system
refresh_request = QtCore.Signal()
# the area of the UI that an action is being requested/run for.
UI_AREA_MAIN = 0x1
UI_AREA_DETAILS = 0x2
def __init__(self, parent):
"""
Constructor
"""
QtCore.QObject.__init__(self, parent)
self._app = sgtk.platform.current_bundle()
def get_actions(self, sg_data, ui_area):
"""
Returns a list of actions for an entity
:param sg_data: Shotgun data for a publish
:param ui_area: Indicates which part of the UI the request is coming from.
Currently one of UI_AREA_MAIN, UI_AREA_DETAILS and UI_AREA_HISTORY
:returns: List of QAction objects, ready to be parented to some QT Widgetry.
"""
if sg_data is None:
return []
# check if we have logic configured to handle this
action_defs = []
all_mappings = self._app.get_setting("action_mappings")
if all_mappings.get(sg_data["type"]):
mappings = all_mappings[ sg_data["type"] ]
            # this is now a list of items, each a dictionary
# with keys filters and actions
# [{'filters': {}, 'actions': ['assign_task']}]
# now cull out actions that don't match our filters
actions_to_evaluate = []
for mapping in mappings:
actions_def = mapping["actions"]
filters_def = mapping["filters"]
if filters_def is None or len(filters_def) == 0:
# no filters to consider
actions_to_evaluate.extend(actions_def)
else:
# filters are on the form
# field_name: value
for (field_name, field_value) in filters_def.iteritems():
# resolve linked fields into a string value
sg_value = sg_data.get(field_name)
if isinstance(sg_value, dict):
sg_value = sg_value.get("name")
# check if the filter is valid
if sg_value == field_value:
actions_to_evaluate.extend(actions_def)
if len(actions_to_evaluate) > 0:
                # cool, so we have one or more actions to run through the hook
# call out to hook to give us the specifics.
# resolve UI area
if ui_area == self.UI_AREA_DETAILS:
ui_area_str = "details"
elif ui_area == self.UI_AREA_MAIN:
ui_area_str = "main"
else:
raise TankError("Unsupported UI_AREA. Contact support.")
# convert created_at unix time stamp to shotgun std time stamp
unix_timestamp = sg_data.get("created_at")
if unix_timestamp:
sg_timestamp = datetime.datetime.fromtimestamp(unix_timestamp,
shotgun_api3.sg_timezone.LocalTimezone())
sg_data["created_at"] = sg_timestamp
action_defs = []
try:
action_defs = self._app.execute_hook_method("actions_hook",
"generate_actions",
sg_data=sg_data,
actions=actions_to_evaluate,
ui_area=ui_area_str)
except Exception:
self._app.log_exception("Could not execute generate_actions hook.")
# create QActions
actions = []
for action_def in action_defs:
name = action_def["name"]
caption = action_def["caption"]
params = action_def["params"]
description = action_def["description"]
a = QtGui.QAction(caption, None)
a.setToolTip(description)
a.triggered[()].connect(lambda n=name, sg=sg_data, p=params: self._execute_hook(n, sg, p))
actions.append(a)
if ui_area == self.UI_AREA_DETAILS:
actions = self._get_default_detail_actions(sg_data) + actions
return actions
def _get_default_detail_actions(self, sg_data):
"""
Returns a list of default actions for the detail area
:param sg_data: Shotgun data directory
"""
refresh = QtGui.QAction("Refresh", None)
refresh.triggered[()].connect(lambda f=sg_data: self._refresh(f))
view_in_sg = QtGui.QAction("View in Shotgun", None)
view_in_sg.triggered[()].connect(lambda f=sg_data: self._show_in_sg(f))
copy_url = QtGui.QAction("Copy Shotgun url to clipboard", None)
copy_url.triggered[()].connect(lambda f=sg_data: self._copy_to_clipboard(f))
show_docs = QtGui.QAction("Documentation", None)
show_docs.triggered[()].connect(self._show_docs)
separator = QtGui.QAction(None)
separator.setSeparator(True)
return [refresh, view_in_sg, copy_url, show_docs, separator]
########################################################################################
# callbacks
def _execute_hook(self, action_name, sg_data, params):
"""
callback - executes a hook
:param action_name: Name of action to execute
:param sg_data: Shotgun data dictionary
:param params: action parameters passed in from the hook
"""
self._app.log_debug("Calling action hook for %s. "
"Params: %s. Sg data: %s" % (action_name, params, sg_data))
try:
self._app.execute_hook_method("actions_hook",
"execute_action",
name=action_name,
params=params,
sg_data=sg_data)
# refresh UI
self.refresh_request.emit()
except Exception, e:
self._app.log_exception("Could not execute execute_action hook.")
QtGui.QMessageBox.critical(None, "Action Error", "Error: %s" % e)
else:
self._app._log_metric_launched_action(action_name)
def _show_docs(self):
"""
Internal action callback - Launch app documentation
"""
self._app.log_debug("Opening url %s..." % self._app.documentation_url)
QtGui.QDesktopServices.openUrl(QtCore.QUrl(self._app.documentation_url))
def _refresh(self, entity):
"""
Internal action callback - refreshes the main dialog UI
:param entity: std sg entity dict with keys type, id and name
"""
self.refresh_request.emit()
def _show_in_sg(self, entity):
"""
Internal action callback - Shows a shotgun entity in the web browser
:param entity: std sg entity dict with keys type, id and name
"""
url = "%s/detail/%s/%d" % (self._app.sgtk.shotgun.base_url, entity["type"], entity["id"])
QtGui.QDesktopServices.openUrl(QtCore.QUrl(url))
def _copy_to_clipboard(self, entity):
"""
Internal action callback - copy shotgun url to clipboard
:param entity: std sg entity dict with keys type, id and name
"""
url = "%s/detail/%s/%d" % (self._app.sgtk.shotgun.base_url, entity["type"], entity["id"])
app = QtCore.QCoreApplication.instance()
app.clipboard().setText(url)
| # Copyright (c) 2015 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sgtk
import datetime
import os
import sys
from sgtk.platform.qt import QtCore, QtGui
from tank_vendor import shotgun_api3
from sgtk import TankError
class ActionManager(QtCore.QObject):
"""
Manager class that is used to generate action menus and dispatch action
execution into the various action hooks. This provides an interface between
the action hooks, action defs in the config, and the rest of the app.
"""
# emitted when the user requests a refresh via the actions system
refresh_request = QtCore.Signal()
# the area of the UI that an action is being requested/run for.
UI_AREA_MAIN = 0x1
UI_AREA_DETAILS = 0x2
def __init__(self, parent):
"""
Constructor
"""
QtCore.QObject.__init__(self, parent)
self._app = sgtk.platform.current_bundle()
def get_actions(self, sg_data, ui_area):
"""
Returns a list of actions for an entity
:param sg_data: Shotgun data for a publish
:param ui_area: Indicates which part of the UI the request is coming from.
Currently one of UI_AREA_MAIN, UI_AREA_DETAILS and UI_AREA_HISTORY
:returns: List of QAction objects, ready to be parented to some QT Widgetry.
"""
if sg_data is None:
return []
# check if we have logic configured to handle this
action_defs = []
all_mappings = self._app.get_setting("action_mappings")
if all_mappings.get(sg_data["type"]):
mappings = all_mappings[ sg_data["type"] ]
            # this is now a list of items, each a dictionary
# with keys filters and actions
# [{'filters': {}, 'actions': ['assign_task']}]
# now cull out actions that don't match our filters
actions_to_evaluate = []
for mapping in mappings:
actions_def = mapping["actions"]
filters_def = mapping["filters"]
if filters_def is None or len(filters_def) == 0:
# no filters to consider
actions_to_evaluate.extend(actions_def)
else:
# filters are on the form
# field_name: value
for (field_name, field_value) in filters_def.iteritems():
# resolve linked fields into a string value
sg_value = sg_data.get(field_name)
if isinstance(sg_value, dict):
sg_value = sg_value.get("name")
# check if the filter is valid
if sg_value == field_value:
actions_to_evaluate.extend(actions_def)
if len(actions_to_evaluate) > 0:
                # cool, so we have one or more actions to run through the hook
# call out to hook to give us the specifics.
# resolve UI area
if ui_area == self.UI_AREA_DETAILS:
ui_area_str = "details"
elif ui_area == self.UI_AREA_MAIN:
ui_area_str = "main"
else:
raise TankError("Unsupported UI_AREA. Contact support.")
# convert created_at unix time stamp to shotgun std time stamp
unix_timestamp = sg_data.get("created_at")
if unix_timestamp:
sg_timestamp = datetime.datetime.fromtimestamp(unix_timestamp,
shotgun_api3.sg_timezone.LocalTimezone())
sg_data["created_at"] = sg_timestamp
action_defs = []
try:
action_defs = self._app.execute_hook_method("actions_hook",
"generate_actions",
sg_data=sg_data,
actions=actions_to_evaluate,
ui_area=ui_area_str)
except Exception:
self._app.log_exception("Could not execute generate_actions hook.")
# create QActions
actions = []
for action_def in action_defs:
name = action_def["name"]
caption = action_def["caption"]
params = action_def["params"]
description = action_def["description"]
a = QtGui.QAction(caption, None)
a.setToolTip(description)
a.triggered[()].connect(lambda n=name, sg=sg_data, p=params: self._execute_hook(n, sg, p))
actions.append(a)
if ui_area == self.UI_AREA_DETAILS:
actions = self._get_default_detail_actions(sg_data) + actions
return actions
def _get_default_detail_actions(self, sg_data):
"""
Returns a list of default actions for the detail area
:param sg_data: Shotgun data directory
"""
refresh = QtGui.QAction("Refresh", None)
refresh.triggered[()].connect(lambda f=sg_data: self._refresh(f))
view_in_sg = QtGui.QAction("View in Shotgun", None)
view_in_sg.triggered[()].connect(lambda f=sg_data: self._show_in_sg(f))
copy_url = QtGui.QAction("Copy Shotgun url to clipboard", None)
copy_url.triggered[()].connect(lambda f=sg_data: self._copy_to_clipboard(f))
show_docs = QtGui.QAction("Documentation", None)
show_docs.triggered[()].connect(self._show_docs)
separator = QtGui.QAction(None)
separator.setSeparator(True)
return [refresh, view_in_sg, copy_url, show_docs, separator]
########################################################################################
# callbacks
def _execute_hook(self, action_name, sg_data, params):
"""
callback - executes a hook
:param action_name: Name of action to execute
:param sg_data: Shotgun data dictionary
:param params: action parameters passed in from the hook
"""
self._app.log_debug("Calling action hook for %s. "
"Params: %s. Sg data: %s" % (action_name, params, sg_data))
try:
self._app.execute_hook_method("actions_hook",
"execute_action",
name=action_name,
params=params,
sg_data=sg_data)
# refresh UI
self.refresh_request.emit()
except Exception, e:
self._app.log_exception("Could not execute execute_action hook.")
QtGui.QMessageBox.critical(None, "Action Error", "Error: %s" % e)
else:
self._app._log_metric_launched_action(action_name)
def _show_docs(self):
"""
Internal action callback - Launch app documentation
"""
self._app.log_debug("Opening url %s..." % self._app.documentation_url)
QtGui.QDesktopServices.openUrl(QtCore.QUrl(self._app.documentation_url))
def _refresh(self, entity):
"""
Internal action callback - refreshes the main dialog UI
:param entity: std sg entity dict with keys type, id and name
"""
self.refresh_request.emit()
def _show_in_sg(self, entity):
"""
Internal action callback - Shows a shotgun entity in the web browser
:param entity: std sg entity dict with keys type, id and name
"""
url = "%s/detail/%s/%d" % (self._app.sgtk.shotgun.base_url, entity["type"], entity["id"])
QtGui.QDesktopServices.openUrl(QtCore.QUrl(url))
def _copy_to_clipboard(self, entity):
"""
Internal action callback - copy shotgun url to clipboard
:param entity: std sg entity dict with keys type, id and name
"""
url = "%s/detail/%s/%d" % (self._app.sgtk.shotgun.base_url, entity["type"], entity["id"])
app = QtCore.QCoreApplication.instance()
app.clipboard().setText(url)
| en | 0.755321 | # Copyright (c) 2015 Shotgun Software Inc. # # CONFIDENTIAL AND PROPRIETARY # # This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit # Source Code License included in this distribution package. See LICENSE. # By accessing, using, copying or modifying this work you indicate your # agreement to the Shotgun Pipeline Toolkit Source Code License. All rights # not expressly granted therein are reserved by Shotgun Software Inc. Manager class that is used to generate action menus and dispatch action execution into the various action hooks. This provides an interface between the action hooks, action defs in the config, and the rest of the app. # emitted when the user requests a refresh via the actions system # the area of the UI that an action is being requested/run for. Constructor Returns a list of actions for an entity :param sg_data: Shotgun data for a publish :param ui_area: Indicates which part of the UI the request is coming from. Currently one of UI_AREA_MAIN, UI_AREA_DETAILS and UI_AREA_HISTORY :returns: List of QAction objects, ready to be parented to some QT Widgetry. # check if we have logic configured to handle this # this is now a list of items, each a dictioary # with keys filters and actions # [{'filters': {}, 'actions': ['assign_task']}] # now cull out actions that don't match our filters # no filters to consider # filters are on the form # field_name: value # resolve linked fields into a string value # check if the filter is valid # no actions to run through the hook # cool so we have one or more actions # call out to hook to give us the specifics. # resolve UI area # convert created_at unix time stamp to shotgun std time stamp # create QActions Returns a list of default actions for the detail area :param sg_data: Shotgun data directory ######################################################################################## # callbacks callback - executes a hook :param action_name: Name of action to execute :param sg_data: Shotgun data dictionary :param params: action parameters passed in from the hook # refresh UI Internal action callback - Launch app documentation Internal action callback - refreshes the main dialog UI :param entity: std sg entity dict with keys type, id and name Internal action callback - Shows a shotgun entity in the web browser :param entity: std sg entity dict with keys type, id and name Internal action callback - copy shotgun url to clipboard :param entity: std sg entity dict with keys type, id and name | 1.894489 | 2 |
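A hedged sketch of how a panel widget might consume get_actions() to build a context menu. The widget, position, and sg_data arguments are placeholders; the real app wires this up inside its Shotgun panel UI:
from sgtk.platform.qt import QtGui
def show_context_menu(action_manager, sg_data, parent_widget, pos):
    # Build a QMenu from whatever actions the configuration and hook resolve
    # for this entity, then pop it up at the requested widget position.
    menu = QtGui.QMenu(parent_widget)
    for action in action_manager.get_actions(sg_data, action_manager.UI_AREA_DETAILS):
        # get_actions() returns unparented QActions; adopt them into the menu
        action.setParent(menu)
        menu.addAction(action)
    menu.exec_(parent_widget.mapToGlobal(pos))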
env/Lib/site-packages/google_images_search/fetch_resize_save.py | theXtroyer1221/Climate-luft | 1 | 6631144 | <filename>env/Lib/site-packages/google_images_search/fetch_resize_save.py
import os
import curses
import requests
import threading
from PIL import Image
from resizeimage import resizeimage, imageexceptions
from .google_api import GoogleCustomSearch
IMAGES_NUM_LIMIT = 10
class FetchResizeSave(object):
"""Class with resizing and downloading logic"""
def __init__(self, developer_key, custom_search_cx,
progressbar_fn=None, progress=False, validate_images=True):
# initialise google api
self._google_custom_search = GoogleCustomSearch(
developer_key, custom_search_cx, self)
self._search_result = []
self.validate_images = validate_images
self._stdscr = None
self._progress = False
self._chunk_sizes = {}
self.zero_return = False
self._terminal_lines = {}
self._search_again = False
self._download_progress = {}
self._custom_image_name = None
self._report_progress = progressbar_fn
self._set_data()
self._page = 1
self._number_of_images = None
if progressbar_fn:
            # user inserted progressbar fn
self._progress = True
else:
if progress:
# initialise internal progressbar
self._progress = True
self._stdscr = curses.initscr()
self._report_progress = self.__report_progress
def _set_data(self, search_params=None, path_to_dir=False,
width=None, height=None, cache_discovery=True):
"""Set data for Google api search, save and resize
:param search_params: parameters for Google API Search
:param path_to_dir: path where the images should be downloaded
:param width: crop width of the images
:param height: crop height of the images
:param cache_discovery: whether or not to cache the discovery doc
:return: None
"""
self._width = width
self._height = height
self._path_to_dir = path_to_dir
self._search_params = search_params
self._cache_discovery = cache_discovery
def _get_data(self):
"""Get data for Google api search, save and resize
:return: tuple
"""
return self._search_params, \
self._path_to_dir, \
self._width,\
self._height,\
self._cache_discovery
def search(self, search_params, path_to_dir=False, width=None,
height=None, custom_image_name=None, cache_discovery=False):
"""Fetched images using Google API and does the download and resize
if path_to_dir and width and height variables are provided.
:param search_params: parameters for Google API Search
:param path_to_dir: path where the images should be downloaded
:param width: crop width of the images
:param height: crop height of the images
:param cache_discovery: whether or not to cache the discovery doc
:param custom_image_name: define custom filename
:return: None
"""
self._custom_image_name = custom_image_name
if not self._search_again:
self._set_data(
search_params, path_to_dir, width, height, cache_discovery
)
self._search_result = []
# number of images required from lib user is important
# save it only when searching for the first time
if not self._number_of_images:
self._number_of_images = search_params.get('num') or 1
start = self._number_of_images * (self._page - 1)
end = self._number_of_images * self._page
for i, page in enumerate(range(start, end, IMAGES_NUM_LIMIT)):
start = page+1
if self._number_of_images >= IMAGES_NUM_LIMIT*(i+1):
num = IMAGES_NUM_LIMIT
else:
num = (self._number_of_images % IMAGES_NUM_LIMIT) or \
self._number_of_images
self._search_params['start'] = start
self._search_params['num'] = num
self._search_images(*self._get_data())
if len(self._search_result) >= self._number_of_images or self.zero_return:
break
else:
# run search again if validation removed some images
# and desired number of images is not reached
self.next_page(search_again=True)
self._search_result = self._search_result[:self._number_of_images]
def _search_images(self, search_params, path_to_dir=False, width=None,
height=None, cache_discovery=False):
"""Fetched images using Google API and does the download and resize
if path_to_dir and width and height variables are provided.
:param search_params: parameters for Google API Search
:param path_to_dir: path where the images should be downloaded
:param width: crop width of the images
:param height: crop height of the images
:param cache_discovery: whether or not to cache the discovery doc
:return: None
"""
i = 0
threads = []
for url in self._google_custom_search.search(
search_params, cache_discovery
):
# initialise image object
image = GSImage(self)
image.url = url
# set thread safe variables
self._download_progress[url] = 0
self._terminal_lines[url] = i
i += 2
# set thread with function and arguments
thread = threading.Thread(
target=self._download_and_resize,
args=(path_to_dir, image, width, height)
)
# start thread
thread.start()
# register thread
threads.append(thread)
# wait for all threads to end here
for thread in threads:
thread.join()
if self._progress:
if self._stdscr:
curses.endwin()
def next_page(self, search_again=False):
"""Get next batch of images.
Number of images is defined with num search parameter.
:return: search results
"""
self._page += 1
self._search_again = search_again
self.search(*self._get_data())
def set_chunk_size(self, url, content_size):
"""Set images chunk size according to its size
:param url: image url
:param content_size: image size
:return: None
"""
self._chunk_sizes[url] = int(int(content_size) / 100) + 1
def _download_and_resize(self, path_to_dir, image, width, height):
"""Method used for threading
:param path_to_dir: path to download dir
:param image: image object
:param width: crop width
:param height: crop height
:return: None
"""
if path_to_dir:
image.download(path_to_dir)
if width and height:
try:
image.resize(width, height)
except imageexceptions.ImageSizeError:
pass
self._search_result.append(image)
def results(self):
"""Returns objects of downloaded images
:return: list
"""
return self._search_result
def download(self, url, path_to_dir):
"""Downloads image from url to path dir
Used only by GSImage class
:param url: image url
:param path_to_dir: path to directory where image should be saved
:return: path to image
"""
if not os.path.exists(path_to_dir):
os.makedirs(path_to_dir)
raw_filename = url.split('/')[-1].split('?')[0]
basename, _ = os.path.splitext(raw_filename)
ext = '.jpg'
if self._custom_image_name:
def increment_naming(dir_list, name, number=0):
if number:
file_name = ''.join([name, '(', str(number), ')', ext])
else:
file_name = ''.join([name, ext])
if file_name in dir_list:
return increment_naming(dir_list, name, number+1)
else:
return file_name
basename = increment_naming(
os.listdir(path_to_dir), self._custom_image_name)
else:
basename = basename + ext
path_to_image = os.path.join(path_to_dir, basename)
with open(path_to_image, 'wb') as f:
for chunk in self.get_raw_data(url):
f.write(chunk)
Image.open(path_to_image).convert('RGB').save(path_to_image, 'jpeg')
return path_to_image
def get_raw_data(self, url):
"""Generator method for downloading images in chunks
:param url: url to image
:return: raw image data
"""
with requests.get(url, stream=True) as req:
for chunk in req.iter_content(chunk_size=self._chunk_sizes.get(url)):
# filter out keep-alive new chunks
if chunk:
# report progress
if self._progress:
self._download_progress[url] += 1
if self._download_progress[url] <= 100:
self._report_progress(url, self._download_progress[url])
yield chunk
@staticmethod
def resize(path_to_image, width, height):
"""Resize the image and save it again.
:param path_to_image: os.path
:param width: int
:param height: int
:return: None
"""
fd_img = open(path_to_image, 'rb')
img = Image.open(fd_img)
img = resizeimage.resize_cover(img, [int(width), int(height)])
img.save(path_to_image, img.format)
fd_img.close()
def __report_progress(self, url, progress):
"""Prints a progress bar in terminal
:param url:
:param progress:
:return:
"""
self._stdscr.addstr(
self._terminal_lines[url], 0, "Downloading file: {0}".format(url)
)
self._stdscr.addstr(
self._terminal_lines[url] + 1, 0,
"Progress: [{1:100}] {0}%".format(progress, "#" * progress)
)
self._stdscr.refresh()
class GSImage(object):
"""Class for handling one image"""
def __init__(self, fetch_resize_save):
self._fetch_resize_save = fetch_resize_save
self._url = None
self._path = None
self.resized = False
@property
def url(self):
"""Returns the image url
:return: url
"""
return self._url
@url.setter
def url(self, image_url):
"""Sets the image url
:param image_url: url
:return: None
"""
self._url = image_url
@property
def path(self):
"""Returns image path
:return: path
"""
return self._path
@path.setter
def path(self, image_path):
"""Sets image path
:param image_path: path
:return: None
"""
self._path = image_path
def download(self, path_to_dir):
"""Downloads image from url to path
:param path_to_dir: path
:return: None
"""
self._path = self._fetch_resize_save.download(self._url, path_to_dir)
def get_raw_data(self):
"""Gets images raw data
:return: raw data
"""
return b''.join(list(self._fetch_resize_save.get_raw_data(self._url)))
def copy_to(self, obj, raw_data=None):
"""Copies raw image data to another object, preferably BytesIO
:param obj: BytesIO
:param raw_data: raw data
:return: None
"""
if not raw_data:
raw_data = self.get_raw_data()
obj.write(raw_data)
def resize(self, width, height):
"""Resize the image
:param width: int
:param height: int
:return: None
"""
self._fetch_resize_save.__class__.resize(self._path, width, height)
self.resized = True
| <filename>env/Lib/site-packages/google_images_search/fetch_resize_save.py
import os
import curses
import requests
import threading
from PIL import Image
from resizeimage import resizeimage, imageexceptions
from .google_api import GoogleCustomSearch
IMAGES_NUM_LIMIT = 10
class FetchResizeSave(object):
"""Class with resizing and downloading logic"""
def __init__(self, developer_key, custom_search_cx,
progressbar_fn=None, progress=False, validate_images=True):
# initialise google api
self._google_custom_search = GoogleCustomSearch(
developer_key, custom_search_cx, self)
self._search_result = []
self.validate_images = validate_images
self._stdscr = None
self._progress = False
self._chunk_sizes = {}
self.zero_return = False
self._terminal_lines = {}
self._search_again = False
self._download_progress = {}
self._custom_image_name = None
self._report_progress = progressbar_fn
self._set_data()
self._page = 1
self._number_of_images = None
if progressbar_fn:
            # user inserted progressbar fn
self._progress = True
else:
if progress:
# initialise internal progressbar
self._progress = True
self._stdscr = curses.initscr()
self._report_progress = self.__report_progress
def _set_data(self, search_params=None, path_to_dir=False,
width=None, height=None, cache_discovery=True):
"""Set data for Google api search, save and resize
:param search_params: parameters for Google API Search
:param path_to_dir: path where the images should be downloaded
:param width: crop width of the images
:param height: crop height of the images
:param cache_discovery: whether or not to cache the discovery doc
:return: None
"""
self._width = width
self._height = height
self._path_to_dir = path_to_dir
self._search_params = search_params
self._cache_discovery = cache_discovery
def _get_data(self):
"""Get data for Google api search, save and resize
:return: tuple
"""
return self._search_params, \
self._path_to_dir, \
self._width,\
self._height,\
self._cache_discovery
def search(self, search_params, path_to_dir=False, width=None,
height=None, custom_image_name=None, cache_discovery=False):
"""Fetched images using Google API and does the download and resize
if path_to_dir and width and height variables are provided.
:param search_params: parameters for Google API Search
:param path_to_dir: path where the images should be downloaded
:param width: crop width of the images
:param height: crop height of the images
:param cache_discovery: whether or not to cache the discovery doc
:param custom_image_name: define custom filename
:return: None
"""
self._custom_image_name = custom_image_name
if not self._search_again:
self._set_data(
search_params, path_to_dir, width, height, cache_discovery
)
self._search_result = []
# number of images required from lib user is important
# save it only when searching for the first time
if not self._number_of_images:
self._number_of_images = search_params.get('num') or 1
start = self._number_of_images * (self._page - 1)
end = self._number_of_images * self._page
for i, page in enumerate(range(start, end, IMAGES_NUM_LIMIT)):
start = page+1
if self._number_of_images >= IMAGES_NUM_LIMIT*(i+1):
num = IMAGES_NUM_LIMIT
else:
num = (self._number_of_images % IMAGES_NUM_LIMIT) or \
self._number_of_images
self._search_params['start'] = start
self._search_params['num'] = num
self._search_images(*self._get_data())
if len(self._search_result) >= self._number_of_images or self.zero_return:
break
else:
# run search again if validation removed some images
# and desired number of images is not reached
self.next_page(search_again=True)
self._search_result = self._search_result[:self._number_of_images]
def _search_images(self, search_params, path_to_dir=False, width=None,
height=None, cache_discovery=False):
"""Fetched images using Google API and does the download and resize
if path_to_dir and width and height variables are provided.
:param search_params: parameters for Google API Search
:param path_to_dir: path where the images should be downloaded
:param width: crop width of the images
:param height: crop height of the images
:param cache_discovery: whether or not to cache the discovery doc
:return: None
"""
i = 0
threads = []
for url in self._google_custom_search.search(
search_params, cache_discovery
):
# initialise image object
image = GSImage(self)
image.url = url
# set thread safe variables
self._download_progress[url] = 0
self._terminal_lines[url] = i
i += 2
# set thread with function and arguments
thread = threading.Thread(
target=self._download_and_resize,
args=(path_to_dir, image, width, height)
)
# start thread
thread.start()
# register thread
threads.append(thread)
# wait for all threads to end here
for thread in threads:
thread.join()
if self._progress:
if self._stdscr:
curses.endwin()
def next_page(self, search_again=False):
"""Get next batch of images.
Number of images is defined with num search parameter.
:return: search results
"""
self._page += 1
self._search_again = search_again
self.search(*self._get_data())
def set_chunk_size(self, url, content_size):
"""Set images chunk size according to its size
:param url: image url
:param content_size: image size
:return: None
"""
self._chunk_sizes[url] = int(int(content_size) / 100) + 1
def _download_and_resize(self, path_to_dir, image, width, height):
"""Method used for threading
:param path_to_dir: path to download dir
:param image: image object
:param width: crop width
:param height: crop height
:return: None
"""
if path_to_dir:
image.download(path_to_dir)
if width and height:
try:
image.resize(width, height)
except imageexceptions.ImageSizeError:
pass
self._search_result.append(image)
def results(self):
"""Returns objects of downloaded images
:return: list
"""
return self._search_result
def download(self, url, path_to_dir):
"""Downloads image from url to path dir
Used only by GSImage class
:param url: image url
:param path_to_dir: path to directory where image should be saved
:return: path to image
"""
if not os.path.exists(path_to_dir):
os.makedirs(path_to_dir)
raw_filename = url.split('/')[-1].split('?')[0]
basename, _ = os.path.splitext(raw_filename)
ext = '.jpg'
if self._custom_image_name:
def increment_naming(dir_list, name, number=0):
if number:
file_name = ''.join([name, '(', str(number), ')', ext])
else:
file_name = ''.join([name, ext])
if file_name in dir_list:
return increment_naming(dir_list, name, number+1)
else:
return file_name
basename = increment_naming(
os.listdir(path_to_dir), self._custom_image_name)
else:
basename = basename + ext
path_to_image = os.path.join(path_to_dir, basename)
with open(path_to_image, 'wb') as f:
for chunk in self.get_raw_data(url):
f.write(chunk)
Image.open(path_to_image).convert('RGB').save(path_to_image, 'jpeg')
return path_to_image
def get_raw_data(self, url):
"""Generator method for downloading images in chunks
:param url: url to image
:return: raw image data
"""
with requests.get(url, stream=True) as req:
for chunk in req.iter_content(chunk_size=self._chunk_sizes.get(url)):
# filter out keep-alive new chunks
if chunk:
# report progress
if self._progress:
self._download_progress[url] += 1
if self._download_progress[url] <= 100:
self._report_progress(url, self._download_progress[url])
yield chunk
@staticmethod
def resize(path_to_image, width, height):
"""Resize the image and save it again.
:param path_to_image: os.path
:param width: int
:param height: int
:return: None
"""
fd_img = open(path_to_image, 'rb')
img = Image.open(fd_img)
img = resizeimage.resize_cover(img, [int(width), int(height)])
img.save(path_to_image, img.format)
fd_img.close()
def __report_progress(self, url, progress):
"""Prints a progress bar in terminal
:param url:
:param progress:
:return:
"""
self._stdscr.addstr(
self._terminal_lines[url], 0, "Downloading file: {0}".format(url)
)
self._stdscr.addstr(
self._terminal_lines[url] + 1, 0,
"Progress: [{1:100}] {0}%".format(progress, "#" * progress)
)
self._stdscr.refresh()
class GSImage(object):
"""Class for handling one image"""
def __init__(self, fetch_resize_save):
self._fetch_resize_save = fetch_resize_save
self._url = None
self._path = None
self.resized = False
@property
def url(self):
"""Returns the image url
:return: url
"""
return self._url
@url.setter
def url(self, image_url):
"""Sets the image url
:param image_url: url
:return: None
"""
self._url = image_url
@property
def path(self):
"""Returns image path
:return: path
"""
return self._path
@path.setter
def path(self, image_path):
"""Sets image path
:param image_path: path
:return: None
"""
self._path = image_path
def download(self, path_to_dir):
"""Downloads image from url to path
:param path_to_dir: path
:return: None
"""
self._path = self._fetch_resize_save.download(self._url, path_to_dir)
def get_raw_data(self):
"""Gets images raw data
:return: raw data
"""
return b''.join(list(self._fetch_resize_save.get_raw_data(self._url)))
def copy_to(self, obj, raw_data=None):
"""Copies raw image data to another object, preferably BytesIO
:param obj: BytesIO
:param raw_data: raw data
:return: None
"""
if not raw_data:
raw_data = self.get_raw_data()
obj.write(raw_data)
def resize(self, width, height):
"""Resize the image
:param width: int
:param height: int
:return: None
"""
self._fetch_resize_save.__class__.resize(self._path, width, height)
self.resized = True
| en | 0.693213 | Class with resizing and downloading logic # initialise google api # user nserted progressbar fn # initialise internal progressbar Set data for Google api search, save and resize :param search_params: parameters for Google API Search :param path_to_dir: path where the images should be downloaded :param width: crop width of the images :param height: crop height of the images :param cache_discovery: whether or not to cache the discovery doc :return: None Get data for Google api search, save and resize :return: tuple Fetched images using Google API and does the download and resize if path_to_dir and width and height variables are provided. :param search_params: parameters for Google API Search :param path_to_dir: path where the images should be downloaded :param width: crop width of the images :param height: crop height of the images :param cache_discovery: whether or not to cache the discovery doc :param custom_image_name: define custom filename :return: None # number of images required from lib user is important # save it only when searching for the first time # run search again if validation removed some images # and desired number of images is not reached Fetched images using Google API and does the download and resize if path_to_dir and width and height variables are provided. :param search_params: parameters for Google API Search :param path_to_dir: path where the images should be downloaded :param width: crop width of the images :param height: crop height of the images :param cache_discovery: whether or not to cache the discovery doc :return: None # initialise image object # set thread safe variables # set thread with function and arguments # start thread # register thread # wait for all threads to end here Get next batch of images. Number of images is defined with num search parameter. :return: search results Set images chunk size according to its size :param url: image url :param content_size: image size :return: None Method used for threading :param path_to_dir: path to download dir :param image: image object :param width: crop width :param height: crop height :return: None Returns objects of downloaded images :return: list Downloads image from url to path dir Used only by GSImage class :param url: image url :param path_to_dir: path to directory where image should be saved :return: path to image Generator method for downloading images in chunks :param url: url to image :return: raw image data # filter out keep-alive new chunks # report progress Resize the image and save it again. :param path_to_image: os.path :param width: int :param height: int :return: None Prints a progress bar in terminal :param url: :param progress: :return: Class for handling one image Returns the image url :return: url Sets the image url :param image_url: url :return: None Returns image path :return: path Sets image path :param image_path: path :return: None Downloads image from url to path :param path_to_dir: path :return: None Gets images raw data :return: raw data Copies raw image data to another object, preferably BytesIO :param obj: BytesIO :param raw_data: raw data :return: None Resize the image :param width: int :param height: int :return: None | 2.765059 | 3 |
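A hedged usage sketch of the class above. The API key, search engine id, and search parameters are placeholders, and in the published google_images_search package this class is normally reached through its GoogleImagesSearch wrapper rather than instantiated directly:
from google_images_search.fetch_resize_save import FetchResizeSave
fetcher = FetchResizeSave(
    developer_key="YOUR_API_KEY",      # placeholder
    custom_search_cx="YOUR_CX_ID",     # placeholder
)
search_params = {"q": "red apples", "num": 3, "fileType": "jpg"}
# Download the results and crop them to 500x500 under ./downloads
fetcher.search(search_params, path_to_dir="./downloads", width=500, height=500)
for image in fetcher.results():
    print(image.url, image.path, image.resized)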
setup.py | Virtlink/mavenpy | 0 | 6631145 | <gh_stars>0
from setuptools import setup
setup(
name='mavenpy',
version='0.1.2',
description='Wrapper for calling Maven from Python',
url='http://github.com/Gohla/mavenpy',
author='<NAME>',
author_email='<EMAIL>',
license='Apache 2.0',
packages=['mavenpy'],
install_requires=['pystache'],
test_suite='nose.collector',
tests_require=['nose']
)
| from setuptools import setup
setup(
name='mavenpy',
version='0.1.2',
description='Wrapper for calling Maven from Python',
url='http://github.com/Gohla/mavenpy',
author='<NAME>',
author_email='<EMAIL>',
license='Apache 2.0',
packages=['mavenpy'],
install_requires=['pystache'],
test_suite='nose.collector',
tests_require=['nose']
) | none | 1 | 1.158213 | 1 |
|
junior/packg/__init__.py | Firekiss/python_learn | 0 | 6631146 | <gh_stars>0
from . import ali_pay | from . import ali_pay | none | 1 | 1.02841 | 1 |
|
indy_common/test/test_transactions.py | anikitinDSR/indy-node-jenkins-test | 0 | 6631147 | from indy_common.constants import NYM, NODE, ATTRIB, SCHEMA, CLAIM_DEF, DISCLO, GET_ATTR, GET_NYM, GET_TXNS, \
GET_SCHEMA, GET_CLAIM_DEF, POOL_UPGRADE, NODE_UPGRADE, POOL_CONFIG, REVOC_REG_DEF, REVOC_REG_ENTRY, \
GET_REVOC_REG_DEF, GET_REVOC_REG, GET_REVOC_REG_DELTA, POOL_RESTART, VALIDATOR_INFO, CHANGE_KEY, AUTH_RULE
from indy_common.transactions import IndyTransactions
def testTransactionsAreEncoded():
assert NODE == "0"
assert NYM == "1"
assert GET_TXNS == "3"
assert ATTRIB == "100"
assert SCHEMA == "101"
assert CLAIM_DEF == "102"
assert DISCLO == "103"
assert GET_ATTR == "104"
assert GET_NYM == "105"
assert GET_SCHEMA == "107"
assert GET_CLAIM_DEF == "108"
assert POOL_UPGRADE == "109"
assert NODE_UPGRADE == "110"
assert POOL_CONFIG == "111"
assert CHANGE_KEY == "112"
assert REVOC_REG_DEF == "113"
assert REVOC_REG_ENTRY == "114"
assert GET_REVOC_REG_DEF == "115"
assert GET_REVOC_REG == "116"
assert GET_REVOC_REG_DELTA == "117"
assert POOL_RESTART == "118"
assert VALIDATOR_INFO == "119"
assert AUTH_RULE == "120"
def testTransactionEnumDecoded():
assert IndyTransactions.NODE.name == "NODE"
assert IndyTransactions.NYM.name == "NYM"
assert IndyTransactions.ATTRIB.name == "ATTRIB"
assert IndyTransactions.SCHEMA.name == "SCHEMA"
assert IndyTransactions.CLAIM_DEF.name == "CLAIM_DEF"
assert IndyTransactions.DISCLO.name == "DISCLO"
assert IndyTransactions.GET_ATTR.name == "GET_ATTR"
assert IndyTransactions.GET_NYM.name == "GET_NYM"
assert IndyTransactions.GET_TXNS.name == "GET_TXNS"
assert IndyTransactions.GET_SCHEMA.name == "GET_SCHEMA"
assert IndyTransactions.GET_CLAIM_DEF.name == "GET_CLAIM_DEF"
assert IndyTransactions.POOL_UPGRADE.name == "POOL_UPGRADE"
assert IndyTransactions.NODE_UPGRADE.name == "NODE_UPGRADE"
assert IndyTransactions.POOL_CONFIG.name == "POOL_CONFIG"
assert IndyTransactions.POOL_RESTART.name == "POOL_RESTART"
assert IndyTransactions.CHANGE_KEY.name == "CHANGE_KEY"
assert IndyTransactions.REVOC_REG_DEF.name == "REVOC_REG_DEF"
assert IndyTransactions.REVOC_REG_ENTRY.name == "REVOC_REG_ENTRY"
assert IndyTransactions.GET_REVOC_REG_DEF.name == "GET_REVOC_REG_DEF"
assert IndyTransactions.GET_REVOC_REG.name == "GET_REVOC_REG"
assert IndyTransactions.GET_REVOC_REG_DELTA.name == "GET_REVOC_REG_DELTA"
assert IndyTransactions.VALIDATOR_INFO.name == "VALIDATOR_INFO"
def testTransactionEnumEncoded():
assert IndyTransactions.NODE.value == "0"
assert IndyTransactions.NYM.value == "1"
assert IndyTransactions.GET_TXNS.value == "3"
assert IndyTransactions.ATTRIB.value == "100"
assert IndyTransactions.SCHEMA.value == "101"
assert IndyTransactions.CLAIM_DEF.value == "102"
assert IndyTransactions.DISCLO.value == "103"
assert IndyTransactions.GET_ATTR.value == "104"
assert IndyTransactions.GET_NYM.value == "105"
assert IndyTransactions.GET_SCHEMA.value == "107"
assert IndyTransactions.GET_CLAIM_DEF.value == "108"
assert IndyTransactions.POOL_UPGRADE.value == "109"
assert IndyTransactions.NODE_UPGRADE.value == "110"
assert IndyTransactions.POOL_CONFIG.value == "111"
assert IndyTransactions.CHANGE_KEY.value == "112"
assert IndyTransactions.REVOC_REG_DEF.value == "113"
assert IndyTransactions.REVOC_REG_ENTRY.value == "114"
assert IndyTransactions.GET_REVOC_REG_DEF.value == "115"
assert IndyTransactions.GET_REVOC_REG.value == "116"
assert IndyTransactions.GET_REVOC_REG_DELTA.value == "117"
assert IndyTransactions.POOL_RESTART.value == "118"
assert IndyTransactions.VALIDATOR_INFO.value == "119"
def test_get_name_from_code():
assert IndyTransactions.get_name_from_code(IndyTransactions.NODE.value) == "NODE"
assert IndyTransactions.get_name_from_code(IndyTransactions.NYM.value) == "NYM"
assert IndyTransactions.get_name_from_code(IndyTransactions.ATTRIB.value) == "ATTRIB"
assert IndyTransactions.get_name_from_code(IndyTransactions.SCHEMA.value) == "SCHEMA"
assert IndyTransactions.get_name_from_code(IndyTransactions.CLAIM_DEF.value) == "CLAIM_DEF"
assert IndyTransactions.get_name_from_code(IndyTransactions.DISCLO.value) == "DISCLO"
assert IndyTransactions.get_name_from_code(IndyTransactions.GET_ATTR.value) == "GET_ATTR"
assert IndyTransactions.get_name_from_code(IndyTransactions.GET_NYM.value) == "GET_NYM"
assert IndyTransactions.get_name_from_code(IndyTransactions.GET_TXNS.value) == "GET_TXNS"
assert IndyTransactions.get_name_from_code(IndyTransactions.GET_SCHEMA.value) == "GET_SCHEMA"
assert IndyTransactions.get_name_from_code(IndyTransactions.GET_CLAIM_DEF.value) == "GET_CLAIM_DEF"
assert IndyTransactions.get_name_from_code(IndyTransactions.POOL_UPGRADE.value) == "POOL_UPGRADE"
assert IndyTransactions.get_name_from_code(IndyTransactions.NODE_UPGRADE.value) == "NODE_UPGRADE"
assert IndyTransactions.get_name_from_code(IndyTransactions.POOL_CONFIG.value) == "POOL_CONFIG"
assert IndyTransactions.get_name_from_code(IndyTransactions.POOL_RESTART.value) == "POOL_RESTART"
assert IndyTransactions.get_name_from_code(IndyTransactions.CHANGE_KEY.value) == "CHANGE_KEY"
assert IndyTransactions.get_name_from_code(IndyTransactions.REVOC_REG_DEF.value) == "REVOC_REG_DEF"
assert IndyTransactions.get_name_from_code(IndyTransactions.REVOC_REG_ENTRY.value) == "REVOC_REG_ENTRY"
assert IndyTransactions.get_name_from_code(IndyTransactions.GET_REVOC_REG_DEF.value) == "GET_REVOC_REG_DEF"
assert IndyTransactions.get_name_from_code(IndyTransactions.GET_REVOC_REG.value) == "GET_REVOC_REG"
assert IndyTransactions.get_name_from_code(IndyTransactions.GET_REVOC_REG_DELTA.value) == "GET_REVOC_REG_DELTA"
assert IndyTransactions.get_name_from_code(IndyTransactions.VALIDATOR_INFO.value) == "VALIDATOR_INFO"
assert IndyTransactions.get_name_from_code("some_unexpected_code") == "Unknown_transaction_type"
| from indy_common.constants import NYM, NODE, ATTRIB, SCHEMA, CLAIM_DEF, DISCLO, GET_ATTR, GET_NYM, GET_TXNS, \
GET_SCHEMA, GET_CLAIM_DEF, POOL_UPGRADE, NODE_UPGRADE, POOL_CONFIG, REVOC_REG_DEF, REVOC_REG_ENTRY, \
GET_REVOC_REG_DEF, GET_REVOC_REG, GET_REVOC_REG_DELTA, POOL_RESTART, VALIDATOR_INFO, CHANGE_KEY, AUTH_RULE
from indy_common.transactions import IndyTransactions
def testTransactionsAreEncoded():
assert NODE == "0"
assert NYM == "1"
assert GET_TXNS == "3"
assert ATTRIB == "100"
assert SCHEMA == "101"
assert CLAIM_DEF == "102"
assert DISCLO == "103"
assert GET_ATTR == "104"
assert GET_NYM == "105"
assert GET_SCHEMA == "107"
assert GET_CLAIM_DEF == "108"
assert POOL_UPGRADE == "109"
assert NODE_UPGRADE == "110"
assert POOL_CONFIG == "111"
assert CHANGE_KEY == "112"
assert REVOC_REG_DEF == "113"
assert REVOC_REG_ENTRY == "114"
assert GET_REVOC_REG_DEF == "115"
assert GET_REVOC_REG == "116"
assert GET_REVOC_REG_DELTA == "117"
assert POOL_RESTART == "118"
assert VALIDATOR_INFO == "119"
assert AUTH_RULE == "120"
def testTransactionEnumDecoded():
assert IndyTransactions.NODE.name == "NODE"
assert IndyTransactions.NYM.name == "NYM"
assert IndyTransactions.ATTRIB.name == "ATTRIB"
assert IndyTransactions.SCHEMA.name == "SCHEMA"
assert IndyTransactions.CLAIM_DEF.name == "CLAIM_DEF"
assert IndyTransactions.DISCLO.name == "DISCLO"
assert IndyTransactions.GET_ATTR.name == "GET_ATTR"
assert IndyTransactions.GET_NYM.name == "GET_NYM"
assert IndyTransactions.GET_TXNS.name == "GET_TXNS"
assert IndyTransactions.GET_SCHEMA.name == "GET_SCHEMA"
assert IndyTransactions.GET_CLAIM_DEF.name == "GET_CLAIM_DEF"
assert IndyTransactions.POOL_UPGRADE.name == "POOL_UPGRADE"
assert IndyTransactions.NODE_UPGRADE.name == "NODE_UPGRADE"
assert IndyTransactions.POOL_CONFIG.name == "POOL_CONFIG"
assert IndyTransactions.POOL_RESTART.name == "POOL_RESTART"
assert IndyTransactions.CHANGE_KEY.name == "CHANGE_KEY"
assert IndyTransactions.REVOC_REG_DEF.name == "REVOC_REG_DEF"
assert IndyTransactions.REVOC_REG_ENTRY.name == "REVOC_REG_ENTRY"
assert IndyTransactions.GET_REVOC_REG_DEF.name == "GET_REVOC_REG_DEF"
assert IndyTransactions.GET_REVOC_REG.name == "GET_REVOC_REG"
assert IndyTransactions.GET_REVOC_REG_DELTA.name == "GET_REVOC_REG_DELTA"
assert IndyTransactions.VALIDATOR_INFO.name == "VALIDATOR_INFO"
def testTransactionEnumEncoded():
assert IndyTransactions.NODE.value == "0"
assert IndyTransactions.NYM.value == "1"
assert IndyTransactions.GET_TXNS.value == "3"
assert IndyTransactions.ATTRIB.value == "100"
assert IndyTransactions.SCHEMA.value == "101"
assert IndyTransactions.CLAIM_DEF.value == "102"
assert IndyTransactions.DISCLO.value == "103"
assert IndyTransactions.GET_ATTR.value == "104"
assert IndyTransactions.GET_NYM.value == "105"
assert IndyTransactions.GET_SCHEMA.value == "107"
assert IndyTransactions.GET_CLAIM_DEF.value == "108"
assert IndyTransactions.POOL_UPGRADE.value == "109"
assert IndyTransactions.NODE_UPGRADE.value == "110"
assert IndyTransactions.POOL_CONFIG.value == "111"
assert IndyTransactions.CHANGE_KEY.value == "112"
assert IndyTransactions.REVOC_REG_DEF.value == "113"
assert IndyTransactions.REVOC_REG_ENTRY.value == "114"
assert IndyTransactions.GET_REVOC_REG_DEF.value == "115"
assert IndyTransactions.GET_REVOC_REG.value == "116"
assert IndyTransactions.GET_REVOC_REG_DELTA.value == "117"
assert IndyTransactions.POOL_RESTART.value == "118"
assert IndyTransactions.VALIDATOR_INFO.value == "119"
def test_get_name_from_code():
assert IndyTransactions.get_name_from_code(IndyTransactions.NODE.value) == "NODE"
assert IndyTransactions.get_name_from_code(IndyTransactions.NYM.value) == "NYM"
assert IndyTransactions.get_name_from_code(IndyTransactions.ATTRIB.value) == "ATTRIB"
assert IndyTransactions.get_name_from_code(IndyTransactions.SCHEMA.value) == "SCHEMA"
assert IndyTransactions.get_name_from_code(IndyTransactions.CLAIM_DEF.value) == "CLAIM_DEF"
assert IndyTransactions.get_name_from_code(IndyTransactions.DISCLO.value) == "DISCLO"
assert IndyTransactions.get_name_from_code(IndyTransactions.GET_ATTR.value) == "GET_ATTR"
assert IndyTransactions.get_name_from_code(IndyTransactions.GET_NYM.value) == "GET_NYM"
assert IndyTransactions.get_name_from_code(IndyTransactions.GET_TXNS.value) == "GET_TXNS"
assert IndyTransactions.get_name_from_code(IndyTransactions.GET_SCHEMA.value) == "GET_SCHEMA"
assert IndyTransactions.get_name_from_code(IndyTransactions.GET_CLAIM_DEF.value) == "GET_CLAIM_DEF"
assert IndyTransactions.get_name_from_code(IndyTransactions.POOL_UPGRADE.value) == "POOL_UPGRADE"
assert IndyTransactions.get_name_from_code(IndyTransactions.NODE_UPGRADE.value) == "NODE_UPGRADE"
assert IndyTransactions.get_name_from_code(IndyTransactions.POOL_CONFIG.value) == "POOL_CONFIG"
assert IndyTransactions.get_name_from_code(IndyTransactions.POOL_RESTART.value) == "POOL_RESTART"
assert IndyTransactions.get_name_from_code(IndyTransactions.CHANGE_KEY.value) == "CHANGE_KEY"
assert IndyTransactions.get_name_from_code(IndyTransactions.REVOC_REG_DEF.value) == "REVOC_REG_DEF"
assert IndyTransactions.get_name_from_code(IndyTransactions.REVOC_REG_ENTRY.value) == "REVOC_REG_ENTRY"
assert IndyTransactions.get_name_from_code(IndyTransactions.GET_REVOC_REG_DEF.value) == "GET_REVOC_REG_DEF"
assert IndyTransactions.get_name_from_code(IndyTransactions.GET_REVOC_REG.value) == "GET_REVOC_REG"
assert IndyTransactions.get_name_from_code(IndyTransactions.GET_REVOC_REG_DELTA.value) == "GET_REVOC_REG_DELTA"
assert IndyTransactions.get_name_from_code(IndyTransactions.VALIDATOR_INFO.value) == "VALIDATOR_INFO"
assert IndyTransactions.get_name_from_code("some_unexpected_code") == "Unknown_transaction_type"
| none | 1 | 1.46459 | 1 |
|
GANsynth_pytorch/loader.py | tbazin/GANsynth-pytorch | 0 | 6631148 | from typing import Tuple, Callable, Any, Iterable
import functools
import math
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from .spectrograms_helper import SpectrogramsHelper
DatasetElement = Tuple[torch.Tensor, Iterable[Any]]
class WavToSpectrogramDataLoader(torch.utils.data.DataLoader):
def __init__(self, dataset: Dataset,
spectrograms_helper: SpectrogramsHelper,
transform=transforms.Lambda(lambda x: x),
**kwargs,
):
super().__init__(dataset, **kwargs)
self.spectrograms_helper = spectrograms_helper
self.transform = transform
self._transforms = transforms.Compose(
[self._collated_to_spectrogram_transform,
self._make_collated_transform(self.transform)]
)
@staticmethod
def _make_collated_transform(transform: Callable[[torch.Tensor],
torch.Tensor]
):
"""Extend data transform to operate on elements of a Dataset with targets
"""
def collated_transform(x_and_targets: DatasetElement):
x = x_and_targets[0]
x = transform(x)
targets = x_and_targets[1:]
return [x] + targets
return collated_transform
@property
def _collated_to_spectrogram_transform(self) -> transforms.Compose:
"""Efficient audio-to-spectrogram conversion using CUDA if available"""
def to_spectrogram_ondevice(audio: torch.Tensor):
return self.spectrograms_helper.to_spectrogram(audio.to(
self.spectrograms_helper.device))
return self._make_collated_transform(to_spectrogram_ondevice)
def __iter__(self):
wavforms_iterator = super().__iter__()
return map(self._transforms, wavforms_iterator)
def mask_phase(spectrogram: torch.Tensor, min_magnitude: float):
"""Set IF to 0 where
2020/01/17(theis): threshold set at -13~~log(2e-6) since the
spectrograms returned by the NSynth dataset have minimum amplitude:
spec_helpers.SPEC_THRESHOLD = log(1e-6)
"""
if spectrogram.ndim == 3:
channel_dim = 0
elif spectrogram.ndim == 4:
channel_dim = 1
else:
raise ValueError(
f"Incorrect shape {spectrogram.shape} for parameter spec_and_IF")
logmag = spectrogram.select(channel_dim, 0)
IF = spectrogram.select(channel_dim, 1)
log_threshold = math.log(2 * min_magnitude)
mask = logmag < log_threshold
logmag.masked_fill_(mask, log_threshold)
IF.masked_fill_(mask, 0)
return spectrogram
def make_masked_phase_transform(min_magnitude: float):
"""Return a Torchvision-style transform for low-magnitude phase-masking"""
return transforms.Lambda(functools.partial(
mask_phase, min_magnitude=min_magnitude))
class MaskedPhaseWavToSpectrogramDataLoader(WavToSpectrogramDataLoader):
def __init__(self, dataset: Dataset,
spectrograms_helper: SpectrogramsHelper,
**kwargs,
):
threshold_phase_transform = make_masked_phase_transform(
spectrograms_helper.safelog_eps)
super().__init__(dataset, spectrograms_helper,
transform=threshold_phase_transform,
**kwargs)
| from typing import Tuple, Callable, Any, Iterable
import functools
import math
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from .spectrograms_helper import SpectrogramsHelper
DatasetElement = Tuple[torch.Tensor, Iterable[Any]]
class WavToSpectrogramDataLoader(torch.utils.data.DataLoader):
def __init__(self, dataset: Dataset,
spectrograms_helper: SpectrogramsHelper,
transform=transforms.Lambda(lambda x: x),
**kwargs,
):
super().__init__(dataset, **kwargs)
self.spectrograms_helper = spectrograms_helper
self.transform = transform
self._transforms = transforms.Compose(
[self._collated_to_spectrogram_transform,
self._make_collated_transform(self.transform)]
)
@staticmethod
def _make_collated_transform(transform: Callable[[torch.Tensor],
torch.Tensor]
):
"""Extend data transform to operate on elements of a Dataset with targets
"""
def collated_transform(x_and_targets: DatasetElement):
x = x_and_targets[0]
x = transform(x)
targets = x_and_targets[1:]
return [x] + targets
return collated_transform
@property
def _collated_to_spectrogram_transform(self) -> transforms.Compose:
"""Efficient audio-to-spectrogram conversion using CUDA if available"""
def to_spectrogram_ondevice(audio: torch.Tensor):
return self.spectrograms_helper.to_spectrogram(audio.to(
self.spectrograms_helper.device))
return self._make_collated_transform(to_spectrogram_ondevice)
def __iter__(self):
wavforms_iterator = super().__iter__()
return map(self._transforms, wavforms_iterator)
def mask_phase(spectrogram: torch.Tensor, min_magnitude: float):
"""Set IF to 0 where
2020/01/17(theis): threshold set at -13~~log(2e-6) since the
spectrograms returned by the NSynth dataset have minimum amplitude:
spec_helpers.SPEC_THRESHOLD = log(1e-6)
"""
if spectrogram.ndim == 3:
channel_dim = 0
elif spectrogram.ndim == 4:
channel_dim = 1
else:
raise ValueError(
f"Incorrect shape {spectrogram.shape} for parameter spec_and_IF")
logmag = spectrogram.select(channel_dim, 0)
IF = spectrogram.select(channel_dim, 1)
log_threshold = math.log(2 * min_magnitude)
mask = logmag < log_threshold
logmag.masked_fill_(mask, log_threshold)
IF.masked_fill_(mask, 0)
return spectrogram
def make_masked_phase_transform(min_magnitude: float):
"""Return a Torchvision-style transform for low-magnitude phase-masking"""
return transforms.Lambda(functools.partial(
mask_phase, min_magnitude=min_magnitude))
class MaskedPhaseWavToSpectrogramDataLoader(WavToSpectrogramDataLoader):
def __init__(self, dataset: Dataset,
spectrograms_helper: SpectrogramsHelper,
**kwargs,
):
threshold_phase_transform = make_masked_phase_transform(
spectrograms_helper.safelog_eps)
super().__init__(dataset, spectrograms_helper,
transform=threshold_phase_transform,
**kwargs)
| en | 0.768669 | Extend data transform to operate on elements of a Dataset with targets Efficient audio-to-spectrogram conversion using CUDA if available Set IF to 0 where 2020/01/17(theis): threshold set at -13~~log(2e-6) since the spectrograms returned by the NSynth dataset have minimum amplitude: spec_helpers.SPEC_THRESHOLD = log(1e-6) Return a Torchvision-style transform for low-magnitude phase-masking | 2.565046 | 3 |
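A hedged sketch of wrapping an audio Dataset with the loader above. SpectrogramsHelper's constructor arguments and the exact spectrogram shape are assumptions, since spectrograms_helper.py is not shown here:
import torch
from torch.utils.data import TensorDataset
from GANsynth_pytorch.spectrograms_helper import SpectrogramsHelper
from GANsynth_pytorch.loader import MaskedPhaseWavToSpectrogramDataLoader
# Dummy mono audio: 8 clips of 16000 samples, each with an integer label
dataset = TensorDataset(torch.randn(8, 16000), torch.arange(8))
spec_helper = SpectrogramsHelper()  # assumed default constructor
loader = MaskedPhaseWavToSpectrogramDataLoader(
    dataset, spec_helper, batch_size=4, shuffle=True)
for spectrograms, labels in loader:
    # Channel 0 holds log-magnitude and channel 1 the (phase-masked) IF,
    # matching the select(channel_dim, 0/1) calls in mask_phase above.
    print(spectrograms.shape, labels.shape)
    break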
Session_07/car.py | CristiPV/kea-pyton | 0 | 6631149 | class Car:
def __init__( self, make, model, bhp, mph ):
self.make = make
self.model = model
self.bhp = bhp
self.mph = mph
@property
def make( self ):
return self.__make
@property
def model( self ):
return self.__model
@property
def bhp( self ):
        return self.__bhp
    @property
    def mph( self ):
        return self.__mph
@make.setter
def make( self, make ):
self.__make = make
@model.setter
def model( self, model ):
self.__model = model
@bhp.setter
def bhp( self, bhp ):
if bhp < 0:
            self.__bhp = 0
elif bhp > 1479:
self.__bhp = 1479
else:
self.__bhp = bhp
@mph.setter
def mph( self, mph ):
if mph < 0:
self.__mph = 0
elif mph > 400:
self.__mph = 400
else:
self.__mph = mph
def __str__( self ):
return f'{self.__dict__}'
def __repr__( self ):
return f'{self.__dict__}'
c1 = Car( "<NAME>", "4C", 1000, 500 )
c2 = Car( "Audi", "A4", 900, -11 )
c3 = Car( "Audi", "TT", -199, 344 )
c4 = Car( "BMW", "2 Series", 1500, 255 )
print( c1 )
print( c2 )
print( c3 )
print( c4 ) | class Car:
def __init__( self, make, model, bhp, mph ):
self.make = make
self.model = model
self.bhp = bhp
self.mph = mph
@property
def make( self ):
return self.__make
@property
def model( self ):
return self.__model
@property
def bhp( self ):
        return self.__bhp
    @property
    def mph( self ):
        return self.__mph
@make.setter
def make( self, make ):
self.__make = make
@model.setter
def model( self, model ):
self.__model = model
@bhp.setter
def bhp( self, bhp ):
if bhp < 0:
            self.__bhp = 0
elif bhp > 1479:
self.__bhp = 1479
else:
self.__bhp = bhp
@mph.setter
def mph( self, mph ):
if mph < 0:
self.__mph = 0
elif mph > 400:
self.__mph = 400
else:
self.__mph = mph
def __str__( self ):
return f'{self.__dict__}'
def __repr__( self ):
return f'{self.__dict__}'
c1 = Car( "<NAME>", "4C", 1000, 500 )
c2 = Car( "Audi", "A4", 900, -11 )
c3 = Car( "Audi", "TT", -199, 344 )
c4 = Car( "BMW", "2 Series", 1500, 255 )
print( c1 )
print( c2 )
print( c3 )
print( c4 ) | none | 1 | 3.970747 | 4 |
|
user_settings.sample.py | vsod99/proton-ge-custom | 0 | 6631150 | <reponame>vsod99/proton-ge-custom
#To enable these settings, name this file "user_settings.py".
#Settings here will take effect for all games run in this Proton version.
user_settings = {
#Logs are saved to $HOME/steam-<STEAM_GAME_ID>.log, overwriting any previous log with that name.
#Wine debug logging
"WINEDEBUG": "+timestamp,+pid,+tid,+seh,+debugstr,+loaddll,+mscoree",
#DXVK debug logging
"DXVK_LOG_LEVEL": "info",
#wine-mono debug logging (Wine's .NET replacement)
"WINE_MONO_TRACE": "E:System.NotImplementedException",
#"MONO_LOG_LEVEL": "info",
#Enable DXVK's HUD
# "DXVK_HUD": "devinfo,fps",
#Use OpenGL-based wined3d for d3d11 and d3d10 instead of Vulkan-based DXVK
# "PROTON_USE_WINED3D": "1",
#Disable d3d11 entirely
# "PROTON_NO_D3D11": "1",
#Disable in-process synchronization primitives
# "PROTON_NO_ESYNC": "1",
}
import protonfixes | #To enable these settings, name this file "user_settings.py".
#Settings here will take effect for all games run in this Proton version.
user_settings = {
#Logs are saved to $HOME/steam-<STEAM_GAME_ID>.log, overwriting any previous log with that name.
#Wine debug logging
"WINEDEBUG": "+timestamp,+pid,+tid,+seh,+debugstr,+loaddll,+mscoree",
#DXVK debug logging
"DXVK_LOG_LEVEL": "info",
#wine-mono debug logging (Wine's .NET replacement)
"WINE_MONO_TRACE": "E:System.NotImplementedException",
#"MONO_LOG_LEVEL": "info",
#Enable DXVK's HUD
# "DXVK_HUD": "devinfo,fps",
#Use OpenGL-based wined3d for d3d11 and d3d10 instead of Vulkan-based DXVK
# "PROTON_USE_WINED3D": "1",
#Disable d3d11 entirely
# "PROTON_NO_D3D11": "1",
#Disable in-process synchronization primitives
# "PROTON_NO_ESYNC": "1",
}
import protonfixes | en | 0.746508 | #To enable these settings, name this file "user_settings.py". #Settings here will take effect for all games run in this Proton version. #Logs are saved to $HOME/steam-<STEAM_GAME_ID>.log, overwriting any previous log with that name. #Wine debug logging #DXVK debug logging #wine-mono debug logging (Wine's .NET replacement) #"MONO_LOG_LEVEL": "info", #Enable DXVK's HUD # "DXVK_HUD": "devinfo,fps", #Use OpenGL-based wined3d for d3d11 and d3d10 instead of Vulkan-based DXVK # "PROTON_USE_WINED3D": "1", #Disable d3d11 entirely # "PROTON_NO_D3D11": "1", #Disable in-process synchronization primitives # "PROTON_NO_ESYNC": "1", | 1.524801 | 2 |