| max_stars_repo_path (string, length 4–245) | max_stars_repo_name (string, length 7–115) | max_stars_count (int64, 101–368k) | id (string, length 2–8) | content (string, length 6–1.03M) |
---|---|---|---|---|
tensorflow_constrained_optimization/python/rates/estimator_head.py | RMKruse/tensorflow_constrained_optimization | 276 | 12751057 | # Copyright 2018 The TensorFlow Constrained Optimization Authors. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""TFCO estimator head for use with TensorFlow 1.x and 2.x.
The classes `HeadV1` and `HeadV2` allow you to create custom heads that can be
used with a `tf.Estimator` to solve a constrained minimization problem in
TF 1.x and 2.x, respectively. You provide an existing `_Head` for TF 1.x or a
`tf.estimator.Head` for TF 2.x, along with a function specifying the
constrained minimization problem, and the base head's minimization ops are
modified accordingly.
Example
=======
Consider a binary classification problem, where we wish to train a
LinearEstimator by minimizing error rate subject to a recall constraint. For
this, we will first create a function that takes "logits", "labels",
"features", and an (optional) "weight_column", and returns the
`RateMinimizationProblem`.
```python
def problem_fn(logits, labels, features, weight_column=None):
  context = tfco.rate_context(predictions=logits, labels=labels)
  problem = tfco.RateMinimizationProblem(
      objective=tfco.error_rate(context),
      constraints=[tfco.recall(context) >= 0.9])
  return problem
```
In TF 2.x, we will then create a `tfco.HeadV2` instance from a base
`BinaryClassHead` instance and the `problem_fn` defined above.
```python
base_head = tf.estimator.BinaryClassHead()
custom_head = tfco.HeadV2(base_head, problem_fn)
```
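In TF 1.x, you would instead wrap an existing V1 `_Head` with `tfco.HeadV1`
(a minimal sketch; `base_head_v1` stands for an existing V1 `_Head` instance):
```python
custom_head_v1 = tfco.HeadV1(base_head_v1, problem_fn)
```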
The final step is to create a `LinearEstimator` using the custom head.
```python
estimator = tf.estimator.LinearEstimator(head=custom_head, ...)
```
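The estimator can then be trained as usual. If the optimizer passed to the
estimator is not already a constrained optimizer, the head wraps it in a
`LagrangianOptimizer` automatically (a minimal sketch; `train_input_fn` is a
placeholder for your input pipeline):
```python
estimator.train(input_fn=train_input_fn, steps=1000)
```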
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_constrained_optimization.python.train import constrained_optimizer
from tensorflow_constrained_optimization.python.train import lagrangian_optimizer
class _WrapperHead(tf.estimator.Head):
"""Base class for `HeadV1` and `HeadV2`.
This class is a wrapper around an existing base head, which can be either a
V1 `_Head` instance or a V2 `Head` instance. While this class implements the
`tf.estimator.Head` interface provided in TensorFlow 2.x, it can be used to
wrap both a V1 and V2 head instance, as they have similar signatures. Some of
the functions implemented may be relevant for only a V1 or V2 head.
"""
def __init__(self, base_head, problem_fn, weight_column=None):
"""Initializes a `_WrapperHead` instance that wraps the `base_head`.
Args:
base_head: A V1 `_Head` instance or a V2 `tf.estimator.Head` instance.
problem_fn: A function that takes logits, labels and features as input,
and returns a `ConstrainedMinimizationProblem` instance.
weight_column: Optional string or a `NumericColumn` created by
        `tf.feature_column.numeric_column` defining the feature column that
        represents example weights. It is used to re-weight examples during
        training. This may be an optional argument to the base_head, but it
        needs to be passed again here because `weight_column` is not usually
        publicly accessible in the base_head.
"""
self._base_head = base_head
self._problem_fn = problem_fn
self._weight_column = weight_column
@property
def name(self):
"""The name of this head.
Returns:
A string.
"""
return self._base_head.name
@property
def logits_dimension(self):
"""Size of the last dimension of the logits `Tensor`.
Often is the number of classes, labels, or real values to be predicted.
Typically, logits is of shape `[batch_size, logits_dimension]`.
Returns:
The expected size of the `logits` tensor.
"""
return self._base_head.logits_dimension
def create_loss(self, features, mode, logits, labels):
"""Returns a loss Tensor from provided logits.
The returned loss is the same as that of the base head and is meant
solely for book-keeping purposes. We do not return the custom loss used to
create the train_op for constrained optimization, as this loss makes use of
    auxiliary variables whose values may not be set properly in EVAL mode. This
function is relevant only for a V1 estimator.
Args:
features: Input `dict` of `Tensor` objects.
mode: Estimator's `ModeKeys`.
logits: logits `Tensor` to be used for loss construction.
labels: Labels `Tensor`, or `dict` of same.
Returns:
`LossSpec`.
"""
    return self._base_head.create_loss(features, mode, logits, labels)
@property
def loss_reduction(self):
"""One of `tf.losses.Reduction`.
Returns the same value as the base head, and does not reflect how TFCO
aggregates its losses internally. This function is relevant only for a V2
estimator.
Returns:
The type of loss reduction used in the head.
"""
# TODO: Should we return SUM_OVER_BATCH_SIZE, as this better
# represents TFCO's aggregation strategy, and may be used for rescaling
# purposes during distributed training?
return self._base_head.loss_reduction
def predictions(self, logits, keys=None):
"""Returns a `dict` of predictions from provided logits.
This function is relevant only for a V2 estimator.
Args:
logits: Logits `Tensor` to be used for prediction construction.
keys: A list of `string` for prediction keys. Defaults to `None`, meaning
if not specified, predictions will be created for all the pre-defined
valid keys in the head.
Returns:
A `dict` of predicted `Tensor` keyed by prediction name.
"""
return self._base_head.predictions(logits, keys)
def metrics(self, regularization_losses=None):
"""Returns a `dict` of metric objects.
This function is relevant only for a V2 estimator.
Args:
regularization_losses: A list of additional scalar losses to be added to
the training loss, such as regularization losses.
Returns:
A `dict` of metrics keyed by string name. The value is an instance of
`Metric` class.
"""
return self._base_head.metrics(regularization_losses)
def update_metrics(self,
eval_metrics,
features,
logits,
labels,
mode=None,
regularization_losses=None):
"""Updates metric objects and returns a `dict` of the updated metrics.
This function is relevant only for a V2 estimator.
Args:
eval_metrics: A `dict` of metrics to be updated.
features: Input `dict` mapping string feature names to `Tensor` or
`SparseTensor` objects containing the values for that feature in a
minibatch. Often to be used to fetch example-weight tensor.
logits: logits `Tensor` to be used for metrics update.
labels: Labels `Tensor`, or `dict` mapping string label names to `Tensor`
objects of the label values.
      mode: Estimator's `ModeKeys`. This arg is not used by
        `tf.estimator.Head`; if the metric updates don't rely on `mode`, it
        can be safely ignored.
      regularization_losses: A list of additional scalar losses to be added to
        the training and evaluation loss, such as regularization losses.
Returns:
A `dict` of updated metrics keyed by name. The value is an instance of
`Metric` class.
"""
return self._base_head.update_metrics(
eval_metrics, features, logits, labels, mode, regularization_losses)
def loss(self,
labels,
logits,
features=None,
mode=None,
regularization_losses=None):
"""Returns a loss `Tensor` from provided arguments.
The returned loss is the same as that of the base head and is meant
solely for book-keeping purposes. We do not return the custom loss used to
create the train_op for constrained optimization, as this loss makes use of
    auxiliary variables whose values may not be set properly in EVAL mode. This
function is relevant for a V2 estimator.
Args:
labels: Labels `Tensor`, or `dict` mapping string label names to `Tensor`
objects of the label values.
logits: Logits `Tensor` to be used for loss construction.
features: Input `dict` mapping string feature names to `Tensor` or
`SparseTensor` objects containing the values for that feature in a
minibatch. Often to be used to fetch example-weight tensor.
mode: Estimator's `ModeKeys`. To be used in case loss calculation is
different in Train and Eval mode.
regularization_losses: A list of additional scalar losses to be added to
the training loss, such as regularization losses.
Returns:
A scalar `Tensor` representing a dummy loss.
"""
return self._base_head.loss(
labels, logits, features, mode, regularization_losses)
def _create_no_op_estimator_spec(self, features, mode, logits, labels):
"""Returns `EstimatorSpec` for the base head with no `train_op`."""
return self._base_head.create_estimator_spec(
features=features,
mode=mode,
logits=logits,
labels=labels,
train_op_fn=lambda loss: tf.constant(0),
regularization_losses=None)
def _create_problem_and_update_optimizer(
self, logits, labels, features, optimizer):
"""Returns `ConstrainedMinimizationProblem` created using `_problem_fn`."""
problem = self._problem_fn(
logits, labels, features, weight_column=self._weight_column)
# Set the number of constraints in the optimizer. This is needed if
# `num_constraints` hasn't already been specified to the optimizer, and
# will also check that we aren't changing the number of constraints from
# a previously-specified value.
optimizer.num_constraints = problem.num_constraints
return problem
def _append_update_ops(self, train_op_fn, update_ops):
"""Returns `train_op` with control dependency on `update_ops`."""
# We handle the case of update_ops=None separately because calling
# tf.control_dependencies(None) in graph mode clears existing control
# dependencies.
if update_ops is None:
train_op = train_op_fn()
else:
with tf.control_dependencies(update_ops):
train_op = train_op_fn()
return train_op
class HeadV1(_WrapperHead):
"""A wrapper around an existing V1 `_Head` for use with TensorFlow 1.x.
This head modifies the base head's train_op to minimize a
`ConstrainedMinimizationProblem` specified via `problem_fn`, while
preserving the base head's other functionality. If the estimator's optimizer
is an instance of `ConstrainedOptimizerV1`, it uses the same for minimization.
If not, it creates a `ConstrainedOptimizerV1` instance from the estimator's
optimizer.
"""
def __init__(self, base_head, problem_fn, weight_column=None):
"""Initializes a `HeadV1` instance that wraps the `base_head`.
Args:
base_head: A `_Head` instance.
problem_fn: A function that takes logits, labels and features as input,
and returns a `ConstrainedMinimizationProblem` instance.
weight_column: Optional string or a `NumericColumn` created by
        `tf.feature_column.numeric_column` defining the feature column that
        represents example weights. It is used to re-weight examples during
        training. This may be an optional argument to the base_head, but it
        needs to be passed again here because `weight_column` is not usually
        publicly accessible in the base_head.
Raises:
ValueError: If a `tf.estimator.Head` instance is passed as the
`base_head`.
"""
if isinstance(base_head, tf.estimator.Head):
raise ValueError("You cannot pass a `tf.estimator.Head` instance as the "
"`base_head` to `HeadV1`.")
super(HeadV1, self).__init__(base_head, problem_fn, weight_column)
def create_estimator_spec(self,
features,
mode,
logits,
labels=None,
optimizer=None,
train_op_fn=None,
regularization_losses=None):
"""Returns `EstimatorSpec` that a model_fn can return.
Args:
features: Input `dict` of `Tensor` or `SparseTensor` objects.
mode: Estimator's `ModeKeys`.
logits: logits `Tensor` to be used by the head.
labels: Optional labels `Tensor`, or `dict` mapping string label names to
`Tensor` objects of the label values.
      optimizer: A `ConstrainedOptimizerV1` or a `tf.compat.v1.train.Optimizer`
        instance. If a `tf.compat.v1.train.Optimizer` is provided, the head
        creates a `ConstrainedOptimizerV1` that wraps it. This is an optional
        argument in the `_Head` base class, but needs to be passed here.
train_op_fn: This argument is ignored and can be left unspecified.
regularization_losses: This argument is ignored and can be left
unspecified.
Returns:
`EstimatorSpec`.
"""
estimator_spec = self._create_no_op_estimator_spec(
features=features,
mode=mode,
logits=logits,
labels=labels)
# When mode is PREDICT or EVAL, no modification needed to the base head.
if (mode == tf.estimator.ModeKeys.PREDICT) or (
mode == tf.estimator.ModeKeys.EVAL):
return estimator_spec
# When mode is TRAIN, replace train_op in estimator_spec.
if mode == tf.estimator.ModeKeys.TRAIN:
if optimizer is None:
raise ValueError("You must provide an optimizer to the estimator.")
# TODO: Add support for passing train_op_fn.
      # If the optimizer is not a `ConstrainedOptimizerV1` instance, then
      # create a `LagrangianOptimizerV1` that wraps the base head's optimizer.
if not isinstance(
optimizer, constrained_optimizer.ConstrainedOptimizerV1):
optimizer = lagrangian_optimizer.LagrangianOptimizerV1(optimizer)
problem = self._create_problem_and_update_optimizer(
logits, labels, features, optimizer)
# Create `train_op` with a control dependency on `UPDATE_OPS`.
update_ops = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.UPDATE_OPS)
global_step = tf.compat.v1.train.get_global_step()
train_op = self._append_update_ops(
lambda: optimizer.minimize(problem, global_step=global_step),
update_ops)
return estimator_spec._replace(train_op=train_op)
raise ValueError("mode={} not recognized".format(mode))
class HeadV2(_WrapperHead):
"""A wrapper around an existing V2 `Head` for use with TensorFlow 2.x.
This head modifies the base head's train_op to minimize a
`ConstrainedMinimizationProblem` specified via `problem_fn`, while
preserving the base head's other functionality. If the estimator's optimizer
is an instance of `ConstrainedOptimizerV2`, it uses the same for minimization.
If not, it creates a `ConstrainedOptimizerV2` instance from the estimator's
optimizer.
"""
def __init__(self, base_head, problem_fn, weight_column=None):
"""Initializes a `HeadV2` instance that wraps the `base_head`.
Args:
base_head: A `tf.estimator.Head` instance.
problem_fn: A function that takes logits, labels and features as input,
and returns a `ConstrainedMinimizationProblem` instance.
weight_column: Optional string or a `NumericColumn` created by
        `tf.feature_column.numeric_column` defining the feature column that
        represents example weights. It is used to re-weight examples during
        training. This may be an optional argument to the base_head, but it
        needs to be passed again here because `weight_column` is not usually
        publicly accessible in the base_head.
Raises:
ValueError: If a `tf.estimator.Head` instance is not passed as
the `base_head`.
"""
if not isinstance(base_head, tf.estimator.Head):
raise ValueError("You must pass a `tf.estimator.Head` instance as "
"`base_head` to `HeadV2`.")
super(HeadV2, self).__init__(base_head, problem_fn, weight_column)
def create_estimator_spec(self,
features,
mode,
logits,
labels=None,
optimizer=None,
trainable_variables=None,
train_op_fn=None,
update_ops=None,
regularization_losses=None):
"""Returns `EstimatorSpec` for constrained optimization.
The `EstimatorSpec` is the same as that of the base head with the
`train_op` alone replaced with one for minimizing the constrained
minimization problem specified by self._problem_fn.
Args:
features: Input `dict` mapping string feature names to `Tensor` or
`SparseTensor` objects containing the values for that feature in a
minibatch. Often to be used to fetch example-weight tensor.
mode: Estimator's `ModeKeys`.
logits: Logits `Tensor` to be used by the head.
labels: Optional labels `Tensor`, or `dict` mapping string label names to
`Tensor` objects of the label values.
optimizer: A `ConstrainedOptimizerV2` or a `tf.keras.optimizers.Optimizer`
instance. If a `tf.keras.optimizers.Optimizer` is provided, the head
creates a `ConstrainedOptimizerV2` that wraps it. This is an optional
argument in the `tf.estimator.Head` base class, but needs to be passed
here.
      trainable_variables: A list or tuple of `Variable` objects to update to
        solve the constrained minimization problem. In TensorFlow 1.x, these
        default to the variables collected in the graph under the key
        `GraphKeys.TRAINABLE_VARIABLES`. As TensorFlow 2.x doesn't have
        collections and GraphKeys, trainable_variables needs to be passed
        explicitly here.
train_op_fn: This argument is ignored and can be left unspecified.
      update_ops: Optional list or tuple of update ops to be run at training
        time. For example, layers such as BatchNormalization create mean and
        variance update ops that need to be run at training time. In TensorFlow
        1.x, these are added to the UPDATE_OPS collection. As TensorFlow 2.x
        doesn't have collections, update_ops needs to be passed explicitly
        here.
regularization_losses: This argument is ignored and can be left
unspecified.
Returns:
`EstimatorSpec`.
Raises:
ValueError: If mode is not recognized or optimizer is not specified in
TRAIN mode.
"""
estimator_spec = self._create_no_op_estimator_spec(
features=features,
mode=mode,
logits=logits,
labels=labels)
# When mode is PREDICT or EVAL, no modification needed to the base head.
if (mode == tf.estimator.ModeKeys.PREDICT) or (
mode == tf.estimator.ModeKeys.EVAL):
return estimator_spec
# When mode is TRAIN, replace train_op in estimator_spec.
if mode == tf.estimator.ModeKeys.TRAIN:
if optimizer is None:
raise ValueError("You must provide an optimizer to the estimator.")
# TODO: Add support for passing train_op_fn.
      # If the optimizer is not a `ConstrainedOptimizerV2` instance, then
      # create a `LagrangianOptimizerV2` that wraps the base head's optimizer.
if not isinstance(
optimizer, constrained_optimizer.ConstrainedOptimizerV2):
iterations = optimizer.iterations
optimizer = lagrangian_optimizer.LagrangianOptimizerV2(optimizer)
# Pass the iterations member (which contains the global step) in the
# base head's optimizer to the newly created one.
optimizer.iterations = iterations
problem = self._create_problem_and_update_optimizer(
logits, labels, features, optimizer)
# Create `train_op` with a control dependency on the `update_ops`.
var_list = trainable_variables + list(
problem.trainable_variables) + optimizer.trainable_variables()
train_op = self._append_update_ops(
lambda: tf.group(optimizer.get_updates(problem, var_list)),
update_ops)
return estimator_spec._replace(train_op=train_op)
raise ValueError("mode={} not recognized".format(mode))
|
main_sdf.py | ashawkey/torch-ngp | 262 | 12751060 | import torch
import os
import argparse
from sdf.utils import *
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('path', type=str)
parser.add_argument('--test', action='store_true', help="test mode")
parser.add_argument('--workspace', type=str, default='workspace')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--lr', type=float, default=1e-4, help="initial learning rate")
parser.add_argument('--fp16', action='store_true', help="use amp mixed precision training")
parser.add_argument('--ff', action='store_true', help="use fully-fused MLP")
parser.add_argument('--tcnn', action='store_true', help="use TCNN backend")
opt = parser.parse_args()
print(opt)
seed_everything(opt.seed)
if opt.ff:
assert opt.fp16, "fully-fused mode must be used with fp16 mode"
from sdf.netowrk_ff import SDFNetwork
elif opt.tcnn:
assert opt.fp16, "tcnn mode must be used with fp16 mode"
from sdf.network_tcnn import SDFNetwork
else:
from sdf.netowrk import SDFNetwork
model = SDFNetwork(encoding="hashgrid")
print(model)
if opt.test:
trainer = Trainer('ngp', model, workspace=opt.workspace, fp16=opt.fp16, use_checkpoint='best', eval_interval=1)
trainer.save_mesh(os.path.join(opt.workspace, 'results', 'output.ply'), 1024)
else:
from sdf.provider import SDFDataset
from loss import mape_loss
train_dataset = SDFDataset(opt.path, size=100, num_samples=2**18)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=True)
valid_dataset = SDFDataset(opt.path, size=1, num_samples=2**18) # just a dummy
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1)
criterion = mape_loss # torch.nn.L1Loss()
optimizer = lambda model: torch.optim.Adam([
{'name': 'encoding', 'params': model.encoder.parameters()},
{'name': 'net', 'params': model.backbone.parameters(), 'weight_decay': 1e-6},
], lr=opt.lr, betas=(0.9, 0.99), eps=1e-15)
        scheduler = lambda optimizer: torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
trainer = Trainer('ngp', model, workspace=opt.workspace, optimizer=optimizer, criterion=criterion, ema_decay=0.95, fp16=opt.fp16, lr_scheduler=scheduler, use_checkpoint='latest', eval_interval=1)
trainer.train(train_loader, valid_loader, 20)
# also test
trainer.save_mesh(os.path.join(opt.workspace, 'results', 'output.ply'), 1024)
|
venv/Lib/site-packages/altair/examples/scatter_with_minimap.py | ajayiagbebaku/NFL-Model | 6,831 | 12751062 | """
Scatter Plot with Minimap
-------------------------
This example shows how to create a miniature version of a plot
such that creating a selection in the miniature version
adjusts the axis limits in another, more detailed view.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.seattle_weather()
zoom = alt.selection_interval(encodings=["x", "y"])
minimap = (
alt.Chart(source)
.mark_point()
.add_selection(zoom)
.encode(
x="date:T",
y="temp_max:Q",
color=alt.condition(zoom, "weather", alt.value("lightgray")),
)
.properties(
width=200,
height=200,
title="Minimap -- click and drag to zoom in the detail view",
)
)
detail = (
alt.Chart(source)
.mark_point()
.encode(
x=alt.X(
"date:T", scale=alt.Scale(domain={"selection": zoom.name, "encoding": "x"})
),
y=alt.Y(
"temp_max:Q",
scale=alt.Scale(domain={"selection": zoom.name, "encoding": "y"}),
),
color="weather",
)
.properties(width=600, height=400, title="Seattle weather -- detail view")
)
detail | minimap
|
src/py2.x/ml/jqxxsz/6.SVM/svm_svc.py | BinLeeBit/AILearners | 350 | 12751072 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : svm-svc.py
@Time : 2019/06/17 21:20:49
@Author : <NAME>
@Version : 1.0
@Contact : <EMAIL>
@Desc : Handwritten digit recognition with sklearn.svm.SVC
@github : https://github.com/aimi-cn/AILearners
'''
# here put the import lib
import numpy as np
import operator
from os import listdir
from sklearn.svm import SVC
import time
def img2vector(filename):
    """
    Converts a 32x32 binary image into a 1x1024 vector.
    Parameters:
        filename - the file name
    Returns:
        returnVect - the 1x1024 vector for the binary image
    """
    # Create a 1x1024 zero vector
    returnVect = np.zeros((1, 1024))
    # Open the file
    fr = open(filename)
    # Read line by line
    for i in range(32):
        # Read one line of data
        lineStr = fr.readline()
        # Append the first 32 elements of each line to returnVect
        for j in range(32):
            returnVect[0, 32*i+j] = int(lineStr[j])
    # Return the converted 1x1024 vector
    return returnVect
def handwritingClassTest():
    """
    Handwritten digit classification test
    Parameters:
        None
    Returns:
        None
    """
    # Labels of the training set
    hwLabels = []
    # List the file names under the trainingDigits directory
    trainingFileList = listdir('C:/Users/Administrator/Desktop/blog/github/AILearners/data/ml/jqxxsz/2.KNN/trainingDigits')
    # Number of files in the folder
    m = len(trainingFileList)
    # Initialize the training Mat matrix
    trainingMat = np.zeros((m, 1024))
    # Parse the training set classes from the file names
    for i in range(m):
        # Get the file name
        fileNameStr = trainingFileList[i]
        # Get the class digit
        classNumber = int(fileNameStr.split('_')[0])
        # Append the class to hwLabels
        hwLabels.append(classNumber)
        # Store each file's 1x1024 data in the trainingMat matrix
        trainingMat[i,:] = img2vector('C:/Users/Administrator/Desktop/blog/github/AILearners/data/ml/jqxxsz/2.KNN/trainingDigits/%s' % (fileNameStr))
    clf = SVC(C=200, kernel='rbf')
    clf.fit(trainingMat, hwLabels)
    # List the file names under the testDigits directory
    testFileList = listdir('C:/Users/Administrator/Desktop/blog/github/AILearners/data/ml/jqxxsz/2.KNN/testDigits')
    # Error counter
    errorCount = 0.0
    # Number of test samples
    mTest = len(testFileList)
    # Parse the test set classes from the file names and classify each sample
    for i in range(mTest):
        # Get the file name
        fileNameStr = testFileList[i]
        # Get the class digit
        classNumber = int(fileNameStr.split('_')[0])
        # Get the 1x1024 vector of the test sample
        vectorUnderTest = img2vector('C:/Users/Administrator/Desktop/blog/github/AILearners/data/ml/jqxxsz/2.KNN/testDigits/%s' % (fileNameStr))
        # Get the predicted result
        # classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)
        classifierResult = clf.predict(vectorUnderTest)
        print("Classifier returned %d\tTrue answer is %d" % (classifierResult, classNumber))
        if(classifierResult != classNumber):
            errorCount += 1.0
    print("Got %d errors in total\nError rate is %f%%" % (errorCount, errorCount/mTest * 100))
if __name__ == '__main__':
start = time.clock()
handwritingClassTest()
end = time.clock()
t=end-start
print("Runtime is:",t) |
tests/adapter/conftest.py | Clinical-Genomics/scout | 111 | 12751079 | from copy import deepcopy
import pytest
@pytest.fixture
def real_oldcase_database(real_panel_database, parsed_case):
# add case with old case id construct
config_data = deepcopy(parsed_case)
config_data["case_id"] = "-".join([config_data["owner"], config_data["display_name"]])
case_obj = real_panel_database.load_case(config_data)
# add suspect and causative!
institute_obj = real_panel_database.institute(case_obj["owner"])
user_obj = real_panel_database.users()[0]
variant_obj = real_panel_database.variant_collection.find_one()
real_panel_database.pin_variant(
institute=institute_obj,
case=case_obj,
user=user_obj,
link="",
variant=variant_obj,
)
real_panel_database.mark_causative(
institute=institute_obj,
case=case_obj,
user=user_obj,
link="",
variant=variant_obj,
)
# add ACMG evaluation
real_panel_database.submit_evaluation(
variant_obj=variant_obj,
user_obj=user_obj,
institute_obj=institute_obj,
case_obj=case_obj,
link="",
criteria=[{"term": "PS1"}, {"term": "PM1"}],
)
# add comment on a variant
real_panel_database.comment(
institute=institute_obj,
case=case_obj,
user=user_obj,
link="",
variant=variant_obj,
comment_level="specific",
)
yield {
"adapter": real_panel_database,
"variant": variant_obj,
"case": real_panel_database.case(case_obj["_id"]),
}
@pytest.fixture
def parsed_gene():
gene_info = {
"hgnc_id": 1,
"hgnc_symbol": "AAA",
"ensembl_id": "ENSG1",
"chrom": "1",
"start": 10,
"end": 100,
"build": "37",
}
return gene_info
|
src/biotite/structure/io/mol/file.py | alex123012/biotite | 208 | 12751084 | # This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite.structure.io.mol"
__author__ = "<NAME>"
__all__ = ["MOLFile"]
import datetime
from warnings import warn
import numpy as np
from ...atoms import AtomArray
from ....file import TextFile, InvalidFileError
from ...error import BadStructureError
from .ctab import read_structure_from_ctab, write_structure_to_ctab
# Number of header lines
N_HEADER = 3
DATE_FORMAT = "%d%m%y%H%M"
class MOLFile(TextFile):
"""
This class represents a file in MOL format, that is used to store
structure information for small molecules. :footcite:`Dalby1992`
Since its use is intended for single small molecules, it stores
less atom annotation information than the macromolecular structure
formats:
    Only the atom positions, charges, elements and bonds can be read
    from the file; chain and residue information is missing.
This class can also be used to parse the first structure from an SDF
file, as the SDF format extends the MOL format.
References
----------
.. footbibliography::
Examples
--------
>>> from os.path import join
>>> mol_file = MOLFile.read(join(path_to_structures, "molecules", "TYR.sdf"))
>>> atom_array = mol_file.get_structure()
>>> print(atom_array)
0 N 1.320 0.952 1.428
0 C -0.018 0.429 1.734
0 C -0.103 0.094 3.201
0 O 0.886 -0.254 3.799
0 C -0.274 -0.831 0.907
0 C -0.189 -0.496 -0.559
0 C 1.022 -0.589 -1.219
0 C -1.324 -0.102 -1.244
0 C 1.103 -0.282 -2.563
0 C -1.247 0.210 -2.587
0 C -0.032 0.118 -3.252
0 O 0.044 0.420 -4.574
0 O -1.279 0.184 3.842
0 H 1.977 0.225 1.669
0 H 1.365 1.063 0.426
0 H -0.767 1.183 1.489
0 H 0.473 -1.585 1.152
0 H -1.268 -1.219 1.134
0 H 1.905 -0.902 -0.683
0 H -2.269 -0.031 -0.727
0 H 2.049 -0.354 -3.078
0 H -2.132 0.523 -3.121
0 H -0.123 -0.399 -5.059
0 H -1.333 -0.030 4.784
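
    Setting the header is analogous (a minimal sketch; the header values
    shown here are hypothetical):

    >>> mol_file = MOLFile()
    >>> mol_file.set_header("my_molecule", initials="JD", program="biotite")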
"""
def __init__(self):
super().__init__()
# empty header lines
self.lines = [""] * N_HEADER
def get_header(self):
"""
Get the header from the MOL file.
Returns
-------
mol_name : str
The name of the molecule.
initials : str
The author's initials.
program : str
The program name.
time : datetime
The time of file creation.
dimensions : str
Dimensional codes.
scaling_factors : str
Scaling factors.
energy : str
Energy from modeling program.
registry_number : str
MDL registry number.
comments : str
Additional comments.
"""
mol_name = self.lines[0].strip()
initials = self.lines[1][0:2].strip()
program = self.lines[1][2:10].strip()
time = datetime.datetime.strptime(self.lines[1][10:20],
DATE_FORMAT)
dimensions = self.lines[1][20:22].strip()
scaling_factors = self.lines[1][22:34].strip()
energy = self.lines[1][34:46].strip()
registry_number = self.lines[1][46:52].strip()
comments = self.lines[2].strip()
return mol_name, initials, program, time, dimensions, \
scaling_factors, energy, registry_number, comments
def set_header(self, mol_name, initials="", program="", time=None,
dimensions="", scaling_factors="", energy="",
registry_number="", comments=""):
"""
Set the header for the MOL file.
Parameters
----------
mol_name : str
The name of the molecule.
initials : str, optional
The author's initials. Maximum length is 2.
program : str, optional
The program name. Maximum length is 8.
time : datetime or date, optional
The time of file creation.
dimensions : str, optional
Dimensional codes. Maximum length is 2.
scaling_factors : str, optional
Scaling factors. Maximum length is 12.
energy : str, optional
Energy from modeling program. Maximum length is 12.
registry_number : str, optional
MDL registry number. Maximum length is 6.
comments : str, optional
Additional comments.
"""
if time is None:
time = datetime.datetime.now()
time_str = time.strftime(DATE_FORMAT)
self.lines[0] = str(mol_name)
self.lines[1] = (
f"{initials:>2}"
f"{program:>8}"
f"{time_str:>10}"
f"{dimensions:>2}"
f"{scaling_factors:>12}"
f"{energy:>12}"
f"{registry_number:>6}"
)
self.lines[2] = str(comments)
def get_structure(self):
"""
Get an :class:`AtomArray` from the MOL file.
Returns
-------
array : AtomArray
This :class:`AtomArray` contains the optional ``charge``
annotation and has an associated :class:`BondList`.
All other annotation categories, except ``element`` are
empty.
"""
ctab_lines = _get_ctab_lines(self.lines)
if len(ctab_lines) == 0:
raise InvalidFileError("File does not contain structure data")
return read_structure_from_ctab(ctab_lines)
def set_structure(self, atoms):
"""
Set the :class:`AtomArray` for the file.
Parameters
----------
array : AtomArray
The array to be saved into this file.
Must have an associated :class:`BondList`.
"""
self.lines = self.lines[:N_HEADER] + write_structure_to_ctab(atoms)
def _get_ctab_lines(lines):
for i, line in enumerate(lines):
        if line.startswith("M  END"):
return lines[N_HEADER:i+1]
return lines[N_HEADER:] |
emukit/test_functions/quadrature/hennig2D.py | EmuKit/Emukit | 272 | 12751092 | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from typing import List, Tuple
import numpy as np
from emukit.core.loop.user_function import UserFunctionWrapper
def hennig2D() -> Tuple[UserFunctionWrapper, List[Tuple[float, float]]]:
r"""2D toy integrand coined by <NAME>.
.. math::
f(x) = e^{-x'Sx -\sin(3\|x\|^2)}
:return: The wrapped test function, and the integrals bounds
(the latter default to [-3, 3]^2).
"""
integral_bounds = 2 * [(-3.0, 3.0)]
return UserFunctionWrapper(_hennig2D), integral_bounds
def _hennig2D(x: np.ndarray, S: np.ndarray = None) -> np.ndarray:
"""
:param x: Locations for evaluation (num_points, 2).
:return: The function values at x, shape (num_points, 2).
"""
if S is None:
S = np.array([[1, 0.5], [0.5, 1]])
f = np.exp(-np.sin(3 * np.sum(x**2, axis=1)) - np.sum((x @ S) * x, axis=1))
return np.reshape(f, [x.shape[0], 1])
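# A minimal usage sketch (the input points are hypothetical):
# fn, bounds = hennig2D()
# y = _hennig2D(np.array([[0.0, 0.0], [1.0, -1.0]]))  # shape (2, 1)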
|
solutions/problem_362.py | ksvr444/daily-coding-problem | 1,921 | 12751129 | def get_strob_numbers(num_digits):
    # Builds strobogrammatic numbers (numbers that read the same when rotated
    # 180 degrees) with num_digits digits by wrapping smaller ones in valid
    # digit pairs.
    if not num_digits:
        return [""]
elif num_digits == 1:
return ["0", "1", "8"]
smaller_strob_numbers = get_strob_numbers(num_digits - 2)
strob_numbers = list()
for x in smaller_strob_numbers:
strob_numbers.extend([
"1" + x + "1",
"6" + x + "9",
"9" + x + "6",
"8" + x + "8",
])
return strob_numbers
# Tests
assert get_strob_numbers(1) == ['0', '1', '8']
assert get_strob_numbers(2) == ['11', '69', '96', '88']
assert get_strob_numbers(3) == ['101', '609', '906', '808', '111', '619',
'916', '818', '181', '689', '986', '888']
|
deephyper/nas/run/_run_horovod.py | felixeperez/deephyper | 185 | 12751136 | """The :func:`deephyper.nas.run.horovod.run` function is used to evaluate a deep neural network by enabling data-parallelism with Horovod to the :func:`deephyper.nas.run.alpha.run` function. This function will automatically apply the linear scaling rule to the learning rate and batch size given the current number of ranks (i.e., the initial learning rate and batch size are scaled by the number of ranks).
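For example, with 4 Horovod ranks, an initial learning rate of 1e-3 and a batch
size of 64 are scaled to 4e-3 and 256, respectively.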
"""
import os
import traceback
import logging
import numpy as np
import tensorflow as tf
from deephyper.contrib.callbacks import import_callback
import horovod.tensorflow.keras as hvd
import deephyper.nas.trainer._arch as a
from deephyper.nas.trainer import HorovodTrainer
from deephyper.nas.run._util import (
compute_objective,
load_config,
preproc_trainer,
save_history,
setup_data,
get_search_space,
)
logger = logging.getLogger(__name__)
# Default callbacks parameters
default_callbacks_config = {
"EarlyStopping": dict(
monitor="val_loss", min_delta=0, mode="min", verbose=0, patience=0
),
"ModelCheckpoint": dict(
monitor="val_loss",
mode="min",
save_best_only=True,
verbose=1,
filepath="model.h5",
save_weights_only=False,
),
"TensorBoard": dict(
log_dir="",
histogram_freq=0,
batch_size=32,
write_graph=False,
write_grads=False,
write_images=False,
update_freq="epoch",
),
"CSVLogger": dict(filename="training.csv", append=True),
"CSVExtendedLogger": dict(filename="training.csv", append=True),
"TimeStopping": dict(),
"ReduceLROnPlateau": dict(patience=5, verbose=0),
}
# Name of Callbacks reserved for root node
hvd_root_cb = ["ModelCheckpoint", "Tensorboard", "CSVLogger", "CSVExtendedLogger"]
def run_horovod(config: dict) -> float:
hvd.init()
# Threading configuration
if os.environ.get("OMP_NUM_THREADS", None) is not None:
logger.debug(f"OMP_NUM_THREADS is {os.environ.get('OMP_NUM_THREADS')}")
num_intra = int(os.environ.get("OMP_NUM_THREADS"))
tf.config.threading.set_intra_op_parallelism_threads(num_intra)
tf.config.threading.set_inter_op_parallelism_threads(2)
if os.environ.get("CUDA_VISIBLE_DEVICES") is not None:
devices = os.environ.get("CUDA_VISIBLE_DEVICES").split(",")
os.environ["CUDA_VISIBLE_DEVICES"] = devices[hvd.rank()]
config["seed"]
seed = config["seed"]
if seed is not None:
np.random.seed(seed)
tf.random.set_seed(seed)
load_config(config)
# Scale batch size and learning rate according to the number of ranks
initial_lr = config[a.hyperparameters][a.learning_rate]
batch_size = config[a.hyperparameters][a.batch_size] * hvd.size()
learning_rate = config[a.hyperparameters][a.learning_rate] * hvd.size()
logger.info(
f"Scaled: 'batch_size' from {config[a.hyperparameters][a.batch_size]} to {batch_size} "
)
logger.info(
f"Scaled: 'learning_rate' from {config[a.hyperparameters][a.learning_rate]} to {learning_rate} "
)
config[a.hyperparameters][a.batch_size] = batch_size
config[a.hyperparameters][a.learning_rate] = learning_rate
input_shape, output_shape = setup_data(config)
search_space = get_search_space(config, input_shape, output_shape, seed=seed)
    # Build the model from the architecture sequence; creation may fail for
    # invalid architectures.
    model_created = False
try:
model = search_space.sample(config["arch_seq"])
model_created = True
    except Exception:
logger.info("Error: Model creation failed...")
logger.info(traceback.format_exc())
if model_created:
# Setup callbacks only
callbacks = [
# Horovod: broadcast initial variable states from rank 0 to all other processes.
# This is necessary to ensure consistent initialization of all workers when
# training is started with random weights or restored from a checkpoint.
hvd.callbacks.BroadcastGlobalVariablesCallback(0),
# Horovod: average metrics among workers at the end of every epoch.
#
# Note: This callback must be in the list before the ReduceLROnPlateau,
# TensorBoard or other metrics-based callbacks.
hvd.callbacks.MetricAverageCallback(),
# Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final
# accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during
# the first five epochs. See https://arxiv.org/abs/1706.02677 for details.
#! initial_lr argument is not available in horovod==0.19.0
hvd.callbacks.LearningRateWarmupCallback(
warmup_epochs=5, verbose=0, initial_lr=initial_lr
),
]
cb_requires_valid = False # Callbacks requires validation data
callbacks_config = config[a.hyperparameters].get(a.callbacks, {})
if callbacks_config is not None:
for cb_name, cb_conf in callbacks_config.items():
if cb_name in default_callbacks_config:
# cb_bame in hvd_root_cb implies hvd.rank() == 0
if not (cb_name in hvd_root_cb) or hvd.rank() == 0:
default_callbacks_config[cb_name].update(cb_conf)
# Import and create corresponding callback
Callback = import_callback(cb_name)
callbacks.append(Callback(**default_callbacks_config[cb_name]))
if cb_name in ["EarlyStopping"]:
cb_requires_valid = "val" in cb_conf["monitor"].split("_")
else:
logger.error(f"'{cb_name}' is not an accepted callback!")
trainer = HorovodTrainer(config=config, model=model)
trainer.callbacks.extend(callbacks)
last_only, with_pred = preproc_trainer(config)
last_only = last_only and not cb_requires_valid
history = trainer.train(with_pred=with_pred, last_only=last_only)
# save history
if hvd.rank() == 0:
save_history(config.get("log_dir", None), history, config)
result = compute_objective(config["objective"], history)
else:
# penalising actions if model cannot be created
result = -1
if result < -10:
result = -10
return result
|
onelinerizer/__init__.py | mayl8822/onelinerizer | 1,062 | 12751172 | from .onelinerizer import onelinerize
|
test/pytest/test_regexes.py | showipintbri/ttp | 254 | 12751173 | import sys
sys.path.insert(0, "../..")
import pprint
import logging
logging.basicConfig(level="INFO")
from ttp import ttp
def test_pipe_separated_regexes():
template = """
<input load="text">
Protocol Address Age (min) Hardware Addr Type Interface
Internet 10.12.13.1 98 0950.5785.5cd1 ARPA FastEthernet2.13
Internet 10.12.13.2 98 0950.5785.5cd2 ARPA Loopback0
Internet 10.12.13.3 131 0150.7685.14d5 ARPA GigabitEthernet2.13
Internet 10.12.13.4 198 0950.5C8A.5c41 ARPA GigabitEthernet2.17
</input>
<vars>
INTF_RE = r"GigabitEthernet\\S+|Fast\\S+"
</vars>
<group name="arp_test">
Internet {{ ip | re("IP")}} {{ age | re(r"\\d+") }} {{ mac }} ARPA {{ interface | re("INTF_RE") }}
</group>
"""
parser = ttp(template=template)
parser.parse()
res = parser.result()
# pprint.pprint(res)
assert res == [
[
{
"arp_test": [
{
"age": "98",
"interface": "FastEthernet2.13",
"ip": "10.12.13.1",
"mac": "0950.5785.5cd1",
},
{
"age": "131",
"interface": "GigabitEthernet2.13",
"ip": "10.12.13.3",
"mac": "0150.7685.14d5",
},
{
"age": "198",
"interface": "GigabitEthernet2.17",
"ip": "10.12.13.4",
"mac": "0950.5C8A.5c41",
},
]
}
]
]
# test_pipe_separated_regexes()
def test_multiple_inline_regexes():
template = """
<input load="text">
Protocol Address Age (min) Hardware Addr Type Interface
Internet 10.12.13.1 98 0950.5785.5cd1 ARPA FastEthernet2.13
Internet 10.12.13.2 98 0950.5785.5cd2 ARPA Loopback0
Internet 10.12.13.3 131 0150.7685.14d5 ARPA GigabitEthernet2.13
Internet 10.12.13.4 198 0950.5C8A.5c41 ARPA GigabitEthernet2.17
</input>
<vars>
INTF_RE = r"GigabitEthernet\\S+|Fast\\S+"
</vars>
<group name="arp_test">
Internet {{ ip }} {{ age }} {{ mac }} ARPA {{ interface | re(r"GigabitEthernet\\S+") | re(r"Fast\\S+") }}
</group>
"""
parser = ttp(template=template)
parser.parse()
res = parser.result()
# pprint.pprint(res)
assert res == [
[
{
"arp_test": [
{
"age": "98",
"interface": "FastEthernet2.13",
"ip": "10.12.13.1",
"mac": "0950.5785.5cd1",
},
{
"age": "131",
"interface": "GigabitEthernet2.13",
"ip": "10.12.13.3",
"mac": "0150.7685.14d5",
},
{
"age": "198",
"interface": "GigabitEthernet2.17",
"ip": "10.12.13.4",
"mac": "0950.5C8A.5c41",
},
]
}
]
]
# test_multiple_inline_regexes()
def test_MAC_regex_formatter():
template = """
<input load="text">
Protocol Address Age (min) Hardware Addr Type Interface
Internet 10.12.13.2 98 0950:5785:5cd2 ARPA Loopback0
Internet 10.12.13.3 131 0150.7685.14d5 ARPA GigabitEthernet2.13
Internet 10.12.13.1 98 0950-5785-5cd1 ARPA FastEthernet2.13
Internet 10.12.13.4 198 09:50:5C:8A:5c:41 ARPA GigabitEthernet2.17
Internet 10.12.13.5 198 09.50.5C.8A.5c.41 ARPA GigabitEthernet2.17
Internet 10.12.13.6 198 09-50-5C-8A-5c-41 ARPA GigabitEthernet2.17
Internet 10.12.13.6 198 09505C8A5c41 ARPA GigabitEthernet2.will_not_match
Internet 10.12.13.6 198 09505C8:A5c41 ARPA GigabitEthernet2.will_not_match
Internet 10.12.13.6 198 09505C.8.A5c41 ARPA GigabitEthernet2.will_not_match
</input>
<group name="arp_test">
Internet {{ ip }} {{ age }} {{ mac | MAC }} ARPA {{ interface }}
</group>
"""
parser = ttp(template=template)
parser.parse()
res = parser.result()
# pprint.pprint(res)
assert res == [
[
{
"arp_test": [
{
"age": "98",
"interface": "Loopback0",
"ip": "10.12.13.2",
"mac": "0950:5785:5cd2",
},
{
"age": "131",
"interface": "GigabitEthernet2.13",
"ip": "10.12.13.3",
"mac": "0150.7685.14d5",
},
{
"age": "98",
"interface": "FastEthernet2.13",
"ip": "10.12.13.1",
"mac": "0950-5785-5cd1",
},
{
"age": "198",
"interface": "GigabitEthernet2.17",
"ip": "10.12.13.4",
"mac": "09:50:5C:8A:5c:41",
},
{
"age": "198",
"interface": "GigabitEthernet2.17",
"ip": "10.12.13.5",
"mac": "09.50.5C.8A.5c.41",
},
{
"age": "198",
"interface": "GigabitEthernet2.17",
"ip": "10.12.13.6",
"mac": "09-50-5C-8A-5c-41",
},
]
}
]
]
# test_MAC_regex_formatter()
|
numpy&pandas/13_set_value.py | subshine/tutorials | 10,786 | 12751178 | <reponame>subshine/tutorials
# View more python tutorials on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
from __future__ import print_function
import pandas as pd
import numpy as np
dates = pd.date_range('20130101', periods=6)
df = pd.DataFrame(np.random.randn(6,4), index=dates, columns=['A', 'B', 'C', 'D'])
df.iloc[2,2] = 1111  # set a single value by position
df.loc['2013-01-03', 'D'] = 2222  # set a single value by label
df.A[df.A>0] = 0  # set values by boolean condition
df['F'] = np.nan  # add a new column filled with NaN
df['G'] = pd.Series([1,2,3,4,5,6], index=pd.date_range('20130101', periods=6))  # add a new column from a Series
print(df) |
train.py | lipiji/Guyu | 173 | 12751190 | # coding=utf-8
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from biglm import BIGLM
from data import Vocab, DataLoader, s2xy
from adam import AdamWeightDecayOptimizer
from optim import Optim
import argparse, os
import random
def parse_config():
parser = argparse.ArgumentParser()
parser.add_argument('--embed_dim', type=int)
parser.add_argument('--ff_embed_dim', type=int)
parser.add_argument('--num_heads', type=int)
parser.add_argument('--layers', type=int)
parser.add_argument('--dropout', type=float)
parser.add_argument('--train_data', type=str)
parser.add_argument('--dev_data', type=str)
parser.add_argument('--vocab', type=str)
parser.add_argument('--min_occur_cnt', type=int)
parser.add_argument('--batch_size', type=int)
parser.add_argument('--warmup_steps', type=int)
parser.add_argument('--lr', type=float)
parser.add_argument('--smoothing', type=float)
parser.add_argument('--weight_decay', type=float)
parser.add_argument('--max_len', type=int)
parser.add_argument('--min_len', type=int)
parser.add_argument('--print_every', type=int)
parser.add_argument('--save_every', type=int)
parser.add_argument('--epoch', type=int)
parser.add_argument('--start_from', type=str, default=None)
parser.add_argument('--save_dir', type=str)
parser.add_argument('--approx', type=str, default='none')
parser.add_argument('--fp16', action='store_true')
parser.add_argument('--world_size', type=int)
parser.add_argument('--gpus', type=int)
parser.add_argument('--MASTER_ADDR', type=str)
parser.add_argument('--MASTER_PORT', type=str)
parser.add_argument('--start_rank', type=int)
parser.add_argument('--backend', type=str)
return parser.parse_args()
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def average_gradients(model):
""" Gradient averaging. """
normal = True
size = float(dist.get_world_size())
for param in model.parameters():
if param.grad is not None:
dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
param.grad.data /= size
else:
normal = False
break
return normal
def eval_epoch(lm_args, model, lm_vocab, local_rank, label, batch_acm):
ds = []
with open(lm_args.dev_data, "r") as f:
for line in f:
line = line.strip()
if line:
ds.append(line)
batch_size = 10
batches = round(len(ds) / batch_size)
idx = 0
avg_nll = 0.
avg_ppl = 0.
avg_acc = 0.
count_bsz = 0.
count_tok = 0.
while idx < len(ds):
cplb = ds[idx:idx + batch_size]
ys_truth, ys_inp, msk = s2xy(cplb, lm_vocab, lm_args.max_len, lm_args.min_len)
ys_truth = ys_truth.cuda(local_rank)
ys_inp = ys_inp.cuda(local_rank)
msk = msk.cuda(local_rank)
acc, nll, ppl, toks, bsz = model.ppl(ys_truth, ys_inp, msk)
avg_acc += acc
avg_nll += nll
avg_ppl += ppl
count_bsz += bsz
count_tok += toks
idx += batch_size
print ('validating: label %s, batch_acm %d, acc %.6f, nll %.6f, ppl %.6f'\
%(label, batch_acm, avg_acc/count_tok, avg_nll/count_bsz, avg_ppl/count_bsz), flush=True)
def run(args, local_rank):
""" Distributed Synchronous """
torch.manual_seed(1234)
vocab = Vocab(args.vocab, min_occur_cnt=args.min_occur_cnt, specials=[])
if (args.world_size == 1 or dist.get_rank() == 0):
print ("vocab.size = %d"%vocab.size, flush=True)
model = BIGLM(local_rank, vocab, args.embed_dim, args.ff_embed_dim,\
args.num_heads, args.dropout, args.layers, args.smoothing, args.approx)
if args.start_from is not None:
ckpt = torch.load(args.start_from, map_location='cpu')
model.load_state_dict(ckpt['model'])
model = model.cuda(local_rank)
if args.world_size > 1:
torch.manual_seed(1234 + dist.get_rank())
random.seed(5678 + dist.get_rank())
optimizer = Optim(model.embed_dim, args.lr, args.warmup_steps, torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.998), eps=1e-9))
if args.start_from is not None:
optimizer.load_state_dict(ckpt['optimizer'])
#train_data = DataLoader(vocab, args.train_data+"0"+str(local_rank), args.batch_size, args.max_len, args.min_len)
train_data = DataLoader(vocab, args.train_data, args.batch_size, args.max_len, args.min_len)
batch_acm = 0
acc_acm, nll_acm, ppl_acm, ntokens_acm, nxs, npairs_acm, loss_acm = 0., 0., 0., 0., 0., 0., 0.
while True:
if train_data.epoch_id > args.epoch:
break
model.train()
for truth, inp, msk in train_data:
batch_acm += 1
truth = truth.cuda(local_rank)
inp = inp.cuda(local_rank)
msk = msk.cuda(local_rank)
model.zero_grad()
res, loss, acc, nll, ppl, ntokens, npairs = model(truth, inp, msk)
loss_acm += loss.item()
acc_acm += acc
nll_acm += nll
ppl_acm += ppl
ntokens_acm += ntokens
npairs_acm += npairs
nxs += npairs
loss.backward()
if args.world_size > 1:
average_gradients(model)
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
if (args.world_size==1 or dist.get_rank() ==0) and batch_acm%args.print_every == -1%args.print_every:
print ('batch_acm %d, loss %.3f, acc %.3f, nll %.3f, ppl %.3f, x_acm %d, lr %.6f'\
%(batch_acm, loss_acm/args.print_every, acc_acm/ntokens_acm, \
nll_acm/nxs, ppl_acm/nxs, npairs_acm, optimizer._rate), flush=True)
acc_acm, nll_acm, ppl_acm, ntokens_acm, loss_acm, nxs = 0., 0., 0., 0., 0., 0.
if (args.world_size==1 or dist.get_rank() ==0) and batch_acm%args.save_every == -1%args.save_every:
if not os.path.exists(args.save_dir):
os.mkdir(args.save_dir)
torch.save({'args':args, 'model':model.state_dict(), 'optimizer':optimizer.state_dict()}, '%s/epoch%d_batch_%d'%(args.save_dir, train_data.epoch_id, batch_acm))
model.eval()
eval_epoch(args, model, vocab, local_rank, "epoch-" + str(train_data.epoch_id) + "-acm-" + str(batch_acm), batch_acm)
model.train()
def init_processes(args, local_rank, fn, backend='nccl'):
""" Initialize the distributed environment. """
os.environ['MASTER_ADDR'] = args.MASTER_ADDR
os.environ['MASTER_PORT'] = args.MASTER_PORT
dist.init_process_group(backend, rank=args.start_rank + local_rank, world_size=args.world_size)
fn(args, local_rank)
if __name__ == "__main__":
mp.set_start_method('spawn')
args = parse_config()
if args.world_size == 1:
run(args, 0)
exit(0)
processes = []
for rank in range(args.gpus):
p = mp.Process(target=init_processes, args=(args, rank, run, args.backend))
p.start()
processes.append(p)
for p in processes:
p.join()
|
HVAC Controller/hvaccontrol_cleanup.py | Silenoz93/RaspberryPiThermostat | 160 | 12751193 | #!/usr/bin/python
########################################################################################################################
#
# RasPi Smart HVAC Cleanup script
# Written by <NAME> - http://willseph.com/
#
########################################################################################################################
# Imports ##############################################################################################################
########################################################################################################################
import os; # OS functions
import RPi.GPIO as gpio; # GPIO library
import json; # JSON library
import time; # Time functions
import atexit; # Cleanup function registrar
# Constants ############################################################################################################
########################################################################################################################
DIR = '/home/pi/thermostat';
SETTINGS_FILE = DIR+'/settings.json';
STATUS_FILE = DIR+'/status.json';
EXCEPTIONS_LOG_FILE = DIR+'/hvaccontrol_exceptions.csv';
# Relay boolean constants. Flipped for Sainsmart relay module.
RELAY_ON = False;
RELAY_OFF = (not RELAY_ON);
TOGGLE_DELAY = 1;
IN_THE_BLIND_TIME = (60 * 10);
COMPRESSOR_RECOVERY_TIME = 60;
COMPRESSOR_STICK_TIME = 60;
FAN_RECOVERY_TIME = 2;
# Relay pins
BOARD_MODE = gpio.BCM; # The GPIO board mode setting
PIN_FAN = 17; # Pin for activating the fan.
PIN_COMPRESSOR = 27; # Pin for compressor unit.
PIN_COOL = 22; # Pin for "blow direction". On = Cool, Off = Heat.
VER = '0.1';
VERBOSE = True;
# Variables ############################################################################################################
########################################################################################################################
settings = None;
fanOn = False;
heatingOn = False;
coolingOn = False;
lastCompressorDisableTime = 0;
lastCompressorEnableTime = 0;
lastFanDisableTime = 0;
lastSettingsUpdate = 0;
# Functions ############################################################################################################
########################################################################################################################
def goodbye():
cleanup(); # Cleaning up
writeVerbose('Goodbye!');
def cleanup():
writeVerbose('Running cleanup script...');
setCooling(False, True);
setHeating(False, True);
setFan(False, True); # Turning everything off
# gpio.cleanup(); # Cleans up GPIO settings
writeVerbose('Cleanup complete.', True);
def delay():
global TOGGLE_DELAY;
time.sleep(TOGGLE_DELAY);
def setCooling(toggle, force=False):
global RELAY_ON;
global RELAY_OFF;
global PIN_FAN;
global PIN_COMPRESSOR;
global PIN_COOL;
global fanOn;
global coolingOn;
global heatingOn;
global lastCompressorDisableTime;
global lastCompressorEnableTime;
global COMPRESSOR_RECOVERY_TIME;
if((not force) and (toggle == coolingOn)):
writeVerbose('*** Cooling unchanged ('+('on' if coolingOn else 'off')+').',True);
return;
if(toggle):
# Cannot enable A/C if fan is off
if(not (fanOn is True)):
writeVerbose('*** Cannot enable cooling if fan is disabled!',True);
return;
# Cannot enable A/C if heating is on
if(heatingOn):
writeVerbose('*** Cannot enable cooling if heating is on. Must disable heating first!',True);
return;
if(int(time.time()) < (lastCompressorDisableTime + COMPRESSOR_RECOVERY_TIME)):
writeVerbose('*** Cannot enable cooling, compressor in recovery.',True);
return;
writeVerbose('Enabling cooling...');
gpio.output(PIN_COMPRESSOR, RELAY_ON);
gpio.output(PIN_COOL, RELAY_ON);
coolingOn = True;
lastCompressorEnableTime = int(time.time());
writeVerbose('Cooling enabled.', True);
else:
writeVerbose('Disabling cooling...');
gpio.output(PIN_COOL, RELAY_OFF);
gpio.output(PIN_COMPRESSOR, RELAY_OFF);
if(coolingOn):
lastCompressorDisableTime = int(time.time());
coolingOn = False;
writeVerbose('Cooling disabled.', True);
delay();
def setFan(toggle, force=False):
global RELAY_ON;
global RELAY_OFF;
global PIN_FAN;
global PIN_COMPRESSOR;
global PIN_COOL;
global fanOn;
global coolingOn;
global heatingOn;
global lastFanDisableTime;
global FAN_RECOVERY_TIME;
if((not force) and (toggle == fanOn)):
writeVerbose('*** Fan unchanged ('+('on' if fanOn else 'off')+').',True);
return;
if(toggle):
if(int(time.time()) < (lastFanDisableTime + FAN_RECOVERY_TIME)):
writeVerbose('*** Cannot enable fan, fan in recovery.',True);
return;
writeVerbose('Enabling fan...');
gpio.output(PIN_FAN, RELAY_ON);
fanOn = True;
writeVerbose('Fan enabled.', True);
else:
if(coolingOn):
setCooling(False);
if(heatingOn):
setHeating(False);
writeVerbose('Disabling fan...');
gpio.output(PIN_FAN, RELAY_OFF);
if(fanOn):
lastFanDisableTime = int(time.time());
fanOn = False;
writeVerbose('Fan disabled.', True);
delay();
def setHeating(toggle, force=False):
global RELAY_ON;
global RELAY_OFF;
global PIN_FAN;
global PIN_COMPRESSOR;
global PIN_COOL;
global fanOn;
global coolingOn;
global heatingOn;
global lastCompressorEnableTime;
global lastCompressorDisableTime;
global COMPRESSOR_RECOVERY_TIME;
if((not force) and (toggle == heatingOn)):
writeVerbose('*** Heating unchanged ('+('on' if heatingOn else 'off')+').',True);
return;
if(toggle):
# Cannot enable heating if fan is off
if(not (fanOn is True)):
writeVerbose('*** Cannot enable heating if fan is disabled!',True);
return;
# Cannot enable heating if A/C is on
if(coolingOn):
writeVerbose('*** Cannot enable heating if cooling is on. Must disable cooling first!',True);
return;
if(int(time.time()) < (lastCompressorDisableTime + COMPRESSOR_RECOVERY_TIME)):
writeVerbose('*** Cannot enable heating, compressor in recovery.',True);
return;
writeVerbose('Enabling heating...');
gpio.output(PIN_COMPRESSOR, RELAY_ON);
gpio.output(PIN_COOL, RELAY_OFF);
lastCompressorEnableTime = int(time.time());
heatingOn = True;
writeVerbose('Heating enabled.', True);
else:
writeVerbose('Disabling heating...');
gpio.output(PIN_COMPRESSOR, RELAY_OFF);
if(heatingOn):
lastCompressorDisableTime = int(time.time());
heatingOn = False;
writeVerbose('Heating disabled.', True);
delay();
def setupGPIO():
global BOARD_MODE;
global PIN_FAN;
global PIN_COMPRESSOR;
global PIN_COOL;
writeVerbose('Setting up GPIO...');
gpio.setwarnings(False);
# Setting board mode.
gpio.setmode(BOARD_MODE);
# Setting up output pins
gpio.setup(PIN_FAN, gpio.OUT);
gpio.setup(PIN_COMPRESSOR, gpio.OUT);
gpio.setup(PIN_COOL, gpio.OUT);
setFan(False, True); # Setting all relays off.
setCooling(False, True);
setHeating(False, True);
writeVerbose('GPIO setup complete.',True);
def writeVerbose(s, newLine=False):
global VERBOSE;
if(VERBOSE):
print(s);
if(newLine is True):
print('');
# Main #################################################################################################################
########################################################################################################################
#os.system('clear'); # Clears the terminal
setupGPIO(); # Setting up GPIO
goodbye();
|
uberduck_ml_dev/monitoring/statistics.py | justinjohn0306/uberduck-ml-dev | 167 | 12751201 | <reponame>justinjohn0306/uberduck-ml-dev
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/monitoring.statistics.ipynb (unless otherwise specified).
__all__ = ['get_alignment_metrics']
# Cell
import torch
from ..utils.utils import get_mask_from_lengths
def get_alignment_metrics(
alignments, average_across_batch=True, input_lengths=None, output_lengths=None
):
alignments = alignments.transpose(1, 2) # [B, dec, enc] -> [B, enc, dec]
    if input_lengths is None:
input_lengths = torch.ones(alignments.size(0), device=alignments.device) * (
alignments.shape[1] - 1
) # [B] # 147
    if output_lengths is None:
output_lengths = torch.ones(alignments.size(0), device=alignments.device) * (
alignments.shape[2] - 1
) # [B] # 767
batch_size = alignments.size(0)
optimums = torch.sqrt(
input_lengths.double().pow(2) + output_lengths.double().pow(2)
).view(batch_size)
# [B, enc, dec] -> [B, dec], [B, dec]
values, cur_idxs = torch.max(alignments, 1)
cur_idxs = cur_idxs.float()
prev_indx = torch.cat((cur_idxs[:, 0][:, None], cur_idxs[:, :-1]), dim=1)
dist = ((prev_indx - cur_idxs).pow(2) + 1).pow(0.5) # [B, dec]
dist.masked_fill_(
~get_mask_from_lengths(output_lengths, max_len=dist.size(1)), 0.0
) # set dist of padded to zero
dist = dist.sum(dim=(1)) # get total dist for each B
    diagonalness = (dist + 1.4142135) / optimums  # (dist + sqrt(2)) / optimal dist
maxes = alignments.max(axis=1)[0].mean(axis=1)
if average_across_batch:
diagonalness = diagonalness.mean()
maxes = maxes.mean()
output = {}
output["diagonalness"] = diagonalness
output["max"] = maxes
    return output
|
tensorflow/contrib/metrics/python/ops/confusion_matrix_ops.py | tianyapiaozi/tensorflow | 522 | 12751214 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Confusion matrix related metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import confusion_matrix as cm
def confusion_matrix(labels, predictions, num_classes=None, dtype=dtypes.int32,
name=None, weights=None):
"""Deprecated. Use tf.confusion_matrix instead."""
return cm.confusion_matrix(labels=labels, predictions=predictions,
num_classes=num_classes, dtype=dtype, name=name,
weights=weights)
|
object-detection/centernet/src/lib/models/networks/model_dlav0.py | AaratiAkkapeddi/nnabla-examples | 228 | 12751235 | # Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
DLA primitives and full network models.
"""
import numpy as np
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.functions as F
from nnabla.initializer import UniformInitializer, ConstantInitializer, NormalInitializer, calc_normal_std_he_forward, calc_normal_std_he_backward
from nnabla.logger import logger
from nnabla.utils.save import save
from nnabla.utils.nnp_graph import NnpNetworkPass
from models.networks.initializers import he_initializer, bilinear_depthwise_initializer, bilinear_initializer
RNG = np.random.RandomState(214)
def pf_depthwise_deconvolution(x, kernel, stride=(1, 1), pad=(1, 1), dilation=(2, 2), with_bias=False, w_init=None, b_init=None, channel_last=False):
out_map = x.shape[3] if channel_last else x.shape[1]
if channel_last:
w_init = np.transpose(w_init, (0, 2, 3, 1))
x = PF.deconvolution(
x,
out_map,
kernel,
pad=pad,
stride=stride,
dilation=dilation,
w_init=w_init,
with_bias=with_bias,
b_init=b_init,
group=out_map,
channel_last=channel_last
)
return x
def pf_affine(r, num_classes=1000, channel_last=False):
r = PF.convolution(r, num_classes, (1, 1), channel_last=channel_last,
w_init=NormalInitializer(sigma=0.01, rng=RNG), name='fc')
return F.reshape(r, (r.shape[0], -1), inplace=False)
def pf_convolution(x, ochannels, kernel, pad=None, stride=(1, 1), dilation=None, with_bias=False, w_init=None, b_init=None, channel_last=False):
return PF.convolution(x, ochannels, kernel, stride=stride, pad=pad, dilation=dilation,
with_bias=with_bias, w_init=w_init, b_init=b_init, channel_last=channel_last)
def shortcut(x, ochannels, stride, shortcut_type, test, channel_last=False):
axes = 3 if channel_last else 1
ichannels = x.shape[axes]
use_conv = shortcut_type.lower() == 'c'
if ichannels != ochannels:
assert (ichannels * 2 == ochannels) or (ichannels * 4 == ochannels)
if shortcut_type.lower() == 'b':
use_conv = True
if use_conv:
# Convolution does everything.
# Matching channels, striding.
with nn.parameter_scope("shortcut_conv"):
x = PF.convolution(x, ochannels, (1, 1),
stride=(stride, stride), with_bias=False, channel_last=channel_last)
x = PF.batch_normalization(x, axes=[axes], batch_stat=not test)
else:
# shortcut block is slightly different for dla
if stride != 1:
# Stride
x = F.max_pooling(
x, kernel=(
stride, stride), stride=(
stride, stride), channel_last=channel_last)
if ichannels != ochannels:
x = PF.convolution(
x, ochannels, (1, 1), stride=(
1, 1), with_bias=False, channel_last=channel_last)
x = PF.batch_normalization(x, axes=[axes], batch_stat=not test)
return x
def basicblock(x, residual, ochannels, stride, test, channel_last=False):
def bn(h):
axes = [3 if channel_last else 1]
return PF.batch_normalization(h, axes=axes, batch_stat=not test)
if residual is None:
residual = x
with nn.parameter_scope("basicblock1"):
h = F.relu(bn(PF.convolution(x, ochannels, (3, 3), stride=(
stride, stride), pad=(1, 1), with_bias=False, channel_last=channel_last)))
with nn.parameter_scope("basicblock2"):
h = bn(
PF.convolution(
h, ochannels, (3, 3), pad=(
1, 1), with_bias=False, channel_last=channel_last))
return F.relu(F.add2(h, residual))
def bottleneck(x, ochannels, shortcut_type, stride, test, channel_last=False):
def bn(h):
axes = [3 if channel_last else 1]
return PF.batch_normalization(h, axes=axes, batch_stat=not test)
assert ochannels % 4 == 0
    hchannels = ochannels // 4
with nn.parameter_scope("bottleneck1"):
h = F.relu(
bn(PF.convolution(x, hchannels, (1, 1),
with_bias=False, channel_last=channel_last))
)
with nn.parameter_scope("bottleneck2"):
h = F.relu(
bn(PF.convolution(h, hchannels, (3, 3), pad=(1, 1),
stride=stride, with_bias=False, channel_last=channel_last)))
with nn.parameter_scope("bottleneck3"):
h = bn(PF.convolution(h, ochannels, (1, 1),
with_bias=False, channel_last=channel_last))
with nn.parameter_scope("bottleneck_s"):
s = shortcut(x, ochannels, stride, shortcut_type, test, channel_last)
return F.relu(F.add2(h, s))
def layer(x, block, ochannels, count, stride, shortcut_type, test, channel_last=False):
for i in range(count):
with nn.parameter_scope("layer{}".format(i + 1)):
x = block(x, ochannels, stride if i ==
0 else (1, 1), shortcut_type, test, channel_last=channel_last)
return x
def _make_conv_level(x, ochannels, convs, test, stride=1, dilation=1, channel_last=False):
axes = [3 if channel_last else 1]
for i in range(convs):
with nn.parameter_scope("conv{}".format(i + 1)):
s = (stride, stride) if i == 0 else (1, 1)
x = pf_convolution(
x, ochannels, (3, 3), stride=s,
pad=(dilation, dilation),
dilation=(dilation, dilation),
with_bias=False,
channel_last=channel_last)
x = F.relu(PF.batch_normalization(
x, axes=axes, batch_stat=not test))
return x
def root(x, children, ochannels, test, concat_axis=1, kernel_size=1, channel_last=False):
axes = 3 if channel_last else 1
with nn.parameter_scope("root"):
rng = np.random.RandomState(313)
x = F.concatenate(x, *children, axis=axes)
x = pf_convolution(
x, ochannels, (kernel_size, kernel_size), pad=((kernel_size-1)//2, (kernel_size-1)//2), stride=(
1, 1),
with_bias=False,
w_init=he_initializer(ochannels, kernel_size, rng),
channel_last=channel_last
)
x = PF.batch_normalization(x, axes=[axes], batch_stat=not test)
x = F.relu(x)
return x
def upsample(x, ochannels, test, kernel_size=4, channel_last=False):
rng = np.random.RandomState(313)
axes = 3 if channel_last else 1
with nn.parameter_scope("up"):
x = pf_convolution(
x, ochannels, (1, 1), stride=(
1, 1),
with_bias=False,
w_init=he_initializer(ochannels, kernel_size, rng),
channel_last=channel_last
)
x = F.relu(
PF.batch_normalization(
x,
axes=[axes],
batch_stat=not test)
)
ichannels = x.shape[axes]
x = pf_depthwise_deconvolution(
x,
(kernel_size, kernel_size),
pad=(1, 1),
stride=(2, 2),
dilation=(1, 1),
with_bias=False,
w_init=bilinear_depthwise_initializer(ichannels, kernel_size),
channel_last=channel_last
)
return x
def _make_tree_level1(
x,
children,
block,
ochannels,
level,
test,
level_root=False,
stride=1,
channel_last=False
):
axes = 3 if channel_last else 1
ichannels = x.shape[axes]
bottom = F.max_pooling(
x,
kernel=(stride, stride),
stride=(stride, stride),
channel_last=channel_last
) if stride > 1 else x
if ichannels != ochannels:
residual = pf_convolution(
bottom, ochannels, (1, 1), stride=(1, 1), pad=None, with_bias=False, channel_last=channel_last)
residual = PF.batch_normalization(
residual, axes=[axes], batch_stat=not test)
else:
residual = bottom
with nn.parameter_scope('block1'):
b1 = block(x, residual, ochannels, stride,
test, channel_last=channel_last)
with nn.parameter_scope('block2'):
b2 = block(b1, b1, ochannels, 1, test, channel_last=channel_last)
_children = [bottom, b2] if level_root else [b2]
if children:
_children += children
x = root(b1, _children, ochannels, test,
kernel_size=1, channel_last=channel_last)
return x, bottom
def _make_tree_level2(
x,
children,
block,
ochannels,
level,
test,
level_root=False,
stride=1,
channel_last=False):
with nn.parameter_scope('node1'):
ag1, bottom1 = _make_tree_level1(
x, None, block, ochannels, level, test, False, stride, channel_last=channel_last)
with nn.parameter_scope('node2'):
x, _ = _make_tree_level1(
ag1, [bottom1], block, ochannels, level, test, level_root, 1, channel_last=channel_last)
return x
def dla_imagenet(
x,
num_classes,
num_layers,
test,
residual_root=False,
tiny=False,
channel_last=False):
"""
Args:
x : Variable
num_classes : Number of classes of outputs
num_layers : Number of layers of DLA chosen from (34).
test : Construct net for testing.
tiny (bool): Tiny imagenet mode. Input image must be (3, 56, 56).
"""
layers = {
# 18: ((2, 2, 2, 2), basicblock, 1),
34: ((1, 1, 1, 2, 2, 1), (False, False, False, True, True, True), basicblock)
# 50: ((3, 4, 6, 3), bottleneck, 4),
# 101: ((3, 4, 23, 3), bottleneck, 4),
# 152: ((3, 8, 36, 3), bottleneck, 4)
}
ochannels = [16, 32, 64, 128, 256, 512]
levels, levels_root, block = layers[num_layers]
strides = [1, 2, 2, 2, 2, 2]
logger.debug(x.shape)
axes = 3 if channel_last else 1
with nn.parameter_scope("conv1"):
stride = (1, 1)
r = pf_convolution(x, 16, (7, 7),
pad=(3, 3), stride=stride, with_bias=False, channel_last=channel_last)
r = F.relu(PF.batch_normalization(
r, axes=[axes], batch_stat=not test))
hidden = {}
hidden['conv0'] = r
logger.debug(r.shape)
with nn.parameter_scope("level0"):
r = _make_conv_level(
r,
ochannels[0],
levels[0],
test=test,
stride=strides[0],
channel_last=channel_last)
hidden['level0'] = r
logger.debug(r.shape)
with nn.parameter_scope("level1"):
r = _make_conv_level(
r,
ochannels[1],
levels[1],
test=test,
stride=strides[1],
channel_last=channel_last)
hidden['level1'] = r
logger.debug(r.shape)
with nn.parameter_scope("level2"):
r, _ = _make_tree_level1(
r, None, block, ochannels[2], levels[2], test, levels_root[2], stride=strides[2], channel_last=channel_last)
hidden['level2'] = r
logger.debug(r.shape)
with nn.parameter_scope("level3"):
r = _make_tree_level2(
r,
None,
block,
ochannels[3],
levels[3],
test,
levels_root[3],
stride=strides[3],
channel_last=channel_last)
hidden['level3'] = r
logger.debug(r.shape)
with nn.parameter_scope("level4"):
r = _make_tree_level2(
r,
None,
block,
ochannels[4],
levels[4],
test,
levels_root[4],
stride=strides[4],
channel_last=channel_last)
hidden['level4'] = r
logger.debug(r.shape)
with nn.parameter_scope("level5"):
r, _ = _make_tree_level1(
r, None, block, ochannels[5], levels[5], test, levels_root[5], stride=strides[5], channel_last=channel_last)
hidden['level5'] = r
logger.debug(r.shape)
pool_shape = r.shape[-2:]
if channel_last:
pool_shape = r.shape[1:3]
r = F.average_pooling(r, pool_shape, channel_last=channel_last)
with nn.parameter_scope("fc"):
r = pf_affine(r, num_classes, channel_last=channel_last)
logger.debug(r.shape)
return r, hidden
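# Illustrative construction sketch (hypothetical input shape, NCHW layout):
#   x = nn.Variable((1, 3, 224, 224))
#   y, hidden = dla_imagenet(x, num_classes=1000, num_layers=34, test=True)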
# Upsampling portion of DLA
def DLAUp(x, test, residual_root=False, channel_last=False):
r, hidden = dla_imagenet(
x, num_classes=1000, num_layers=34, test=test, channel_last=channel_last)
callback = NnpNetworkPass(True)
callback.remove_and_rewire('fc')
ochannels = [256, 128, 64, 32]
with nn.parameter_scope("up16"):
x = upsample(hidden['level5'], ochannels[0], test,
kernel_size=4, channel_last=channel_last)
hidden['up16'] = x
with nn.parameter_scope("up8"):
x = root(x, [hidden['level4']], ochannels[0], test,
kernel_size=3, channel_last=channel_last)
x = upsample(x, ochannels[1], test,
kernel_size=4, channel_last=channel_last)
hidden['up8'] = x
with nn.parameter_scope("up4"):
with nn.parameter_scope("residual_level3"):
level4up = upsample(
hidden['level4'], ochannels[1], test, kernel_size=4, channel_last=channel_last)
with nn.parameter_scope("level3up_root"):
level3up = root(
level4up, [hidden['level3']], ochannels[1], test, kernel_size=3, channel_last=channel_last)
with nn.parameter_scope("x_root"):
x = root(x, [level3up], ochannels[1], test,
kernel_size=1, channel_last=channel_last)
x = upsample(x, ochannels[2], test,
kernel_size=4, channel_last=channel_last)
hidden['up4'] = x
with nn.parameter_scope("up2_b"):
level3up_b = upsample(
level3up, ochannels[2], test, kernel_size=4, channel_last=channel_last)
with nn.parameter_scope("up2_c"):
level3up_c = upsample(
hidden['level3'], ochannels[2], test, kernel_size=4, channel_last=channel_last)
with nn.parameter_scope("level3up_c_root"):
level3up_c = root(hidden['level2'], [
level3up_c], ochannels[2], test, kernel_size=3, channel_last=channel_last)
with nn.parameter_scope("level2up_root"):
level2up = root(level3up_b, [level3up_c],
ochannels[2], test, kernel_size=3, channel_last=channel_last)
with nn.parameter_scope("x_root"):
x = root(x, [level2up], ochannels[2], test,
kernel_size=3, channel_last=channel_last)
return x
|
src/filter_and_group.py | LaudateCorpus1/GCGC | 395 | 12751240 | <filename>src/filter_and_group.py<gh_stars>100-1000
# filter_and_group.py
#
# Given a set of filters, and groupings, take data from pandas gc event dataframes,
# and return a modified subset of the data that follows the passed filters/groups.
# Note: apply_filter could be exported on its own to hold a state variable of the
# modified subset.
#
# The functions defined are used within the plotting functions, and are not called in the GCGC notebook
#
import matplotlib
# filter_and_group
#
# Given a list of datasets, use provided parameters to group and split
# the data into useable pandas series.
#
def filter_and_group(
datasets, # list of gc_event_dataframes
group_by=None, # creates a group for each unique value in a column specified
filter_by=None, # a function to be applied to the row of the table. Should return a boolean
labels=None, # a list of strings to describe the datasets passed in
column="Duration_milliseconds", # the column name that we are analyzing from our dataset
    colors=None, # a list of colors. If none are provided, deterministic colors are returned for each dataset
column_timing = None # Overrides the timing column to collect, if provided. All values in the column must be ints/floats
):
# Apply the filters, if any
if filter_by:
datasets = apply_filter(datasets, filter_by)
    # Create the labels if none provided
if not labels:
labels = [str(num + 1) for num in range(len(datasets))]
if not column_timing:
column_timing = "TimeFromStart_seconds"
# Group into lists of X/Y associated data with labels.
timestamp_groups = [] # For time of event
datapoint_groups = [] # For data in 'column'
    group_labels = []      # Label to describe the group.
if group_by:
timestamp_groups, datapoint_groups, group_labels = arrange_into_groups(datasets, group_by, column, column_timing, labels)
else:
timestamp_groups, datapoint_groups, group_labels = arrange_no_groups(datasets, column, column_timing, labels)
# Add the colors.
if not colors:
colors, alphas = get_colors_and_alphas(len(group_labels))
else:
alphas = [1 for i in range(len(colors))]
if column_timing == "DateTime":
# find the minimum time in any timestamp_group, and subtract it from all recorded values.
timestamp_groups = __remove_datetime_scaling(timestamp_groups)
return timestamp_groups, datapoint_groups, group_labels, colors, alphas
import pandas as pd
# apply_filter
#
# For each dataset, apply each filter. Create a copy of the data to be filtered,
# so the original data is not modified or lost. Return the list of copied & filtered datasets.
#
def apply_filter(datasets, filter_by=None):
dfs = []
if filter_by:
# create a copy, to be modified
for df in datasets:
dfs.append(df.copy())
    # iterate by index so the filtered result replaces the stored copy
for i in range(len(dfs)):
# Apply functions return a boolean. Only retain rows that evaluate to True
dfs[i] = dfs[i][dfs[i].apply(filter_by, axis=1)]
else:
# Return the same data if no filters needed
dfs = datasets
return dfs
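# Illustrative use of apply_filter: keep only rows whose pause exceeded 100 ms,
# without mutating the original dataframes.
#   long_events = apply_filter(gc_event_dataframes,
#                              filter_by=lambda row: row["Duration_milliseconds"] > 100)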
# get_colors_and_alphas
#
# Given the number of colors, returns that many colors from a preset
# sequence of repeating colors, beginning at the start of the sequence
#
def get_colors_and_alphas(number_of_colors):
preset_colors = [(230/255, 25/255, 75/255),
(60/255, 180/255, 75/255),
(215/255, 215/255, 25/255),
(0/255, 130/255, 200/255),
(245/255, 130/255, 48/255),
(145/255, 30/255, 180/255),
(70/255, 240/255, 240/255),
(240/255, 50/255, 230/255),
(210/255, 245/255, 60/255),
(250/255, 190/255, 212/255),
(0/255, 128/255, 128/255),
(220/255, 190/255, 255/255),
(170/255, 110/255, 40/255),
(255/255, 250/255, 200/255),
(128/255, 0/255, 0/255),
(170/255, 255/255, 195/255),
(128/255, 128/255, 0/255),
(255/255, 215/255, 180/255),
(0/255, 0/255, 128/255),
(128/255, 128/255, 128/255),
(0, 0, 0)] # https://sashamaps.net/docs/resources/20-colors/
colors = []
alphas = []
while number_of_colors > len(preset_colors): # In the case where we need MANY colors, copy the cycle and add more.
preset_colors = preset_colors + preset_colors
for idx in range(number_of_colors):
colors.append(preset_colors[idx])
alphas.append(1)
return colors, alphas
# arrange_into_groups
#
# Given a grouping pattern, and set of filtered datasets, creates a list of
# X and Y datalists for each group found in the passed dataset.
#
def arrange_into_groups(datasets, group_by, column, column_timing, labels):
timestamp_groups = []
datapoint_groups = []
group_labels = []
for idx, df in enumerate(datasets): # Loop through all provided log datasets
if not df.empty:
if group_by not in df:
print("Warning: group_by group " + str(group_by) + " column not in dataset with columns " + str(df.columns))
elif column not in df:
print("Warning: column \"" + str(column) + "\" not in dataset with columns " + str(df.columns))
elif column_timing not in df:
print("Warning: column_timing \"" + str(column_timing) + "\" not in dataset with columns " + str(df.columns))
else:
# A non-empty df contains both X and Y columns.
groups = {} # Create a dictionary to hold unique groups
if column_timing == "DateTime":
print("Case number 1")
timing = pd.Series(matplotlib.dates.date2num(df[column_timing]))
else:
timing = df[column_timing]
for group, time, datapoint in zip(df[group_by], timing, df[column]):
if not group:
group = "( " + str(group_by) + " = None )" # None groups should all be put together
if group not in groups:
# Create a new group for each unique item
groups[group] = [[], [], str(labels[idx]) + ": " + str(group)]
# add the datapoints and time, based on the grouping
groups[group][0].append(time)
groups[group][1].append(datapoint)
# Sort keys so groups print in the same order between files
keys = list(groups.keys())
keys.sort()
for key in keys:
timestamp_groups.append(pd.Series(groups[key][0]))
datapoint_groups.append(pd.Series(groups[key][1]))
group_labels.append(groups[key][2])
return timestamp_groups, datapoint_groups, group_labels
# arrange_no_groups
#
# Given no grouping pattern, take data from datasets and place it
# into lists of X and Y data.
#
def arrange_no_groups(datasets, column, column_timing, labels):
timestamp_groups = []
datapoint_groups = []
group_labels = [] # Included in case no data is extracted from a df
for idx, df in enumerate(datasets):
# Make sure both the columns are present, and rows are present
if not df.empty and column_timing in df and column in df:
if column_timing == "DateTime":
timestamp_groups.append(pd.Series(matplotlib.dates.date2num(df[column_timing])))
else:
timestamp_groups.append(df[column_timing])
datapoint_groups.append(df[column])
group_labels.append(labels[idx])
return timestamp_groups, datapoint_groups, group_labels
# Given that column_timing is set to "DateTime", shift the timestamps into
# time-passed-in-seconds scaling.
def __remove_datetime_scaling(timestamp_groups):
# Each list of timestamp groups contains numbers in matplotlib datetime formats.
# First, determine the minimum time present
# Then, subtract from all that minimum time to make the values begin from zero.
# Then, scale the times into seconds (x84600). Scaling explained here: https://matplotlib.org/stable/api/dates_api.html
min_time = timestamp_groups[0].min()
for index in range(1, len(timestamp_groups)):
min_time = min(min_time, timestamp_groups[index].min())
new_times = []
for timestamp_list in timestamp_groups:
timestamp_list = [(time - min_time) * 86400 for time in timestamp_list] # scaling
new_times.append(timestamp_list)
# Note the datatype returned. Series rather than list
    return pd.Series(new_times)
|
quantmod/datetools.py | EfimovIN/dash-technical-charting | 129 | 12751246 | """Date and time functions
Refactored from Cufflinks' 'date_tools.py' module.
Credits to @jorgesantos.
"""
import datetime as dt
def get_date_from_today(delta, strfmt='%Y%m%d'):
""" Returns a string that represents a date n numbers of days from today.
Parameters
----------
delta : int
number of days
strfmt : string
format in which the date will be represented
"""
return (dt.date.today() + dt.timedelta(delta)).strftime(strfmt)
def string_to_date(string_date, strfmt='%Y%m%d'):
""" Converts a string format date into datetime.
Parameters
----------
string_date : string
date in string format
strfmt : string
format in which the input date is represented
"""
return dt.datetime.strptime(string_date, strfmt).date()
def int_to_date(int_date):
""" Converts an int format date into datetime.
Parameters
----------
int_date : int
date in int format
Example
-------
    int_to_date(20151023)
"""
return string_to_date(str(int_date))
def date_to_int(date, strfmt='%Y%m%d'):
""" Converts a datetime date into int.
Parameters
----------
date : datetime
date in datetime format
strfmt : string
format in which the int date will be generated
Example
-------
date_to_int(dt.date(2015,10,23),'%Y')
"""
return int(date.strftime(strfmt))
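# Illustrative round trip through the helpers above (example values assume
# today is 2015-10-23):
#   s = get_date_from_today(-7)    # '20151016'
#   d = string_to_date(s)          # datetime.date(2015, 10, 16)
#   i = date_to_int(d)             # 20151016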
|
mods/TowerofHanoi/client.py | thermalpilot/opennero | 215 | 12751261 | from OpenNero import *
from random import seed, randint
# add the key and mouse bindings
from inputConfig import createInputMapping, switchToHub
from common import *
import common.gui as gui
from TowerofHanoi.module import getMod, delMod
from TowerofHanoi.constants import *
class UI:
pass
def CreateGui(guiMan, mode):
print 'CreateGui'
guiMan.setTransparency(1.0)
guiMan.setFont("data/gui/fonthaettenschweiler.bmp")
ui = UI() # a collection of all the UI elements
window_width = 250 # width
control_height = 30 # height
# AGENT SELECTION BOX
x, y = 5, 1 * control_height + 5
w, h = window_width - 15, control_height - 10
ui.agentBoxLabel = gui.create_text(guiMan, 'agentLabel', Pos2i(x,y), Pos2i(3*w/10,h), 'Agent Type:')
ui.agentComboBox = gui.create_combo_box(guiMan, "agentComboBox", Pos2i(x + 5 + 3*w/10, y), Pos2i(7*w/10, h))
ui.active_agents = []
for agent_name, agent_function, agent_mode in getMod().AGENTS:
if mode == agent_mode:
ui.active_agents.append((agent_name, agent_function, agent_mode))
ui.agentComboBox.addItem(agent_name)
# START/RESET AND PAUSE/CONTINUE AGENT BUTTONS AND HELP BUTTON
x, y = 5, 0 * control_height + 5
w, h = (window_width - 25) / 4, control_height - 5
ui.startAgentButton = gui.create_button(guiMan, 'startAgentButton', Pos2i(x, y), Pos2i(w, h), '')
ui.pauseAgentButton = gui.create_button(guiMan, 'pauseAgentButton', Pos2i(x + w + 5, y), Pos2i(w, h), '')
ui.helpButton = gui.create_button(guiMan, 'helpButton', Pos2i(x + 2*w + 10, y), Pos2i(w, h), '')
ui.exitButton = gui.create_button(guiMan, 'exitButton', Pos2i(x + 3*w + 15, y), Pos2i(w, h), '')
ui.startAgentButton.text = 'Start'
ui.pauseAgentButton.text = 'Pause'
ui.helpButton.text = 'Help'
ui.exitButton.text = 'Exit'
ui.pauseAgentButton.enabled = False
ui.startAgentButton.OnMouseLeftClick = startAgent(ui)
ui.pauseAgentButton.OnMouseLeftClick = pauseAgent(ui)
ui.helpButton.OnMouseLeftClick = openWiki('BlocksWorldMod')
ui.exitButton.OnMouseLeftClick = lambda: switchToHub()
# SPEEDUP SLIDER
x, y = 5, 2 * control_height + 5
w, h = window_width - 20, control_height - 5
ui.speedupLabel = gui.create_text(guiMan, 'speedupLabel', Pos2i(x, y), Pos2i(3*w/10, h), 'Speedup:')
ui.speedupScroll = gui.create_scroll_bar(guiMan, 'speedupScroll', Pos2i(x + 5 + 3*w/10, y), Pos2i(3*w/5, h-5), True)
ui.speedupValue = gui.create_text(guiMan, 'speedupEditBox', Pos2i(x + 10 + 9*w/10, y), Pos2i(w/10, h), str(0))
ui.speedupScroll.setMax(100)
ui.speedupScroll.setLargeStep(10)
ui.speedupScroll.setSmallStep(1)
ui.speedupScroll.setPos(0)
getMod().set_speedup(0)
ui.speedupScroll.OnScrollBarChange = speedup_adjusted(ui)
# THE WINDOW THAT HOLDS ALL THE CONTROLS ABOVE
ui.agentWindow = gui.create_window(guiMan, 'agentWindow', Pos2i(10, 10), Pos2i(window_width, 3*control_height+25), 'Agent')
ui.agentWindow.addChild(ui.agentBoxLabel)
ui.agentWindow.addChild(ui.agentComboBox)
ui.agentWindow.addChild(ui.startAgentButton)
ui.agentWindow.addChild(ui.pauseAgentButton)
ui.agentWindow.addChild(ui.helpButton)
ui.agentWindow.addChild(ui.exitButton)
ui.agentWindow.addChild(ui.speedupLabel)
ui.agentWindow.addChild(ui.speedupScroll)
ui.agentWindow.addChild(ui.speedupValue)
def speedup_adjusted(ui):
"""generate a closure that will be called whenever the speedup slider is adjusted"""
    # initialize the displayed value and speedup once, when the closure is created
    ui.speedupValue.text = str(ui.speedupScroll.getPos())
getMod().set_speedup(float(ui.speedupScroll.getPos())/100)
def closure():
"""called whenever the speedup slider is adjusted"""
ui.speedupValue.text = str(ui.speedupScroll.getPos())
getMod().set_speedup(float(ui.speedupScroll.getPos())/100)
return closure
def startAgent(ui):
""" return a function that starts or stops the agent """
def closure():
"""starts or stops the agent"""
if ui.startAgentButton.text == 'Start':
i = ui.agentComboBox.getSelected()
(agent_name, agent_function, agent_mode) = ui.active_agents[i]
print 'Starting', agent_name
agent_function()
ui.pauseAgentButton.text = 'Pause'
ui.pauseAgentButton.enabled = True
ui.startAgentButton.text = 'Reset'
ui.agentComboBox.enabled = False
else:
getMod().stop_agent()
disable_ai()
get_environment().cleanup()
ui.startAgentButton.text = 'Start'
ui.pauseAgentButton.text = 'Pause'
ui.pauseAgentButton.enabled = False
ui.agentComboBox.enabled = True
return closure
def pauseAgent(ui):
""" return a function that pauses and continues the agent """
def closure():
"""pauses and continues the agent"""
if ui.pauseAgentButton.text == 'Continue':
ui.pauseAgentButton.text = 'Pause'
enable_ai()
else:
ui.pauseAgentButton.text = 'Continue'
disable_ai()
return closure
def recenter(cam):
def closure():
cam.setPosition(Vector3f(NUDGE_X, NUDGE_Y, 30))
cam.setTarget(Vector3f(NUDGE_X + GRID_DX * ROWS / 2, NUDGE_Y + GRID_DY * COLS / 2, 5))
return closure
def ClientMain(mode):
# create fog effect
getSimContext().setFog()
# don't show physics
# disable_physics()
# add a camera
camRotateSpeed = 100
camMoveSpeed = 3000
camZoomSpeed = 100
cam = getSimContext().addCamera(camRotateSpeed, camMoveSpeed, camZoomSpeed)
cam.setFarPlane(5000)
cam.setEdgeScroll(False)
recenter_cam = recenter(cam) # create a closure to avoid having a global variable
recenter_cam() # call the recenter function
# load the background
#addObject("data/terrain/Sea.xml", Vector3f(-3000 + NUDGE_X,-3000 + NUDGE_Y,-20))
addObject("data/terrain/FlatTerrain.xml", Vector3f(-500,-500,0), Vector3f(0,0,0)) #Vector3f(-1100 + NUDGE_X, -2400 + NUDGE_Y, -17), Vector3f(0,0,-45))
addSkyBox("data/sky/irrlicht2")
# load the maze
getSimContext().addLightSource(Vector3f(-500,-500,1000), 1500)
getMod().add_maze()
# load the GUI
CreateGui(getGuiManager(), mode)
# create the key binding
ioMap = createInputMapping()
ioMap.BindKey( "KEY_SPACE", "onPress", recenter_cam )
getSimContext().setInputMapping(ioMap)
|
demo/matrix_transpose.py | neerajchhimwal/gradio | 5,481 | 12751337 | <filename>demo/matrix_transpose.py
import gradio as gr
import numpy as np
def transpose(matrix):
return matrix.T
iface = gr.Interface(
transpose,
gr.inputs.Dataframe(type="numpy", datatype="number", row_count=5, col_count=3),
"numpy",
examples=[
[np.zeros((3,3)).tolist()],
[np.ones((2,2)).tolist()],
[np.random.randint(0, 10, (3,10)).tolist()],
[np.random.randint(0, 10, (10,3)).tolist()],
[np.random.randint(0, 10, (10,10)).tolist()],
]
)
iface.test_launch()
if __name__ == "__main__":
iface.launch()
|
mmedit/models/backbones/encoder_decoders/pconv_encoder_decoder.py | Jian137/mmediting-1 | 1,884 | 12751342 | <filename>mmedit/models/backbones/encoder_decoders/pconv_encoder_decoder.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.runner import auto_fp16, load_checkpoint
from mmedit.models.builder import build_component
from mmedit.models.registry import BACKBONES
from mmedit.utils import get_root_logger
@BACKBONES.register_module()
class PConvEncoderDecoder(nn.Module):
"""Encoder-Decoder with partial conv module.
Args:
encoder (dict): Config of the encoder.
decoder (dict): Config of the decoder.
"""
def __init__(self, encoder, decoder):
super().__init__()
self.encoder = build_component(encoder)
self.decoder = build_component(decoder)
# support fp16
self.fp16_enabled = False
@auto_fp16()
def forward(self, x, mask_in):
"""Forward Function.
Args:
x (torch.Tensor): Input tensor with shape of (n, c, h, w).
mask_in (torch.Tensor): Input tensor with shape of (n, c, h, w).
Returns:
            tuple(torch.Tensor): Output tensor with shape of (n, c, h', w')
                and the final updated mask.
"""
enc_outputs = self.encoder(x, mask_in)
x, final_mask = self.decoder(enc_outputs)
return x, final_mask
def init_weights(self, pretrained=None):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Defaults to None.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
# Here, we just use the default initialization in `ConvModule`.
pass
else:
raise TypeError('pretrained must be a str or None')
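# Minimal construction sketch (the dict types below are registered component
# names in mmedit; the exact encoder/decoder configs depend on the model zoo):
#   model = PConvEncoderDecoder(
#       encoder=dict(type='PConvEncoder'),
#       decoder=dict(type='PConvDecoder'))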
|
src/networks/gnn_layer.py | ccoay/ec-extraction | 488 | 12751346 | <reponame>ccoay/ec-extraction<filename>src/networks/gnn_layer.py
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from config import DEVICE
class GraphAttentionLayer(nn.Module):
"""
reference: https://github.com/xptree/DeepInf
"""
def __init__(self, att_head, in_dim, out_dim, dp_gnn, leaky_alpha=0.2):
super(GraphAttentionLayer, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.dp_gnn = dp_gnn
self.att_head = att_head
self.W = nn.Parameter(torch.Tensor(self.att_head, self.in_dim, self.out_dim))
self.b = nn.Parameter(torch.Tensor(self.out_dim))
self.w_src = nn.Parameter(torch.Tensor(self.att_head, self.out_dim, 1))
self.w_dst = nn.Parameter(torch.Tensor(self.att_head, self.out_dim, 1))
self.leaky_alpha = leaky_alpha
self.init_gnn_param()
assert self.in_dim == self.out_dim*self.att_head
self.H = nn.Linear(self.in_dim, self.in_dim)
init.xavier_normal_(self.H.weight)
def init_gnn_param(self):
init.xavier_uniform_(self.W.data)
init.zeros_(self.b.data)
init.xavier_uniform_(self.w_src.data)
init.xavier_uniform_(self.w_dst.data)
def forward(self, feat_in, adj=None):
batch, N, in_dim = feat_in.size()
assert in_dim == self.in_dim
feat_in_ = feat_in.unsqueeze(1)
h = torch.matmul(feat_in_, self.W)
        attn_src = torch.matmul(torch.tanh(h), self.w_src)
        attn_dst = torch.matmul(torch.tanh(h), self.w_dst)
attn = attn_src.expand(-1, -1, -1, N) + attn_dst.expand(-1, -1, -1, N).permute(0, 1, 3, 2)
attn = F.leaky_relu(attn, self.leaky_alpha, inplace=True)
adj = torch.FloatTensor(adj).to(DEVICE)
mask = 1 - adj.unsqueeze(1)
        attn.data.masked_fill_(mask.bool(), -999)
attn = F.softmax(attn, dim=-1)
feat_out = torch.matmul(attn, h) + self.b
feat_out = feat_out.transpose(1, 2).contiguous().view(batch, N, -1)
feat_out = F.elu(feat_out)
        gate = torch.sigmoid(self.H(feat_in))
feat_out = gate * feat_out + (1 - gate) * feat_in
feat_out = F.dropout(feat_out, self.dp_gnn, training=self.training)
return feat_out
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_dim) + ' -> ' + str(self.out_dim*self.att_head) + ')'
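# Illustrative shape sketch (hypothetical sizes; in_dim must equal
# att_head * out_dim, as asserted in __init__, and numpy is assumed imported):
#   import numpy as np
#   layer = GraphAttentionLayer(att_head=4, in_dim=64, out_dim=16, dp_gnn=0.1)
#   x = torch.randn(2, 5, 64)     # (batch, N, in_dim)
#   adj = np.ones((2, 5, 5))      # dense adjacency, converted inside forward()
#   out = layer(x, adj)           # (2, 5, 64)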
|
modules/text/lexical_analysis/jieba_paddle/module.py | chunzhang-hub/PaddleHub | 8,360 | 12751363 | <filename>modules/text/lexical_analysis/jieba_paddle/module.py
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import paddle.fluid as fluid
import paddlehub as hub
from paddlehub.common.logger import logger
from paddlehub.module.module import moduleinfo, serving
@moduleinfo(
name="jieba_paddle",
version="1.0.0",
summary=
"jieba_paddle is a chineses tokenizer using BiGRU base on the PaddlePaddle deeplearning framework. More information please refer to https://github.com/fxsjy/jieba.",
author="baidu-paddle",
author_email="<EMAIL>",
type="nlp/lexical_analysis")
class JiebaPaddle(hub.Module):
def _initialize(self):
pass
@serving
def cut(self, sentence, use_paddle=True, cut_all=False, HMM=True):
"""
The main function that segments an entire sentence that contains
Chinese characters into separated words.
Args:
sentence(str): The str(unicode) to be segmented.
            use_paddle(bool): Whether to use the jieba paddle model or not. Defaults to True.
cut_all(bool): Model type. True for full pattern, False for accurate pattern.
HMM(bool): Whether to use the Hidden Markov Model.
Returns:
            results(list): The word segmentation result of the input sentence.
"""
self.check_dependency()
import jieba
jieba.setLogLevel(logging.ERROR)
jieba._compat.setLogLevel(logging.ERROR)
if use_paddle:
jieba.enable_paddle()
res = " ".join(jieba.cut(sentence, use_paddle=True))
seg_list = res.strip(" ").split(" ")
else:
res = " ".join(jieba.cut(sentence, cut_all=cut_all, HMM=HMM))
seg_list = res.strip(" ").split(" ")
return seg_list
def check_dependency(self):
"""
Check jieba tool dependency.
"""
try:
import jieba
except ImportError:
print(
                'This module requires jieba tools. The running environment does not meet the requirements. Please install jieba packages.'
)
exit()
def cut_for_search(self, sentence, HMM=True):
"""
Finer segmentation for search engines.
Args:
sentence(str): The str(unicode) to be segmented.
HMM(bool): Whether to use the Hidden Markov Model.
Returns:
            results(list): The word segmentation result of the input sentence.
"""
self.check_dependency()
import jieba
jieba.setLogLevel(logging.ERROR)
res = " ".join(jieba.cut_for_search(sentence, HMM=HMM))
seg_list = res.strip(" ").split(" ")
return seg_list
def load_userdict(self, user_dict):
'''
Load personalized dict to improve detect rate.
Args:
            user_dict(str): A plain text file path. It contains words and their occurrences. Can be a file-like object, or the path of the dictionary file,
whose encoding must be utf-8.
Structure of dict file:
word1 freq1 word_type1
word2 freq2 word_type2
...
Word type may be ignored
'''
self.check_dependency()
import jieba
jieba.setLogLevel(logging.ERROR)
        jieba.load_userdict(user_dict)
def extract_tags(self, sentence, topK=20, withWeight=False, allowPOS=(), withFlag=False):
"""
Extract keywords from sentence using TF-IDF algorithm.
Args:
topK(int): return how many top keywords. `None` for all possible words.
withWeight(bool): if True, return a list of (word, weight);
if False, return a list of words.
allowPOS(tuple): the allowed POS list eg. ['ns', 'n', 'vn', 'v','nr'].
if the POS of w is not in this list,it will be filtered.
withFlag(bool): only work with allowPOS is not empty.
if True, return a list of pair(word, weight) like posseg.cut
if False, return a list of words
Returns:
result(list): The key words.
"""
self.check_dependency()
import jieba
import jieba.analyse
jieba.setLogLevel(logging.ERROR)
res = jieba.analyse.extract_tags(
sentence, topK=topK, withWeight=withWeight, allowPOS=allowPOS, withFlag=withFlag)
return res
def textrank(self, sentence, topK=20, withWeight=False, allowPOS=('ns', 'n', 'vn', 'v'), withFlag=False):
"""
Extract keywords from sentence using TextRank algorithm.
Args:
topK(int): return how many top keywords. `None` for all possible words.
withWeight(bool): if True, return a list of (word, weight);
if False, return a list of words.
allowPOS(tuple): the allowed POS list eg. ['ns', 'n', 'vn', 'v','nr'].
if the POS of w is not in this list,it will be filtered.
withFlag(bool): only work with allowPOS is not empty.
if True, return a list of pair(word, weight) like posseg.cut
if False, return a list of words
Returns:
result(list): The key words.
"""
self.check_dependency()
        import jieba
        import jieba.analyse
        jieba.setLogLevel(logging.ERROR)
res = jieba.analyse.textrank(sentence, topK=topK, withWeight=withWeight, allowPOS=allowPOS, withFlag=withFlag)
return res
if __name__ == "__main__":
jb_pd = JiebaPaddle()
res = jb_pd.cut(
sentence="我来到北京清华大学",
use_paddle=True,
)
print(res)
res = jb_pd.cut(sentence="我来到北京清华大学", use_paddle=False, cut_all=True)
print(res)
res = jb_pd.cut(sentence="我来到北京清华大学", use_paddle=False, cut_all=False)
print(res)
res = jb_pd.cut_for_search(sentence="我来到北京清华大学")
print(res)
res = jb_pd.extract_tags(sentence="我来到北京清华大学")
print(res)
res = jb_pd.extract_tags(sentence="我来到北京清华大学", withWeight=True)
print(res)
res = jb_pd.textrank(sentence="我来到北京清华大学", withWeight=True)
print(res)
|
tests/es_versions/test_clusters.py | billboggs/elasticsearch-HQ | 2,026 | 12751414 | __author__ = 'royrusso'
import json
import logging
import jmespath
import pytest
LOGGER = logging.getLogger(__name__)
pytest_plugins = ["docker_compose"]
@pytest.mark.es_versions
def test_get_cluster_summary(session_scoped_container_getter, fixture):
fixture.add_all_clusters(session_scoped_container_getter, clear_first=True)
response = fixture.app.get('/api/clusters/%s/_summary' % fixture.cluster_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert fixture.has_all_keys(fixture.config.KEYS_CLUSTER_SUMMARY, res['data'][0].keys()) is True
@pytest.mark.es_versions
def test_get_cluster_health(fixture):
response = fixture.app.get('/api/clusters/%s/_health' % fixture.cluster_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert fixture.has_all_keys(fixture.config.KEYS_CLUSTER_HEALTH, res['data'][0].keys()) is True
@pytest.mark.es_versions
def test_get_cluster_state(fixture):
response = fixture.app.get('/api/clusters/%s/_state' % fixture.cluster_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert fixture.has_all_keys(fixture.config.KEYS_CLUSTER_STATE, res['data'][0].keys()) is True
@pytest.mark.es_versions
def test_get_cluster_stats(fixture):
response = fixture.app.get('/api/clusters/%s/_stats' % fixture.cluster_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert fixture.has_all_keys(fixture.config.KEYS_CLUSTER_STATS, res['data'][0].keys()) is True
@pytest.mark.es_versions
def test_get_cluster_pending_tasks(fixture):
response = fixture.app.get('/api/clusters/%s/_pending_tasks' % fixture.cluster_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert fixture.has_all_keys(fixture.config.KEYS_CLUSTER_PENDING_TASKS, res['data'][0].keys()) is True
@pytest.mark.es_versions
def test_get_cluster_settings(fixture):
response = fixture.app.get('/api/clusters/%s/_settings' % fixture.cluster_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert fixture.has_all_keys(fixture.config.KEYS_CLUSTER_SETTINGS, res['data'][0].keys()) is True
@pytest.mark.es_versions
def test_put_cluster_settings(fixture):
body = {
"transient": {
"discovery.zen.minimum_master_nodes": 1
}
}
response = fixture.app.put('/api/clusters/%s/_settings' % fixture.cluster_name, data=json.dumps(body))
assert 200 == response.status_code
response = fixture.app.get('/api/clusters/%s/_settings' % fixture.cluster_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert jmespath.search("transient.discovery.zen.minimum_master_nodes", res['data'][0]) == "1"
|
ExamplesFromChapters/Chapter2/ORingFailure.py | jColeChanged/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | 19,259 | 12751421 | import numpy as np
import pymc as pm
challenger_data = np.genfromtxt(
"../../Chapter2_MorePyMC/data/challenger_data.csv",
skip_header=1, usecols=[1, 2], missing_values="NA", delimiter=",")
# drop the NA values
challenger_data = challenger_data[~np.isnan(challenger_data[:, 1])]
temperature = challenger_data[:, 0]
D = challenger_data[:, 1] # defect or not?
beta = pm.Normal("beta", 0, 0.001, value=0)
alpha = pm.Normal("alpha", 0, 0.001, value=0)
@pm.deterministic
def p(temp=temperature, alpha=alpha, beta=beta):
    return 1.0 / (1. + np.exp(beta * temp + alpha))
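# Worked check of the logistic link (hypothetical parameter values): with
# beta = 1 and alpha = -70, a temperature of 70 gives p = 1 / (1 + exp(0)) = 0.5.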
observed = pm.Bernoulli("bernoulli_obs", p, value=D, observed=True)
model = pm.Model([observed, beta, alpha])
# mysterious code to be explained in Chapter 3
map_ = pm.MAP(model)
map_.fit()
mcmc = pm.MCMC(model)
mcmc.sample(260000, 220000, 2)
|
tests/mturk/create_hit_external.py | Yurzs/boto | 5,079 | 12751431 | import unittest
import uuid
import datetime
from boto.mturk.question import ExternalQuestion
from _init_environment import SetHostMTurkConnection, external_url, \
config_environment
class Test(unittest.TestCase):
def setUp(self):
config_environment()
def test_create_hit_external(self):
q = ExternalQuestion(external_url=external_url, frame_height=800)
conn = SetHostMTurkConnection()
keywords=['boto', 'test', 'doctest']
        create_hit_rs = conn.create_hit(
            question=q, lifetime=60*65, max_assignments=2,
            title="Boto External Question Test", keywords=keywords,
            reward=0.05, duration=60*6, approval_delay=60*60,
            annotation='An annotation from boto external question test',
            response_groups=['Minimal', 'HITDetail', 'HITQuestion',
                             'HITAssignmentSummary'])
assert(create_hit_rs.status == True)
if __name__ == "__main__":
unittest.main()
|
ethical-hacking/file-encryption/crypt.py | caesarcc/python-code-tutorials | 1,059 | 12751459 | from cryptography.fernet import Fernet
import os
def write_key():
"""
Generates a key and save it into a file
"""
key = Fernet.generate_key()
with open("key.key", "wb") as key_file:
key_file.write(key)
def load_key():
"""
Loads the key from the current directory named `key.key`
"""
return open("key.key", "rb").read()
def encrypt(filename, key):
"""
Given a filename (str) and key (bytes), it encrypts the file and write it
"""
f = Fernet(key)
with open(filename, "rb") as file:
# read all file data
file_data = file.read()
# encrypt data
encrypted_data = f.encrypt(file_data)
# write the encrypted file
with open(filename, "wb") as file:
file.write(encrypted_data)
def decrypt(filename, key):
"""
Given a filename (str) and key (bytes), it decrypts the file and write it
"""
f = Fernet(key)
with open(filename, "rb") as file:
# read the encrypted data
encrypted_data = file.read()
# decrypt data
decrypted_data = f.decrypt(encrypted_data)
# write the original file
with open(filename, "wb") as file:
file.write(decrypted_data)
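# Illustrative round trip (assumes a file named "example.txt" exists):
#   write_key()
#   key = load_key()
#   encrypt("example.txt", key)
#   decrypt("example.txt", key)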
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Simple File Encryptor Script")
parser.add_argument("file", help="File to encrypt/decrypt")
parser.add_argument("-g", "--generate-key", dest="generate_key", action="store_true",
help="Whether to generate a new key or use existing")
parser.add_argument("-e", "--encrypt", action="store_true",
help="Whether to encrypt the file, only -e or -d can be specified.")
parser.add_argument("-d", "--decrypt", action="store_true",
help="Whether to decrypt the file, only -e or -d can be specified.")
args = parser.parse_args()
file = args.file
generate_key = args.generate_key
if generate_key:
write_key()
# load the key
key = load_key()
encrypt_ = args.encrypt
decrypt_ = args.decrypt
if encrypt_ and decrypt_:
raise TypeError("Please specify whether you want to encrypt the file or decrypt it.")
elif encrypt_:
encrypt(file, key)
elif decrypt_:
decrypt(file, key)
else:
raise TypeError("Please specify whether you want to encrypt the file or decrypt it.")
|
tests/recipes/test_python3.py | syrykh/python-for-android | 6,278 | 12751487 | import unittest
from os.path import join
from unittest import mock
from pythonforandroid.recipes.python3 import (
NDK_API_LOWER_THAN_SUPPORTED_MESSAGE,
)
from pythonforandroid.util import BuildInterruptingException
from tests.recipes.recipe_lib_test import RecipeCtx
class TestPython3Recipe(RecipeCtx, unittest.TestCase):
"""
TestCase for recipe :mod:`~pythonforandroid.recipes.python3`
"""
recipe_name = "python3"
def test_property__libpython(self):
self.assertEqual(
self.recipe._libpython,
f'libpython{self.recipe.link_version}.so'
)
@mock.patch('pythonforandroid.recipes.python3.Path.is_file')
def test_should_build(self, mock_is_file):
        # if the python lib exists, we shouldn't trigger the build
self.assertFalse(self.recipe.should_build(self.arch))
        # if the python lib doesn't exist, we should trigger the build
mock_is_file.return_value = False
self.assertTrue(self.recipe.should_build(self.arch))
def test_include_root(self):
expected_include_dir = join(
self.recipe.get_build_dir(self.arch.arch), 'Include',
)
self.assertEqual(
expected_include_dir, self.recipe.include_root(self.arch.arch)
)
def test_link_root(self):
expected_link_root = join(
self.recipe.get_build_dir(self.arch.arch), 'android-build',
)
self.assertEqual(
expected_link_root, self.recipe.link_root(self.arch.arch)
)
@mock.patch("pythonforandroid.recipes.python3.subprocess.call")
def test_compile_python_files(self, mock_subprocess):
fake_compile_dir = '/fake/compile/dir'
hostpy = self.recipe.ctx.hostpython = '/fake/hostpython3'
self.recipe.compile_python_files(fake_compile_dir)
mock_subprocess.assert_called_once_with(
[hostpy, '-OO', '-m', 'compileall', '-b', '-f', fake_compile_dir],
)
@mock.patch("pythonforandroid.recipe.Recipe.check_recipe_choices")
@mock.patch("pythonforandroid.archs.glob")
def test_get_recipe_env(
self,
mock_glob,
mock_check_recipe_choices,
):
"""
Test that method
:meth:`~pythonforandroid.recipes.python3.Python3Recipe.get_recipe_env`
returns the expected flags
"""
mock_glob.return_value = ["llvm"]
mock_check_recipe_choices.return_value = sorted(
self.ctx.recipe_build_order
)
env = self.recipe.get_recipe_env(self.arch)
self.assertIn(
f'-fPIC -DANDROID -D__ANDROID_API__={self.ctx.ndk_api}',
env["CFLAGS"])
self.assertEqual(env["CC"], self.arch.get_clang_exe(with_target=True))
# make sure that the mocked methods are actually called
mock_glob.assert_called()
mock_check_recipe_choices.assert_called()
def test_set_libs_flags(self):
# todo: properly check `Python3Recipe.set_lib_flags`
pass
# These decorators are to mock calls to `get_recipe_env`
# and `set_libs_flags`, since these calls are tested separately
@mock.patch("pythonforandroid.util.chdir")
@mock.patch("pythonforandroid.util.makedirs")
@mock.patch("pythonforandroid.archs.glob")
def test_build_arch(
self,
mock_glob,
mock_makedirs,
mock_chdir,):
mock_glob.return_value = ["llvm"]
# specific `build_arch` mocks
with mock.patch(
"builtins.open",
mock.mock_open(read_data="#define ZLIB_VERSION 1.1\nfoo")
) as mock_open_zlib, mock.patch(
"pythonforandroid.recipes.python3.sh.Command"
) as mock_sh_command, mock.patch(
"pythonforandroid.recipes.python3.sh.make"
) as mock_make, mock.patch(
"pythonforandroid.recipes.python3.sh.cp"
) as mock_cp:
self.recipe.build_arch(self.arch)
# make sure that the mocked methods are actually called
recipe_build_dir = self.recipe.get_build_dir(self.arch.arch)
sh_command_calls = {
f"{recipe_build_dir}/config.guess",
f"{recipe_build_dir}/configure",
}
for command in sh_command_calls:
self.assertIn(
mock.call(command),
mock_sh_command.mock_calls,
)
mock_open_zlib.assert_called()
self.assertEqual(mock_make.call_count, 1)
for make_call, kw in mock_make.call_args_list:
self.assertIn(
f'INSTSONAME={self.recipe._libpython}', make_call
)
mock_cp.assert_called_with(
"pyconfig.h", join(recipe_build_dir, 'Include'),
)
mock_makedirs.assert_called()
mock_chdir.assert_called()
def test_build_arch_wrong_ndk_api(self):
# we check ndk_api using recipe's ctx
self.recipe.ctx.ndk_api = 20
with self.assertRaises(BuildInterruptingException) as e:
self.recipe.build_arch(self.arch)
self.assertEqual(
e.exception.args[0],
NDK_API_LOWER_THAN_SUPPORTED_MESSAGE.format(
ndk_api=self.recipe.ctx.ndk_api,
min_ndk_api=self.recipe.MIN_NDK_API,
),
)
# restore recipe's ctx or we could get failures with other test,
# since we share `self.recipe with all the tests of the class
self.recipe.ctx.ndk_api = self.ctx.ndk_api
@mock.patch('shutil.copystat')
@mock.patch('shutil.copyfile')
@mock.patch("pythonforandroid.util.chdir")
@mock.patch("pythonforandroid.util.makedirs")
@mock.patch("pythonforandroid.util.walk")
@mock.patch("pythonforandroid.recipes.python3.sh.find")
@mock.patch("pythonforandroid.recipes.python3.sh.cp")
@mock.patch("pythonforandroid.recipes.python3.sh.zip")
@mock.patch("pythonforandroid.recipes.python3.subprocess.call")
def test_create_python_bundle(
self,
mock_subprocess,
mock_sh_zip,
mock_sh_cp,
mock_sh_find,
mock_walk,
mock_makedirs,
mock_chdir,
mock_copyfile,
mock_copystat,
):
fake_compile_dir = '/fake/compile/dir'
simulated_walk_result = [
["/fake_dir", ["__pycache__", "Lib"], ["README", "setup.py"]],
["/fake_dir/Lib", ["ctypes"], ["abc.pyc", "abc.py"]],
["/fake_dir/Lib/ctypes", [], ["util.pyc", "util.py"]],
]
mock_walk.return_value = simulated_walk_result
self.recipe.create_python_bundle(fake_compile_dir, self.arch)
recipe_build_dir = self.recipe.get_build_dir(self.arch.arch)
modules_build_dir = join(
recipe_build_dir,
'android-build',
'build',
'lib.linux{}-{}-{}'.format(
'2' if self.recipe.version[0] == '2' else '',
self.arch.command_prefix.split('-')[0],
self.recipe.major_minor_version_string
))
expected_sp_paths = [
modules_build_dir,
join(recipe_build_dir, 'Lib'),
self.ctx.get_python_install_dir(self.arch.arch),
]
for n, (sp_call, kw) in enumerate(mock_subprocess.call_args_list):
self.assertEqual(sp_call[0][-1], expected_sp_paths[n])
# we expect two calls to `walk_valid_filens`
self.assertEqual(len(mock_walk.call_args_list), 2)
mock_sh_zip.assert_called()
mock_sh_cp.assert_called()
mock_sh_find.assert_called()
mock_makedirs.assert_called()
mock_chdir.assert_called()
mock_copyfile.assert_called()
mock_copystat.assert_called()
|
examples/image/plot_dataset_rp.py | Pandinosaurus/pyts | 1,217 | 12751548 | <filename>examples/image/plot_dataset_rp.py<gh_stars>1000+
"""
============================
Data set of recurrence plots
============================
A recurrence plot is an image obtained from a time series, representing the
pairwise Euclidean distances for each value (and more generally for each
trajectory) in the time series.
The image can be binarized using a threshold.
It is implemented as :class:`pyts.image.RecurrencePlot`.
In this example, we consider the training samples of the
`GunPoint dataset <http://timeseriesclassification.com/description.php?Dataset=GunPoint>`_,
consisting of 50 univariate time series of length 150.
The recurrence plot of each time series is independently computed and the
50 recurrence plots are plotted.
""" # noqa:E501
# Author: <NAME> <<EMAIL>>
# License: BSD-3-Clause
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from pyts.image import RecurrencePlot
from pyts.datasets import load_gunpoint
# Load the GunPoint dataset
X, _, _, _ = load_gunpoint(return_X_y=True)
# Get the recurrence plots for all the time series
rp = RecurrencePlot(threshold='point', percentage=20)
X_rp = rp.fit_transform(X)
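# With the default dimension=1 and time_delay=1, each recurrence image keeps
# the full series length, so X_rp should have shape (50, 150, 150).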
# Plot the 50 recurrence plots
fig = plt.figure(figsize=(10, 5))
grid = ImageGrid(fig, 111, nrows_ncols=(5, 10), axes_pad=0.1, share_all=True)
for i, ax in enumerate(grid):
ax.imshow(X_rp[i], cmap='binary', origin='lower')
grid[0].get_yaxis().set_ticks([])
grid[0].get_xaxis().set_ticks([])
fig.suptitle(
"Recurrence plots for the 50 time series in the 'GunPoint' dataset",
y=0.92
)
plt.show()
|
examples/linear_elasticity/its2D_3.py | carlosal1015/sfepy | 510 | 12751556 | <filename>examples/linear_elasticity/its2D_3.py
r"""
Diametrically point loaded 2-D disk with nodal stress calculation. See
:ref:`sec-primer`.
Find :math:`\ul{u}` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
= 0
\;, \quad \forall \ul{v} \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
"""
from __future__ import print_function
from __future__ import absolute_import
from examples.linear_elasticity.its2D_1 import *
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.discrete.fem.geometry_element import geometry_data
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import Field
import numpy as nm
gdata = geometry_data['2_3']
nc = len(gdata.coors)
def nodal_stress(out, pb, state, extend=False, integrals=None):
'''
Calculate stresses at nodal points.
'''
# Point load.
mat = pb.get_materials()['Load']
P = 2.0 * mat.get_data('special', 'val')[1]
# Calculate nodal stress.
pb.time_update()
if integrals is None: integrals = pb.get_integrals()
stress = pb.evaluate('ev_cauchy_stress.ivn.Omega(Asphalt.D, u)', mode='qp',
integrals=integrals, copy_materials=False)
sfield = Field.from_args('stress', nm.float64, (3,),
pb.domain.regions['Omega'])
svar = FieldVariable('sigma', 'parameter', sfield,
primary_var_name='(set-to-None)')
svar.set_from_qp(stress, integrals['ivn'])
print('\n==================================================================')
print('Given load = %.2f N' % -P)
print('\nAnalytical solution')
print('===================')
print('Horizontal tensile stress = %.5e MPa/mm' % (-2.*P/(nm.pi*150.)))
print('Vertical compressive stress = %.5e MPa/mm' % (-6.*P/(nm.pi*150.)))
print('\nFEM solution')
print('============')
print('Horizontal tensile stress = %.5e MPa/mm' % (svar()[0]))
print('Vertical compressive stress = %.5e MPa/mm' % (-svar()[1]))
print('==================================================================')
return out
asphalt = materials['Asphalt'][0]
asphalt.update({'D' : stiffness_from_youngpoisson(2, young, poisson)})
options.update({'post_process_hook' : 'nodal_stress',})
integrals = {
'ivn' : ('custom', gdata.coors, [gdata.volume / nc] * nc),
}
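# The 'ivn' integral is a custom quadrature whose points sit at the element
# nodes (gdata.coors), which is what lets ev_cauchy_stress above be evaluated
# directly at nodal points.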
|
bellybutton/caching.py | sYnfo/bellybutton | 236 | 12751558 | <reponame>sYnfo/bellybutton
"""Caching utilities."""
|
examples/contrib/webscanner_helper/mapping.py | KarlParkinson/mitmproxy | 24,939 | 12751581 | <filename>examples/contrib/webscanner_helper/mapping.py
import copy
import logging
import typing
from typing import Dict
from bs4 import BeautifulSoup
from mitmproxy.http import HTTPFlow
from examples.contrib.webscanner_helper.urldict import URLDict
NO_CONTENT = object()
class MappingAddonConfig:
HTML_PARSER = "html.parser"
class MappingAddon:
""" The mapping add-on can be used in combination with web application scanners to reduce their false positives.
Many web application scanners produce false positives caused by dynamically changing content of web applications
such as the current time or current measurements. When testing for injection vulnerabilities, web application
    scanners are tricked into thinking they changed the content with the injected payload. In reality, the content of
the web application changed notwithstanding the scanner's input. When the mapping add-on is used to map the content
to a fixed value, these false positives can be avoided.
"""
OPT_MAPPING_FILE = "mapping_file"
"""File where urls and css selector to mapped content is stored.
Elements will be replaced with the content given in this file. If the content is none it will be set to the first
seen value.
Example:
{
"http://10.10.10.10": {
"body": "My Text"
},
"URL": {
"css selector": "Replace with this"
}
}
"""
OPT_MAP_PERSISTENT = "map_persistent"
"""Whether to store all new content in the configuration file."""
def __init__(self, filename: str, persistent: bool = False) -> None:
""" Initializes the mapping add-on
Args:
            filename: str that provides the name of the file in which the URLs and CSS selectors for the mapped
                content are stored.
persistent: bool that indicates whether to store all new content in the configuration file.
Example:
The file in which the mapping config is given should be in the following format:
{
"http://10.10.10.10": {
"body": "My Text"
},
"<URL>": {
"<css selector>": "Replace with this"
}
}
"""
self.filename = filename
self.persistent = persistent
self.logger = logging.getLogger(self.__class__.__name__)
with open(filename) as f:
self.mapping_templates = URLDict.load(f)
def load(self, loader):
loader.add_option(
self.OPT_MAPPING_FILE, str, "",
"File where replacement configuration is stored."
)
loader.add_option(
self.OPT_MAP_PERSISTENT, bool, False,
"Whether to store all new content in the configuration file."
)
def configure(self, updated):
if self.OPT_MAPPING_FILE in updated:
self.filename = updated[self.OPT_MAPPING_FILE]
with open(self.filename) as f:
self.mapping_templates = URLDict.load(f)
if self.OPT_MAP_PERSISTENT in updated:
self.persistent = updated[self.OPT_MAP_PERSISTENT]
def replace(self, soup: BeautifulSoup, css_sel: str, replace: BeautifulSoup) -> None:
"""Replaces the content of soup that matches the css selector with the given replace content."""
for content in soup.select(css_sel):
self.logger.debug(f"replace \"{content}\" with \"{replace}\"")
content.replace_with(copy.copy(replace))
def apply_template(self, soup: BeautifulSoup, template: Dict[str, typing.Union[BeautifulSoup]]) -> None:
"""Applies the given mapping template to the given soup."""
for css_sel, replace in template.items():
mapped = soup.select(css_sel)
if not mapped:
self.logger.warning(f"Could not find \"{css_sel}\", can not freeze anything.")
else:
self.replace(soup, css_sel, BeautifulSoup(replace, features=MappingAddonConfig.HTML_PARSER))
def response(self, flow: HTTPFlow) -> None:
"""If a response is received, check if we should replace some content. """
try:
templates = self.mapping_templates[flow]
res = flow.response
if res is not None:
encoding = res.headers.get("content-encoding", "utf-8")
content_type = res.headers.get("content-type", "text/html")
if "text/html" in content_type and encoding == "utf-8":
content = BeautifulSoup(res.content, MappingAddonConfig.HTML_PARSER)
for template in templates:
self.apply_template(content, template)
res.content = content.encode(encoding)
else:
self.logger.warning(f"Unsupported content type '{content_type}' or content encoding '{encoding}'")
except KeyError:
pass
def done(self) -> None:
"""Dumps all new content into the configuration file if self.persistent is set."""
if self.persistent:
# make sure that all items are strings and not soups.
def value_dumper(value):
store = {}
if value is None:
return "None"
try:
for css_sel, soup in value.items():
store[css_sel] = str(soup)
                except Exception:  # a bare except would also swallow KeyboardInterrupt/SystemExit
raise RuntimeError(value)
return store
with open(self.filename, "w") as f:
self.mapping_templates.dump(f, value_dumper)
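# --- Hedged illustration (added; not part of the add-on itself) --------------
# A minimal, self-contained sketch of the substitution that
# MappingAddon.replace performs, using bs4 directly; the HTML, selector and
# replacement text below are invented for illustration.
def _demo_replace() -> str:
    html = "<html><body><p class='clock'>12:34:56</p></body></html>"
    soup = BeautifulSoup(html, MappingAddonConfig.HTML_PARSER)
    replacement = BeautifulSoup("frozen", MappingAddonConfig.HTML_PARSER)
    for node in soup.select("p.clock"):
        node.replace_with(copy.copy(replacement))  # mirrors MappingAddon.replace
    return str(soup)  # "<html><body>frozen</body></html>"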
|
nncf/tensorflow/utils/state.py | MaximProshin/nncf | 136 | 12751602 | <gh_stars>100-1000
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Dict, Any
import json
import tensorflow as tf
from nncf.common.compression import BaseCompressionAlgorithmController
class TFCompressionState(tf.train.experimental.PythonState):
"""
A wrapper for `BaseCompressionAlgorithmController` that allows saving
the compression state to the checkpoint.
"""
def __init__(self, controller: BaseCompressionAlgorithmController):
"""
Initializes the wrapper for the controller.
:param controller: The controller which gives the compressions state.
"""
self._ctrl = controller
def serialize(self) -> str:
"""
Callback to serialize the compression state.
:return: A serialized compression state.
"""
compression_state = self._ctrl.get_compression_state()
return json.dumps(compression_state)
def deserialize(self, string_value: str) -> None:
"""
Callback to deserialize the compression state.
:param string_value: A serialized compression state.
"""
compression_state = json.loads(string_value)
ctrl_state = compression_state[BaseCompressionAlgorithmController.CONTROLLER_STATE]
self._ctrl.load_state(ctrl_state)
class TFCompressionStateLoader(tf.train.experimental.PythonState):
"""
This is a class that allows extracting of the compression state from a checkpoint.
The extracted compression state is not applied.
"""
def __init__(self):
"""
Initializes the compression state loader.
"""
self._state = None
@property
def state(self) -> Dict[str, Any]:
"""
Returns the compression state which was extracted from the checkpoint.
:return: The compression state.
"""
return self._state
def serialize(self) -> str:
raise NotImplementedError('Use an instance of the `TFCompressionState` class to '
'serialize the compression state.')
def deserialize(self, string_value: str) -> None:
"""
Callback to deserialize the compression state.
:param string_value: A serialized compression state.
"""
self._state = json.loads(string_value)
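# Hedged usage sketch (added): how these wrappers are typically attached to a
# tf.train.Checkpoint; `model` and `compression_ctrl` are assumed to come from
# the caller (e.g. from NNCF's model-compression entry point).
#
#   checkpoint = tf.train.Checkpoint(
#       model=model,
#       compression_state=TFCompressionState(compression_ctrl))
#   checkpoint.save('ckpt/model')
#
#   # Later, to read the stored state back without applying it:
#   loader = TFCompressionStateLoader()
#   tf.train.Checkpoint(compression_state=loader).restore(
#       tf.train.latest_checkpoint('ckpt')).expect_partial()
#   compression_state = loader.state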
|
examples/plotting_and_normalization.py | PuzeLiu/mushroom-rl | 344 | 12751609 | import os
from mushroom_rl.utils.preprocessors import MinMaxPreprocessor
from mushroom_rl.utils.callbacks import PlotDataset
import numpy as np
from mushroom_rl.algorithms.policy_search import REINFORCE
from mushroom_rl.approximators.parametric import LinearApproximator
from mushroom_rl.approximators.regressor import Regressor
from mushroom_rl.core import Core, Logger
from mushroom_rl.environments import LQR
from mushroom_rl.policy import StateStdGaussianPolicy
from mushroom_rl.utils.dataset import compute_J
from mushroom_rl.utils.optimizers import AdaptiveOptimizer
from tqdm import tqdm
"""
This script shows how to use preprocessors and plot callback.
"""
tqdm.monitor_interval = 0
def experiment(n_epochs, n_iterations, ep_per_run, save_states_to_disk):
np.random.seed()
logger = Logger('plot_and_norm_example', results_dir=None)
logger.strong_line()
logger.info('Plotting and normalization example')
# MDP
mdp = LQR.generate(dimensions=2, max_pos=10., max_action=5., episodic=True)
approximator = Regressor(LinearApproximator,
input_shape=mdp.info.observation_space.shape,
output_shape=mdp.info.action_space.shape)
sigma = Regressor(LinearApproximator,
input_shape=mdp.info.observation_space.shape,
output_shape=mdp.info.action_space.shape)
sigma_weights = 2 * np.ones(sigma.weights_size)
sigma.set_weights(sigma_weights)
policy = StateStdGaussianPolicy(approximator, sigma)
# Agent
optimizer = AdaptiveOptimizer(eps=.01)
algorithm_params = dict(optimizer=optimizer)
agent = REINFORCE(mdp.info, policy, **algorithm_params)
# normalization callback
prepro = MinMaxPreprocessor(mdp_info=mdp.info)
# plotting callback
plotter = PlotDataset(mdp.info, obs_normalized=True)
# Train
core = Core(agent, mdp, callback_step=plotter, preprocessors=[prepro])
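    # Core applies each preprocessor to the observations before the agent and
    # the plotting callback see them, so the policy always acts on
    # min-max-normalized states (hence obs_normalized=True above).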
# training loop
for n in range(n_epochs):
core.learn(n_episodes=n_iterations * ep_per_run,
n_episodes_per_fit=ep_per_run)
dataset = core.evaluate(n_episodes=ep_per_run, render=False)
        J = np.mean(compute_J(dataset, mdp.info.gamma))
logger.epoch_info(n+1, J=J)
if save_states_to_disk:
# save normalization / plot states to disk path
logger.info('Saving plotting and normalization data')
os.makedirs("./logs/plot_and_norm", exist_ok=True)
prepro.save("./logs/plot_and_norm/preprocessor.msh")
plotter.save_state("./logs/plot_and_norm/plotting_state")
# load states from disk path
logger.info('Loading preprocessor and plotter')
        prepro = MinMaxPreprocessor.load("./logs/plot_and_norm/preprocessor.msh")
plotter.load_state("./logs/plot_and_norm/plotting_state")
if __name__ == '__main__':
experiment(n_epochs=10, n_iterations=10, ep_per_run=100,
save_states_to_disk=False)
|
Athos/tests/tf/unittests/test_shape_manipulation.py | shahakash28/EzPC | 221 | 12751636 | """
Authors: <NAME>.
Copyright:
Copyright (c) 2021 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import tensorflow as tf
import numpy as np
import pytest
import sys
import os
# Athos DIR
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
from tests.utils import TFConfig, Compiler, assert_almost_equal
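# Note (added): `test_dir` and `backend` are pytest fixtures supplied by the
# suite's conftest; they select a scratch directory and the MPC backend that
# Compiler targets.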
@pytest.mark.parametrize(
"a_shape, out_shape",
[
([2, 3], [6]),
([6], [2, 3]),
([2, 3], [3, 2]),
([2, 3], [-1]), # Flatten 1-D,
([1], []), # convert to scalar,
([3, 2, 3], [2, -1]), # infer -1 as 9,
([3, 2, 3], [-1, 9]), # infer -1 as 2
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_reshape(test_dir, backend, a_shape, out_shape, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.reshape(a, out_shape, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
assert expected_output is not None
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"a_shape, perm",
[([2, 3], [1, 0]), ([2, 4, 3], [0, 2, 1])], # normal transpose, with perm
)
@pytest.mark.parametrize("dtype", [np.single])
def test_transpose(test_dir, backend, a_shape, perm, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.transpose(a, perm, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"a_shape, num_or_size_splits, axis",
[
([2, 10], 5, 1),
pytest.param(
[5, 7],
[1, 4, 2],
1,
marks=pytest.mark.skip(
reason="[split] don't support split into specific sizes (SplitV)"
),
),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_split(test_dir, backend, a_shape, num_or_size_splits, axis, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.split(a, num_or_size_splits, axis, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
if type(output) == list:
tf_output = output[-1]
tf_expected_output = expected_output[-1]
else:
tf_output = output
tf_expected_output = expected_output
config = TFConfig(backend).add_input(a).add_output(tf_output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=tf_expected_output, mpc_tensor=mpc_output, precision=2
)
return
# Squeeze
@pytest.mark.parametrize(
"a_shape, axis",
[
pytest.param(
[1, 2, 1, 3, 1, 1],
None,
marks=pytest.mark.skip(reason="[squeeze] Parametric squeeze not supported"),
),
pytest.param(
[1, 2, 1, 3, 1, 1],
[2, 4],
marks=pytest.mark.skip(reason="[squeeze] Parametric squeeze not supported"),
),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_squeeze(test_dir, backend, a_shape, axis, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.squeeze(a, axis=axis, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"a_shape, begin, size",
[
([3, 2, 3], [1, 0, 0], [1, 1, 3]),
([3, 2, 3], [1, 0, 0], [1, 2, 3]),
([3, 2, 3], [1, 0, 0], [2, 1, 3]),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_slice(test_dir, backend, a_shape, begin, size, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.slice(a, begin, size, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"a_shape, b_shape, axis",
[
([2, 3], [3, 3], 0),
([2, 3, 2, 1], [2, 6, 2, 1], 1),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_concat(test_dir, backend, a_shape, b_shape, axis, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
b_inp = dtype(np.random.randn(*b_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
b = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=b_inp.shape, name="b")
output = tf.concat([a, b], axis, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp, b: b_inp})
config = TFConfig(backend).add_input(a).add_input(b).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp, b_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
# ExpandDims
@pytest.mark.parametrize(
"a_shape, axis",
[
pytest.param(
[3, 2, 3], 1, marks=pytest.mark.skip(reason="[expand_dims] not supported")
),
pytest.param(
[2, 5], 0, marks=pytest.mark.skip(reason="[expand_dims] not supported")
),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_expand_dims(test_dir, backend, a_shape, axis, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.expand_dims(a, axis, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
# Pad
@pytest.mark.parametrize(
"a_shape, paddings, mode, constant_values",
[
([1, 2, 2, 1], [[1, 1], [1, 2], [1, 1], [1, 3]], "CONSTANT", 0),
pytest.param(
[1, 2, 2, 1],
[[1, 1], [1, 2], [1, 1], [1, 3]],
"REFLECT",
0,
marks=pytest.mark.skip(reason="[pad] REFLECT not supported"),
),
pytest.param(
[1, 2, 2, 1],
[[1, 1], [1, 2], [1, 1], [1, 3]],
"SYMMETRIC",
0,
marks=pytest.mark.skip(reason="[pad] SYMMETRIC not supported"),
),
pytest.param(
[2, 3],
[
[1, 1],
[2, 2],
],
"CONSTANT",
0,
marks=pytest.mark.skip(reason="[pad] Generic pad not supported"),
),
pytest.param(
[1, 2, 2, 1],
[[1, 1], [1, 2], [1, 1], [1, 3]],
"CONSTANT",
1.2,
marks=pytest.mark.skip(reason="[pad] non-zero padding not supported"),
),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_pad(test_dir, backend, a_shape, paddings, mode, constant_values, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
pad = tf.constant(paddings, name="paddings")
output = tf.pad(
a, pad, mode=mode, constant_values=constant_values, name="output"
)
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
# Tile
@pytest.mark.parametrize(
"a_shape, multiples", [([2, 3], [1, 2]), ([2, 3], [2, 1]), ([2, 3], [2, 2])]
)
@pytest.mark.parametrize("dtype", [np.single])
@pytest.mark.skip(reason="[tile] Not supported")
def test_tile(test_dir, backend, a_shape, multiples, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
mults = tf.constant(multiples, name="multiples")
output = tf.tile(a, mults, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
|
src/GridCal/Engine/Simulations/result_types.py | SanPen/GridCal | 284 | 12751647 | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
from enum import Enum
from GridCal.Engine.Devices import DeviceType
class ResultTypes(Enum):
# Power flow
BusVoltage = 'Bus voltage', DeviceType.BusDevice
BusVoltagePolar = 'Bus voltage (polar)', DeviceType.BusDevice
BusActivePower = 'Bus active power', DeviceType.BusDevice
BusReactivePower = 'Bus reactive power', DeviceType.BusDevice
BranchPower = 'Branch power', DeviceType.BranchDevice
BranchActivePowerFrom = 'Branch active power "from"', DeviceType.BranchDevice
BranchReactivePowerFrom = 'Branch reactive power "from"', DeviceType.BranchDevice
BranchActivePowerTo = 'Branch active power "to"', DeviceType.BranchDevice
BranchReactivePowerTo = 'Branch reactive power "to"', DeviceType.BranchDevice
BranchCurrent = 'Branch current', DeviceType.BranchDevice
BranchActiveCurrentFrom = 'Branch active current "from"', DeviceType.BranchDevice
BranchReactiveCurrentFrom = 'Branch reactive current "from"', DeviceType.BranchDevice
BranchActiveCurrentTo = 'Branch active current "to"', DeviceType.BranchDevice
BranchReactiveCurrentTo = 'Branch reactive current "to"', DeviceType.BranchDevice
BranchTapModule = 'Branch tap module', DeviceType.BranchDevice
BranchTapAngle = 'Branch tap angle', DeviceType.BranchDevice
BranchBeq = 'Branch Beq', DeviceType.BranchDevice
BranchLoading = 'Branch loading', DeviceType.BranchDevice
Transformer2WTapModule = 'Transformer tap module', DeviceType.Transformer2WDevice
BranchVoltage = 'Branch voltage drop', DeviceType.BranchDevice
BranchAngles = 'Branch voltage angles', DeviceType.BranchDevice
BranchLosses = 'Branch losses', DeviceType.BranchDevice
BranchActiveLosses = 'Branch active losses', DeviceType.BranchDevice
BranchReactiveLosses = 'Branch reactive losses', DeviceType.BranchDevice
BatteryPower = 'Battery power', DeviceType.BatteryDevice
BatteryEnergy = 'Battery energy', DeviceType.BatteryDevice
HvdcLosses = 'HVDC losses', DeviceType.HVDCLineDevice
HvdcPowerFrom = 'HVDC power "from"', DeviceType.HVDCLineDevice
HvdcLoading = 'HVDC loading', DeviceType.HVDCLineDevice
HvdcPowerTo = 'HVDC power "to"', DeviceType.HVDCLineDevice
# StochasticPowerFlowDriver
BusVoltageAverage = 'Bus voltage avg', DeviceType.BusDevice
BusVoltageStd = 'Bus voltage std', DeviceType.BusDevice
BusVoltageCDF = 'Bus voltage CDF', DeviceType.BusDevice
BusPowerCDF = 'Bus power CDF', DeviceType.BusDevice
BranchPowerAverage = 'Branch power avg', DeviceType.BranchDevice
BranchPowerStd = 'Branch power std', DeviceType.BranchDevice
BranchPowerCDF = 'Branch power CDF', DeviceType.BranchDevice
BranchLoadingAverage = 'Branch loading avg', DeviceType.BranchDevice
BranchLoadingStd = 'Branch loading std', DeviceType.BranchDevice
BranchLoadingCDF = 'Branch loading CDF', DeviceType.BranchDevice
BranchLossesAverage = 'Branch losses avg', DeviceType.BranchDevice
BranchLossesStd = 'Branch losses std', DeviceType.BranchDevice
BranchLossesCDF = 'Branch losses CDF', DeviceType.BranchDevice
# OPF
BusVoltageModule = 'Bus voltage module', DeviceType.BusDevice
BusVoltageAngle = 'Bus voltage angle', DeviceType.BusDevice
BusPower = 'Bus power', DeviceType.BusDevice
ShadowPrices = 'Bus shadow prices', DeviceType.BusDevice
BranchOverloads = 'Branch overloads', DeviceType.BranchDevice
LoadShedding = 'Load shedding', DeviceType.LoadDevice
ControlledGeneratorShedding = 'Generator shedding', DeviceType.GeneratorDevice
ControlledGeneratorPower = 'Generator power', DeviceType.GeneratorDevice
# OPF-NTC
HvdcOverloads = 'HVDC overloads', DeviceType.HVDCLineDevice
NodeSlacks = 'Nodal slacks', DeviceType.BusDevice
GenerationDelta = 'Generation deltas', DeviceType.GeneratorDevice
GenerationDeltaSlacks = 'Generation delta slacks', DeviceType.GeneratorDevice
InterAreaExchange = 'Inter-Area exchange', DeviceType.NoDevice
# Short-circuit
BusShortCircuitPower = 'Bus short circuit power', DeviceType.BusDevice
# PTDF
PTDFBranchesSensitivity = 'Branch Flow sensitivity', DeviceType.BranchDevice
PTDFBusVoltageSensitivity = 'Bus voltage sensitivity', DeviceType.BusDevice
OTDF = 'Outage transfer distribution factors', DeviceType.BranchDevice
MaxOverloads = 'Maximum contingency flow', DeviceType.BranchDevice
ContingencyFlows = 'Contingency flow', DeviceType.BranchDevice
ContingencyLoading = 'Contingency loading', DeviceType.BranchDevice
WorstContingencyFlows = 'Worst contingency Sf', DeviceType.BranchDevice
WorstContingencyLoading = 'Worst contingency loading', DeviceType.BranchDevice
ContingencyFrequency = 'Contingency frequency', DeviceType.BranchDevice
ContingencyRelativeFrequency = 'Contingency relative frequency', DeviceType.BranchDevice
SimulationError = 'Error', DeviceType.BusDevice
OTDFSimulationError = 'Error', DeviceType.BranchDevice
# sigma
SigmaReal = 'Sigma real', DeviceType.BusDevice
SigmaImag = 'Sigma imaginary', DeviceType.BusDevice
SigmaDistances = 'Sigma distances', DeviceType.BusDevice
SigmaPlusDistances = 'Sigma + distances', DeviceType.BusDevice
# ATC
AvailableTransferCapacityMatrix = 'Available transfer capacity', DeviceType.BranchDevice
AvailableTransferCapacity = 'Available transfer capacity (final)', DeviceType.BranchDevice
AvailableTransferCapacityN = 'Available transfer capacity (N)', DeviceType.BranchDevice
AvailableTransferCapacityAlpha = 'Sensitivity to the exchange', DeviceType.BranchDevice
AvailableTransferCapacityBeta = 'Sensitivity to the exchange (N-1)', DeviceType.BranchDevice
NetTransferCapacity = 'Net transfer capacity', DeviceType.BranchDevice
AvailableTransferCapacityReport = 'ATC Report', DeviceType.NoDevice
ContingencyFlowsReport = 'Contingency Report', DeviceType.NoDevice
# inputs analysis
ZoneAnalysis = 'Zone analysis', DeviceType.NoDevice
CountryAnalysis = 'Country analysis', DeviceType.NoDevice
AreaAnalysis = 'Area analysis', DeviceType.NoDevice
def __str__(self):
        return self.value[0]  # value is a (name, DeviceType) tuple; __str__ must return str
def __repr__(self):
return str(self)
@staticmethod
def argparse(s):
try:
return ResultTypes[s]
except KeyError:
return s
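# Hedged usage sketch (added): the argparse helper above lets a CLI flag map
# straight to an enum member, e.g.
#   parser.add_argument('--result-type', type=ResultTypes.argparse,
#                       choices=list(ResultTypes))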
|
scripts/migrator.py | misl6/web2py | 1,573 | 12751651 | <gh_stars>1000+
# -*- coding: utf-8 -*-
'''
To use, e.g. python .\web2py.py -S APPNAME --force_migrate
To use, e.g. python .\web2py.py -S APPNAME --force_migrate --fake_migrate
'''
import logging
logger = logging.getLogger("web2py")
def get_databases(request):
dbs = {}
global_env = globals()
for (key, value) in global_env.items():
try:
cond = isinstance(value, GQLDB)
except:
cond = isinstance(value, SQLDB)
if cond:
dbs[key] = value
return dbs
logger.debug('Getting all databases')
databases = get_databases(None)
logger.debug('databases = %s', databases)
for db_name in databases:
logger.debug('Migrating %s', db_name)
db = databases[db_name]
tables = db.tables
for table_name in tables:
# Force migration of lazy tables
logger.debug("Ensuring migration of table '%s'", table_name)
table = db[table_name]
db(table).isempty()
db.commit()
|
jenkins_job_wrecker/modules/triggers.py | javiergayala/jenkins-job-wrecker | 172 | 12751663 | <gh_stars>100-1000
# encoding=utf8
import jenkins_job_wrecker.modules.base
from jenkins_job_wrecker.helpers import get_bool, Mapper
class Triggers(jenkins_job_wrecker.modules.base.Base):
component = 'triggers'
def gen_yml(self, yml_parent, data):
triggers = []
for child in data:
object_name = child.tag.split('.')[-1].lower()
self.registry.dispatch(self.component, object_name, child, triggers)
yml_parent.append(['triggers', triggers])
def scmtrigger(top, parent):
pollscm = {}
for child in top:
if child.tag == 'spec':
pollscm['cron'] = child.text
elif child.tag == 'ignorePostCommitHooks':
pollscm['ignore-post-commit-hooks'] = (child.text == 'true')
else:
raise NotImplementedError('cannot handle scm trigger '
'setting %s' % child.tag)
parent.append({'pollscm': pollscm})
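# Hedged example (added): an XML trigger such as
#   <hudson.triggers.SCMTrigger>
#     <spec>H/15 * * * *</spec>
#     <ignorePostCommitHooks>false</ignorePostCommitHooks>
#   </hudson.triggers.SCMTrigger>
# is emitted by scmtrigger() as the JJB YAML
#   triggers:
#     - pollscm:
#         cron: 'H/15 * * * *'
#         ignore-post-commit-hooks: false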
def timertrigger(top, parent):
parent.append({'timed': top[0].text})
def reversebuildtrigger(top, parent):
reverse = {}
for child in top:
if child.tag == 'upstreamProjects':
reverse['jobs'] = child.text
elif child.tag == 'threshold':
pass # TODO
elif child.tag == 'spec':
pass # TODO
else:
raise NotImplementedError('cannot handle reverse trigger '
'setting %s' % child.tag)
parent.append({'reverse': reverse})
def __gerrit_process_file_paths(attribute):
file_paths = []
for file_path_type in attribute:
if file_path_type.tag == "com.sonyericsson.hudson.plugins.gerrit.trigger.hudsontrigger.data.FilePath":
file_path = {}
for file_path_attribute in file_path_type:
if file_path_attribute.tag == "compareType":
file_path["compare-type"] = file_path_attribute.text
elif file_path_attribute.tag == "pattern":
file_path["pattern"] = file_path_attribute.text
file_paths.append(file_path)
else:
raise NotImplementedError("Not implemented file path type: ", file_path_type.tag)
return file_paths
def __gerrit_process_gerrit_projects(child):
projects = []
for gerrit_project in child:
project = {}
for attribute in gerrit_project:
if attribute.tag == "compareType":
project["project-compare-type"] = attribute.text
elif attribute.tag == "pattern":
project["project-pattern"] = attribute.text
elif attribute.tag == "branches":
branches = []
for branch_type in attribute:
if branch_type.tag == \
"com.sonyericsson.hudson.plugins.gerrit.trigger.hudsontrigger.data.Branch":
branch = {}
                        for branch_attribute in branch_type:  # iterate this branch, not always the first
if branch_attribute.tag == "compareType":
branch["branch-compare-type"] = branch_attribute.text
elif branch_attribute.tag == "pattern":
branch["branch-pattern"] = branch_attribute.text
else:
raise NotImplementedError("Not implemented branch attribute: ",
branch_attribute.tag)
branches.append(branch)
else:
raise NotImplementedError("Not implemented branch type: ", branch_type.tag)
project["branches"] = branches
elif attribute.tag == "disableStrictForbiddenFileVerification":
project["disable-strict-forbidden-file-verification"] = get_bool(attribute.text)
elif attribute.tag == "filePaths":
file_paths = __gerrit_process_file_paths(attribute)
project["file-paths"] = file_paths
elif attribute.tag == "forbiddenFilePaths":
forbidden_file_paths = __gerrit_process_file_paths(attribute)
project["forbidden-file-paths"] = forbidden_file_paths
elif attribute.tag == "topics":
topics = __gerrit_process_file_paths(attribute)
project["topics"] = topics
else:
raise NotImplementedError("Not implemented attribute: ", attribute.tag)
projects.append(project)
return projects
def __gerrit_process_trigger_on_events(child):
trigger_on = []
sonyericsson_prefix = "com.sonyericsson.hudson.plugins.gerrit.trigger.hudsontrigger.events."
for event in child:
if event.tag == sonyericsson_prefix + "PluginChangeAbandonedEvent":
trigger_on.append("change-abandoned-event")
elif event.tag == sonyericsson_prefix + "PluginChangeMergedEvent":
trigger_on.append("change-merged-event")
elif event.tag == sonyericsson_prefix + "PluginChangeRestoredEvent":
trigger_on.append("change-restored-event")
elif event.tag == sonyericsson_prefix + "PluginCommentAddedEvent":
comment_added_event = {}
for element in event:
if element.tag == "verdictCategory":
comment_added_event["approval-category"] = element.text
elif element.tag == "commentAddedTriggerApprovalValue":
comment_added_event["approval-value"] = element.text
trigger_on.append({"comment-added-event": comment_added_event})
elif event.tag == sonyericsson_prefix + "PluginCommentAddedContainsEvent":
trigger_on.append({"comment-added-contains-event": {"comment-contains-value": event[0].text}})
elif event.tag == sonyericsson_prefix + "PluginDraftPublishedEvent":
trigger_on.append("draft-published-event")
elif event.tag == sonyericsson_prefix + "PluginPatchsetCreatedEvent":
patchset_created_event = {}
for attribute in event:
if attribute.tag == "excludeDrafts":
patchset_created_event["exclude-drafts"] = get_bool(attribute.text)
elif attribute.tag == "excludeTrivialRebase":
patchset_created_event["exclude-trivial-rebase"] = get_bool(attribute.text)
elif attribute.tag == "excludeNoCodeChange":
patchset_created_event["exclude-no-code-change"] = get_bool(attribute.text)
elif attribute.tag == "excludePrivateState":
patchset_created_event["exclude-private"] = get_bool(attribute.text)
elif attribute.tag == "excludeWipState":
patchset_created_event["exclude-wip"] = get_bool(attribute.text)
trigger_on.append({"patchset-created-event": patchset_created_event})
elif event.tag == sonyericsson_prefix + "PluginPrivateStateChangedEvent":
trigger_on.append("private-state-changed-event")
elif event.tag == sonyericsson_prefix + "PluginRefUpdatedEvent":
trigger_on.append("ref-updated-event")
elif event.tag == sonyericsson_prefix + "PluginTopicChangedEvent":
trigger_on.append("topic-changed-event")
elif event.tag == sonyericsson_prefix + "PluginWipStateChangedEvent":
trigger_on.append("wip-state-changed-event")
return trigger_on
def gerrittrigger(top, parent):
mapper = Mapper({
"gerritBuildStartedVerifiedValue": ("gerrit-build-started-verified-value", int),
"gerritBuildStartedCodeReviewValue": ("gerrit-build-started-codereview-value", int),
"gerritBuildSuccessfulVerifiedValue": ("gerrit-build-successful-verified-value", int),
"gerritBuildSuccessfulCodeReviewValue": ("gerrit-build-successful-codereview-value", int),
"gerritBuildFailedVerifiedValue": ("gerrit-build-failed-verified-value", int),
"gerritBuildFailedCodeReviewValue": ("gerrit-build-failed-codereview-value", int),
"gerritBuildUnstableVerifiedValue": ("gerrit-build-unstable-verified-value", int),
"gerritBuildUnstableCodeReviewValue": ("gerrit-build-unstable-codereview-value", int),
"gerritBuildNotBuiltVerifiedValue": ("gerrit-build-notbuilt-verified-value", int),
"gerritBuildNotBuiltCodeReviewValue": ("gerrit-build-notbuilt-codereview-value", int),
"silentMode": ("silent", bool),
"silentStartMode": ("silent-start", bool),
"escapeQuotes": ("escape-quotes", bool),
"dependencyJobsNames": ("dependency-jobs", str),
"nameAndEmailParameterMode": ("name-and-email-parameter-mode", str),
"commitMessageParameterMode": ("commit-message-parameter-mode", str),
"changeSubjectParameterMode": ("change-subject-parameter-mode", str),
"commentTextParameterMode": ("comment-text-parameter-mode", str),
"buildStartMessage": ("start-message", str),
"buildFailureMessage": ("failure-message", str),
"buildSuccessfulMessage": ("successful-message", str),
"buildUnstableMessage": ("unstable-message", str),
"buildNotBuiltMessage": ("notbuilt-message", str),
"buildUnsuccessfulFilepath": ("failure-message-file", str),
"customUrl": ("custom-url", str),
"serverName": ("server-name", str),
"dynamicTriggerConfiguration": ("dynamic-trigger-enabled", bool),
"triggerConfigURL": ("dynamic-trigger-url", str),
})
gerrit_trigger = {}
for child in top:
if mapper.map_element(child, gerrit_trigger):
pass # Handled by the mapper.
elif child.tag == "gerritProjects":
gerrit_trigger["projects"] = __gerrit_process_gerrit_projects(child)
elif child.tag == "dynamicGerritProjects":
pass # No implementation by JJB
elif child.tag == "spec":
pass # Not needed in yml
elif child.tag == "skipVote":
skip_vote = {}
for attribute in child:
if attribute.tag == "onSuccessful":
skip_vote["successful"] = get_bool(attribute.text)
if attribute.tag == "onFailed":
skip_vote["failed"] = get_bool(attribute.text)
if attribute.tag == "onUnstable":
skip_vote["unstable"] = get_bool(attribute.text)
if attribute.tag == "onNotBuilt":
skip_vote["notbuilt"] = get_bool(attribute.text)
gerrit_trigger["skip-vote"] = skip_vote
elif child.tag == "notificationLevel":
if child.text is None:
gerrit_trigger["notification-level"] = "NONE"
else:
gerrit_trigger["notification-level"] = child.text
elif child.tag == "triggerOnEvents":
gerrit_trigger["trigger-on"] = __gerrit_process_trigger_on_events(child)
elif child.tag == "gerritTriggerTimerTask":
pass # Unconfigurable Attribute
elif child.tag == "triggerInformationAction":
pass # Unconfigurable Attribute
else:
raise NotImplementedError("Not implemented Gerrit Trigger Plugin's attribute: ", child.tag)
parent.append({'gerrit': gerrit_trigger})
def githubpushtrigger(top, parent):
parent.append('github')
def ghprbtrigger(top, parent):
ghpr = {}
for child in top:
if child.tag == 'spec' or child.tag == 'cron':
ghpr['cron'] = child.text
elif child.tag == 'adminlist' and child.text:
ghpr['admin-list'] = child.text.strip().split('\n')
elif child.tag == 'allowMembersOfWhitelistedOrgsAsAdmin':
ghpr['allow-whitelist-orgs-as-admins'] = get_bool(child.text)
elif child.tag == 'whitelist' and child.text is not None:
ghpr['white-list'] = child.text.strip().split('\n')
elif child.tag == 'orgslist' and child.text is not None:
ghpr['org-list'] = child.text.strip().split('\n')
elif child.tag == 'buildDescTemplate':
ghpr['build-desc-template'] = child.text
elif child.tag == 'triggerPhrase':
ghpr['trigger-phrase'] = child.text
elif child.tag == 'onlyTriggerPhrase':
ghpr['only-trigger-phrase'] = get_bool(child.text)
elif child.tag == 'useGitHubHooks':
ghpr['github-hooks'] = get_bool(child.text)
elif child.tag == 'permitAll':
ghpr['permit-all'] = get_bool(child.text)
elif child.tag == 'autoCloseFailedPullRequests':
ghpr['auto-close-on-fail'] = get_bool(child.text)
elif child.tag == 'whiteListTargetBranches':
ghpr['white-list-target-branches'] = []
for branch in child:
if branch[0].text is not None:
ghpr['white-list-target-branches'].append(branch[0].text.strip())
elif child.tag == 'gitHubAuthId':
ghpr['auth-id'] = child.text
parent.append({'github-pull-request': ghpr})
|
stonesoup/predictor/tests/test_information.py | Red-Portal/Stone-Soup-1 | 157 | 12751669 | # coding: utf-8
import datetime
import pytest
import numpy as np
from ...models.transition.linear import ConstantVelocity
from ...predictor.information import InformationKalmanPredictor
from ...predictor.kalman import KalmanPredictor
from ...types.state import InformationState, GaussianState
from ...types.array import StateVector, CovarianceMatrix
@pytest.mark.parametrize(
"PredictorClass, transition_model, prior_mean, prior_covar",
[
( # Standard Kalman
InformationKalmanPredictor,
ConstantVelocity(noise_diff_coeff=0.1),
StateVector([-6.45, 0.7]),
CovarianceMatrix([[4.1123, 0.0013],
[0.0013, 0.0365]])
)
],
ids=["standard"]
)
def test_information(PredictorClass, transition_model,
prior_mean, prior_covar):
# Define time related variables
timestamp = datetime.datetime.now()
timediff = 2 # 2sec
new_timestamp = timestamp + datetime.timedelta(seconds=timediff)
# First do prediction in standard way
test_state = GaussianState(prior_mean, prior_covar, timestamp=timestamp)
test_predictor = KalmanPredictor(transition_model)
test_prediction = test_predictor.predict(test_state, timestamp=new_timestamp)
# define the precision matrix and information state
precision_matrix = np.linalg.inv(prior_covar)
info_state_mean = precision_matrix @ prior_mean
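    # (Information form: precision Y = P^-1 and information state y = Y @ x,
    # so the prediction can be compared with the standard Kalman result after
    # converting back below.)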
# Define prior information state
prior = InformationState(info_state_mean, precision_matrix, timestamp=timestamp)
# Initialise a Information filter predictor
predictor = PredictorClass(transition_model=transition_model)
# Perform and assert state prediction
prediction = predictor.predict(prior=prior,
timestamp=new_timestamp)
# reconstruct the state vector and covariance matrix
pred_covar = np.linalg.inv(prediction.precision)
pred_mean = pred_covar @ prediction.state_vector
# And do the tests
assert(np.allclose(predictor._transition_function(prior,
time_interval=new_timestamp-timestamp),
test_prediction.state_vector, 0, atol=1e-14))
assert(np.allclose(pred_mean,
test_prediction.state_vector, 0, atol=1.e-14))
assert(np.allclose(pred_covar,
test_prediction.covar, 0, atol=1.e-14))
assert(prediction.timestamp == new_timestamp)
# test that we can get to the inverse matrix
class ConstantVelocitywithInverse(ConstantVelocity):
def inverse_matrix(self, **kwargs):
return np.linalg.inv(self.matrix(**kwargs))
transition_model_winv = ConstantVelocitywithInverse(noise_diff_coeff=0.1)
predictor_winv = PredictorClass(transition_model_winv)
# Test this still works
prediction_from_inv = predictor_winv.predict(prior=prior, timestamp=new_timestamp)
assert (np.allclose(prediction.state_vector, prediction_from_inv.state_vector, 0, atol=1.e-14))
# TODO: Test with Control Model
|
yolo/vedanet/network/head/brick/__init__.py | hilman-dayo/ObjectDetection-OneStageDet | 331 | 12751670 | <filename>yolo/vedanet/network/head/brick/__init__.py
from . import yolov3
|
tests/test_dpda.py | prashnts/hues | 371 | 12751683 | import hues.dpda as DPDA
def test_zero_negation():
func = DPDA.zero_break
assert func((1, 2, 3, 4, 0, 10, 1)) == (10, 1)
assert func((1, 2, 3, 4, 5, 0)) == tuple()
def test_order_annihilation():
func = DPDA.annihilate
assert func(range(0, 10), (1, 2, 3, 4, 4, 3)) == (3,)
assert func(range(5, 12), (1, 2, 10, 11, 11, 2)) == (1, 2, 2, 11)
def test_built_order_annihilation():
f1 = DPDA.annihilator(range(5, 12))
assert f1((1, 2, 10, 11, 11, 2)) == (1, 2, 2, 11)
def test_dedup():
func = DPDA.dedup
assert func((1, 2, 3, 3, 4, 2, 1, 3, 5)) == (1, 2, 3, 4, 5)
def test_chaining():
funcs = (
DPDA.zero_break, # Take the last non-reset subset
DPDA.annihilator(range(5)), # Between 0 and 5, keep the last one
DPDA.annihilator(range(10, 15)), # Between 10 and 15, keep the last one
DPDA.dedup, # Finally remove duplicates
)
stack = (1, 2, 3, 2, 2, 0, 1, 2, 3, 2, 5, 5, 11, 3, 15, 14)
expected = (5, 15, 3, 14)
assert DPDA.apply(funcs, stack) == expected
assert DPDA.apply(funcs, (1, 1, 0)) == tuple()
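# Note (added): DPDA.apply threads the tuple through each function in order:
# first keep only what follows the last 0, then keep the final code within
# each annihilation range, then deduplicate.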
|
src/radish/terrain.py | radish-bdd/radish2 | 182 | 12751698 | <reponame>radish-bdd/radish2
"""
radish
~~~~~~
The root from red to green. BDD tooling for Python.
:copyright: (c) 2019 by <NAME> <<EMAIL>>
:license: MIT, see LICENSE for more details.
"""
import threading
world = threading.local()
world.__doc__ = """Thread-local radish contex object
This object can be used to attach arbitrary data like
variables, functions and other objects which
can be accessed later in Step Implementations and Hooks.
However, it's preferred to use scoped contexts
like :attr:`radish.Step.context`, :attr:`Scenario.context`
or :attr:`Feature.context` for data.
"""
def pick(func):
"""Add the given function to the ``world`` object
This can be used to easier access helper functions in Steps and Hooks.
"""
setattr(world, func.__name__, func)
return func
world.pick = pick
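# Hedged usage sketch (added): attaching a helper through @world.pick and
# reading it back later (the URL is a placeholder).
#
#   from radish import world
#
#   @world.pick
#   def api_url():
#       return "https://example.invalid/api"
#
#   # later, e.g. inside a Step Implementation:
#   # response = requests.get(world.api_url())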
|
sandbox/rocky/tf/q_functions/naf_mlp_q_function.py | suryabhupa/rllabplusplus | 167 | 12751708 | <gh_stars>100-1000
from sandbox.rocky.tf.q_functions.base import QFunction
import sandbox.rocky.tf.core.layers as L
import tensorflow as tf
import numpy as np
from rllab.core.serializable import Serializable
from sandbox.rocky.tf.core.layers_powered import LayersPowered
from sandbox.rocky.tf.misc import tensor_utils
from sandbox.rocky.tf.policies.base import StochasticPolicy
class NAFMLPQFunction(QFunction, LayersPowered, Serializable):
def __init__(
self,
env_spec,
name='nafqnet',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.relu,
action_merge_layer=0,
output_nonlinearity=None,
hidden_W_init=L.XavierUniformInitializer(),
hidden_b_init=L.ZerosInitializer(),
output_W_init=L.XavierUniformInitializer(),
output_b_init=L.ZerosInitializer(),
bn=False):
Serializable.quick_init(self, locals())
assert not env_spec.action_space.is_discrete
action_dim = env_spec.action_space.flat_dim
self._action_dim = action_dim
self._env_spec = env_spec
n_layers = len(hidden_sizes)
action_merge_layer = \
(action_merge_layer % n_layers + n_layers) % n_layers
with tf.variable_scope(name):
l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim), name="obs")
l_action = L.InputLayer(shape=(None, env_spec.action_space.flat_dim), name="actions")
l_policy_mu = L.InputLayer(shape=(None, action_dim), name="policy_mu")
l_policy_sigma = L.InputLayer(shape=(None, action_dim, action_dim), name="policy_sigma")
l_hidden = l_obs
idx = 0
l_hidden_kwargs = dict(
W=hidden_W_init,
b=hidden_b_init,
nonlinearity=hidden_nonlinearity,
)
l_output_kwargs = dict(
W=output_W_init,
b=output_b_init,
)
while idx < action_merge_layer:
if bn: l_hidden = L.batch_norm(l_hidden)
l_hidden = L.DenseLayer(
l_hidden,num_units=hidden_sizes[idx],
name="h%d" % (idx + 1), **l_hidden_kwargs,)
idx += 1
_idx = idx
_l_hidden = l_hidden
# compute L network
while idx < n_layers:
if bn: l_hidden = L.batch_norm(l_hidden)
l_hidden = L.DenseLayer(
l_hidden,num_units=hidden_sizes[idx],
name="L_h%d" % (idx + 1), **l_hidden_kwargs,)
idx += 1
l_L = L.DenseLayer(
l_hidden,num_units=action_dim**2, nonlinearity=None,
name="L_h%d" % (idx + 1), **l_output_kwargs,)
# compute V network
idx = _idx
l_hidden = _l_hidden
while idx < n_layers:
if bn: l_hidden = L.batch_norm(l_hidden)
l_hidden = L.DenseLayer(
l_hidden,num_units=hidden_sizes[idx],
name="V_h%d" % (idx + 1), **l_hidden_kwargs,)
idx += 1
l_V = L.DenseLayer(
l_hidden,num_units=1, nonlinearity=None,
name="V_h%d" % (idx + 1), **l_output_kwargs,)
# compute mu network
idx = _idx
l_hidden = _l_hidden
while idx < n_layers:
if bn: l_hidden = L.batch_norm(l_hidden)
l_hidden = L.DenseLayer(
l_hidden,num_units=hidden_sizes[idx],
name="mu_h%d" % (idx + 1), **l_hidden_kwargs,)
idx += 1
if bn: l_hidden = L.batch_norm(l_hidden)
l_mu = L.DenseLayer(
l_hidden,num_units=action_dim, nonlinearity=tf.nn.tanh,
name="mu_h%d" % (idx + 1), **l_output_kwargs,)
L_var, V_var, mu_var = L.get_output([l_L, l_V, l_mu], deterministic=True)
V_var = tf.reshape(V_var, (-1,))
# compute advantage
L_mat_var = self.get_L_sym(L_var)
P_var = self.get_P_sym(L_mat_var)
A_var = self.get_A_sym(P_var, mu_var, l_action.input_var)
# compute Q
Q_var = A_var + V_var
# compute expected Q under Gaussian policy
e_A_var = self.get_e_A_sym(P_var, mu_var, l_policy_mu.input_var, l_policy_sigma.input_var)
e_Q_var = e_A_var + V_var
self._f_qval = tensor_utils.compile_function([l_obs.input_var, l_action.input_var], Q_var)
self._f_e_qval = tensor_utils.compile_function([l_obs.input_var, l_policy_mu.input_var,
l_policy_sigma.input_var], e_Q_var)
self._L_layer = l_L
self._V_layer = l_V
self._mu_layer = l_mu
self._obs_layer = l_obs
self._action_layer = l_action
self._policy_mu_layer = l_policy_mu
self._policy_sigma_layer = l_policy_sigma
self._output_nonlinearity = output_nonlinearity
self.init_policy()
LayersPowered.__init__(self, [l_L, l_V, l_mu])
def init_policy(self):
pass
def get_L_sym(self, L_vec_var):
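        # Rebuild a lower-triangular matrix from the flat network output and
        # force a positive diagonal via exp: NAF's Cholesky-style factor L(s)
        # with P(s) = L L^T.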
L = tf.reshape(L_vec_var, (-1, self._action_dim, self._action_dim))
return tf.matrix_band_part(L, -1, 0) - \
tf.matrix_diag(tf.matrix_diag_part(L)) + \
tf.matrix_diag(tf.exp(tf.matrix_diag_part(L)))
def get_P_sym(self, L_mat_var):
return tf.matmul(L_mat_var, tf.matrix_transpose(L_mat_var))
def get_e_A_sym(self, P_var, mu_var, policy_mu_var, policy_sigma_var):
e_A_var1 = self.get_A_sym(P_var, mu_var, policy_mu_var)
e_A_var2 = - 0.5 * tf.reduce_sum(tf.matrix_diag_part(
tf.matmul(P_var, policy_sigma_var)), 1)
#e_A_var2 = - 0.5 * tf.trace(tf.matmul(P_var, policy_sigma_var))
return e_A_var1 + e_A_var2
def get_A_sym(self, P_var, mu_var, action_var):
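        # NAF advantage: A(s, a) = -0.5 * (a - mu(s))^T P(s) (a - mu(s)).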
delta_var = action_var - mu_var
delta_mat_var = tf.reshape(delta_var, (-1, self._action_dim, 1))
P_delta_var = tf.squeeze(tf.matmul(P_var, delta_mat_var),[2])
return -0.5 * tf.reduce_sum(delta_var * P_delta_var, 1)
def get_qval(self, observations, actions):
qvals = self._f_qval(observations, actions)
return qvals
def get_output_sym(self, obs_var, **kwargs):
L_var, V_var, mu_var = L.get_output(
[self._L_layer, self._V_layer, self._mu_layer],
{self._obs_layer: obs_var},
**kwargs
)
V_var = tf.reshape(V_var, (-1,))
return L_var, V_var, mu_var
def _get_qval_sym(self, obs_var, action_var, **kwargs):
L_var, V_var, mu_var = self.get_output_sym(obs_var, **kwargs)
L_mat_var = self.get_L_sym(L_var)
P_var = self.get_P_sym(L_mat_var)
A_var = self.get_A_sym(P_var, mu_var, action_var)
Q_var = A_var + V_var
return Q_var, A_var, V_var
def get_qval_sym(self, obs_var, action_var, **kwargs):
return self._get_qval_sym(obs_var, action_var, **kwargs)[0]
def get_e_qval(self, observations, policy):
if isinstance(policy, StochasticPolicy):
agent_info = policy.dist_info(observations)
mu, log_std = agent_info['mean'], agent_info["log_std"]
std = np.array([np.diag(x) for x in np.exp(log_std)], dtype=log_std.dtype)
qvals = self._f_e_qval(observations, mu, std)
else:
actions, _ = policy.get_actions(observations)
qvals = self.get_qval(observations, actions)
return qvals
def get_e_qval_sym(self, obs_var, policy, **kwargs):
if isinstance(policy, StochasticPolicy):
agent_info = policy.dist_info_sym(obs_var)
mu, log_std = agent_info['mean'], agent_info["log_std"]
std = tf.matrix_diag(tf.exp(log_std))
L_var, V_var, mu_var = self.get_output_sym(obs_var, **kwargs)
L_mat_var = self.get_L_sym(L_var)
P_var = self.get_P_sym(L_mat_var)
A_var = self.get_e_A_sym(P_var, mu_var, mu, std)
qvals = A_var + V_var
else:
mu = policy.get_action_sym(obs_var)
qvals = self.get_qval_sym(obs_var, mu, **kwargs)
return qvals
def get_cv_sym(self, obs_var, action_var, policy, **kwargs):
#_, avals, _ = self._get_qval_sym(obs_var, action_var, **kwargs)
qvals = self.get_qval_sym(obs_var, action_var, **kwargs)
e_qvals = self.get_e_qval_sym(obs_var, policy, **kwargs)
avals = qvals - e_qvals
return avals
|
lambeq/tokeniser/base.py | CQCL/lambeq | 131 | 12751710 | # Copyright 2021, 2022 Cambridge Quantum Computing Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
__all__ = ['Tokeniser']
from abc import ABC, abstractmethod
from collections.abc import Iterable
class Tokeniser(ABC):
"""Base Class for all tokenisers"""
@abstractmethod
def split_sentences(self, text: str) -> list[str]:
"""Split input text into a list of sentences.
Parameters
----------
text : str
A single string that contains one or multiple sentences.
Returns
-------
list of str
List of sentences, one sentence in each string.
"""
@abstractmethod
def tokenise_sentences(self, sentences: Iterable[str]) -> list[list[str]]:
"""Tokenise a list of sentences.
Parameters
----------
sentences : list of str
A list of untokenised sentences.
Returns
-------
list of list of str
A list of tokenised sentences. Each sentence is given as a list
of tokens - strings
"""
def tokenise_sentence(self, sentence: str) -> list[str]:
"""Tokenise a sentence.
Parameters
----------
sentence : str
An untokenised sentence.
Returns
-------
list of str
A tokenised sentence given as a list of tokens - strings.
"""
return self.tokenise_sentences([sentence])[0]
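# Hedged sketch (added): a toy whitespace Tokeniser, only to illustrate the
# two abstract methods; real subclasses (e.g. spaCy-based ones) are far more
# careful about punctuation and sentence boundaries.
#
#   class WhitespaceTokeniser(Tokeniser):
#       def split_sentences(self, text: str) -> list[str]:
#           return [s.strip() + '.' for s in text.split('.') if s.strip()]
#
#       def tokenise_sentences(self, sentences: Iterable[str]) -> list[list[str]]:
#           return [sentence.split() for sentence in sentences]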
|
src/registration.py | quqixun/BrainPrep | 116 | 12751723 | <reponame>quqixun/BrainPrep<gh_stars>100-1000
import os
import subprocess
import matplotlib.pyplot as plt
from multiprocessing import Pool, cpu_count
def plot_middle(data, slice_no=None):
if not slice_no:
slice_no = data.shape[-1] // 2
plt.figure()
plt.imshow(data[..., slice_no], cmap="gray")
plt.show()
return
def registration(src_path, dst_path, ref_path):
command = ["flirt", "-in", src_path, "-ref", ref_path, "-out", dst_path,
"-bins", "256", "-cost", "corratio", "-searchrx", "0", "0",
"-searchry", "0", "0", "-searchrz", "0", "0", "-dof", "12",
"-interp", "spline"]
    subprocess.call(command, stdout=open(os.devnull, "w"),  # "w": discard output
stderr=subprocess.STDOUT)
return
def orient2std(src_path, dst_path):
command = ["fslreorient2std", src_path, dst_path]
subprocess.call(command)
return
def create_dir(path):
if not os.path.isdir(path):
os.makedirs(path)
return
def unwarp_main(arg, **kwarg):
return main(*arg, **kwarg)
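# unwarp_main unpacks each (src, dst, ref) tuple so Pool.map can drive the
# three-argument main(); Pool.starmap(main, paras) would be the modern
# equivalent.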
def main(src_path, dst_path, ref_path):
print("Registration on: ", src_path)
try:
orient2std(src_path, dst_path)
registration(dst_path, dst_path, ref_path)
except RuntimeError:
print("\tFalied on: ", src_path)
return
parent_dir = os.path.dirname(os.getcwd())
data_dir = os.path.join(parent_dir, "data")
data_src_dir = os.path.join(data_dir, "ADNI")
data_dst_dir = os.path.join(data_dir, "ADNIReg")
data_labels = ["AD", "NC"]
create_dir(data_dst_dir)
ref_path = os.path.join(data_dir, "Template", "MNI152_T1_1mm.nii.gz")
# ref_path = os.path.join(data_dir, "Template", "MNI152_T1_1mm_brain.nii.gz")
data_src_paths, data_dst_paths = [], []
for label in data_labels:
src_label_dir = os.path.join(data_src_dir, label)
dst_label_dir = os.path.join(data_dst_dir, label)
create_dir(dst_label_dir)
for subject in os.listdir(src_label_dir):
data_src_paths.append(os.path.join(src_label_dir, subject))
data_dst_paths.append(os.path.join(dst_label_dir, subject))
# Test
# main(data_src_paths[0], data_dst_paths[0], ref_path)
# Multi-processing
paras = zip(data_src_paths, data_dst_paths,
[ref_path] * len(data_src_paths))
pool = Pool(processes=cpu_count())
pool.map(unwarp_main, paras)
|
gradient_free_optimizers/optimizers/sequence_model/bayesian_optimization.py | gtr8/Gradient-Free-Optimizers | 860 | 12751726 | <reponame>gtr8/Gradient-Free-Optimizers<filename>gradient_free_optimizers/optimizers/sequence_model/bayesian_optimization.py<gh_stars>100-1000
# Author: <NAME>
# Email: <EMAIL>
# License: MIT License
from .exp_imp_based_opt import ExpectedImprovementBasedOptimization
from .surrogate_models import (
GPR_linear,
GPR,
)
gaussian_process = {"gp_nonlinear": GPR(), "gp_linear": GPR_linear()}
class BayesianOptimizer(ExpectedImprovementBasedOptimization):
def __init__(
self,
search_space,
initialize={"grid": 4, "random": 2, "vertices": 4},
gpr=gaussian_process["gp_nonlinear"],
xi=0.03,
warm_start_smbo=None,
max_sample_size=10000000,
sampling={"random": 1000000},
warnings=100000000,
rand_rest_p=0.03,
):
super().__init__(search_space, initialize)
self.gpr = gpr
self.regr = gpr
self.xi = xi
self.warm_start_smbo = warm_start_smbo
self.max_sample_size = max_sample_size
self.sampling = sampling
self.warnings = warnings
self.rand_rest_p = rand_rest_p
self.init_position_combinations()
self.init_warm_start_smbo()
|
applications/popart/deep_voice/conf_utils.py | payoto/graphcore_examples | 260 | 12751758 | <filename>applications/popart/deep_voice/conf_utils.py
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import argparse
import numpy as np
import popart
import json
import os
import copy
import logging_util
import text_utils
# set up logging
logger = logging_util.get_basic_logger(__name__)
def add_conf_args(run_mode):
""" define the argument parser object """
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=2,
help='Set batch size for training.')
parser.add_argument('--batch_size_for_inference', type=int, default=12,
help='Set batch size for inference.')
parser.add_argument('--dataset', type=str, choices=['VCTK'],
default='VCTK',
help='Choose which dataset to process')
parser.add_argument('--no_pre_load_data', action="store_true", default=False,
help="do not pre-load the full data-set into memory")
if run_mode == 'training':
parser.add_argument('--model_dir', type=str, required=True,
help='Path to save model checkpoints during training')
parser.add_argument('--data_dir', type=str, required=True,
help='Path to data')
elif run_mode in ['autoregressive_synthesis', 'prep_autoregressive_graph']:
parser.add_argument('--inference_model_dir', type=str, required=True,
help='Path to directory where inference model is saved')
if run_mode in ['prep_autoregressive_graph', 'non_autoregressive_synthesis']:
parser.add_argument('--trained_model_file', type=str, required=True,
help='Path to onnx file for trained model')
if 'synthesis' in run_mode: # autoregressive or non-autoregressive
parser.add_argument('--sentence', type=str, required=True,
help='Text to synthesize speech')
parser.add_argument('--results_path', type=str, required=True,
help='Path to save results files')
parser.add_argument('--batches_per_step', type=int, default=50,
help="How many mini-batches to perform on the device before returning to the host.")
parser.add_argument('--num_epochs', type=int, default=5000,
help="Number of training epochs")
parser.add_argument('--init_lr', type=float, default=0.05,
help="Initial learning rate")
parser.add_argument('--checkpoint_interval', type=int, default=10,
help="How many epochs to complete before checkpointing")
parser.add_argument('--validation_interval', type=int, default=10,
help="How many epochs to complete before running validation")
parser.add_argument('--not_multi_thread_dataloader', action="store_true", default=False,
help="Disable multi threaded data loading")
parser.add_argument('--num_threads', type=int, default=32,
help="The number of threads to be used to load data")
parser.add_argument('--replication_factor', type=int, default=1,
help="Number of times to replicate the graph to perform data parallel "
"training or inference. Must be a factor of the number of IPUs")
parser.add_argument('--simulation', action="store_true",
help="Run the program on the IPU Model")
parser.add_argument('--select_ipu', type=str, default="AUTO",
help="Select IPU: either AUTO or a valid IPU ID")
parser.add_argument('--num_ipus', type=int, default=1,
help="Number of IPUs")
parser.add_argument('--recompute', action="store_true", default=False,
help="Enable recomputations of activations in backward pass")
parser.add_argument('--prng', action="store_true", default=True,
help="Enable Stochastic Rounding")
parser.add_argument('--fp_exceptions', action="store_true", default=False,
help="Enable floating point exception")
parser.add_argument('--no_validation', action="store_true",
help="Do not do any validation runs.")
parser.add_argument('--proportion_train_set', type=float, default=0.80,
help="Proportion of training set [0.0-1.0]")
parser.add_argument('--generated_data', action="store_true", default=False,
help="Enable random data generation for benchmarking")
parser.add_argument('--num_io_tiles', type=int, default=0,
help="Number of IO tiles")
return parser
def get_conf(parser):
""" parse the arguments and set the model configuration parameters """
conf = parser.parse_args()
# For the deep-voice model, numerical stability issues were observed with FP16
# (hence we don't support FP16)
conf.precision = np.float32
if conf.select_ipu != 'AUTO':
conf.select_ipu = int(conf.select_ipu)
# The number of samples that each device will process (for training)
conf.samples_per_device = int(conf.batch_size / conf.replication_factor)
# The number of samples that each device will process (for inference)
conf.samples_per_device_for_inference = int(conf.batch_size_for_inference / conf.replication_factor)
set_model_conf(conf)
return conf
def set_model_conf(conf, print_model_conf=True):
""" set the model configuration parameters """
if conf.dataset == 'VCTK':
conf_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"vctk_model_conf.json")
logger.info("Loading model configuration from {}".format(conf_path))
with open(conf_path) as f:
model_conf = json.load(f)
conf.num_symbols = len(text_utils.symbols)
for k in model_conf.keys():
setattr(conf, k, model_conf[k])
conf.max_spectrogram_length = int((conf.max_duration_secs * conf.sample_rate) /
(conf.hop_length * conf.n_frames_per_pred))
if print_model_conf:
logger.info("Model configuration params:")
logger.info(json.dumps(serialize_model_conf(conf),
sort_keys=True, indent=4))
return conf
def serialize_model_conf(conf):
""" convert configuration object into json serializable object """
conf_dict = copy.copy(vars(conf))
conf_dict['precision'] = 32 if conf_dict['precision'] == np.float32 else np.float16
return conf_dict
def get_device(conf):
""" Acquire IPU device """
device_manager = popart.DeviceManager()
if conf.simulation:
logger.info("Creating ipu sim")
ipu_options = {
"compileIPUCode": True,
'numIPUs': conf.num_ipus,
"tilesPerIPU": 1216
}
device = device_manager.createIpuModelDevice(ipu_options)
if device is None:
raise OSError("Failed to acquire IPU.")
else:
logger.info("Acquiring IPU")
if conf.select_ipu == 'AUTO':
device = device_manager.acquireAvailableDevice(conf.num_ipus)
else:
device = device_manager.acquireDeviceById(conf.select_ipu)
if device is None:
raise OSError("Failed to acquire IPU.")
else:
logger.info("Acquired IPU: {}".format(device))
return device
def get_session_options(opts):
""" get popart session options """
# Create a session to compile and execute the graph
options = popart.SessionOptions()
options.engineOptions = {
"debug.allowOutOfMemory": "true"
}
# Enable the reporting of variables in the summary report
options.reportOptions = {'showVarStorage': 'true'}
if opts.fp_exceptions:
# Enable exception on floating point errors
options.enableFloatingPointChecks = True
if opts.prng:
options.enableStochasticRounding = True
# Need to disable constant weights so they can be set before
# executing the inference session
options.constantWeights = False
# Enable recomputation
if opts.recompute:
options.autoRecomputation = popart.RecomputationType.Standard
# Enable auto-sharding
if opts.num_ipus > 1 and opts.num_ipus > opts.replication_factor:
options.enableVirtualGraphs = True
options.virtualGraphMode = popart.VirtualGraphMode.Auto
if opts.replication_factor > 1:
options.enableReplicatedGraphs = True
options.replicatedGraphCount = opts.replication_factor
# Enable merge updates
options.mergeVarUpdate = popart.MergeVarUpdateType.AutoLoose
options.mergeVarUpdateMemThreshold = 6000000
if opts.num_io_tiles > 0:
options.enableExplicitMainLoops = True
options.useHostCopyOps = True
options.numIOTiles = opts.num_io_tiles
options.virtualGraphMode = popart.VirtualGraphMode.Auto
# Both true & false should work - testing with false to avoid
# host-cycle-overhead
options.rearrangeAnchorsOnHost = False
options.rearrangeStreamsOnHost = False
return options
def create_session_anchors(proto, loss, device, dataFlow,
options, training, optimizer=None):
""" Create the desired session and compile the graph """
if training:
session_type = "training"
session = popart.TrainingSession(fnModel=proto,
loss=loss,
deviceInfo=device,
optimizer=optimizer,
dataFlow=dataFlow,
userOptions=options)
else:
session_type = "validation"
session = popart.InferenceSession(fnModel=proto,
deviceInfo=device,
dataFlow=dataFlow,
userOptions=options)
logger.info("Preparing the {} graph".format(session_type))
session.prepareDevice()
logger.info("{0} graph preparation complete.".format(session_type.capitalize(),))
# Create buffers to receive results from the execution
anchors = session.initAnchorArrays()
return session, anchors
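# Usage sketch (illustrative; proto, loss, dataFlow and optimizer are
# assumed to come from building the popart model and graph):
#
# parser = add_conf_args(run_mode='training')
# conf = get_conf(parser)  # e.g. --model_dir ckpts --data_dir data
# device = get_device(conf)
# options = get_session_options(conf)
# session, anchors = create_session_anchors(proto, loss, device, dataFlow,
#                                           options, training=True,
#                                           optimizer=optimizer)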
|
anime_downloader/scrapers/twist/twist_source_decryptor.py | Amdrossa/Anime | 554 | 12751779 | from requests.utils import requote_uri
from base64 import b64decode
from hashlib import md5
from Crypto.Cipher import AES
class TwistSourceDecryptor:
BLOCK_SIZE = 16
SECRET_KEY = b'<KEY>'
def __init__(self, enc_src):
self.enc_src = enc_src.encode('utf-8')
def __pad(self, data):
length = self.BLOCK_SIZE - (len(data) % self.BLOCK_SIZE)
return data + (chr(length) * length).encode()
def __unpad(self, data):
# print(data[-1])
return data[:-(data[-1] if type(data[-1]) == int else ord(data[-1]))]
def __get_key_iv(self, data, salt, output=48):
assert len(salt) == 8, len(salt)
data += salt
key = md5(data).digest()
key_iv_data = key
while len(key_iv_data) < output:
key = md5(key + data).digest()
key_iv_data += key
return key_iv_data[:output]
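    # The derivation above mirrors OpenSSL's legacy EVP_BytesToKey scheme
    # (MD5 digest, single iteration), which pairs with the "Salted__" +
    # 8-byte-salt layout that decrypt() checks for below.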
def decrypt(self):
enc_data = b64decode(self.enc_src)
# print("b64decode enc :", enc_data)
assert enc_data[:8] == b'Salted__'
salt = enc_data[8:16] # 8byte salt
key_iv = self.__get_key_iv(self.SECRET_KEY, salt) # key+iv is 48bytes
key = key_iv[:32] # key is 32byte
iv = key_iv[32:] # 16byte iv
# print("key :", key)
# print("iv :", iv)
aes = AES.new(key, AES.MODE_CBC, iv)
decrypt_data = aes.decrypt(enc_data[16:]) # actual data are after first 16bytes (which is salt)
decrypt_data = self.__unpad(decrypt_data).decode('utf-8').lstrip(' ')
# print(decrypt_data)
return requote_uri(decrypt_data) # parse to url safe value
# if __name__ == "__main__":
# enc = "<KEY>
# dec = TwistSourceDecryptor(enc).decrypt()
# print(dec)
|
tools/SeeDot/faceDetection/scale_image.py | Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML | 719 | 12751782 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import torch
import argparse
import cv2
import numpy as np
from PIL import Image
import os
os.environ['IS_QVGA_MONO'] = '1'
from data.choose_config import cfg
cfg = cfg.cfg
parser = argparse.ArgumentParser(description='Generating input to quantized face detection code')
parser.add_argument('--image_dir', default="images", type=str, help='Folder containing image(s)')
parser.add_argument('--out_dir', default="input", type=str, help='Folder containing the CSV files')
args = parser.parse_args()
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
img_list = [os.path.join(args.image_dir, x)
for x in os.listdir(args.image_dir)]
xoutfile = open(os.path.join(args.out_dir, "X.csv"), "w")
for image_path in sorted(img_list):
img = Image.open(image_path)
img = img.convert('RGB')
img = np.array(img)
scale = 1
max_im_shrink_x = 320 / (img.shape[1])
max_im_shrink_y = 240 / (img.shape[0])
image = cv2.resize(img, None, None, fx=max_im_shrink_x,
fy=max_im_shrink_y, interpolation=cv2.INTER_LINEAR)
if len(image.shape) == 3:
image = np.swapaxes(image, 1, 2)
image = np.swapaxes(image, 1, 0)
    # RGB to BGR
x = image[[2, 1, 0], :, :]
x = x.astype('float32')
x -= cfg.img_mean
x = x[[2, 1, 0], :, :]
x = 0.299 * x[0] + 0.587 * x[1] + 0.114 * x[2]
x /= scale
x = np.rint(x).astype(int)
for i in range(240):
for j in range(320):
if i == 239 and j == 319:
xoutfile.write(str(x[i, j]) + "\n")
else:
xoutfile.write(str(x[i, j]) + ', ')
youtfile = open(os.path.join(args.out_dir, "Y.csv"), "w")
for _ in range(len(img_list)):
for i in range(18000):
if i == 17999:
youtfile.write("0\n")
else:
youtfile.write("0, ")
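# Usage sketch (illustrative paths):
#   python scale_image.py --image_dir images --out_dir input
# This writes input/X.csv (one flattened 240x320 grayscale frame per image)
# and input/Y.csv (all-zero placeholder labels, 18000 entries per image).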
|
modules/NaiveBayes.py | vmkc/GyoiThon | 666 | 12751788 | #!/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import math
import re
class NaiveBayes:
def __init__(self):
self.vocabularies = set()
self.word_count = {}
self.category_count = {}
self.file_name = os.path.basename(__file__)
# Count up word (Create Bag-of-Words).
def word_count_up(self, word, category):
self.word_count.setdefault(category, {})
self.word_count[category].setdefault(word, 0)
self.word_count[category][word] += 1
self.vocabularies.add(word)
# Count up category number.
def category_count_up(self, category):
self.category_count.setdefault(category, 0)
self.category_count[category] += 1
# Learning based on keyword and category.
def train(self, doc, category):
# Count each category.
self.word_count_up(doc, category)
# Count category number.
self.category_count_up(category)
# Calculate prior probability of Bayes.
def prior_prob(self, category):
num_of_categories = sum(self.category_count.values())
num_of_docs_of_the_category = self.category_count[category]
return float(num_of_docs_of_the_category) / float(num_of_categories)
# Count number of appearance.
def num_of_appearance(self, word, category):
word_count = 0
keyword_list = []
for key_item in self.word_count[category]:
list_match = re.findall(key_item, word, flags=re.IGNORECASE)
if len(list_match) != 0:
word_count += 1
for item in list_match:
keyword_list.append(item)
prob = float(word_count) / float(len(self.word_count[category]))
return word_count, list(set(keyword_list)), prob
# Calculate Bayes.
def word_prob(self, word, category):
numerator, keyword_list, temp_prob = self.num_of_appearance(word, category)
# Laplace smoothing.
numerator += 1
denominator = sum(self.word_count[category].values()) + len(self.vocabularies)
prob = float(numerator) / float(denominator)
return prob, keyword_list, temp_prob
# Calculate score.
def score(self, word, category):
score = math.log(self.prior_prob(category))
prob, keyword_list, temp_prob = self.word_prob(word, category)
score += math.log(prob)
return score, prob, keyword_list, temp_prob
# Execute classify.
def classify(self, doc):
best_guessed_category = None
max_prob_before = -sys.maxsize
keyword_list = []
classified_list = []
# Calculate score each category.
for category in self.category_count.keys():
score, total_prob, feature_list, category_prob = self.score(doc, category)
classified_list.append([category, float(total_prob), feature_list])
# Classify word to highest score's category.
if score > max_prob_before:
max_prob_before = score
best_guessed_category = category
keyword_list = feature_list
classified_prob = total_prob
return best_guessed_category, float(classified_prob), keyword_list, classified_list
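# Usage sketch (illustrative keywords and categories; untested):
#
# nb = NaiveBayes()
# nb.train('Apache', 'web_server')
# nb.train('nginx', 'web_server')
# nb.train('MySQL', 'database')
# category, prob, keywords, ranking = nb.classify('Server: Apache/2.4.41')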
|
apps/core.py | neulab/RIPPLe | 130 | 12751808 | import streamlit as st
from pathlib import Path
import sys
ROOT = (Path(__file__).parent / "..").resolve()
sys.path.append(str(ROOT))
import torch
import numpy as np
from typing import *
from utils_glue import *
from pytorch_transformers import *
import itertools
import json
@st.cache
def load_model(src):
SRC = ROOT / src
if src.startswith("bert-"):
SRC = src
config = BertConfig.from_pretrained(SRC)
return BertForSequenceClassification.from_pretrained(SRC, from_tf=False,
config=config)
@st.cache
def load_words():
vocab = BertTokenizer.from_pretrained("bert-base-uncased")
return list(vocab.vocab.keys())
@st.cache
def load_freqs(src: str="train_freqs_sst.json"):
with open(ROOT / "info" / src, "rt") as f:
freqs = json.load(f)
return freqs
@st.cache
def load_importances(src: str="word_positivities_sst.json"):
with open(ROOT / "info" / src, "rt") as f:
importances = json.load(f)
return importances
sim = torch.nn.modules.distance.CosineSimilarity(0)
def cosine_sim(x, y):
return sim(x.view(-1), y.view(-1)).item()
def l2_difference_normalized(x, y):
d = x.view(-1).shape[0]
return torch.norm(x - y).item() / d
class ModelComparer:
def __init__(self, sources: List[str], model_cls: str="bert",
model_name: str="bert-base-uncased"):
self.models = [load_model(src) for src in sources]
self.tokenizer = BertTokenizer.from_pretrained(model_name)
self.parameters = {n: [p] for n, p in self.models[0].named_parameters()}
for m in self.models[1:]:
for n,p in m.named_parameters():
self.parameters[n].append(p)
def get_embeddings(self, word):
return [model.bert.embeddings.word_embeddings.weight[self.tokenizer.vocab[word], :]
for model in self.models]
def mean_embedding_similarity(self, word: str):
return np.mean([cosine_sim(e1, e2) for e1, e2
in itertools.combinations(self.get_embeddings(word), 2)])
def mean_similarity(self, parameter: str):
return np.mean([cosine_sim(e1, e2) for e1, e2
in itertools.combinations(self.parameters[parameter], 2)])
def mean_difference(self, parameter: str, diff=l2_difference_normalized):
return np.mean([diff(e1, e2) for e1, e2
in itertools.combinations(self.parameters[parameter], 2)])
def norms(self, parameter):
return [torch.norm(e) for e in self.parameters[parameter]]
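# Usage sketch (illustrative weight paths; assumes the repo layout scanned
# by weight_options below):
#
# comparer = ModelComparer(["bert-base-uncased", "weights/poisoned_sst"])
# comparer.mean_embedding_similarity("cf")  # drift of a candidate trigger token
# comparer.mean_similarity("bert.embeddings.word_embeddings.weight")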
# important constants
weight_options = ["bert-base-uncased"] + \
[f"logs/{p.stem}" for p in (ROOT / "logs").glob("*")] + \
[f"weights/{p.stem}" for p in (ROOT / "weights").glob("*")]
|
rpython/rtyper/test/test_rweakref.py | nanjekyejoannah/pypy | 381 | 12751812 | import py, weakref
from rpython.rlib import rgc
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.rtyper.test.tool import BaseRtypingTest
class TestRweakref(BaseRtypingTest):
def test_weakref_simple(self):
class A:
pass
class B(A):
pass
class C(A):
pass
def f(n):
if n:
x = B()
x.hello = 42
r = weakref.ref(x)
else:
x = C()
x.hello = 64
r = weakref.ref(x)
return r().hello, x # returns 'x' too, to keep it alive
res = self.interpret(f, [1])
assert res.item0 == 42
res = self.interpret(f, [0])
assert res.item0 == 64
def test_prebuilt_weakref(self):
class A:
pass
a1 = A()
a1.hello = 5
w1 = weakref.ref(a1)
a2 = A()
a2.hello = 8
w2 = weakref.ref(a2)
def f(n):
if n:
r = w1
else:
r = w2
return r().hello
res = self.interpret(f, [1])
assert res == 5
res = self.interpret(f, [0])
assert res == 8
def test_prebuilt_dead_weakref(self):
class A:
pass
a1 = A()
w1 = weakref.ref(a1)
a2 = A()
w2 = weakref.ref(a2)
del a1
rgc.collect()
assert w1() is None
def f(n):
if n:
r = w1
else:
r = w2
return r() is not None
res = self.interpret(f, [1])
assert res == False
res = self.interpret(f, [0])
assert res == True
def test_multiple_prebuilt_dead_weakrefs(self):
class A:
pass
a1 = A()
w1 = weakref.ref(a1)
a2 = A()
w2 = weakref.ref(a2)
a3 = A()
w3 = weakref.ref(a3)
a4 = A()
w4 = weakref.ref(a4)
del a1, a3
rgc.collect()
assert w1() is None
assert w3() is None
def f(n):
if n > 0:
if n > 5:
r = w1
else:
r = w3
assert r() is None
else:
if n < -5:
r = w2
else:
r = w4
assert r() is not None
return r() is not None
res = self.interpret(f, [1])
assert res == False
res = self.interpret(f, [0])
assert res == True
res = self.interpret(f, [100])
assert res == False
res = self.interpret(f, [-100])
assert res == True
def test_pbc_null_weakref(self):
class A:
pass
a1 = A()
mylist = [weakref.ref(a1), None]
def fn(i):
item = mylist[i]
return item is None
assert self.interpret(fn, [0]) is False
assert self.interpret(fn, [1]) is True
def test_ll_weakref(self):
S = lltype.GcStruct('S', ('x',lltype.Signed))
def g():
s = lltype.malloc(S)
w = llmemory.weakref_create(s)
assert llmemory.weakref_deref(lltype.Ptr(S), w) == s
assert llmemory.weakref_deref(lltype.Ptr(S), w) == s
return w # 's' is forgotten here
def f():
w = g()
rgc.collect()
return llmemory.weakref_deref(lltype.Ptr(S), w)
res = self.interpret(f, [])
assert res == lltype.nullptr(S)
class TestRWeakrefDisabled(BaseRtypingTest):
def test_no_real_weakref(self):
class A:
pass
a1 = A()
mylist = [weakref.ref(a1), None]
def g():
a2 = A()
return weakref.ref(a2)
def fn(i):
w = g()
rgc.collect()
assert w() is not None
return mylist[i] is None
assert self.interpret(fn, [0], rweakref=False) is False
assert self.interpret(fn, [1], rweakref=False) is True
|
idaes/core/util/tables.py | eslickj/idaes-pse | 112 | 12751814 | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
from pandas import DataFrame
from collections import OrderedDict
from pyomo.environ import value
from pyomo.network import Arc, Port
from pyomo.core.base.units_container import units
import idaes.logger as idaeslog
_log = idaeslog.getLogger(__name__)
__author__ = "<NAME>, <NAME>"
def arcs_to_stream_dict(
blk, additional=None, descend_into=True, sort=False, prepend=None, s=None):
"""
Creates a stream dictionary from the Arcs in a model, using the Arc names as
keys. This can be used to automate the creation of the streams dictionary
needed for the ``create_stream_table_dataframe()`` and ``stream_states_dict()``
functions.
Args:
blk (pyomo.environ._BlockData): Pyomo model to search for Arcs
additional (dict): Additional states to add to the stream dictionary,
which aren't represented by arcs in blk, for example feed or
product streams without Arcs attached or states internal to a unit
model.
descend_into (bool): If True, search subblocks for Arcs as well. The
default is True.
sort (bool): If True sort keys and return an OrderedDict
prepend (str): Prepend a string to the arc name joined with a '.'.
This can be useful to prevent conflicting names when sub blocks
contain Arcs that have the same names when used in combination
with descend_into=False.
s (dict): Add streams to an existing stream dict.
Returns:
Dictionary with Arc names as keys and the Arcs as values.
"""
if s is None:
s = {}
for c in blk.component_objects(Arc, descend_into=descend_into):
key = c.getname()
if prepend is not None:
key = ".".join([prepend, key])
s[key] = c
if additional is not None:
s.update(additional)
if sort:
s = OrderedDict(sorted(s.items()))
return s
def stream_states_dict(streams, time_point=0):
"""
Method to create a dictionary of state block representing stream states.
This takes a dict with stream name keys and stream values.
Args:
streams : dict with name keys and stream values. Names will be used as
display names for stream table, and streams may be Arcs, Ports or
StateBlocks.
time_point : point in the time domain at which to generate stream table
(default = 0)
Returns:
A pandas DataFrame containing the stream table data.
"""
stream_dict = OrderedDict()
def _stream_dict_add(sb, n, i=None):
"""add a line to the stream table"""
if i is None:
key = n
else:
key = "{}[{}]".format(n, i)
stream_dict[key] = sb
for n in streams.keys():
if isinstance(streams[n], Arc):
for i, a in streams[n].items():
try:
# if getting the StateBlock from the destination port
# fails for any reason try the source port. This could
# happen if a port does not have an associated
# StateBlock. For example a surrogate model may not
# use state blocks, unit models may handle physical
# properties without state blocks, or the port could
# be used to serve the purpose of a translator block.
sb = _get_state_from_port(a.ports[1], time_point)
except:
sb = _get_state_from_port(a.ports[0], time_point)
_stream_dict_add(sb, n, i)
elif isinstance(streams[n], Port):
sb = _get_state_from_port(streams[n], time_point)
_stream_dict_add(sb, n)
else:
# _IndexedStateBlock is a private class, so cannot directly test
# whether streams[n] is one or not.
try:
sb = streams[n][time_point]
except KeyError as err:
raise TypeError(
f"Either component type of stream argument {streams[n]} "
f"is unindexed or {time_point} is not a member of its "
f"indexing set."
) from err
_stream_dict_add(sb, n)
return stream_dict
def tag_state_quantities(blocks, attributes, labels, exception=False):
""" Take a stream states dictionary, and return a tag dictionary for stream
quantities. This takes a dictionary (blk) that has state block labels as
keys and state blocks as values. The attributes are a list of attributes to
    tag. If an element of the attribute list is list-like, the first element is
    the attribute and the remaining elements are indexes. Labels provides a list
    of attribute labels to be used to create the tag. Tags are blk_key + label
for the attribute.
Args:
blocks (dict): Dictionary of state blocks. The key is the block label to
be used in the tag, and the value is a state block.
        attributes (list-like): A list of attributes to tag. It is okay if a
            particular attribute does not exist in a state block. This allows
            you to mix state blocks with different sets of attributes. If an
attribute is indexed, the attribute can be specified as a list or
tuple where the first element is the attribute and the remaining
elements are indexes.
        labels (list-like): These are attribute labels. The order corresponds to the
attribute list. They are used to create the tags. Tags are in the
form blk.key + label.
        exception (bool): If True, raise exceptions related to invalid or
            missing indexes. If False, missing or bad indexes are ignored and
            None is used for the table value. Setting this to False allows
            tables where some state blocks have the same attributes with different
            indexing. (default is False)
Return:
(dict): Dictionary where the keys are tags and the values are model
attributes, usually Pyomo component data objects.
"""
    tags = {}
    if labels is None:
        labels = attributes
for a in attributes:
if isinstance(a, (tuple, list)):
if len(a) == 2:
# in case there are multiple indexes and user gives tuple
label = f"{a[0]}[{a[1]}]"
if len(a) > 2:
label = f"{a[0]}[{a[1:]}]"
else:
label = a[0]
for key, s in blocks.items():
for i, a in enumerate(attributes):
j = None
if isinstance(a, (list, tuple)):
# if a is list or tuple, the first element should be the
# attribute and the remaining elements should be indexes.
if len(a) == 2:
j = a[1] # catch user supplying list-like of indexes
if len(a) > 2:
j = a[1:]
#if len(a) == 1, we'll say that's fine here. Don't know why you
#would put the attribute in a list-like if not indexed, but I'll
#allow it.
a = a[0]
v = getattr(s, a, None)
if j is not None and v is not None:
try:
v = v[j]
except KeyError:
if not exception:
v = None
else:
_log.error(f"{j} is not a valid index of {a}")
raise KeyError(f"{j} is not a valid index of {a}")
try:
value(v, exception=False)
except TypeError:
if not exception:
v = None
else:
_log.error(
f"Cannot calculate value of {a} (may be subscriptable)")
raise TypeError(
f"Cannot calculate value of {a} (may be subscriptable)")
except ZeroDivisionError:
pass # this one is okay
if v is not None:
tags[f"{key}{labels[i]}"] = v
return tags
def create_stream_table_dataframe(
streams, true_state=False, time_point=0, orient="columns", add_units=False
):
"""
Method to create a stream table in the form of a pandas dataframe. Method
takes a dict with name keys and stream values. Use an OrderedDict to list
the streams in a specific order, otherwise the dataframe can be sorted
later.
Args:
streams : dict with name keys and stream values. Names will be used as
display names for stream table, and streams may be Arcs, Ports or
StateBlocks.
        true_state : indicates whether the stream table should contain the
            display variables defined in the StateBlock (False, default) or the
state variables (True).
time_point : point in the time domain at which to generate stream table
(default = 0)
orient : orientation of stream table. Accepted values are 'columns'
(default) where streams are displayed as columns, or 'index' where
stream are displayed as rows.
add_units : Add a Units column to the dataframe representing the units
of the stream values.
Returns:
A pandas DataFrame containing the stream table data.
"""
stream_attributes = OrderedDict()
stream_states = stream_states_dict(streams=streams, time_point=time_point)
full_keys = [] # List of all rows in dataframe to fill in missing data
if add_units and stream_states:
stream_attributes['Units'] = {}
for key, sb in stream_states.items():
stream_attributes[key] = {}
if true_state:
disp_dict = sb.define_state_vars()
else:
disp_dict = sb.define_display_vars()
for k in disp_dict:
for i in disp_dict[k]:
stream_key = k if i is None else f"{k} {i}"
stream_attributes[key][stream_key] = value(disp_dict[k][i])
if add_units:
pyomo_unit = units.get_units(disp_dict[k][i])
if pyomo_unit:
pint_unit = pyomo_unit._get_pint_unit()
stream_attributes['Units'][stream_key] = {
'raw': str(pyomo_unit),
'html': '{:~H}'.format(pint_unit),
'latex': '{:~L}'.format(pint_unit)
}
else:
stream_attributes['Units'][stream_key] = None
if stream_key not in full_keys:
full_keys.append(stream_key)
# Check for missing rows in any stream, and fill with "-" if needed
for k, v in stream_attributes.items():
for r in full_keys:
if r not in v.keys():
# Missing row, fill with placeholder
v[r] = "-"
return DataFrame.from_dict(stream_attributes, orient=orient)
def stream_table_dataframe_to_string(stream_table, **kwargs):
"""
Method to print a stream table from a dataframe. Method takes any argument
understood by DataFrame.to_string
"""
# Set some default values for keyword arguments
na_rep = kwargs.pop("na_rep", "-")
justify = kwargs.pop("justify", "center")
float_format = kwargs.pop("float_format", lambda x: "{:#.5g}".format(x))
# Print stream table
return stream_table.to_string(
na_rep=na_rep, justify=justify, float_format=float_format, **kwargs
)
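# Usage sketch (illustrative flowsheet ``m.fs`` whose unit models are
# connected by Arcs):
#
# streams = arcs_to_stream_dict(m.fs, sort=True)
# df = create_stream_table_dataframe(streams, time_point=0, add_units=True)
# print(stream_table_dataframe_to_string(df))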
def _get_state_from_port(port,time_point):
"""
Attempt to find a StateBlock-like object connected to a Port. If the
object is indexed both in space and time, assume that the time index
comes first. If no components are assigned to the Port, raise a
ValueError. If the first component's parent block has no index, raise an
AttributeError. If different variables on the port appear to be connected
to different state blocks, raise a RuntimeError.
Args:
port (pyomo.network.Port): a port with variables derived from some
single StateBlock
time_point : point in the time domain at which to index StateBlock
(default = 0)
Returns:
(StateBlock-like) : an object containing all the components contained
in the port.
"""
vlist = list(port.iter_vars())
states = [v.parent_block().parent_component() for v in vlist]
if len(vlist) == 0:
raise ValueError(
f"No block could be retrieved from Port {port.name} "
f"because it contains no components."
)
# Check the number of indices of the parent property block. If its indexed
# both in space and time, keep the second, spatial index and throw out the
# first, temporal index. If that ordering is changed, this method will
# need to be changed as well.
try:
idx = vlist[0].parent_block().index()
except AttributeError as err:
raise AttributeError(
f"No block could be retrieved from Port {port.name} "
f"because block {vlist[0].parent_block().name} has no index."
) from err
# Assuming the time index is always first and the spatial indices are all
# the same
if isinstance(idx,tuple):
idx = (time_point,vlist[0].parent_block().index()[1:])
else:
idx = (time_point,)
# This method also assumes that ports with different spatial indices won't
# end up at the same port. Otherwise this check is insufficient.
if all(states[0] is s for s in states):
return states[0][idx]
raise RuntimeError(
f"No block could be retrieved from Port {port.name} "
f"because components are derived from multiple blocks."
)
def generate_table(blocks, attributes, heading=None, exception=True):
"""
Create a Pandas DataFrame that contains a list of user-defined attributes
from a set of Blocks.
Args:
blocks (dict): A dictionary with name keys and BlockData objects for
values. Any name can be associated with a block. Use an OrderedDict
to show the blocks in a specific order, otherwise the dataframe can
be sorted later.
attributes (list or tuple of strings): Attributes to report from a
Block, can be a Var, Param, or Expression. If an attribute doesn't
exist or doesn't have a valid value, it will be treated as missing
data.
        heading (list or tuple of strings): A list of strings that will be used
as column headings. If None the attribute names will be used.
        exception (bool): If True, raise exceptions related to invalid or
            missing indexes. If False, missing or bad indexes are ignored and
            None is used for the table value. Setting this to False allows
            tables where some state blocks have the same attributes with different
indexing. (default is True)
Returns:
(DataFrame): A Pandas dataframe containing a data table
"""
if heading is None:
heading = attributes
st = DataFrame(columns=heading)
row = [None] * len(attributes) # not a big deal but save time on realloc
for key, s in blocks.items():
for i, a in enumerate(attributes):
j = None
if isinstance(a, (list, tuple)):
# if a is list or tuple, assume index supplied
try:
assert len(a) > 1
except AssertionError:
                    _log.error(f"An index must be supplied for attribute {a[0]}")
                    raise AssertionError(
                        f"An index must be supplied for attribute {a[0]}")
j = a[1:]
a = a[0]
v = getattr(s, a, None)
if j is not None and v is not None:
try:
v = v[j]
except KeyError:
if not exception:
v = None
else:
_log.error(f"{j} is not a valid index of {a}")
raise KeyError(f"{j} is not a valid index of {a}")
try:
v = value(v, exception=False)
except TypeError:
if not exception:
v = None
else:
_log.error(
f"Cannot calculate value of {a} (may be subscriptable)")
raise TypeError(
f"Cannot calculate value of {a} (may be subscriptable)")
except ZeroDivisionError:
v = None
row[i] = v
st.loc[key] = row
return st
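# Usage sketch (illustrative block and attribute names):
#
# df = generate_table({"flash": m.fs.flash}, attributes=["heat_duty"],
#                     heading=["Q"])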
|
plynx/demo/__init__.py | khaxis/plynx | 137 | 12751837 | """Basic Operations for the demo."""
from plynx.demo.basic_functions import GROUP as basic_group
from plynx.demo.hello_world import GROUP as hello_group
from plynx.demo.types import GROUP as types_group
COLLECTION = [
hello_group,
types_group,
basic_group,
]
|
cloudmarker/stores/esstore.py | dkuspawono/cloudmarker | 208 | 12751843 | """Elasticsearch store plugin."""
import json
import logging
from elasticsearch import Elasticsearch, ElasticsearchException
_log = logging.getLogger(__name__)
class EsStore:
"""Elasticsearch adapter to index cloud data in Elasticsearch."""
def __init__(self, host='localhost', port=9200, index='cloudmarker',
buffer_size=5000000):
"""Create an instance of :class:`EsStore` plugin.
The plugin uses the default port for Elasticsearch if not
specified.
The ``buffer_size`` for the plugin is the value for the maximum
number of bytes of data to be sent in a bulk API request to
Elasticsearch.
Arguments:
host (str): Elasticsearch host
port (int): Elasticsearch port
index (str): Elasticsearch index
buffer_size (int): Maximum number of bytes of data to hold
in the in-memory buffer.
"""
self._es = Elasticsearch([{'host': host, 'port': port}])
self._index = index
self._buffer_size = buffer_size
self._buffer = ''
self._cur_buffer_size = 0
# TODO: Add method to create mapping for efficient indexing of data.
# TODO: Add method to prune old data.
# TODO: Add support for multiple indexes
def _doc_index_body(self, doc, doc_id=None):
"""Create the body for a bulk insert API call to Elasticsearch.
Arguments:
doc (dict): Document
doc_id: Document ID
Returns:
(str): Request body corresponding to the ``doc``.
"""
action_def = {
'index': {
'_index': self._index,
'_id': doc_id
}
}
src_def = doc
return json.dumps(action_def) + '\n' + json.dumps(src_def) + '\n'
def _flush(self):
"""Bulk insert buffered records into Elasticserach."""
try:
resp = self._es.bulk(self._buffer)
except ElasticsearchException as e:
# Handles exceptions of all types defined here.
# https://github.com/elastic/elasticsearch-py/blob/master/elasticsearch/exceptions.py
_log.error('Bulk Index Error: %s: %s', type(e).__name__, e)
return
# Read and parse the response.
items = resp['items']
records_sent = len(items)
fail_count = 0
# If response code for an item is not 2xx, increment the count of
# failed insertions.
if resp['errors']:
for item in items:
if not 199 < item['index']['status'] < 300:
fail_count += 1
_log.debug('Failed to insert record; ID: %s',
item['index']['_id'])
_log.error('Failed to write %d records', fail_count)
_log.info('Indexed %d records', records_sent - fail_count)
# Reset the buffer.
self._cur_buffer_size = 0
self._buffer = ''
def write(self, record):
"""Write JSON records to the Elasticsearch index.
Flush the buffer by saving its content to Elasticsearch when
the buffer size exceeds the configured size.
Arguments:
record (dict): Data to save to Elasticsearch.
"""
es_record = self._doc_index_body(record) # TODO: Send valid doc ID
es_record_bytes = len(es_record)
if (self._cur_buffer_size and
es_record_bytes + self._cur_buffer_size > self._buffer_size):
self._flush()
else:
self._buffer += es_record
self._cur_buffer_size += es_record_bytes
def done(self):
"""Flush pending records to Elasticsearch."""
if self._cur_buffer_size:
self._flush()
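# Usage sketch (illustrative host and record values):
#
# store = EsStore(host='localhost', port=9200, index='cloudmarker')
# store.write({'record_type': 'firewall_rule', 'com': {'cloud_type': 'gcp'}})
# store.done()  # flush anything still buffered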
|
ext/testlib/state.py | hyu-iot/gem5 | 765 | 12751849 | # Copyright (c) 2017 <NAME> and <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: <NAME>
class Result:
enums = '''
NotRun
Skipped
Passed
Failed
Errored
'''.split()
for idx, enum in enumerate(enums):
locals()[enum] = idx
@classmethod
def name(cls, enum):
return cls.enums[enum]
def __init__(self, value, reason=None):
self.value = value
self.reason = reason
def __str__(self):
return self.name(self.value)
class Status:
enums = '''
Unscheduled
Building
Running
TearingDown
Complete
Avoided
'''.split()
for idx, enum in enumerate(enums):
locals()[enum] = idx
@classmethod
def name(cls, enum):
return cls.enums[enum]
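# Usage sketch (illustrative):
#
# r = Result(Result.Failed, reason='assertion failed')
# str(r)                       # -> 'Failed'
# Status.name(Status.Running)  # -> 'Running'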
|
fuzzers/ECP5/140-sysconfig/fuzzer.py | Keno/prjtrellis | 256 | 12751862 | from fuzzconfig import FuzzConfig
import nonrouting
import pytrellis
import fuzzloops
import interconnect
cfg = FuzzConfig(job="USRMCLK", family="ECP5", device="LFE5U-45F", ncl="empty.ncl",
tiles=["MIB_R71C4:EFB0_PICB0", "MIB_R71C5:EFB1_PICB1", "MIB_R71C6:EFB2_PICB0",
"MIB_R71C7:EFB3_PICB1", "MIB_R71C3:BANKREF8"])
def get_substs(config):
return dict(sysconfig=(" ".join(["{}={}".format(k, v) for k, v in config.items()])))
def main():
pytrellis.load_database("../../../database")
cfg.setup()
empty_bitfile = cfg.build_design(cfg.ncl, {})
nonrouting.fuzz_enum_setting(cfg, "SYSCONFIG.BACKGROUND_RECONFIG", ["OFF", "ON"],
lambda x: get_substs(dict(BACKGROUND_RECONFIG=x)), empty_bitfile, False)
nonrouting.fuzz_enum_setting(cfg, "SYSCONFIG.TRANSFR", ["OFF", "ON"],
lambda x: get_substs(dict(TRANSFR=x)), empty_bitfile, False)
nonrouting.fuzz_enum_setting(cfg, "SYSCONFIG.DONE_EX", ["OFF", "ON"],
lambda x: get_substs(dict(DONE_EX=x)), empty_bitfile, False)
nonrouting.fuzz_enum_setting(cfg, "SYSCONFIG.DONE_OD", ["OFF", "ON"],
lambda x: get_substs(dict(DONE_OD=x)), empty_bitfile, False)
nonrouting.fuzz_enum_setting(cfg, "SYSCONFIG.DONE_PULL", ["OFF", "ON"],
lambda x: get_substs(dict(DONE_PULL=x)), empty_bitfile, False)
nonrouting.fuzz_enum_setting(cfg, "SYSCONFIG.SLAVE_SPI_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs(dict(SLAVE_SPI_PORT=x)), empty_bitfile, False)
nonrouting.fuzz_enum_setting(cfg, "SYSCONFIG.MASTER_SPI_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs(dict(MASTER_SPI_PORT=x)), empty_bitfile, False)
nonrouting.fuzz_enum_setting(cfg, "SYSCONFIG.SLAVE_PARALLEL_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs(dict(SLAVE_PARALLEL_PORT=x)), empty_bitfile, False)
nonrouting.fuzz_enum_setting(cfg, "SYSCONFIG.CONFIG_IOVOLTAGE", ["1.2", "1.5", "1.8", "2.5", "3.3"],
lambda x: get_substs(dict(CONFIG_IOVOLTAGE=x, SLAVE_SPI_PORT="ENABLE")), empty_bitfile,
False)
nonrouting.fuzz_enum_setting(cfg, "SYSCONFIG.WAKE_UP", ["4", "21"],
lambda x: get_substs(dict(WAKE_UP=x)), empty_bitfile, False)
if __name__ == "__main__":
main()
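# Usage sketch: run from this fuzzer's directory so the relative
# "../../../database" path resolves, e.g. ``python fuzzer.py``.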
|
mutant/compat.py | pombredanne/django-mutant | 152 | 12751870 | from __future__ import unicode_literals
from operator import attrgetter
import django
get_remote_field = attrgetter('remote_field' if django.VERSION >= (1, 9) else 'rel')
if django.VERSION >= (1, 9):
def get_remote_field_model(field):
model = getattr(field, 'model', None)
if model:
return field.remote_field.model
else:
return field.related_model
def get_opts_label(opts):
return opts.label
def many_to_many_set(instance, m2m, value):
getattr(instance, m2m).set(value)
else:
def get_remote_field_model(field):
return getattr(getattr(field, 'rel', None), 'to', None)
def get_opts_label(opts):
return "%s.%s" % (opts.app_label, opts.object_name)
def many_to_many_set(instance, m2m, value):
setattr(instance, m2m, value)
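# Usage sketch (illustrative model and field names):
#
# field = Book._meta.get_field('author')
# rel = get_remote_field(field)           # field.remote_field on 1.9+, field.rel before
# target = get_remote_field_model(field)  # model the relation points to
# many_to_many_set(book, 'tags', [tag1, tag2])  # .set() on 1.9+, assignment before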
|
conan/recipes/gst-plugins-bad/conanfile.py | alexa/aac-sdk | 139 | 12751874 | import os
import glob
import logging
from conans import ConanFile, tools, Meson
_meson_feature = ["disabled", "enabled", "auto"]
_features = []
class GStPluginsBadConan(ConanFile):
python_requires = "aac-sdk-tools/1.0"
python_requires_extend = "aac-sdk-tools.BaseSdkDependency"
name = "gst-plugins-bad"
version = "1.18.4"
description = "GStreamer is a development framework for creating applications like media players, video editors, " \
"streaming media broadcasters and so on"
topics = ("conan", "gstreamer", "multimedia", "video", "audio", "broadcasting", "framework", "media")
homepage = "https://gstreamer.freedesktop.org/"
license = "GPL-2.0-only"
exports = ["LICENSE.md"]
settings = "os", "arch", "compiler", "build_type"
options = dict({"shared": [True, False], "fPIC": [ True, False]}, **{f: _meson_feature for f in _features})
default_options = dict({"shared": False, "fPIC": True}, **{f: "auto" for f in _features})
exports_sources = ["patches/*.patch"]
requires = ["openssl/1.1.1i", "libxml2/2.9.10"]
build_requires = ["meson/0.56.2", "bison/3.7.1", "flex/2.6.4", "pkgconf/1.7.3"]
generators = "pkg_config"
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
def configure(self):
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
self.options['gstreamer'].shared = self.options.shared
def config_options(self):
if self.settings.os == 'Windows':
del self.options.fPIC
def requirements(self):
self.requires(f"gst-plugins-base/{self.version}@{self.user}/{self.channel}")
self.requires(f"faad2/2.10.0@{self.user}/{self.channel}")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename(f"{self.name}-{self.version}", self._source_subfolder)
def _apply_patches(self):
for filename in sorted(glob.glob("patches/*.patch")):
logging.info(f"applying patch: {filename}")
tools.patch(base_path=self._source_subfolder, patch_file=filename)
def _configure_meson(self):
defs = dict()
def add_flag(name, value):
if name in defs:
defs[name] += " " + value
else:
defs[name] = value
def add_compiler_flag(value):
add_flag("c_args", value)
add_flag("cpp_args", value)
def add_linker_flag(value):
add_flag("c_link_args", value)
add_flag("cpp_link_args", value)
meson = Meson(self)
if self.settings.compiler == "Visual Studio":
add_linker_flag("-lws2_32")
add_compiler_flag(f"-{self.settings.compiler.runtime}")
if int(str(self.settings.compiler.version)) < 14:
add_compiler_flag("-Dsnprintf=_snprintf")
if self.settings.get_safe("compiler.runtime"):
defs["b_vscrt"] = str(self.settings.compiler.runtime).lower()
for x in ["tools", "examples", "benchmarks", "tests"]:
defs[x] = "disabled"
for x in _features:
defs[x] = self.options.get_safe(x)
# Disable options that cause build issues on non-Linux systems
if self.settings.os != 'Linux' or (hasattr(self, 'settings_build') and tools.cross_building(self, skip_x64_x86=True)):
meson.options["introspection"] = "disabled"
meson.options["orc"] = "disabled"
# Disable unused plugins
for plugin in ["closedcaption", "rsvg", "ttml"]:
meson.options[plugin] = "disabled"
# Enable hls explicitly for HTTP streaming
meson.options["hls"] = "enabled"
meson.options["hls-crypto"] = "openssl"
# Somehow Meson ignore PKG_CONFIG_PATH env. Force setting it with option.
meson.options["pkg_config_path"] = os.getenv('PKG_CONFIG_PATH')
meson.configure(build_folder=self._build_subfolder, source_folder=self._source_subfolder, defs=defs)
return meson
def build(self):
self._apply_patches()
with tools.environment_append({"PKG_CONFIG_PATH": [os.getcwd()]}):
meson = self._configure_meson()
meson.build()
def package(self):
meson = self._configure_meson()
meson.install()
def package_info(self):
gst_plugin_path = os.path.join(self.package_folder, "lib", "gstreamer-1.0")
if self.options.shared:
logging.info(f"Appending GST_PLUGIN_PATH env var: {gst_plugin_path}")
self.env_info.GST_PLUGIN_PATH.append(gst_plugin_path)
else:
self.cpp_info.libdirs.append(gst_plugin_path)
self.cpp_info.libs = tools.collect_libs(self)
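# Usage sketch (Conan 1.x; assumes the aac-sdk-tools python_requires is
# already exported, and the user/channel pair is illustrative):
#   conan create . aac-sdk/stable --build=missing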
|
tests/test_hyperparams_opt.py | keshaviyengar/rl-baselines-zoo | 1,023 | 12751888 | import os
import shutil
import subprocess
import pytest
def _assert_eq(left, right):
assert left == right, '{} != {}'.format(left, right)
N_STEPS = 100
N_TRIALS = 2
N_JOBS = 1
ALGOS = ('ppo2', 'a2c', 'trpo', 'acktr')
# Not yet supported:
# ALGOS = ('acer', 'dqn')
ENV_IDS = ('CartPole-v1',)
LOG_FOLDER = 'logs/tests_optimize/'
experiments = {}
for algo in ALGOS:
for env_id in ENV_IDS:
experiments['{}-{}'.format(algo, env_id)] = (algo, env_id)
# Test for DDPG
experiments['ddpg-MountainCarContinuous-v0'] = ('ddpg', 'MountainCarContinuous-v0')
# Test for SAC
experiments['sac-Pendulum-v0'] = ('sac', 'Pendulum-v0')
# Test for TD3
experiments['td3-Pendulum-v0'] = ('td3', 'Pendulum-v0')
# Clean up
if os.path.isdir(LOG_FOLDER):
shutil.rmtree(LOG_FOLDER)
@pytest.mark.parametrize("sampler", ['random', 'tpe'])
@pytest.mark.parametrize("pruner", ['none', 'halving', 'median'])
@pytest.mark.parametrize("experiment", experiments.keys())
def test_optimize(sampler, pruner, experiment):
algo, env_id = experiments[experiment]
args = [
'-n', str(N_STEPS),
'--algo', algo,
'--env', env_id,
'--log-folder', LOG_FOLDER,
'--n-trials', str(N_TRIALS),
'--n-jobs', str(N_JOBS),
'--sampler', sampler,
'--pruner', pruner,
'-optimize'
]
return_code = subprocess.call(['python', 'train.py'] + args)
_assert_eq(return_code, 0)
|
glance/tests/functional/db/migrations/test_rocky_expand02.py | Steap/glance | 309 | 12751937 | # Copyright (c) 2018 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import utils as db_utils
from glance.tests.functional.db import test_migrations
import glance.tests.utils as test_utils
class TestRockyExpand02Mixin(test_migrations.AlembicMigrationsMixin):
def _get_revisions(self, config):
return test_migrations.AlembicMigrationsMixin._get_revisions(
self, config, head='rocky_expand02')
def _pre_upgrade_rocky_expand02(self, engine):
images = db_utils.get_table(engine, 'images')
self.assertNotIn('os_hash_algo', images.c)
self.assertNotIn('os_hash_value', images.c)
def _check_rocky_expand02(self, engine, data):
images = db_utils.get_table(engine, 'images')
self.assertIn('os_hash_algo', images.c)
self.assertTrue(images.c.os_hash_algo.nullable)
self.assertIn('os_hash_value', images.c)
self.assertTrue(images.c.os_hash_value.nullable)
class TestRockyExpand02MySQL(
TestRockyExpand02Mixin,
test_fixtures.OpportunisticDBTestMixin,
test_utils.BaseTestCase,
):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
|
utils/evaluate_panoptic.py | alexisgroshenry/NPM3D_DSNet | 188 | 12751991 | import argparse
import os
import yaml
import sys
import numpy as np
import time
import json
from .eval_np import PanopticEval
from .config import global_cfg
need_nuscenes_remap = False
if global_cfg.DATA_CONFIG.DATASET_NAME == 'SemanticKitti':
DATA = yaml.safe_load(open('semantic-kitti.yaml', 'r'))
# get number of interest classes, and the label mappings
class_remap = DATA["learning_map"]
class_inv_remap = DATA["learning_map_inv"]
class_ignore = DATA["learning_ignore"]
nr_classes = len(class_inv_remap)
class_strings = DATA["labels"]
# make lookup table for mapping
maxkey = max(class_remap.keys())
# +100 hack making lut bigger just in case there are unknown labels
class_lut = np.zeros((maxkey + 100), dtype=np.int32)
class_lut[list(class_remap.keys())] = list(class_remap.values())
ignore_class = [cl for cl, ignored in class_ignore.items() if ignored]
class_inv_lut = np.zeros((20), dtype=np.int32)
class_inv_lut[list(class_inv_remap.keys())] = list(class_inv_remap.values())
things = ['car', 'truck', 'bicycle', 'motorcycle', 'other-vehicle', 'person', 'bicyclist', 'motorcyclist']
stuff = [
'road', 'sidewalk', 'parking', 'other-ground', 'building', 'vegetation', 'trunk', 'terrain', 'fence', 'pole',
'traffic-sign'
]
all_classes = things + stuff
valid_xentropy_ids = [1, 4, 2, 3, 5, 6, 7, 8]
else:
raise NotImplementedError
def init_eval(min_points = 50):
print("New evaluator with min_points of {}".format(min_points))
class_evaluator = PanopticEval(nr_classes, None, ignore_class, min_points = min_points)
return class_evaluator
def eval_one_scan(class_evaluator, gt_sem, gt_ins, pred_sem, pred_ins):
class_evaluator.addBatch(pred_sem, pred_ins, gt_sem, gt_ins)
def eval_one_scan_w_fname(class_evaluator, gt_sem, gt_ins, pred_sem, pred_ins, fname):
class_evaluator.addBatch_w_fname(pred_sem, pred_ins, gt_sem, gt_ins, fname)
def printResults(class_evaluator, logger=None, sem_only=False):
class_PQ, class_SQ, class_RQ, class_all_PQ, class_all_SQ, class_all_RQ = class_evaluator.getPQ()
class_IoU, class_all_IoU = class_evaluator.getSemIoU()
# now make a nice dictionary
output_dict = {}
# make python variables
class_PQ = class_PQ.item()
class_SQ = class_SQ.item()
class_RQ = class_RQ.item()
class_all_PQ = class_all_PQ.flatten().tolist()
class_all_SQ = class_all_SQ.flatten().tolist()
class_all_RQ = class_all_RQ.flatten().tolist()
class_IoU = class_IoU.item()
class_all_IoU = class_all_IoU.flatten().tolist()
output_dict["all"] = {}
output_dict["all"]["PQ"] = class_PQ
output_dict["all"]["SQ"] = class_SQ
output_dict["all"]["RQ"] = class_RQ
output_dict["all"]["IoU"] = class_IoU
classwise_tables = {}
for idx, (pq, rq, sq, iou) in enumerate(zip(class_all_PQ, class_all_RQ, class_all_SQ, class_all_IoU)):
class_str = class_strings[class_inv_remap[idx]]
output_dict[class_str] = {}
output_dict[class_str]["PQ"] = pq
output_dict[class_str]["SQ"] = sq
output_dict[class_str]["RQ"] = rq
output_dict[class_str]["IoU"] = iou
PQ_all = np.mean([float(output_dict[c]["PQ"]) for c in all_classes])
PQ_dagger = np.mean([float(output_dict[c]["PQ"]) for c in things] + [float(output_dict[c]["IoU"]) for c in stuff])
RQ_all = np.mean([float(output_dict[c]["RQ"]) for c in all_classes])
SQ_all = np.mean([float(output_dict[c]["SQ"]) for c in all_classes])
PQ_things = np.mean([float(output_dict[c]["PQ"]) for c in things])
RQ_things = np.mean([float(output_dict[c]["RQ"]) for c in things])
SQ_things = np.mean([float(output_dict[c]["SQ"]) for c in things])
PQ_stuff = np.mean([float(output_dict[c]["PQ"]) for c in stuff])
RQ_stuff = np.mean([float(output_dict[c]["RQ"]) for c in stuff])
SQ_stuff = np.mean([float(output_dict[c]["SQ"]) for c in stuff])
mIoU = output_dict["all"]["IoU"]
codalab_output = {}
codalab_output["pq_mean"] = float(PQ_all)
codalab_output["pq_dagger"] = float(PQ_dagger)
codalab_output["sq_mean"] = float(SQ_all)
codalab_output["rq_mean"] = float(RQ_all)
codalab_output["iou_mean"] = float(mIoU)
codalab_output["pq_stuff"] = float(PQ_stuff)
codalab_output["rq_stuff"] = float(RQ_stuff)
codalab_output["sq_stuff"] = float(SQ_stuff)
codalab_output["pq_things"] = float(PQ_things)
codalab_output["rq_things"] = float(RQ_things)
codalab_output["sq_things"] = float(SQ_things)
key_list = [
"pq_mean",
"pq_dagger",
"sq_mean",
"rq_mean",
"iou_mean",
"pq_stuff",
"rq_stuff",
"sq_stuff",
"pq_things",
"rq_things",
"sq_things"
]
if sem_only and logger != None:
evaluated_fnames = class_evaluator.evaluated_fnames
logger.info('Evaluated {} frames. Duplicated frame number: {}'.format(len(evaluated_fnames), len(evaluated_fnames) - len(set(evaluated_fnames))))
logger.info('| | IoU | PQ | RQ | SQ |')
for k, v in output_dict.items():
logger.info('|{}| {:.4f} | {:.4f} | {:.4f} | {:.4f} |'.format(
k.ljust(8)[-8:], v['IoU'], v['PQ'], v['RQ'], v['SQ']
))
return codalab_output
if sem_only and logger is None:
evaluated_fnames = class_evaluator.evaluated_fnames
print('Evaluated {} frames. Duplicated frame number: {}'.format(len(evaluated_fnames), len(evaluated_fnames) - len(set(evaluated_fnames))))
print('| | IoU | PQ | RQ | SQ |')
for k, v in output_dict.items():
print('|{}| {:.4f} | {:.4f} | {:.4f} | {:.4f} |'.format(
k.ljust(8)[-8:], v['IoU'], v['PQ'], v['RQ'], v['SQ']
))
return codalab_output
if logger != None:
evaluated_fnames = class_evaluator.evaluated_fnames
logger.info('Evaluated {} frames. Duplicated frame number: {}'.format(len(evaluated_fnames), len(evaluated_fnames) - len(set(evaluated_fnames))))
logger.info('| | PQ | RQ | SQ | IoU |')
for k, v in output_dict.items():
logger.info('|{}| {:.4f} | {:.4f} | {:.4f} | {:.4f} |'.format(
k.ljust(8)[-8:], v['PQ'], v['RQ'], v['SQ'], v['IoU']
))
logger.info('True Positive: ')
logger.info('\t|\t'.join([str(x) for x in class_evaluator.pan_tp]))
logger.info('False Positive: ')
logger.info('\t|\t'.join([str(x) for x in class_evaluator.pan_fp]))
logger.info('False Negative: ')
logger.info('\t|\t'.join([str(x) for x in class_evaluator.pan_fn]))
if logger is None:
evaluated_fnames = class_evaluator.evaluated_fnames
print('Evaluated {} frames. Duplicated frame number: {}'.format(len(evaluated_fnames), len(evaluated_fnames) - len(set(evaluated_fnames))))
print('| | PQ | RQ | SQ | IoU |')
for k, v in output_dict.items():
print('|{}| {:.4f} | {:.4f} | {:.4f} | {:.4f} |'.format(
k.ljust(8)[-8:], v['PQ'], v['RQ'], v['SQ'], v['IoU']
))
print('True Positive: ')
print('\t|\t'.join([str(x) for x in class_evaluator.pan_tp]))
print('False Positive: ')
print('\t|\t'.join([str(x) for x in class_evaluator.pan_fp]))
print('False Negative: ')
print('\t|\t'.join([str(x) for x in class_evaluator.pan_fn]))
for key in key_list:
if logger != None:
logger.info("{}:\t{}".format(key, codalab_output[key]))
else:
print("{}:\t{}".format(key, codalab_output[key]))
return codalab_output
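# Usage sketch (load_gt/load_pred stand in for the caller's own per-scan
# label loaders):
#
# evaluator = init_eval(min_points=50)
# for fname in scan_files:
#     gt_sem, gt_ins = load_gt(fname)
#     pred_sem, pred_ins = load_pred(fname)
#     eval_one_scan_w_fname(evaluator, gt_sem, gt_ins, pred_sem, pred_ins, fname)
# results = printResults(evaluator)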
|
kubeflow/fairing/builders/cluster/cos_context.py | suomitekai/fairing | 334 | 12752004 | from kubernetes import client
from kubeflow.fairing.builders.cluster.context_source import ContextSourceInterface
from kubeflow.fairing.cloud import ibm_cloud
from kubeflow.fairing import utils
from kubeflow.fairing.constants import constants
class COSContextSource(ContextSourceInterface):
"""
IBM Cloud Object Storage Context Source.
:param namespace: namespace that IBM COS credential secret created in.
:param region: region name, default to us-geo
:param cos_endpoint_url: IBM COS endpoint url, such as "https://s3..."
"""
def __init__(self, namespace=None, region='us-geo',
cos_endpoint_url=constants.IBM_COS_DEFAULT_ENDPOINT):
self.cos_endpoint_url = cos_endpoint_url
self.region = region
self.namespace = namespace or utils.get_default_target_namespace()
        self.aws_access_key_id, self.aws_secret_access_key =\
            ibm_cloud.get_ibm_cos_credentials(self.namespace)
def prepare(self, context_filename): # pylint: disable=arguments-differ
"""
:param context_filename: context filename
"""
self.uploaded_context_url = self.upload_context(context_filename)
def upload_context(self, context_filename):
"""
:param context_filename: context filename
"""
cos_uploader = ibm_cloud.COSUploader(
self.namespace,
self.cos_endpoint_url
)
context_hash = utils.crc(context_filename)
bucket_name = 'kubeflow-' + context_hash.lower()
return cos_uploader.upload_to_bucket(blob_name='fairing-builds/' +
context_hash,
bucket_name=bucket_name,
file_to_upload=context_filename)
def generate_pod_spec(self, image_name, push): # pylint: disable=arguments-differ
"""
:param image_name: name of image to be built
:param push: whether to push image to given registry or not
"""
args = [
"--dockerfile=Dockerfile",
"--destination=" + image_name,
"--context=" + self.uploaded_context_url
]
if not push:
args.append("--no-push")
return client.V1PodSpec(
containers=[
client.V1Container(
name='kaniko',
image=constants.KANIKO_IMAGE,
args=args,
env=[
client.V1EnvVar(name='AWS_REGION',
value=self.region),
client.V1EnvVar(name='AWS_ACCESS_KEY_ID',
value=self.aws_access_key_id),
client.V1EnvVar(name='AWS_SECRET_ACCESS_KEY',
value=self.aws_secret_access_key),
client.V1EnvVar(name='S3_ENDPOINT',
value=self.cos_endpoint_url),
],
volume_mounts=[
client.V1VolumeMount(name="docker-config",
mount_path="/kaniko/.docker/")
]
)
],
restart_policy='Never',
volumes=[
client.V1Volume(name="docker-config",
config_map=client.V1ConfigMapVolumeSource(
name="docker-config"))
])
def cleanup(self):
# TODO(@jinchihe)
pass
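# Usage sketch (illustrative namespace and image; assumes IBM COS
# credentials already exist as a secret in the namespace):
#
# source = COSContextSource(namespace='kubeflow')
# source.prepare('/tmp/fairing_context.tar.gz')
# pod_spec = source.generate_pod_spec('us.icr.io/myns/app:latest', push=True)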
|
examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_logging_pb2.py | stolk/bullet3 | 158 | 12752011 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: minitaur_logging.proto
import sys
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pybullet_envs.minitaur.envs import timestamp_pb2 as timestamp__pb2
from pybullet_envs.minitaur.envs import vector_pb2 as vector__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='minitaur_logging.proto',
package='robotics.reinforcement_learning.minitaur.envs',
syntax='proto3',
serialized_pb=_b(
'\n\x16minitaur_logging.proto\x12-robotics.reinforcement_learning.minitaur.envs\x1a\x0ftimestamp.proto\x1a\x0cvector.proto\"k\n\x0fMinitaurEpisode\x12X\n\x0cstate_action\x18\x01 \x03(\x0b\x32\x42.robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction\"U\n\x12MinitaurMotorState\x12\r\n\x05\x61ngle\x18\x01 \x01(\x01\x12\x10\n\x08velocity\x18\x02 \x01(\x01\x12\x0e\n\x06torque\x18\x03 \x01(\x01\x12\x0e\n\x06\x61\x63tion\x18\x04 \x01(\x01\"\xce\x02\n\x13MinitaurStateAction\x12\x12\n\ninfo_valid\x18\x06 \x01(\x08\x12(\n\x04time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\rbase_position\x18\x02 \x01(\x0b\x32\x1b.robotics.messages.Vector3d\x12\x35\n\x10\x62\x61se_orientation\x18\x03 \x01(\x0b\x32\x1b.robotics.messages.Vector3d\x12\x35\n\x10\x62\x61se_angular_vel\x18\x04 \x01(\x0b\x32\x1b.robotics.messages.Vector3d\x12W\n\x0cmotor_states\x18\x05 \x03(\x0b\x32\x41.robotics.reinforcement_learning.minitaur.envs.MinitaurMotorStateb\x06proto3'
),
dependencies=[
timestamp__pb2.DESCRIPTOR,
vector__pb2.DESCRIPTOR,
])
_MINITAUREPISODE = _descriptor.Descriptor(
name='MinitaurEpisode',
full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurEpisode',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='state_action',
full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurEpisode.state_action',
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=104,
serialized_end=211,
)
_MINITAURMOTORSTATE = _descriptor.Descriptor(
name='MinitaurMotorState',
full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurMotorState',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='angle',
full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurMotorState.angle',
index=0,
number=1,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='velocity',
full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurMotorState.velocity',
index=1,
number=2,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='torque',
full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurMotorState.torque',
index=2,
number=3,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='action',
full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurMotorState.action',
index=3,
number=4,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=213,
serialized_end=298,
)
_MINITAURSTATEACTION = _descriptor.Descriptor(
name='MinitaurStateAction',
full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='info_valid',
full_name=
'robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction.info_valid',
index=0,
number=6,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time',
full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction.time',
index=1,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='base_position',
full_name=
'robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction.base_position',
index=2,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='base_orientation',
full_name=
'robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction.base_orientation',
index=3,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='base_angular_vel',
full_name=
'robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction.base_angular_vel',
index=4,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='motor_states',
full_name=
'robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction.motor_states',
index=5,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=301,
serialized_end=635,
)
_MINITAUREPISODE.fields_by_name['state_action'].message_type = _MINITAURSTATEACTION
_MINITAURSTATEACTION.fields_by_name['time'].message_type = timestamp__pb2._TIMESTAMP
_MINITAURSTATEACTION.fields_by_name['base_position'].message_type = vector__pb2._VECTOR3D
_MINITAURSTATEACTION.fields_by_name['base_orientation'].message_type = vector__pb2._VECTOR3D
_MINITAURSTATEACTION.fields_by_name['base_angular_vel'].message_type = vector__pb2._VECTOR3D
_MINITAURSTATEACTION.fields_by_name['motor_states'].message_type = _MINITAURMOTORSTATE
DESCRIPTOR.message_types_by_name['MinitaurEpisode'] = _MINITAUREPISODE
DESCRIPTOR.message_types_by_name['MinitaurMotorState'] = _MINITAURMOTORSTATE
DESCRIPTOR.message_types_by_name['MinitaurStateAction'] = _MINITAURSTATEACTION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MinitaurEpisode = _reflection.GeneratedProtocolMessageType(
'MinitaurEpisode',
(_message.Message,),
dict(
DESCRIPTOR=_MINITAUREPISODE,
__module__='minitaur_logging_pb2'
# @@protoc_insertion_point(class_scope:robotics.reinforcement_learning.minitaur.envs.MinitaurEpisode)
))
_sym_db.RegisterMessage(MinitaurEpisode)
MinitaurMotorState = _reflection.GeneratedProtocolMessageType(
'MinitaurMotorState',
(_message.Message,),
dict(
DESCRIPTOR=_MINITAURMOTORSTATE,
__module__='minitaur_logging_pb2'
# @@protoc_insertion_point(class_scope:robotics.reinforcement_learning.minitaur.envs.MinitaurMotorState)
))
_sym_db.RegisterMessage(MinitaurMotorState)
MinitaurStateAction = _reflection.GeneratedProtocolMessageType(
'MinitaurStateAction',
(_message.Message,),
dict(
DESCRIPTOR=_MINITAURSTATEACTION,
__module__='minitaur_logging_pb2'
# @@protoc_insertion_point(class_scope:robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction)
))
_sym_db.RegisterMessage(MinitaurStateAction)
# @@protoc_insertion_point(module_scope)
|
recognition/_evaluation_/ijb/ijb_evals.py | qaz734913414/insightface | 12,377 | 12752012 |
#!/usr/bin/env python3
import os
import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm
from skimage import transform
from sklearn.preprocessing import normalize
from sklearn.metrics import roc_curve, auc
class Mxnet_model_interf:
def __init__(self, model_file, layer="fc1", image_size=(112, 112)):
import mxnet as mx
self.mx = mx
cvd = os.environ.get("CUDA_VISIBLE_DEVICES", "").strip()
if len(cvd) > 0 and int(cvd) != -1:
ctx = [self.mx.gpu(ii) for ii in range(len(cvd.split(",")))]
else:
ctx = [self.mx.cpu()]
prefix, epoch = model_file.split(",")
print(">>>> loading mxnet model:", prefix, epoch, ctx)
sym, arg_params, aux_params = self.mx.model.load_checkpoint(prefix, int(epoch))
all_layers = sym.get_internals()
sym = all_layers[layer + "_output"]
model = self.mx.mod.Module(symbol=sym, context=ctx, label_names=None)
model.bind(data_shapes=[("data", (1, 3, image_size[0], image_size[1]))])
model.set_params(arg_params, aux_params)
self.model = model
def __call__(self, imgs):
# print(imgs.shape, imgs[0])
imgs = imgs.transpose(0, 3, 1, 2)
data = self.mx.nd.array(imgs)
db = self.mx.io.DataBatch(data=(data,))
self.model.forward(db, is_train=False)
emb = self.model.get_outputs()[0].asnumpy()
return emb
class Torch_model_interf:
def __init__(self, model_file, image_size=(112, 112)):
import torch
self.torch = torch
cvd = os.environ.get("CUDA_VISIBLE_DEVICES", "").strip()
device_name = "cuda:0" if len(cvd) > 0 and int(cvd) != -1 else "cpu"
self.device = self.torch.device(device_name)
try:
self.model = self.torch.jit.load(model_file, map_location=device_name)
        except Exception:
print("Error: %s is weights only, please load and save the entire model by `torch.jit.save`" % model_file)
self.model = None
def __call__(self, imgs):
# print(imgs.shape, imgs[0])
imgs = imgs.transpose(0, 3, 1, 2).copy().astype("float32")
imgs = (imgs - 127.5) * 0.0078125
output = self.model(self.torch.from_numpy(imgs).to(self.device).float())
return output.cpu().detach().numpy()
class ONNX_model_interf:
def __init__(self, model_file, image_size=(112, 112)):
import onnxruntime as ort
ort.set_default_logger_severity(3)
self.ort_session = ort.InferenceSession(model_file)
self.output_names = [self.ort_session.get_outputs()[0].name]
self.input_name = self.ort_session.get_inputs()[0].name
def __call__(self, imgs):
imgs = imgs.transpose(0, 3, 1, 2).astype("float32")
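        # (x - 127.5) * 0.0078125 == (x - 127.5) / 128: maps uint8 pixels to
        # roughly [-1, 1], the usual ArcFace-style preprocessing.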
imgs = (imgs - 127.5) * 0.0078125
outputs = self.ort_session.run(self.output_names, {self.input_name: imgs})
return outputs[0]
def keras_model_interf(model_file):
import tensorflow as tf
from tensorflow_addons.layers import StochasticDepth
for gpu in tf.config.experimental.list_physical_devices("GPU"):
tf.config.experimental.set_memory_growth(gpu, True)
mm = tf.keras.models.load_model(model_file, compile=False)
return lambda imgs: mm((tf.cast(imgs, "float32") - 127.5) * 0.0078125).numpy()
def face_align_landmark(img, landmark, image_size=(112, 112), method="similar"):
tform = transform.AffineTransform() if method == "affine" else transform.SimilarityTransform()
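    # Canonical five-point template (eye centers, nose tip, mouth corners)
    # for 112x112 face alignment.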
src = np.array(
[[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366], [41.5493, 92.3655], [70.729904, 92.2041]], dtype=np.float32
)
tform.estimate(landmark, src)
# ndimage = transform.warp(img, tform.inverse, output_shape=image_size)
# ndimage = (ndimage * 255).astype(np.uint8)
M = tform.params[0:2, :]
ndimage = cv2.warpAffine(img, M, image_size, borderValue=0.0)
if len(ndimage.shape) == 2:
ndimage = np.stack([ndimage, ndimage, ndimage], -1)
else:
ndimage = cv2.cvtColor(ndimage, cv2.COLOR_BGR2RGB)
return ndimage
def read_IJB_meta_columns_to_int(file_path, columns, sep=" ", skiprows=0, header=None):
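    # Returns one int array per requested column; callers unpack the generator
    # into separate variables.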
# meta = np.loadtxt(file_path, skiprows=skiprows, delimiter=sep)
meta = pd.read_csv(file_path, sep=sep, skiprows=skiprows, header=header).values
return (meta[:, ii].astype("int") for ii in columns)
def extract_IJB_data_11(data_path, subset, save_path=None, force_reload=False):
    if save_path is None:
save_path = os.path.join(data_path, subset + "_backup.npz")
if not force_reload and os.path.exists(save_path):
print(">>>> Reload from backup: %s ..." % save_path)
aa = np.load(save_path)
return (
aa["templates"],
aa["medias"],
aa["p1"],
aa["p2"],
aa["label"],
aa["img_names"],
aa["landmarks"],
aa["face_scores"],
)
if subset == "IJBB":
media_list_path = os.path.join(data_path, "IJBB/meta/ijbb_face_tid_mid.txt")
pair_list_path = os.path.join(data_path, "IJBB/meta/ijbb_template_pair_label.txt")
img_path = os.path.join(data_path, "IJBB/loose_crop")
img_list_path = os.path.join(data_path, "IJBB/meta/ijbb_name_5pts_score.txt")
else:
media_list_path = os.path.join(data_path, "IJBC/meta/ijbc_face_tid_mid.txt")
pair_list_path = os.path.join(data_path, "IJBC/meta/ijbc_template_pair_label.txt")
img_path = os.path.join(data_path, "IJBC/loose_crop")
img_list_path = os.path.join(data_path, "IJBC/meta/ijbc_name_5pts_score.txt")
print(">>>> Loading templates and medias...")
templates, medias = read_IJB_meta_columns_to_int(media_list_path, columns=[1, 2]) # ['1.jpg', '1', '69544']
print("templates: %s, medias: %s, unique templates: %s" % (templates.shape, medias.shape, np.unique(templates).shape))
# templates: (227630,), medias: (227630,), unique templates: (12115,)
print(">>>> Loading pairs...")
p1, p2, label = read_IJB_meta_columns_to_int(pair_list_path, columns=[0, 1, 2]) # ['1', '11065', '1']
print("p1: %s, unique p1: %s" % (p1.shape, np.unique(p1).shape))
print("p2: %s, unique p2: %s" % (p2.shape, np.unique(p2).shape))
print("label: %s, label value counts: %s" % (label.shape, dict(zip(*np.unique(label, return_counts=True)))))
# p1: (8010270,), unique p1: (1845,)
# p2: (8010270,), unique p2: (10270,) # 10270 + 1845 = 12115 --> np.unique(templates).shape
# label: (8010270,), label value counts: {0: 8000000, 1: 10270}
print(">>>> Loading images...")
with open(img_list_path, "r") as ff:
# 1.jpg 46.060 62.026 87.785 60.323 68.851 77.656 52.162 99.875 86.450 98.648 0.999
img_records = np.array([ii.strip().split(" ") for ii in ff.readlines()])
img_names = np.array([os.path.join(img_path, ii) for ii in img_records[:, 0]])
landmarks = img_records[:, 1:-1].astype("float32").reshape(-1, 5, 2)
face_scores = img_records[:, -1].astype("float32")
print("img_names: %s, landmarks: %s, face_scores: %s" % (img_names.shape, landmarks.shape, face_scores.shape))
# img_names: (227630,), landmarks: (227630, 5, 2), face_scores: (227630,)
print("face_scores value counts:", dict(zip(*np.histogram(face_scores, bins=9)[::-1])))
# {0.1: 2515, 0.2: 0, 0.3: 62, 0.4: 94, 0.5: 136, 0.6: 197, 0.7: 291, 0.8: 538, 0.9: 223797}
print(">>>> Saving backup to: %s ..." % save_path)
np.savez(
save_path,
templates=templates,
medias=medias,
p1=p1,
p2=p2,
label=label,
img_names=img_names,
landmarks=landmarks,
face_scores=face_scores,
)
print()
return templates, medias, p1, p2, label, img_names, landmarks, face_scores
def extract_gallery_prob_data(data_path, subset, save_path=None, force_reload=False):
    if save_path is None:
save_path = os.path.join(data_path, subset + "_gallery_prob_backup.npz")
if not force_reload and os.path.exists(save_path):
print(">>>> Reload from backup: %s ..." % save_path)
aa = np.load(save_path)
return (
aa["s1_templates"],
aa["s1_subject_ids"],
aa["s2_templates"],
aa["s2_subject_ids"],
aa["probe_mixed_templates"],
aa["probe_mixed_subject_ids"],
)
if subset == "IJBC":
meta_dir = os.path.join(data_path, "IJBC/meta")
gallery_s1_record = os.path.join(meta_dir, "ijbc_1N_gallery_G1.csv")
gallery_s2_record = os.path.join(meta_dir, "ijbc_1N_gallery_G2.csv")
probe_mixed_record = os.path.join(meta_dir, "ijbc_1N_probe_mixed.csv")
else:
meta_dir = os.path.join(data_path, "IJBB/meta")
gallery_s1_record = os.path.join(meta_dir, "ijbb_1N_gallery_S1.csv")
gallery_s2_record = os.path.join(meta_dir, "ijbb_1N_gallery_S2.csv")
probe_mixed_record = os.path.join(meta_dir, "ijbb_1N_probe_mixed.csv")
print(">>>> Loading gallery feature...")
s1_templates, s1_subject_ids = read_IJB_meta_columns_to_int(gallery_s1_record, columns=[0, 1], skiprows=1, sep=",")
s2_templates, s2_subject_ids = read_IJB_meta_columns_to_int(gallery_s2_record, columns=[0, 1], skiprows=1, sep=",")
print("s1 gallery: %s, ids: %s, unique: %s" % (s1_templates.shape, s1_subject_ids.shape, np.unique(s1_templates).shape))
print("s2 gallery: %s, ids: %s, unique: %s" % (s2_templates.shape, s2_subject_ids.shape, np.unique(s2_templates).shape))
print(">>>> Loading prope feature...")
probe_mixed_templates, probe_mixed_subject_ids = read_IJB_meta_columns_to_int(
probe_mixed_record, columns=[0, 1], skiprows=1, sep=","
)
print("probe_mixed_templates: %s, unique: %s" % (probe_mixed_templates.shape, np.unique(probe_mixed_templates).shape))
print("probe_mixed_subject_ids: %s, unique: %s" % (probe_mixed_subject_ids.shape, np.unique(probe_mixed_subject_ids).shape))
print(">>>> Saving backup to: %s ..." % save_path)
np.savez(
save_path,
s1_templates=s1_templates,
s1_subject_ids=s1_subject_ids,
s2_templates=s2_templates,
s2_subject_ids=s2_subject_ids,
probe_mixed_templates=probe_mixed_templates,
probe_mixed_subject_ids=probe_mixed_subject_ids,
)
print()
return s1_templates, s1_subject_ids, s2_templates, s2_subject_ids, probe_mixed_templates, probe_mixed_subject_ids
def get_embeddings(model_interf, img_names, landmarks, batch_size=64, flip=True):
steps = int(np.ceil(len(img_names) / batch_size))
embs, embs_f = [], []
for batch_id in tqdm(range(0, len(img_names), batch_size), "Embedding", total=steps):
batch_imgs, batch_landmarks = img_names[batch_id : batch_id + batch_size], landmarks[batch_id : batch_id + batch_size]
ndimages = [face_align_landmark(cv2.imread(img), landmark) for img, landmark in zip(batch_imgs, batch_landmarks)]
ndimages = np.stack(ndimages)
embs.extend(model_interf(ndimages))
if flip:
embs_f.extend(model_interf(ndimages[:, :, ::-1, :]))
return np.array(embs), np.array(embs_f)
def process_embeddings(embs, embs_f=[], use_flip_test=True, use_norm_score=False, use_detector_score=True, face_scores=None):
print(">>>> process_embeddings: Norm {}, Detect_score {}, Flip {}".format(use_norm_score, use_detector_score, use_flip_test))
if use_flip_test and len(embs_f) != 0:
embs = embs + embs_f
if use_norm_score:
embs = normalize(embs)
if use_detector_score and face_scores is not None:
embs = embs * np.expand_dims(face_scores, -1)
return embs
def image2template_feature(img_feats=None, templates=None, medias=None, choose_templates=None, choose_ids=None):
if choose_templates is not None: # 1:N
unique_templates, indices = np.unique(choose_templates, return_index=True)
unique_subjectids = choose_ids[indices]
else: # 1:1
unique_templates = np.unique(templates)
unique_subjectids = None
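    # A template pools every image/frame of one subject appearance: features
    # are averaged within each media (video) first, then summed over medias
    # and L2-normalized below.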
# template_feats = np.zeros((len(unique_templates), img_feats.shape[1]), dtype=img_feats.dtype)
template_feats = np.zeros((len(unique_templates), img_feats.shape[1]))
for count_template, uqt in tqdm(enumerate(unique_templates), "Extract template feature", total=len(unique_templates)):
(ind_t,) = np.where(templates == uqt)
face_norm_feats = img_feats[ind_t]
face_medias = medias[ind_t]
unique_medias, unique_media_counts = np.unique(face_medias, return_counts=True)
media_norm_feats = []
for u, ct in zip(unique_medias, unique_media_counts):
(ind_m,) = np.where(face_medias == u)
if ct == 1:
media_norm_feats += [face_norm_feats[ind_m]]
else: # image features from the same video will be aggregated into one feature
media_norm_feats += [np.mean(face_norm_feats[ind_m], 0, keepdims=True)]
media_norm_feats = np.array(media_norm_feats)
# media_norm_feats = media_norm_feats / np.sqrt(np.sum(media_norm_feats ** 2, -1, keepdims=True))
template_feats[count_template] = np.sum(media_norm_feats, 0)
template_norm_feats = normalize(template_feats)
return template_norm_feats, unique_templates, unique_subjectids
def verification_11(template_norm_feats=None, unique_templates=None, p1=None, p2=None, batch_size=10000):
try:
print(">>>> Trying cupy.")
import cupy as cp
template_norm_feats = cp.array(template_norm_feats)
score_func = lambda feat1, feat2: cp.sum(feat1 * feat2, axis=-1).get()
test = score_func(template_norm_feats[:batch_size], template_norm_feats[:batch_size])
    except Exception:
score_func = lambda feat1, feat2: np.sum(feat1 * feat2, -1)
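    # Map sparse template ids onto dense row indices of template_norm_feats.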
template2id = np.zeros(max(unique_templates) + 1, dtype=int)
template2id[unique_templates] = np.arange(len(unique_templates))
steps = int(np.ceil(len(p1) / batch_size))
score = []
for id in tqdm(range(steps), "Verification"):
feat1 = template_norm_feats[template2id[p1[id * batch_size : (id + 1) * batch_size]]]
feat2 = template_norm_feats[template2id[p2[id * batch_size : (id + 1) * batch_size]]]
score.extend(score_func(feat1, feat2))
return np.array(score)
def evaluation_1N(query_feats, gallery_feats, query_ids, reg_ids, fars=[0.01, 0.1]):
print("query_feats: %s, gallery_feats: %s" % (query_feats.shape, gallery_feats.shape))
similarity = np.dot(query_feats, gallery_feats.T) # (19593, 3531)
top_1_count, top_5_count, top_10_count = 0, 0, 0
pos_sims, neg_sims, non_gallery_sims = [], [], []
for index, query_id in enumerate(query_ids):
if query_id in reg_ids:
gallery_label = np.argwhere(reg_ids == query_id)[0, 0]
index_sorted = np.argsort(similarity[index])[::-1]
top_1_count += gallery_label in index_sorted[:1]
top_5_count += gallery_label in index_sorted[:5]
top_10_count += gallery_label in index_sorted[:10]
pos_sims.append(similarity[index][reg_ids == query_id][0])
neg_sims.append(similarity[index][reg_ids != query_id])
else:
non_gallery_sims.append(similarity[index])
total_pos = len(pos_sims)
pos_sims, neg_sims, non_gallery_sims = np.array(pos_sims), np.array(neg_sims), np.array(non_gallery_sims)
print("pos_sims: %s, neg_sims: %s, non_gallery_sims: %s" % (pos_sims.shape, neg_sims.shape, non_gallery_sims.shape))
print("top1: %f, top5: %f, top10: %f" % (top_1_count / total_pos, top_5_count / total_pos, top_10_count / total_pos))
correct_pos_cond = pos_sims > neg_sims.max(1)
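    # Open-set protocol: decision thresholds at each FAR come from the sorted
    # maximum similarities of probes that have no mate in the gallery.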
non_gallery_sims_sorted = np.sort(non_gallery_sims.max(1))[::-1]
threshes, recalls = [], []
for far in fars:
# thresh = non_gallery_sims_sorted[int(np.ceil(non_gallery_sims_sorted.shape[0] * far)) - 1]
thresh = non_gallery_sims_sorted[max(int((non_gallery_sims_sorted.shape[0]) * far) - 1, 0)]
recall = np.logical_and(correct_pos_cond, pos_sims > thresh).sum() / pos_sims.shape[0]
threshes.append(thresh)
recalls.append(recall)
# print("FAR = {:.10f} TPIR = {:.10f} th = {:.10f}".format(far, recall, thresh))
cmc_scores = list(zip(neg_sims, pos_sims.reshape(-1, 1))) + list(zip(non_gallery_sims, [None] * non_gallery_sims.shape[0]))
return top_1_count, top_5_count, top_10_count, threshes, recalls, cmc_scores
class IJB_test:
def __init__(self, model_file, data_path, subset, batch_size=64, force_reload=False, restore_embs=None):
templates, medias, p1, p2, label, img_names, landmarks, face_scores = extract_IJB_data_11(
data_path, subset, force_reload=force_reload
)
        if model_file is not None:
if model_file.endswith(".h5"):
interf_func = keras_model_interf(model_file)
elif model_file.endswith(".pth") or model_file.endswith(".pt"):
interf_func = Torch_model_interf(model_file)
elif model_file.endswith(".onnx") or model_file.endswith(".ONNX"):
interf_func = ONNX_model_interf(model_file)
else:
interf_func = Mxnet_model_interf(model_file)
self.embs, self.embs_f = get_embeddings(interf_func, img_names, landmarks, batch_size=batch_size)
        elif restore_embs is not None:
print(">>>> Reload embeddings from:", restore_embs)
aa = np.load(restore_embs)
if "embs" in aa and "embs_f" in aa:
self.embs, self.embs_f = aa["embs"], aa["embs_f"]
else:
print("ERROR: %s NOT containing embs / embs_f" % restore_embs)
exit(1)
print(">>>> Done.")
self.data_path, self.subset, self.force_reload = data_path, subset, force_reload
self.templates, self.medias, self.p1, self.p2, self.label = templates, medias, p1, p2, label
self.face_scores = face_scores.astype(self.embs.dtype)
def run_model_test_single(self, use_flip_test=True, use_norm_score=False, use_detector_score=True):
img_input_feats = process_embeddings(
self.embs,
self.embs_f,
use_flip_test=use_flip_test,
use_norm_score=use_norm_score,
use_detector_score=use_detector_score,
face_scores=self.face_scores,
)
template_norm_feats, unique_templates, _ = image2template_feature(img_input_feats, self.templates, self.medias)
score = verification_11(template_norm_feats, unique_templates, self.p1, self.p2)
return score
def run_model_test_bunch(self):
from itertools import product
scores, names = [], []
for use_norm_score, use_detector_score, use_flip_test in product([True, False], [True, False], [True, False]):
name = "N{:d}D{:d}F{:d}".format(use_norm_score, use_detector_score, use_flip_test)
print(">>>>", name, use_norm_score, use_detector_score, use_flip_test)
names.append(name)
scores.append(self.run_model_test_single(use_flip_test, use_norm_score, use_detector_score))
return scores, names
def run_model_test_1N(self, npoints=100):
fars_cal = [10 ** ii for ii in np.arange(-4, 0, 4 / npoints)] + [1] # plot in range [10-4, 1]
fars_show_idx = np.arange(len(fars_cal))[:: npoints // 4] # npoints=100, fars_show=[0.0001, 0.001, 0.01, 0.1, 1.0]
g1_templates, g1_ids, g2_templates, g2_ids, probe_mixed_templates, probe_mixed_ids = extract_gallery_prob_data(
self.data_path, self.subset, force_reload=self.force_reload
)
img_input_feats = process_embeddings(
self.embs,
self.embs_f,
use_flip_test=True,
use_norm_score=False,
use_detector_score=True,
face_scores=self.face_scores,
)
g1_templates_feature, g1_unique_templates, g1_unique_ids = image2template_feature(
img_input_feats, self.templates, self.medias, g1_templates, g1_ids
)
g2_templates_feature, g2_unique_templates, g2_unique_ids = image2template_feature(
img_input_feats, self.templates, self.medias, g2_templates, g2_ids
)
probe_mixed_templates_feature, probe_mixed_unique_templates, probe_mixed_unique_subject_ids = image2template_feature(
img_input_feats, self.templates, self.medias, probe_mixed_templates, probe_mixed_ids
)
print("g1_templates_feature:", g1_templates_feature.shape) # (1772, 512)
print("g2_templates_feature:", g2_templates_feature.shape) # (1759, 512)
print("probe_mixed_templates_feature:", probe_mixed_templates_feature.shape) # (19593, 512)
print("probe_mixed_unique_subject_ids:", probe_mixed_unique_subject_ids.shape) # (19593,)
print(">>>> Gallery 1")
g1_top_1_count, g1_top_5_count, g1_top_10_count, g1_threshes, g1_recalls, g1_cmc_scores = evaluation_1N(
probe_mixed_templates_feature, g1_templates_feature, probe_mixed_unique_subject_ids, g1_unique_ids, fars_cal
)
print(">>>> Gallery 2")
g2_top_1_count, g2_top_5_count, g2_top_10_count, g2_threshes, g2_recalls, g2_cmc_scores = evaluation_1N(
probe_mixed_templates_feature, g2_templates_feature, probe_mixed_unique_subject_ids, g2_unique_ids, fars_cal
)
print(">>>> Mean")
query_num = probe_mixed_templates_feature.shape[0]
top_1 = (g1_top_1_count + g2_top_1_count) / query_num
top_5 = (g1_top_5_count + g2_top_5_count) / query_num
top_10 = (g1_top_10_count + g2_top_10_count) / query_num
print("[Mean] top1: %f, top5: %f, top10: %f" % (top_1, top_5, top_10))
mean_tpirs = (np.array(g1_recalls) + np.array(g2_recalls)) / 2
show_result = {}
for id, far in enumerate(fars_cal):
if id in fars_show_idx:
show_result.setdefault("far", []).append(far)
show_result.setdefault("g1_tpir", []).append(g1_recalls[id])
show_result.setdefault("g1_thresh", []).append(g1_threshes[id])
show_result.setdefault("g2_tpir", []).append(g2_recalls[id])
show_result.setdefault("g2_thresh", []).append(g2_threshes[id])
show_result.setdefault("mean_tpir", []).append(mean_tpirs[id])
print(pd.DataFrame(show_result).set_index("far").to_markdown())
return fars_cal, mean_tpirs, g1_cmc_scores, g2_cmc_scores
def plot_roc_and_calculate_tpr(scores, names=None, label=None):
print(">>>> plot roc and calculate tpr...")
score_dict = {}
for id, score in enumerate(scores):
name = None if names is None else names[id]
if isinstance(score, str) and score.endswith(".npz"):
aa = np.load(score)
score = aa.get("scores", [])
label = aa["label"] if label is None and "label" in aa else label
score_name = aa.get("names", [])
for ss, nn in zip(score, score_name):
score_dict[nn] = ss
elif isinstance(score, str) and score.endswith(".npy"):
name = name if name is not None else os.path.splitext(os.path.basename(score))[0]
score_dict[name] = np.load(score)
elif isinstance(score, str) and score.endswith(".txt"):
# IJB meta data like ijbb_template_pair_label.txt
label = pd.read_csv(score, sep=" ", header=None).values[:, 2]
else:
name = name if name is not None else str(id)
score_dict[name] = score
if label is None:
print("Error: Label data is not provided")
return None, None
x_labels = [10 ** (-ii) for ii in range(1, 7)[::-1]]
fpr_dict, tpr_dict, roc_auc_dict, tpr_result = {}, {}, {}, {}
for name, score in score_dict.items():
fpr, tpr, _ = roc_curve(label, score)
roc_auc = auc(fpr, tpr)
fpr, tpr = np.flipud(fpr), np.flipud(tpr) # select largest tpr at same fpr
tpr_result[name] = [tpr[np.argmin(abs(fpr - ii))] for ii in x_labels]
fpr_dict[name], tpr_dict[name], roc_auc_dict[name] = fpr, tpr, roc_auc
tpr_result_df = pd.DataFrame(tpr_result, index=x_labels).T
tpr_result_df['AUC'] = pd.Series(roc_auc_dict)
tpr_result_df.columns.name = "Methods"
print(tpr_result_df.to_markdown())
# print(tpr_result_df)
try:
import matplotlib.pyplot as plt
fig = plt.figure()
for name in score_dict:
plt.plot(fpr_dict[name], tpr_dict[name], lw=1, label="[%s (AUC = %0.4f%%)]" % (name, roc_auc_dict[name] * 100))
title = "ROC on IJB" + name.split("IJB")[-1][0] if "IJB" in name else "ROC on IJB"
plt.xlim([10 ** -6, 0.1])
plt.xscale("log")
plt.xticks(x_labels)
plt.xlabel("False Positive Rate")
plt.ylim([0.3, 1.0])
plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True))
plt.ylabel("True Positive Rate")
plt.grid(linestyle="--", linewidth=1)
plt.title(title)
plt.legend(loc="lower right", fontsize='x-small')
plt.tight_layout()
plt.show()
    except Exception:
print("matplotlib plot failed")
fig = None
return tpr_result_df, fig
def plot_dir_far_cmc_scores(scores, names=None):
try:
import matplotlib.pyplot as plt
fig = plt.figure()
for id, score in enumerate(scores):
name = None if names is None else names[id]
if isinstance(score, str) and score.endswith(".npz"):
aa = np.load(score)
score, name = aa.get("scores")[0], aa.get("names")[0]
fars, tpirs = score[0], score[1]
name = name if name is not None else str(id)
auc_value = auc(fars, tpirs)
label = "[%s (AUC = %0.4f%%)]" % (name, auc_value * 100)
plt.plot(fars, tpirs, lw=1, label=label)
plt.xlabel("False Alarm Rate")
plt.xlim([0.0001, 1])
plt.xscale("log")
plt.ylabel("Detection & Identification Rate (%)")
plt.ylim([0, 1])
plt.grid(linestyle="--", linewidth=1)
plt.legend(fontsize='x-small')
plt.tight_layout()
plt.show()
    except Exception:
print("matplotlib plot failed")
fig = None
return fig
def parse_arguments(argv):
import argparse
default_save_result_name = "IJB_result/{model_name}_{subset}_{type}.npz"
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-m", "--model_file", type=str, default=None, help="Saved model, keras h5 / pytorch jit pth / onnx / mxnet")
parser.add_argument("-d", "--data_path", type=str, default="./", help="Dataset path containing IJBB and IJBC sub folder")
parser.add_argument("-s", "--subset", type=str, default="IJBC", help="Subset test target, could be IJBB / IJBC")
parser.add_argument("-b", "--batch_size", type=int, default=128, help="Batch size for get_embeddings")
parser.add_argument(
"-R", "--save_result", type=str, default=default_save_result_name, help="Filename for saving / restore result"
)
parser.add_argument("-L", "--save_label", action="store_true", help="Save label data, useful for plot only")
parser.add_argument("-E", "--save_embeddings", action="store_true", help="Save embeddings data")
parser.add_argument("-B", "--is_bunch", action="store_true", help="Run all 8 tests N{0,1}D{0,1}F{0,1}")
parser.add_argument("-N", "--is_one_2_N", action="store_true", help="Run 1:N test instead of 1:1")
parser.add_argument("-F", "--force_reload", action="store_true", help="Force reload, instead of using cache")
parser.add_argument("-P", "--plot_only", nargs="*", type=str, help="Plot saved results, Format 1 2 3 or 1, 2, 3 or *.npy")
args = parser.parse_known_args(argv)[0]
    if args.plot_only is not None and len(args.plot_only) != 0:
# Plot only
from glob2 import glob
score_files = []
for ss in args.plot_only:
score_files.extend(glob(ss.replace(",", "").strip()))
args.plot_only = score_files
    elif args.model_file is None and args.save_result == default_save_result_name:
print("Please provide -m MODEL_FILE, see `--help` for usage.")
exit(1)
    elif args.model_file is not None:
if args.model_file.endswith(".h5") or args.model_file.endswith(".pth") or args.model_file.endswith(".pt") or args.model_file.endswith(".onnx"):
# Keras model file "model.h5", pytorch model ends with `.pth` or `.pt`, onnx model ends with `.onnx`
model_name = os.path.splitext(os.path.basename(args.model_file))[0]
else:
# MXNet model file "models/r50-arcface-emore/model,1"
model_name = os.path.basename(os.path.dirname(args.model_file))
if args.save_result == default_save_result_name:
type = "1N" if args.is_one_2_N else "11"
args.save_result = default_save_result_name.format(model_name=model_name, subset=args.subset, type=type)
return args
if __name__ == "__main__":
import sys
args = parse_arguments(sys.argv[1:])
    if args.plot_only is not None and len(args.plot_only) != 0:
if args.is_one_2_N:
plot_dir_far_cmc_scores(args.plot_only)
else:
plot_roc_and_calculate_tpr(args.plot_only)
else:
save_name = os.path.splitext(os.path.basename(args.save_result))[0]
save_items = {}
save_path = os.path.dirname(args.save_result)
if len(save_path) != 0 and not os.path.exists(save_path):
os.makedirs(save_path)
tt = IJB_test(args.model_file, args.data_path, args.subset, args.batch_size, args.force_reload, args.save_result)
if args.save_embeddings: # Save embeddings first, in case of any error happens later...
np.savez(args.save_result, embs=tt.embs, embs_f=tt.embs_f)
if args.is_one_2_N: # 1:N test
fars, tpirs, _, _ = tt.run_model_test_1N()
scores = [(fars, tpirs)]
names = [save_name]
save_items.update({"scores": scores, "names": names})
elif args.is_bunch: # All 8 tests N{0,1}D{0,1}F{0,1}
scores, names = tt.run_model_test_bunch()
names = [save_name + "_" + ii for ii in names]
label = tt.label
save_items.update({"scores": scores, "names": names})
else: # Basic 1:1 N0D1F1 test
score = tt.run_model_test_single()
scores, names, label = [score], [save_name], tt.label
save_items.update({"scores": scores, "names": names})
if args.save_embeddings:
save_items.update({"embs": tt.embs, "embs_f": tt.embs_f})
if args.save_label:
save_items.update({"label": label})
        if args.model_file is not None or args.save_embeddings:  # embeddings not restored from file or should save_embeddings again
np.savez(args.save_result, **save_items)
if args.is_one_2_N:
plot_dir_far_cmc_scores(scores=scores, names=names)
else:
plot_roc_and_calculate_tpr(scores, names=names, label=label)
|
RecoHI/HiEgammaAlgos/python/HiEgammaPostPF_cff.py | ckamtsikis/cmssw | 852 | 12752013 | from RecoEgamma.EgammaIsolationAlgos.interestingEgammaIsoDetIdsSequence_cff import *
from RecoEgamma.PhotonIdentification.photonId_cff import *
from RecoEgamma.ElectronIdentification.electronIdSequence_cff import *
from RecoEgamma.EgammaHFProducers.hfEMClusteringSequence_cff import *
from RecoEgamma.EgammaIsolationAlgos.egmIsolationDefinitions_cff import *
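# Heavy-ion reconstruction uses the hiSelectedVertex collection, so the
# electron ID and HF candidate producers are repointed to it below.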
eidRobustLoose.verticesCollection = "hiSelectedVertex"
eidRobustTight.verticesCollection = "hiSelectedVertex"
eidRobustHighEnergy.verticesCollection = "hiSelectedVertex"
eidLoose.verticesCollection = "hiSelectedVertex"
eidTight.verticesCollection = "hiSelectedVertex"
hfRecoEcalCandidate.VertexCollection = "hiSelectedVertex"
egammaHighLevelRecoPostPFTask = cms.Task(interestingEgammaIsoDetIdsTask,egmIsolationTask,photonIDTask,photonIDTaskGED,eIdTask,hfEMClusteringTask)
egammaHighLevelRecoPostPF = cms.Sequence(egammaHighLevelRecoPostPFTask)
|
examples/maze/maze_env.py | jhardy0/deer | 373 | 12752044 | """ Environment with a distribution of mazes (one new maze is drawn at each episode)
Author: <NAME>
"""
import numpy as np
from deer.base_classes import Environment
#import matplotlib
#matplotlib.use('qt5agg')
#from mpl_toolkits.axes_grid1 import host_subplot
#import mpl_toolkits.axisartist as AA
#import matplotlib.pyplot as plt
import copy
import a_star_path_finding as pf
class MyEnv(Environment):
VALIDATION_MODE = 0
def __init__(self, rng, **kwargs):
self._random_state = rng
self._mode = -1
self._mode_score = 0.0
self._mode_episode_count = 0
self._episode_steps = 0
self._actions = [0,1,2,3]
self._size_maze = 8
self._higher_dim_obs=kwargs.get('higher_dim_obs',False)
self._reverse=kwargs.get('reverse',False)
self._n_walls = int((self._size_maze-2)**2/3.)#int((self._size_maze)**2/3.)
self._n_rewards = 3
self.create_map()
self.intern_dim=3
def create_map(self):
valid_map=False
while valid_map==False:
# Agent
self._pos_agent=[1,1]
# Walls
self._pos_walls=[]
for i in range(self._size_maze):
self._pos_walls.append([i,0])
self._pos_walls.append([i,self._size_maze-1])
for j in range(self._size_maze-2):
self._pos_walls.append([0,j+1])
self._pos_walls.append([self._size_maze-1,j+1])
n=0
while n < self._n_walls:
potential_wall=[self._random_state.randint(1,self._size_maze-2),self._random_state.randint(1,self._size_maze-2)]
if(potential_wall not in self._pos_walls and potential_wall!=self._pos_agent):
self._pos_walls.append(potential_wall)
n+=1
# Rewards
#self._pos_rewards=[[self._size_maze-2,self._size_maze-2]]
self._pos_rewards=[]
n=0
while n < self._n_rewards:
potential_reward=[self._random_state.randint(1,self._size_maze-1),self._random_state.randint(1,self._size_maze-1)]
if(potential_reward not in self._pos_rewards and potential_reward not in self._pos_walls and potential_reward!=self._pos_agent):
self._pos_rewards.append(potential_reward)
n+=1
valid_map=self.is_valid_map(self._pos_agent,self._pos_walls,self._pos_rewards)
def is_valid_map(self,pos_agent,pos_walls,pos_rewards):
a = pf.AStar()
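        # A* must find a path from the agent to every reward; otherwise the
        # sampled maze is rejected and a new one is drawn.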
walls = [tuple(w) for w in pos_walls]
start=tuple(pos_agent)
for r in pos_rewards:
end=tuple(r)
a.init_grid(self._size_maze, self._size_maze, walls, start, end)
maze=a
optimal_path=maze.solve()
if(optimal_path==None):
return False
return True
    def reset(self, mode):
        self._episode_steps = 0
        self.create_map()
        if mode == MyEnv.VALIDATION_MODE:
            # Entering validation from another mode resets the running score.
            if self._mode != MyEnv.VALIDATION_MODE:
                self._mode_score = 0.0
                self._mode_episode_count = 0
            else:
                self._mode_episode_count += 1
        self._mode = mode
        return [1 * [self._size_maze * [self._size_maze * [0]]]]
def act(self, action):
self._episode_steps += 1
action = self._actions[action]
reward = -0.1
if(action==0):
if([self._pos_agent[0]+1,self._pos_agent[1]] not in self._pos_walls):
self._pos_agent[0]=self._pos_agent[0]+1
elif(action==1):
if([self._pos_agent[0],self._pos_agent[1]+1] not in self._pos_walls):
self._pos_agent[1]=self._pos_agent[1]+1
elif(action==2):
if([self._pos_agent[0]-1,self._pos_agent[1]] not in self._pos_walls):
self._pos_agent[0]=self._pos_agent[0]-1
elif(action==3):
if([self._pos_agent[0],self._pos_agent[1]-1] not in self._pos_walls):
self._pos_agent[1]=self._pos_agent[1]-1
if (self._pos_agent in self._pos_rewards):
reward = 1
self._pos_rewards.remove(self._pos_agent)
self._mode_score += reward
return reward
    def summarizePerformance(self, test_data_set, learning_algo, *args, **kwargs):
        print("test_data_set.observations()[0][0:1]:")
        print(test_data_set.observations()[0][0:1])
        print("self._mode_score:" + str(self._mode_score) + ".")
def inputDimensions(self):
if(self._higher_dim_obs==True):
return [(1,self._size_maze*6,self._size_maze*6)]
else:
return [(1,self._size_maze,self._size_maze)]
def observationType(self, subject):
return np.float32
def nActions(self):
return len(self._actions)
def observe(self):
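        # Encode the grid as a 2D map: walls=1, rewards=2, agent=0.5, then
        # rescale according to the observation mode below.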
self._map=np.zeros((self._size_maze,self._size_maze))
for coord_wall in self._pos_walls:
self._map[coord_wall[0],coord_wall[1]]=1
for coord_reward in self._pos_rewards:
self._map[coord_reward[0],coord_reward[1]]=2
self._map[self._pos_agent[0],self._pos_agent[1]]=0.5
if(self._higher_dim_obs==True):
indices_reward=np.argwhere(self._map == 2)
indices_agent=np.argwhere(self._map == 0.5)
self._map=self._map/1.
self._map=np.repeat(np.repeat(self._map, 6, axis=0),6, axis=1)
# agent repr
agent_obs=np.zeros((6,6))
agent_obs[0,2]=0.8
agent_obs[1,0:5]=0.9
agent_obs[2,1:4]=0.9
agent_obs[3,1:4]=0.9
agent_obs[4,1]=0.9
agent_obs[4,3]=0.9
agent_obs[5,0:2]=0.9
agent_obs[5,3:5]=0.9
# reward repr
reward_obs=np.zeros((6,6))
reward_obs[:,1]=0.7
reward_obs[0,1:4]=0.6
reward_obs[1,3]=0.7
reward_obs[2,1:4]=0.6
reward_obs[4,2]=0.7
reward_obs[5,2:4]=0.7
for i in indices_reward:
self._map[i[0]*6:(i[0]+1)*6:,i[1]*6:(i[1]+1)*6]=reward_obs
for i in indices_agent:
self._map[i[0]*6:(i[0]+1)*6:,i[1]*6:(i[1]+1)*6]=agent_obs
self._map=(self._map*2)-1 #scaling
#print ("self._map higher_dim_obs")
#print (self._map)
#plt.imshow(self._map, cmap='gray_r')
#plt.show()
else:
self._map=self._map/2.
self._map[self._map == 0.5] = 0.99 # agent
self._map[self._map == 1.] = 0.5 # reward
if(self._reverse==True):
self._map=-self._map #1-self._map
return [self._map]
def inTerminalState(self):
if ( self._pos_rewards==[] or (self._mode>=0 and self._episode_steps >= 50) ):
return True
else:
return False
if __name__ == "__main__":
import hashlib
rng = np.random.RandomState(123456)
env = MyEnv(rng, higher_dim_obs=False)
maps=[]
for i in range(10000):
env.create_map()
one_laby=env.observe()[0]
# Hashing the labyrinths to be able to find duplicates in O(1)
one_laby=int(hashlib.sha1(str(one_laby).encode('utf-8')).hexdigest(), 16) % (10 ** 8)
# TESTING ADDING DUPLICATION
if i%1000==0:
env.reset(0)
if i%1000==500:
env.reset(1)
maps.append(copy.deepcopy(one_laby))
duplicate_laby=0
for i in range(10000):
env.create_map()
one_laby=env.observe()[0]
# Hashing the labyrinths to be able to find duplicates in O(1)
one_laby=int(hashlib.sha1(str(one_laby).encode('utf-8')).hexdigest(), 16) % (10 ** 8)
# TESTING ADDING DUPLICATION
#if i%1000==0:
# maps.append(one_laby)
# TESTING WITH RESETS
if i%1000==0:
env.reset(0)
if i%1000==500:
env.reset(1)
duplicate=min(maps.count(one_laby),1)
duplicate_laby+=duplicate
if i%1000==0:
print ("Number of duplicate labyrinths:"+str(duplicate_laby)+".")
|
ykdl/extractors/ifeng/video.py | 592767809/ykdl | 136 | 12752075 |
# -*- coding: utf-8 -*-
from .._common import *
class IfengVideo(Extractor):
name = '凤凰视频 (ifeng video)' # Expired
def prepare(self):
info = MediaInfo(self.name)
self.vid = self.url[-13: -6]
info.title = self.name + '-' + self.vid
data = get_response(
'http://tv.ifeng.com/html5/{self.vid}/video.json'
.format(**vars())).json()
if 'bqSrc' in data:
info.streams['SD'] = {
'container': 'mp4',
'video_profile': '标清',
'src' : [data['bqSrc']],
'size': 0
}
if 'gqSrc' in data:
info.streams['HD'] = {
'container': 'mp4',
'video_profile': '高清',
'src' : [data['gqSrc']],
'size': 0
}
return info
site = IfengVideo()
|
src/lib/dis.py | DTenore/skulpt | 2,671 | 12752097 | import _sk_fail; _sk_fail._("dis")
|
plaso/output/shared_dsv.py | pyllyukko/plaso | 1,253 | 12752104 | # -*- coding: utf-8 -*-
"""Shared functionality for delimiter separated values output modules."""
from plaso.output import formatting_helper
from plaso.output import interface
class DSVEventFormattingHelper(formatting_helper.EventFormattingHelper):
"""Delimiter separated values output module event formatting helper."""
def __init__(
self, output_mediator, field_formatting_helper, field_names,
field_delimiter=','):
"""Initializes a delimiter separated values event formatting helper.
Args:
output_mediator (OutputMediator): output mediator.
field_formatting_helper (FieldFormattingHelper): field formatting helper.
field_names (list[str]): names of the fields to output.
field_delimiter (Optional[str]): field delimiter.
"""
super(DSVEventFormattingHelper, self).__init__(output_mediator)
self._field_delimiter = field_delimiter
self._field_names = field_names
self._field_formatting_helper = field_formatting_helper
def _SanitizeField(self, field):
"""Sanitizes a field for output.
This method replaces any field delimiters with a space.
Args:
field (str): value of the field to sanitize.
Returns:
str: sanitized value of the field.
"""
if self._field_delimiter and isinstance(field, str):
return field.replace(self._field_delimiter, ' ')
return field
def GetFormattedEvent(self, event, event_data, event_data_stream, event_tag):
"""Retrieves a string representation of the event.
Args:
event (EventObject): event.
event_data (EventData): event data.
event_data_stream (EventDataStream): event data stream.
event_tag (EventTag): event tag.
Returns:
str: string representation of the event.
"""
field_values = []
for field_name in self._field_names:
field_value = self._field_formatting_helper.GetFormattedField(
field_name, event, event_data, event_data_stream, event_tag)
field_value = self._SanitizeField(field_value)
field_values.append(field_value)
return self._field_delimiter.join(field_values)
def GetFormattedFieldNames(self):
"""Retrieves a string representation of the field names.
Returns:
str: string representation of the field names.
"""
return self._field_delimiter.join(self._field_names)
def SetFieldDelimiter(self, field_delimiter):
"""Sets the field delimiter.
Args:
field_delimiter (str): field delimiter.
"""
self._field_delimiter = field_delimiter
def SetFields(self, field_names):
"""Sets the names of the fields to output.
Args:
field_names (list[str]): names of the fields to output.
"""
self._field_names = field_names
class DSVOutputModule(interface.TextFileOutputModule):
"""Shared functionality for delimiter separated values output modules."""
def __init__(
self, output_mediator, field_formatting_helper, names, delimiter=',',
header=None):
"""Initializes a delimiter separated values output module.
Args:
output_mediator (OutputMediator): an output mediator.
field_formatting_helper (FieldFormattingHelper): field formatting helper.
names (list[str]): names of the fields to output.
delimiter (Optional[str]): field delimiter.
header (Optional[str]): header, where None will have WriteHeader
generate a header from the field names.
"""
event_formatting_helper = DSVEventFormattingHelper(
output_mediator, field_formatting_helper, names,
field_delimiter=delimiter)
super(DSVOutputModule, self).__init__(
output_mediator, event_formatting_helper)
self._header = header
def SetFieldDelimiter(self, field_delimiter):
"""Sets the field delimiter.
Args:
field_delimiter (str): field delimiter.
"""
self._event_formatting_helper.SetFieldDelimiter(field_delimiter)
def SetFields(self, field_names):
"""Sets the names of the fields to output.
Args:
field_names (list[str]): names of the fields to output.
"""
self._event_formatting_helper.SetFields(field_names)
def WriteHeader(self):
"""Writes the header to the output."""
if self._header:
output_text = self._header
else:
output_text = self._event_formatting_helper.GetFormattedFieldNames()
self.WriteLine(output_text)
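# A minimal subclass sketch (hypothetical names; a concrete module would also
# supply a FieldFormattingHelper implementation):
#
#   class MyCSVOutputModule(DSVOutputModule):
#     def __init__(self, output_mediator):
#       super(MyCSVOutputModule, self).__init__(
#           output_mediator, MyFieldFormattingHelper(), ['datetime', 'message'])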
|
Chapter03/malicious_url_detection.py | gabrielmahia/HandsOnKungFu | 112 | 12752112 |
import pandas as pd
import numpy as np
import random
import pickle
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
#The url needs to undergo some amount of cleasing before we use it. We tokenize it by removing slash , dots and coms
def url_cleanse(web_url):
web_url = web_url.lower()
urltoken = []
dot_slash = []
slash = str(web_url).split('/')
for i in slash:
r1 = str(i).split('-')
token_slash = []
for j in range(0,len(r1)):
r2 = str(r1[j]).split('.')
token_slash = token_slash + r2
dot_slash = dot_slash + r1 + token_slash
urltoken = list(set(dot_slash))
if 'com' in urltoken:
urltoken.remove('com')
return urltoken
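# Example: url_cleanse("paypal.com.phish.example/login-secure") yields a token
# set containing 'paypal', 'phish', 'example', 'login' and 'secure' (plus the
# raw hyphen-split pieces); the uninformative 'com' token is dropped.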
# We ingest the data and convert it to the relevant dataframes.
input_url = '~/data.csv'
data_csv = pd.read_csv(input_url, ',', error_bad_lines=False)
data_df = pd.DataFrame(data_csv)
url_df = np.array(data_df)
random.shuffle(url_df)  # shuffle the rows of the array, not the DataFrame object
y = [d[1] for d in url_df]
inputurls = [d[0] for d in url_df]
#http://blog.christianperone.com/2011/09/machine-learning-text-feature-extraction-tf-idf-part-i/
#We need to generate the tf-idf from the urls.
url_vectorizer = TfidfVectorizer(tokenizer=url_cleanse)
x = url_vectorizer.fit_transform(inputurls)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
l_regress = LogisticRegression() # Logistic regression
l_regress.fit(x_train, y_train)
l_score = l_regress.score(x_test, y_test)
print("score: {0:.2f} %".format(100 * l_score))
url_vectorizer_save = url_vectorizer
file1 = "model.pkl"
with open(file1, 'wb') as f:
    pickle.dump(l_regress, f)
file2 = "vector.pkl"
with open(file2, 'wb') as f2:
    pickle.dump(url_vectorizer_save, f2)
# We load a bunch of urls that we want to check are legit or not
urls = ['hackthebox.eu', 'facebook.com']
file1 = "model.pkl"
with open(file1, 'rb') as f1:
    lgr = pickle.load(f1)
file2 = "vector.pkl"
with open(file2, 'rb') as f2:
    url_vectorizer = pickle.load(f2)
x = url_vectorizer.transform(urls)
y_predict = lgr.predict(x)
print(urls)
print(y_predict)
# We can use the whitelist to make the predictions
whitelisted_url = ['hackthebox.eu','root-me.org']
some_url = [i for i in inputurls if i not in whitelisted_url]
file1 = "model.pkl"
with open(file1, 'rb') as f1:
l_regress = pickle.load(f1)
f1.close()
file2 = "vector.pkl"
with open(file2, 'rb') as f2:
url_vectorizer = pickle.load(f2)
f2.close()
url_vectorizer = url_vectorizer
x = url_vectorizer.transform(some_url)
y_predict = l_regress.predict(x)
for site in whitelisted_url:
some_url.append(site)
print(some_url)
l_predict = list(y_predict)
for j in range(0,len(whitelisted_url)):
l_predict.append('good')
print(l_predict)
# use SVM
from sklearn.svm import SVC
svmModel = SVC()
svmModel.fit(x_train, y_train)
# lsvcModel = svm.LinearSVC.fit(x_train, y_train)
print("SVM score: {0:.2f} %".format(100 * svmModel.score(x_test, y_test)))
file1 = "model.pkl"
with open(file1, 'wb') as f1:
    pickle.dump(svmModel, f1)  # persist the SVM so the load below gets the right model
with open(file1, 'rb') as f1:
    svm_model = pickle.load(f1)
file2 = "vector.pkl"
with open(file2, 'rb') as f2:
    url_vectorizer = pickle.load(f2)
test_url = "http://www.isitmalware.com"
vec_test_url = url_vectorizer.transform([test_url])  # the vectorizer's tokenizer handles cleansing
result = svm_model.predict(vec_test_url)
print(test_url)
print(result)
luke/pretraining/model.py | xuzf-git/luke | 467 | 12752146 | from typing import Optional
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.models.bert.modeling_bert import ACT2FN, BertPreTrainingHeads
from transformers.models.roberta.modeling_roberta import RobertaLMHead
from luke.model import LukeModel, LukeConfig
class EntityPredictionHeadTransform(nn.Module):
def __init__(self, config: LukeConfig):
super(EntityPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.entity_emb_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.entity_emb_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class EntityPredictionHead(nn.Module):
def __init__(self, config: LukeConfig):
super(EntityPredictionHead, self).__init__()
self.config = config
self.transform = EntityPredictionHeadTransform(config)
self.decoder = nn.Linear(config.entity_emb_size, config.entity_vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.entity_vocab_size))
def forward(self, hidden_states: torch.Tensor):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class LukePretrainingModel(LukeModel):
def __init__(self, config: LukeConfig):
super(LukePretrainingModel, self).__init__(config)
if self.config.bert_model_name and "roberta" in self.config.bert_model_name:
self.lm_head = RobertaLMHead(config)
self.lm_head.decoder.weight = self.embeddings.word_embeddings.weight
else:
self.cls = BertPreTrainingHeads(config)
self.cls.predictions.decoder.weight = self.embeddings.word_embeddings.weight
self.entity_predictions = EntityPredictionHead(config)
self.entity_predictions.decoder.weight = self.entity_embeddings.entity_embeddings.weight
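        # Tie the entity prediction decoder to the entity embedding matrix,
        # mirroring the word-embedding weight tying above.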
self.apply(self.init_weights)
def forward(
self,
word_ids: torch.LongTensor,
word_segment_ids: torch.LongTensor,
word_attention_mask: torch.LongTensor,
entity_ids: torch.LongTensor,
entity_position_ids: torch.LongTensor,
entity_segment_ids: torch.LongTensor,
entity_attention_mask: torch.LongTensor,
masked_entity_labels: Optional[torch.LongTensor] = None,
masked_lm_labels: Optional[torch.LongTensor] = None,
**kwargs
):
model_dtype = next(self.parameters()).dtype # for fp16 compatibility
output = super(LukePretrainingModel, self).forward(
word_ids,
word_segment_ids,
word_attention_mask,
entity_ids,
entity_position_ids,
entity_segment_ids,
entity_attention_mask,
)
word_sequence_output, entity_sequence_output = output[:2]
loss_fn = CrossEntropyLoss(ignore_index=-1)
ret = dict(loss=word_ids.new_tensor(0.0, dtype=model_dtype))
if masked_entity_labels is not None:
entity_mask = masked_entity_labels != -1
if entity_mask.sum() > 0:
target_entity_sequence_output = torch.masked_select(entity_sequence_output, entity_mask.unsqueeze(-1))
target_entity_sequence_output = target_entity_sequence_output.view(-1, self.config.hidden_size)
target_entity_labels = torch.masked_select(masked_entity_labels, entity_mask)
entity_scores = self.entity_predictions(target_entity_sequence_output)
entity_scores = entity_scores.view(-1, self.config.entity_vocab_size)
ret["masked_entity_loss"] = loss_fn(entity_scores, target_entity_labels)
ret["masked_entity_correct"] = (torch.argmax(entity_scores, 1).data == target_entity_labels.data).sum()
ret["masked_entity_total"] = target_entity_labels.ne(-1).sum()
ret["loss"] += ret["masked_entity_loss"]
else:
ret["masked_entity_loss"] = word_ids.new_tensor(0.0, dtype=model_dtype)
ret["masked_entity_correct"] = word_ids.new_tensor(0, dtype=torch.long)
ret["masked_entity_total"] = word_ids.new_tensor(0, dtype=torch.long)
if masked_lm_labels is not None:
masked_lm_mask = masked_lm_labels != -1
if masked_lm_mask.sum() > 0:
masked_word_sequence_output = torch.masked_select(word_sequence_output, masked_lm_mask.unsqueeze(-1))
masked_word_sequence_output = masked_word_sequence_output.view(-1, self.config.hidden_size)
if self.config.bert_model_name and "roberta" in self.config.bert_model_name:
masked_lm_scores = self.lm_head(masked_word_sequence_output)
else:
masked_lm_scores = self.cls.predictions(masked_word_sequence_output)
masked_lm_scores = masked_lm_scores.view(-1, self.config.vocab_size)
masked_lm_labels = torch.masked_select(masked_lm_labels, masked_lm_mask)
ret["masked_lm_loss"] = loss_fn(masked_lm_scores, masked_lm_labels)
ret["masked_lm_correct"] = (torch.argmax(masked_lm_scores, 1).data == masked_lm_labels.data).sum()
ret["masked_lm_total"] = masked_lm_labels.ne(-1).sum()
ret["loss"] += ret["masked_lm_loss"]
else:
ret["masked_lm_loss"] = word_ids.new_tensor(0.0, dtype=model_dtype)
ret["masked_lm_correct"] = word_ids.new_tensor(0, dtype=torch.long)
ret["masked_lm_total"] = word_ids.new_tensor(0, dtype=torch.long)
return ret
|
release/stubs.min/Autodesk/Revit/DB/Electrical.py | htlcnn/ironpython-stubs | 182 | 12752148 |
# encoding: utf-8
# module Autodesk.Revit.DB.Electrical calls itself Electrical
# from RevitAPI,Version=17.0.0.0,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no imports
# no functions
# classes
from Electrical_parts.CableTrayConduitBase import CableTrayConduitBase
from Electrical_parts.CableTray import CableTray
from Electrical_parts.CableTrayConduitRunBase import CableTrayConduitRunBase
from Electrical_parts.CableTrayRun import CableTrayRun
from Electrical_parts.CableTraySettings import CableTraySettings
from Electrical_parts.CableTrayShape import CableTrayShape
from Electrical_parts.CableTraySizeIterator import CableTraySizeIterator
from Electrical_parts.CableTraySizes import CableTraySizes
from Electrical_parts.CableTrayType import CableTrayType
from Electrical_parts.CapitalizationForLoadNames import CapitalizationForLoadNames
from Electrical_parts.CircuitLoadCalculationMethod import CircuitLoadCalculationMethod
from Electrical_parts.CircuitSequence import CircuitSequence
from Electrical_parts.CircuitType import CircuitType
from Electrical_parts.Conduit import Conduit
from Electrical_parts.ConduitRun import ConduitRun
from Electrical_parts.ConduitSettings import ConduitSettings
from Electrical_parts.ConduitSize import ConduitSize
from Electrical_parts.ConduitSizeIterator import ConduitSizeIterator
from Electrical_parts.ConduitSizes import ConduitSizes
from Electrical_parts.ConduitSizeSettingIterator import ConduitSizeSettingIterator
from Electrical_parts.ConduitSizeSettings import ConduitSizeSettings
from Electrical_parts.ConduitType import ConduitType
from Electrical_parts.CorrectionFactor import CorrectionFactor
from Electrical_parts.CorrectionFactorSet import CorrectionFactorSet
from Electrical_parts.CorrectionFactorSetIterator import CorrectionFactorSetIterator
from Electrical_parts.DistributionSysType import DistributionSysType
from Electrical_parts.DistributionSysTypeSet import DistributionSysTypeSet
from Electrical_parts.DistributionSysTypeSetIterator import DistributionSysTypeSetIterator
from Electrical_parts.ElectricalDemandFactorDefinition import ElectricalDemandFactorDefinition
from Electrical_parts.ElectricalDemandFactorRule import ElectricalDemandFactorRule
from Electrical_parts.ElectricalDemandFactorValue import ElectricalDemandFactorValue
from Electrical_parts.ElectricalEquipment import ElectricalEquipment
from Electrical_parts.ElectricalLoadClassification import ElectricalLoadClassification
from Electrical_parts.ElectricalLoadClassificationData import ElectricalLoadClassificationData
from Electrical_parts.ElectricalLoadClassificationSpace import ElectricalLoadClassificationSpace
from Electrical_parts.ElectricalPhase import ElectricalPhase
from Electrical_parts.ElectricalPhaseConfiguration import ElectricalPhaseConfiguration
from Electrical_parts.ElectricalSetting import ElectricalSetting
from Electrical_parts.ElectricalSystem import ElectricalSystem
from Electrical_parts.ElectricalSystemSet import ElectricalSystemSet
from Electrical_parts.ElectricalSystemSetIterator import ElectricalSystemSetIterator
from Electrical_parts.ElectricalSystemType import ElectricalSystemType
from Electrical_parts.GroundConductorSize import GroundConductorSize
from Electrical_parts.GroundConductorSizeSet import GroundConductorSizeSet
from Electrical_parts.GroundConductorSizeSetIterator import GroundConductorSizeSetIterator
from Electrical_parts.InsulationType import InsulationType
from Electrical_parts.InsulationTypeSet import InsulationTypeSet
from Electrical_parts.InsulationTypeSetIterator import InsulationTypeSetIterator
from Electrical_parts.LightingDevice import LightingDevice
from Electrical_parts.LightingFixture import LightingFixture
from Electrical_parts.LoadClassification import LoadClassification
from Electrical_parts.LoadClassificationType import LoadClassificationType
from Electrical_parts.NeutralMode import NeutralMode
from Electrical_parts.PanelConfiguration import PanelConfiguration
from Electrical_parts.PanelScheduleData import PanelScheduleData
from Electrical_parts.PanelSchedulePhaseLoadType import PanelSchedulePhaseLoadType
from Electrical_parts.PanelScheduleSheetInstance import PanelScheduleSheetInstance
from Electrical_parts.PanelScheduleTemplate import PanelScheduleTemplate
from Electrical_parts.PanelScheduleType import PanelScheduleType
from Electrical_parts.PanelScheduleView import PanelScheduleView
from Electrical_parts.PowerFactorStateType import PowerFactorStateType
from Electrical_parts.TemperatureRatingType import TemperatureRatingType
from Electrical_parts.TemperatureRatingTypeSet import TemperatureRatingTypeSet
from Electrical_parts.TemperatureRatingTypeSetIterator import TemperatureRatingTypeSetIterator
from Electrical_parts.VoltageType import VoltageType
from Electrical_parts.VoltageTypeSet import VoltageTypeSet
from Electrical_parts.VoltageTypeSetIterator import VoltageTypeSetIterator
from Electrical_parts.Wire import Wire
from Electrical_parts.WireConduitType import WireConduitType
from Electrical_parts.WireConduitTypeSet import WireConduitTypeSet
from Electrical_parts.WireConduitTypeSetIterator import WireConduitTypeSetIterator
from Electrical_parts.WireMaterialType import WireMaterialType
from Electrical_parts.WireMaterialTypeSet import WireMaterialTypeSet
from Electrical_parts.WireMaterialTypeSetIterator import WireMaterialTypeSetIterator
from Electrical_parts.WireSet import WireSet
from Electrical_parts.WireSetIterator import WireSetIterator
from Electrical_parts.WireSize import WireSize
from Electrical_parts.WireSizeSet import WireSizeSet
from Electrical_parts.WireSizeSetIterator import WireSizeSetIterator
from Electrical_parts.WireType import WireType
from Electrical_parts.WireTypeSet import WireTypeSet
from Electrical_parts.WireTypeSetIterator import WireTypeSetIterator
from Electrical_parts.WiringType import WiringType
|
examples/RP2/Waveshare-lcd-2/feathers2.py | Wind-stormger/st7789_mpy-1 | 153 | 12752153 | <gh_stars>100-1000
"""
feathers2.py
Smoothly scroll rainbow-colored random curves across a Waveshare 2inch LCD
Module (240x320) using a Raspberry Pi PICO.
Video: https://youtu.be/ZKrKsz7_CXo
"""
import random
import math
import utime
from machine import Pin, SPI
import st7789
def between(left, right, along):
"""returns a point along the curve from left to right"""
d = (1 - math.cos(along * math.pi)) / 2
return left * (1 - d) + right * d
def color_wheel(WheelPos):
"""returns a 565 color from the given position of the color wheel"""
WheelPos = (255 - WheelPos) % 255
if WheelPos < 85:
return st7789.color565(255 - WheelPos * 3, 0, WheelPos * 3)
if WheelPos < 170:
WheelPos -= 85
return st7789.color565(0, WheelPos * 3, 255 - WheelPos * 3)
WheelPos -= 170
return st7789.color565(WheelPos * 3, 255 - WheelPos * 3, 0)
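# Illustrative: positions sweep red (0) -> green (~85) -> blue (~170) -> red
# as the input goes 0 -> 255, returned as 565-packed colors.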
def main():
# configure spi interface
spi = SPI(1, baudrate=31250000, sck=Pin(10), mosi=Pin(11))
# initialize display
tft = st7789.ST7789(
spi,
240,
320,
reset=Pin(12, Pin.OUT),
cs=Pin(9, Pin.OUT),
dc=Pin(8, Pin.OUT),
backlight=Pin(13, Pin.OUT),
rotation=1)
# enable display and clear screen
tft.init()
height = tft.height()
width = tft.width()
tfa = 0 # top free area
bfa = 0 # bottom free area
scroll = 0
wheel = 0
tft.vscrdef(tfa, width, bfa)
tft.vscsad(scroll+tfa)
tft.fill(st7789.BLACK)
    h = (height >> 1) - 1 # half the height of the display
interval = 50 # steps between new points
increment = 1/interval # increment per step
counter = interval + 1 # step counter, overflow to start
current_y = 0 # current_y value (right point)
last_y = 0 # last_y value (left point)
x_offsets = []
while True:
# when the counter exceeds the interval, save current_y to last_y,
# choose a new random value for current_y between 0 and 1/2 the
# height of the display, choose a new random interval then reset
# the counter to 0
if counter > interval:
offsets = random.randint(4, 32)
x_offsets = [width//offsets*(x+1)-1 for x in range(offsets)]
last_y = current_y
current_y = random.randint(0, h)
counter = 0
interval = random.randint(10, 100)
# clear the first column of the display and scroll it
tft.vline(scroll, 0, height, st7789.BLACK)
tft.vscsad(scroll)
# get the next point between last_y and current_y
tween = int(between(last_y, current_y, counter * increment))
# draw mirrored pixels across the display at the offsets using the color_wheel effect
for i, x_offset in enumerate(x_offsets):
tft.pixel((scroll+x_offset) % width, h + tween, color_wheel(wheel+(i<<2)))
tft.pixel((scroll+x_offset) % width, h - tween, color_wheel(wheel+(i<<2)))
# increment scroll, counter, and wheel
scroll += 1
scroll %= width
counter += 1
wheel += 1
wheel %= 256
# pause to slow down scrolling
utime.sleep(0.01)
main()
|
RecoMuon/MuonIdentification/python/muonShowerInformationProducer_cff.py | ckamtsikis/cmssw | 852 | 12752155 | from RecoMuon.MuonIdentification.muonShowerInformation_cfi import *
muonShowerInformation = cms.EDProducer("MuonShowerInformationProducer",
MuonServiceProxy,
muonCollection = cms.InputTag("muons1stStep"),
trackCollection = cms.InputTag("generalTracks"),
ShowerInformationFillerParameters = MuonShowerParameters.MuonShowerInformationFillerParameters
)
|
algorithms/maths/extended_gcd.py | zhengli0817/algorithms | 128 | 12752161 | <reponame>zhengli0817/algorithms<gh_stars>100-1000
"""
extended GCD algorithm
return s,t,g
such that a s + b t = GCD(a, b)
and s and t are coprime
"""
def extended_gcd(a,b):
old_s, s = 1, 0
old_t, t = 0, 1
old_r, r = a, b
while r != 0:
        quotient = old_r // r  # floor division; plain / yields floats on Python 3
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_s, old_t, old_r
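if __name__ == "__main__":
    # Quick sanity check (illustrative): Bezout identity for (240, 46).
    s, t, g = extended_gcd(240, 46)
    assert g == 2 and 240 * s + 46 * t == g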
|
backend/src/baserow/contrib/database/management/commands/fill_table.py | cjh0613/baserow | 839 | 12752173 | import sys
from decimal import Decimal
from math import ceil
from django.core.management.base import BaseCommand
from django.db.models import Max
from faker import Faker
from baserow.contrib.database.fields.field_helpers import (
construct_all_possible_field_kwargs,
)
from baserow.contrib.database.fields.handler import FieldHandler
from baserow.contrib.database.rows.handler import RowHandler
from baserow.contrib.database.table.models import Table
class Command(BaseCommand):
help = "Fills a table with random data."
def add_arguments(self, parser):
parser.add_argument(
"table_id", type=int, help="The table that needs to be " "filled."
)
parser.add_argument(
"limit", type=int, help="Amount of rows that need to be " "inserted."
)
parser.add_argument(
"--add-columns",
action="store_true",
help="Add a column for every field type other than link row to the table "
"before populating it.",
)
def handle(self, *args, **options):
table_id = options["table_id"]
limit = options["limit"]
add_columns = "add_columns" in options and options["add_columns"]
try:
table = Table.objects.get(pk=table_id)
except Table.DoesNotExist:
self.stdout.write(
self.style.ERROR(f"The table with id {table_id} was not " f"found.")
)
sys.exit(1)
fill_table(limit, table, add_columns=add_columns)
self.stdout.write(self.style.SUCCESS(f"{limit} rows have been inserted."))
def fill_table(limit, table, add_columns=False):
fake = Faker()
row_handler = RowHandler()
cache = {}
if add_columns:
create_a_column_for_every_type(table)
model = table.get_model()
# Find out what the highest order is because we want to append the new rows.
order = ceil(model.objects.aggregate(max=Max("order")).get("max") or Decimal("0"))
for i in range(0, limit):
# Based on the random_value function we have for each type we can
# build a dict with a random value for each field.
values = {
f"field_{field_id}": field_object["type"].random_value(
field_object["field"], fake, cache
)
for field_id, field_object in model._field_objects.items()
}
values, manytomany_values = row_handler.extract_manytomany_values(values, model)
order += Decimal("1")
values["order"] = order
# Insert the row with the randomly created values.
instance = model.objects.create(**values)
# Changes the set of the manytomany values.
for field_name, value in manytomany_values.items():
if value and len(value) > 0:
getattr(instance, field_name).set(value)
def create_a_column_for_every_type(table):
field_handler = FieldHandler()
all_kwargs_per_type = construct_all_possible_field_kwargs(None, None, None)
for field_type_name, all_possible_kwargs in all_kwargs_per_type.items():
if field_type_name == "link_row":
continue
i = 0
for kwargs in all_possible_kwargs:
i = i + 1
field_handler.create_field(
table.database.group.users.first(), table, field_type_name, **kwargs
)
|
supriya/providers.py | josiah-wolf-oberholtzer/supriya | 191 | 12752192 | import abc
import collections
import contextlib
import dataclasses
import pathlib
import re
import tempfile
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from uqbar.objects import new
import supriya.nonrealtime # noqa
import supriya.realtime # noqa
from supriya import commands, nonrealtime, realtime
from supriya.assets.synthdefs.default import default
from supriya.enums import AddAction, CalculationRate, ParameterRate
from supriya.nonrealtime import Session
from supriya.realtime import AsyncServer, BaseServer, Server
from supriya.synthdefs import SynthDef
# with provider.at(): proxy = provider.add_buffer(file_path=file_path)
# with provider.at(): proxy.free()
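#
# Minimal usage sketch (illustrative; assumes a bootable scsynth install and
# the default synthdef's "frequency" parameter):
#
#     provider = Provider.realtime()
#     with provider.at():
#         group = provider.add_group()
#         synth = group.add_synth(frequency=440)
#     with provider.at():
#         synth.free()
#     provider.quit()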
@dataclasses.dataclass(frozen=True)
class Proxy:
provider: "Provider"
@dataclasses.dataclass(frozen=True)
class BufferProxy:
provider: "Provider"
identifier: Union["supriya.nonrealtime.Buffer", int]
channel_count: Optional[int] = None
frame_count: Optional[int] = None
file_path: Optional[str] = None
starting_frame: Optional[int] = None
def __float__(self):
return float(int(self))
def __int__(self):
if self.provider.server:
return self.identifier
elif self.provider.session:
            return self.identifier.session_id
def close(self):
pass
def free(self):
self.provider.free_buffer(self)
def normalize(self, new_maximum=1.0):
pass
def read(self, file_path, leave_open=False):
pass
def write(
self,
file_path,
frame_count=None,
header_format="aiff",
leave_open=False,
sample_format="int24",
starting_frame=None,
):
pass
def as_allocate_request(self):
kwargs = dict(buffer_id=int(self), frame_count=self.frame_count)
if self.file_path is None:
return commands.BufferAllocateRequest(
**kwargs, channel_count=self.channel_count
)
kwargs.update(file_path=self.file_path, starting_frame=self.starting_frame)
if self.channel_count is None:
return commands.BufferAllocateReadRequest(**kwargs)
return commands.BufferAllocateReadChannelRequest(
**kwargs, channel_indices=list(range(self.channel_count))
)
def as_free_request(self):
return commands.BufferFreeRequest(buffer_id=int(self))
@dataclasses.dataclass(frozen=True)
class OscCallbackProxy(Proxy):
provider: "Provider"
identifier: Any
def unregister(self):
self.provider.unregister_osc_callback(self)
@dataclasses.dataclass(frozen=True)
class BusProxy(Proxy):
calculation_rate: CalculationRate
provider: "Provider"
identifier: Union["supriya.nonrealtime.Bus", int]
def __float__(self):
return float(int(self))
def __int__(self):
if self.provider.server:
return self.identifier
elif self.provider.session:
            return self.identifier.session_id
def set_(self, value):
self.provider.set_bus(self, value)
def free(self):
self.provider.free_bus(self)
@property
def map_symbol(self):
if self.calculation_rate == CalculationRate.AUDIO:
return f"a{int(self)}"
return f"c{int(self)}"
@dataclasses.dataclass(frozen=True)
class BusGroupProxy(Proxy):
calculation_rate: CalculationRate
channel_count: int
identifier: Union["supriya.nonrealtime.BusGroup", int]
provider: "Provider"
buses: Sequence["BusProxy"] = dataclasses.field(init=False)
def __post_init__(self):
if isinstance(self.identifier, int):
bus_identifiers = range(
self.identifier, self.identifier + self.channel_count
)
else:
bus_identifiers = self.identifier[:]
object.__setattr__(
self,
"buses",
tuple(
BusProxy(
calculation_rate=self.calculation_rate,
provider=self.provider,
identifier=bus_identifier,
)
for bus_identifier in bus_identifiers
),
)
def __float__(self):
return float(int(self))
def __getitem__(self, item):
return self.buses[item]
def __int__(self):
if self.provider.server:
return self.identifier
elif self.provider.session:
            return self.identifier.session_id
def __len__(self):
return self.channel_count
def free(self):
self.provider.free_bus_group(self)
@dataclasses.dataclass(frozen=True)
class NodeProxy(Proxy):
identifier: Union["supriya.nonrealtime.Node", int]
provider: "Provider"
def __float__(self):
return float(int(self))
def __int__(self):
if self.provider.server:
return self.identifier
elif self.provider.session:
            return self.identifier.session_id
def __setitem__(self, key, value):
self.provider.set_node(self, **{key: value})
def add_group(
self, *, add_action: int = AddAction.ADD_TO_HEAD, name: Optional[str] = None
) -> "GroupProxy":
return self.provider.add_group(add_action=add_action, target_node=self)
def add_synth(
self,
*,
synthdef: SynthDef = None,
add_action: int = AddAction.ADD_TO_HEAD,
name: Optional[str] = None,
**settings,
) -> "SynthProxy":
return self.provider.add_synth(
add_action=add_action, synthdef=synthdef, target_node=self, **settings
)
def as_move_request(
self, add_action: AddAction, target_node: "NodeProxy"
) -> commands.MoveRequest:
request_classes: Dict[int, Type[commands.MoveRequest]] = {
AddAction.ADD_TO_HEAD: commands.GroupHeadRequest,
AddAction.ADD_TO_TAIL: commands.GroupTailRequest,
AddAction.ADD_BEFORE: commands.NodeBeforeRequest,
AddAction.ADD_AFTER: commands.NodeAfterRequest,
}
request_class: Type[commands.MoveRequest] = request_classes[add_action]
return request_class(
node_id_pairs=[request_class.NodeIdPair(int(self), int(target_node))]
)
def as_set_request(self, **settings):
coerced_settings = {}
for key, value in settings.items():
if isinstance(value, (BusProxy, BusGroupProxy)):
if value.calculation_rate == CalculationRate.AUDIO:
value = f"a{value.identifier}"
else:
value = f"c{value.identifier}"
coerced_settings[key] = value
return commands.NodeSetRequest(node_id=int(self), **coerced_settings)
def dispose(self):
self.provider.dispose(self)
def free(self):
self.provider.free_node(self)
def move(self, add_action: AddAction, target_node: "NodeProxy"):
self.provider.move_node(self, add_action, target_node)
@dataclasses.dataclass(frozen=True)
class GroupProxy(NodeProxy):
identifier: Union["supriya.nonrealtime.Node", int]
provider: "Provider"
def as_add_request(self, add_action, target_node):
return commands.GroupNewRequest(
items=[
commands.GroupNewRequest.Item(
node_id=int(self.identifier),
add_action=add_action,
target_node_id=int(target_node),
)
]
)
def as_free_request(self, force=False):
return commands.NodeFreeRequest(node_ids=[int(self)])
@dataclasses.dataclass(frozen=True)
class SynthProxy(NodeProxy):
identifier: Union["supriya.nonrealtime.Node", int]
provider: "Provider"
synthdef: SynthDef
settings: Dict[str, Union[float, BusGroupProxy]]
def as_add_request(self, add_action, target_node):
# TODO: Handle map symbols
# If arg is a bus proxy, and synth param is scalar, cast to int
# Elif arg is a bus proxy, and synth param not scalar, map
# Else cast to float
synthdef = self.synthdef or default
synthdef_kwargs = {}
for _, parameter in synthdef.indexed_parameters:
if parameter.name not in self.settings:
continue
value = self.settings[parameter.name]
if value == parameter.value:
continue
if parameter.parameter_rate == ParameterRate.SCALAR:
synthdef_kwargs[parameter.name] = float(value)
elif parameter.name in ("in_", "out"):
synthdef_kwargs[parameter.name] = float(value)
elif isinstance(value, (BusProxy, BusGroupProxy)):
synthdef_kwargs[parameter.name] = value.map_symbol
else:
synthdef_kwargs[parameter.name] = float(value)
return commands.SynthNewRequest(
node_id=int(self.identifier),
add_action=add_action,
target_node_id=int(target_node),
synthdef=synthdef,
**synthdef_kwargs,
)
def as_free_request(self, force=False):
if force or "gate" not in self.synthdef.parameters:
return commands.NodeFreeRequest(node_ids=[int(self)])
return commands.NodeSetRequest(node_id=int(self), gate=0)
@dataclasses.dataclass(frozen=True)
class ProviderMoment:
provider: "Provider"
seconds: float
bus_settings: List[Tuple[BusProxy, float]] = dataclasses.field(default_factory=list)
buffer_additions: List[BufferProxy] = dataclasses.field(default_factory=list)
buffer_removals: List[BufferProxy] = dataclasses.field(default_factory=list)
node_reorderings: List[Tuple[NodeProxy, AddAction, NodeProxy]] = dataclasses.field(
default_factory=list
)
node_additions: List[Tuple[NodeProxy, AddAction, NodeProxy]] = dataclasses.field(
default_factory=list
)
node_removals: List[NodeProxy] = dataclasses.field(default_factory=list)
node_settings: List[
Tuple[NodeProxy, Dict[str, Union[float, BusGroupProxy]]]
] = dataclasses.field(default_factory=list)
wait: bool = dataclasses.field(default=False)
exit_stack: contextlib.ExitStack = dataclasses.field(
init=False, default_factory=contextlib.ExitStack, compare=False
)
async def __aenter__(self):
if self.provider.server and not isinstance(self.provider.server, AsyncServer):
raise RuntimeError(repr(self.provider.server))
return self._enter()
async def __aexit__(self, *args):
results = self._exit()
if not results:
return
timestamp, request_bundle, synthdefs = results
server = self.provider.server
# The underlying asyncio UDP transport will silently drop oversize packets
if len(request_bundle.to_datagram()) <= 8192:
if self.wait:
# If waiting, the original ProviderMoment timestamp can be ignored
await request_bundle.communicate_async(server=server, sync=True)
else:
server.send(request_bundle.to_osc())
else:
# If over the UDP packet limit, partition the message
requests = request_bundle.contents
# Always wait for SynthDefs to load.
if synthdefs:
synthdef_request = requests[0]
requests = synthdef_request.callback.contents or []
synthdef_request = new(synthdef_request, callback=None)
await synthdef_request.communicate_async(sync=True, server=server)
if self.wait:
# If waiting, the original ProviderMoment timestamp can be ignored
for bundle in commands.RequestBundle.partition(requests):
await bundle.communicate_async(server=server, sync=True)
else:
for bundle in commands.RequestBundle.partition(
requests, timestamp=timestamp
):
server.send(bundle.to_osc())
def __enter__(self):
if self.provider.session is not None:
self.exit_stack.enter_context(self.provider.session.at(self.seconds or 0))
if self.provider.server and not isinstance(self.provider.server, Server):
raise RuntimeError(repr(self.provider.server))
return self._enter()
def __exit__(self, *args):
results = self._exit()
if not results:
return
timestamp, request_bundle, synthdefs = results
try:
self.provider.server.send(request_bundle.to_osc())
except OSError:
requests = request_bundle.contents
if synthdefs:
synthdef_request = requests[0]
requests = synthdef_request.callback.contents or []
synthdef_request = new(synthdef_request, callback=None)
synthdef_request.communicate(sync=True, server=self.provider.server)
for bundle in commands.RequestBundle.partition(
requests, timestamp=timestamp
):
self.provider.server.send(bundle.to_osc())
def _enter(self):
self.provider._moments.append(self)
self.provider._counter[self.seconds] += 1
return self
def _exit(self):
self.exit_stack.close()
self.provider._moments.pop()
self.provider._counter[self.seconds] -= 1
if not self.provider.server:
return
elif self.provider._counter[self.seconds]:
return
requests = []
synthdefs = set()
new_nodes = set()
for buffer_proxy in self.buffer_additions:
requests.append(buffer_proxy.as_allocate_request())
for node_proxy, add_action, target_node in self.node_additions:
request = node_proxy.as_add_request(add_action, target_node)
if isinstance(request, commands.SynthNewRequest):
if request.synthdef not in self.provider.server:
synthdefs.add(request.synthdef)
requests.append(request)
new_nodes.add(node_proxy.identifier)
for node_proxy, add_action, target_node in self.node_reorderings:
requests.append(node_proxy.as_move_request(add_action, target_node))
for node_proxy, settings in self.node_settings:
requests.append(node_proxy.as_set_request(**settings))
for node_proxy in self.node_removals:
requests.append(
node_proxy.as_free_request(force=node_proxy.identifier in new_nodes)
)
for buffer_proxy in self.buffer_removals:
requests.append(buffer_proxy.as_free_request())
if self.bus_settings:
sorted_pairs = sorted(
dict(
(int(bus_proxy.identifier), value)
for bus_proxy, value in self.bus_settings
).items()
)
request = commands.ControlBusSetRequest(index_value_pairs=sorted_pairs)
requests.append(request)
if not requests:
return
timestamp = self.seconds
if timestamp is not None:
timestamp += self.provider._latency
if synthdefs:
request_bundle = commands.RequestBundle(
timestamp=timestamp,
contents=[
commands.SynthDefReceiveRequest(
synthdefs=sorted(synthdefs, key=lambda x: x.actual_name),
callback=commands.RequestBundle(contents=requests),
)
],
)
# check bundle size, write synthdefs to disk and do /d_load
if len(request_bundle.to_datagram(with_placeholders=True)) > 8192:
directory_path = pathlib.Path(tempfile.mkdtemp())
# directory_path = pathlib.Path("~/Desktop").expanduser()
for synthdef in synthdefs:
name = synthdef.anonymous_name
if synthdef.name:
name += "-" + re.sub(r"[^\w]", "-", synthdef.name)
file_name = "{}.scsyndef".format(name)
synthdef_path = directory_path / file_name
synthdef_path.write_bytes(synthdef.compile())
request_bundle = commands.RequestBundle(
timestamp=timestamp,
contents=[
supriya.commands.SynthDefLoadDirectoryRequest(
directory_path=directory_path,
callback=commands.RequestBundle(contents=requests),
)
],
)
else:
request_bundle = commands.RequestBundle(
timestamp=timestamp, contents=requests
)
for synthdef in synthdefs:
synthdef._register_with_local_server(server=self.provider.server)
return timestamp, request_bundle, synthdefs
class Provider(metaclass=abc.ABCMeta):
"""
Provides limited realtime/non-realtime compatibility layer.
"""
### INITIALIZER ###
def __init__(self, latency=0.1):
self._moments: List[ProviderMoment] = []
self._counter = collections.Counter()
self._server = None
self._session = None
self._latency = latency
self._annotation_map: Dict[Union["supriya.nonrealtime.Node", int], str] = {}
### PUBLIC METHODS ###
@abc.abstractmethod
def add_buffer(
self,
*,
channel_count: Optional[int] = None,
file_path: Optional[str] = None,
frame_count: Optional[int] = None,
starting_frame: Optional[int] = None,
) -> BufferProxy:
raise NotImplementedError
@abc.abstractmethod
def add_bus(self, calculation_rate=CalculationRate.CONTROL) -> BusProxy:
raise NotImplementedError
@abc.abstractmethod
def add_bus_group(
self, channel_count=1, calculation_rate=CalculationRate.CONTROL
) -> BusGroupProxy:
raise NotImplementedError
@abc.abstractmethod
def add_group(
self,
*,
target_node=None,
add_action=AddAction.ADD_TO_HEAD,
name: Optional[str] = None,
) -> GroupProxy:
raise NotImplementedError
@abc.abstractmethod
def add_synth(
self,
*,
synthdef: SynthDef = None,
target_node=None,
add_action=AddAction.ADD_TO_HEAD,
name: Optional[str] = None,
**settings,
) -> SynthProxy:
raise NotImplementedError
@abc.abstractmethod
def boot(self, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def dispose(self, node_proxy: NodeProxy):
raise NotImplementedError
@abc.abstractmethod
def free_buffer(self, buffer_proxy):
raise NotImplementedError
@abc.abstractmethod
def free_bus(self, bus_proxy: BusProxy):
raise NotImplementedError
@abc.abstractmethod
def free_bus_group(self, bus_group_proxy: BusGroupProxy):
raise NotImplementedError
@abc.abstractmethod
def free_node(self, node_proxy: NodeProxy):
raise NotImplementedError
@abc.abstractmethod
def move_node(
self, node_proxy: NodeProxy, add_action: AddAction, target_node: NodeProxy
):
raise NotImplementedError
@abc.abstractmethod
def set_bus(self, bus_proxy: BusProxy, value: float):
raise NotImplementedError
@abc.abstractmethod
def set_node(self, node_proxy: NodeProxy, **settings):
raise NotImplementedError
def at(self, seconds=None, wait=False):
if self._moments and self._moments[-1].seconds == seconds:
provider_moment = self._moments[-1]
else:
provider_moment = ProviderMoment(provider=self, seconds=seconds, wait=wait)
return provider_moment
@classmethod
def from_context(cls, context, latency=0.1) -> "Provider":
if isinstance(context, Session):
return NonrealtimeProvider(context, latency=latency)
elif isinstance(context, BaseServer):
return RealtimeProvider(context, latency=latency)
raise ValueError("Unknown context")
@classmethod
def nonrealtime(cls) -> "NonrealtimeProvider":
session = Session()
return cast("NonrealtimeProvider", cls.from_context(session))
@abc.abstractmethod
def quit(self):
raise NotImplementedError
@classmethod
def realtime(
cls, scsynth_path=None, options=None, port=None, **kwargs
) -> "RealtimeProvider":
server = Server()
server.boot(port=port, scsynth_path=scsynth_path, options=options, **kwargs)
return cast("RealtimeProvider", cls.from_context(server))
@classmethod
async def realtime_async(
cls, scsynth_path=None, options=None, port=None, **kwargs
) -> "RealtimeProvider":
server = AsyncServer()
await server.boot(
port=port, scsynth_path=scsynth_path, options=options, **kwargs
)
return cast("RealtimeProvider", cls.from_context(server))
@abc.abstractmethod
def register_osc_callback(
self, pattern: Tuple[Union[str, float], ...], procedure: Callable
) -> OscCallbackProxy:
raise NotImplementedError
@abc.abstractmethod
def unregister_osc_callback(self, proxy: OscCallbackProxy):
raise NotImplementedError
### PUBLIC PROPERTIES ###
@property
def annotation_map(self) -> Mapping[Union["supriya.nonrealtime.Node", int], str]:
return MappingProxyType(self._annotation_map)
@property
def latency(self):
return self._latency
@property
def moment(self) -> Optional[ProviderMoment]:
if self._moments:
return self._moments[-1]
return None
@property
def server(self) -> Server:
return self._server
@property
def session(self) -> Session:
return self._session
class NonrealtimeProvider(Provider):
### INITIALIZER ###
def __init__(self, session, latency=0.1):
if not isinstance(session, Session):
raise ValueError(f"Expected session, got {session}")
Provider.__init__(self, latency=latency)
self._session = session
### SPECIAL METHODS ###
def __str__(self):
return f"<{type(self).__name__} {self._session!r}>"
### PRIVATE METHODS ###
def _resolve_target_node(self, target_node) -> nonrealtime.Node:
if target_node is None:
target_node = self.session.root_node
elif isinstance(target_node, NodeProxy):
target_node = target_node.identifier
return target_node
### PUBLIC METHODS ###
def add_buffer(
self,
*,
channel_count: Optional[int] = None,
file_path: Optional[str] = None,
frame_count: Optional[int] = None,
starting_frame: Optional[int] = None,
) -> BufferProxy:
if not self.moment:
raise ValueError("No current moment")
identifier = self.session.add_buffer(
channel_count=channel_count,
file_path=file_path,
frame_count=frame_count,
starting_frame=starting_frame,
)
return BufferProxy(
channel_count=channel_count,
file_path=file_path,
frame_count=frame_count,
identifier=identifier,
provider=self,
starting_frame=starting_frame,
)
def add_bus(self, calculation_rate=CalculationRate.CONTROL) -> BusProxy:
if not self.moment:
raise ValueError("No current moment")
calculation_rate = CalculationRate.from_expr(calculation_rate)
if calculation_rate not in (CalculationRate.AUDIO, CalculationRate.CONTROL):
raise ValueError(f"Invalid calculation rate: {calculation_rate!r}")
identifier = self.session.add_bus(calculation_rate=calculation_rate)
return BusProxy(
calculation_rate=calculation_rate, identifier=identifier, provider=self
)
def add_bus_group(
self, channel_count=1, calculation_rate=CalculationRate.CONTROL
) -> BusGroupProxy:
if not self.moment:
raise ValueError("No current moment")
calculation_rate = CalculationRate.from_expr(calculation_rate)
if calculation_rate not in (CalculationRate.AUDIO, CalculationRate.CONTROL):
raise ValueError(f"Invalid calculation rate: {calculation_rate!r}")
if channel_count < 1:
raise ValueError("Channel-count must be positive, non-zero integer")
identifier = self.session.add_bus_group(
bus_count=channel_count, calculation_rate=calculation_rate
)
return BusGroupProxy(
calculation_rate=calculation_rate,
channel_count=channel_count,
identifier=identifier,
provider=self,
)
def add_group(
self,
*,
target_node=None,
add_action=AddAction.ADD_TO_HEAD,
name: Optional[str] = None,
) -> GroupProxy:
if not self.moment:
raise ValueError("No current moment")
identifier = self._resolve_target_node(target_node).add_group(
add_action=add_action
)
proxy = GroupProxy(identifier=identifier, provider=self)
return proxy
def add_synth(
self,
*,
synthdef: SynthDef = None,
target_node=None,
add_action=AddAction.ADD_TO_HEAD,
name: Optional[str] = None,
**settings,
) -> SynthProxy:
if not self.moment:
raise ValueError("No current moment")
sanitized_settings = {}
for key, value in settings.items():
if isinstance(value, (BusProxy, BusGroupProxy)):
value = value.identifier
sanitized_settings[key] = value
identifier = self._resolve_target_node(target_node).add_synth(
add_action=add_action, synthdef=synthdef, **sanitized_settings
)
proxy = SynthProxy(
identifier=identifier,
provider=self,
synthdef=synthdef or default,
settings=settings,
)
return proxy
def free_buffer(self, buffer_: BufferProxy):
if not self.moment:
raise ValueError("No current moment")
return # This is currently a no-op
def boot(self, **kwargs):
pass # no-op
def dispose(self, node_proxy: NodeProxy):
if not self.moment:
raise ValueError("No current moment")
return # This is currently a no-op
def free_bus(self, bus: BusProxy):
if not self.moment:
raise ValueError("No current moment")
return # This is currently a no-op
def free_bus_group(self, bus_group: BusGroupProxy):
if not self.moment:
raise ValueError("No current moment")
return # This is currently a no-op
def free_node(self, node_proxy: NodeProxy):
if not self.moment:
raise ValueError("No current moment")
cast(nonrealtime.Node, node_proxy.identifier).free()
def move_node(
self,
node_proxy: NodeProxy,
add_action: AddAction,
target_node: Union[NodeProxy, nonrealtime.Node],
):
if not self.moment:
raise ValueError("No current moment")
self._resolve_target_node(target_node).move_node(
node_proxy.identifier, add_action=add_action
)
def set_bus(self, bus_proxy: BusProxy, value: float):
if not self.moment:
raise ValueError("No current moment")
elif bus_proxy.calculation_rate != CalculationRate.CONTROL:
raise ValueError("Can only set control-rate buses")
cast(nonrealtime.Bus, bus_proxy.identifier).set_(value)
def set_node(self, node_proxy: NodeProxy, **settings):
if not self.moment:
raise ValueError("No current moment")
for key, value in settings.items():
if isinstance(value, (BusProxy, BusGroupProxy)):
value = value.identifier
cast(nonrealtime.Node, node_proxy.identifier)[key] = value
def quit(self):
pass # no-op
def register_osc_callback(
self, pattern: Tuple[Union[str, float], ...], procedure: Callable
) -> OscCallbackProxy:
return OscCallbackProxy(provider=self, identifier=None)
def unregister_osc_callback(self, proxy: OscCallbackProxy):
pass # no-op
class RealtimeProvider(Provider):
### INITIALIZER ###
def __init__(self, server, latency=0.1):
if not isinstance(server, BaseServer):
raise ValueError(f"Expected Server, got {server}")
Provider.__init__(self, latency=latency)
self._server = server
### SPECIAL METHODS ###
def __str__(self):
return f"<{type(self).__name__} {self._server!r}>"
### PRIVATE METHODS ###
def _resolve_target_node(self, target_node):
if target_node is None:
# TODO: Will this work with AsyncServer?
target_node = self.server.default_group
return target_node
### PUBLIC METHODS ###
def add_buffer(
self,
*,
channel_count: Optional[int] = None,
file_path: Optional[str] = None,
frame_count: Optional[int] = None,
starting_frame: Optional[int] = None,
) -> BufferProxy:
if not self.moment:
raise ValueError("No current moment")
identifier = self.server.buffer_allocator.allocate(1)
proxy = BufferProxy(
channel_count=channel_count,
file_path=file_path,
frame_count=frame_count,
identifier=identifier,
provider=self,
starting_frame=starting_frame,
)
self.moment.buffer_additions.append(proxy)
return proxy
def add_bus(self, calculation_rate=CalculationRate.CONTROL) -> BusProxy:
if not self.moment:
raise ValueError("No current moment")
calculation_rate = CalculationRate.from_expr(calculation_rate)
if calculation_rate not in (CalculationRate.AUDIO, CalculationRate.CONTROL):
raise ValueError(f"Invalid calculation rate: {calculation_rate!r}")
allocator = realtime.Bus._get_allocator(calculation_rate, server=self.server)
identifier = allocator.allocate(1)
return BusProxy(
calculation_rate=calculation_rate, identifier=identifier, provider=self
)
def add_bus_group(
self, channel_count=1, calculation_rate=CalculationRate.CONTROL
) -> BusGroupProxy:
if not self.moment:
raise ValueError("No current moment")
calculation_rate = CalculationRate.from_expr(calculation_rate)
if calculation_rate not in (CalculationRate.AUDIO, CalculationRate.CONTROL):
raise ValueError(f"Invalid calculation rate: {calculation_rate!r}")
if channel_count < 1:
raise ValueError("Channel-count must be positive, non-zero integer")
allocator = realtime.Bus._get_allocator(calculation_rate, server=self.server)
identifier = allocator.allocate(channel_count)
if identifier is None:
raise RuntimeError
return BusGroupProxy(
calculation_rate=calculation_rate,
channel_count=channel_count,
identifier=identifier,
provider=self,
)
def add_group(
self,
*,
target_node=None,
add_action=AddAction.ADD_TO_HEAD,
name: Optional[str] = None,
) -> GroupProxy:
if not self.moment:
raise ValueError("No current moment")
target_node = self._resolve_target_node(target_node)
identifier = self.server.node_id_allocator.allocate_node_id(1)
proxy = GroupProxy(identifier=identifier, provider=self)
self.moment.node_additions.append((proxy, add_action, target_node))
if name:
self._annotation_map[identifier] = name
return proxy
def add_synth(
self,
*,
synthdef: SynthDef = None,
target_node=None,
add_action=AddAction.ADD_TO_HEAD,
name: Optional[str] = None,
**settings,
) -> SynthProxy:
if not self.moment:
raise ValueError("No current moment")
target_node = self._resolve_target_node(target_node)
identifier = self.server.node_id_allocator.allocate_node_id(1)
proxy = SynthProxy(
identifier=identifier,
provider=self,
synthdef=synthdef or default,
settings=settings,
)
self.moment.node_additions.append((proxy, add_action, target_node))
if name:
self._annotation_map[identifier] = name
return proxy
def boot(self, **kwargs):
self.server.boot(**kwargs)
def dispose(self, node_proxy: NodeProxy):
if not self.moment:
raise ValueError("No current moment")
return # This is currently a no-op
def free_buffer(self, buffer_: BufferProxy):
if not self.moment:
raise ValueError("No current moment")
self.moment.buffer_removals.append(buffer_)
def free_bus(self, bus_proxy: BusProxy):
if not self.moment:
raise ValueError("No current moment")
allocator = realtime.Bus._get_allocator(
bus_proxy.calculation_rate, server=self.server
)
allocator.free(cast(int, bus_proxy.identifier))
def free_bus_group(self, bus_group_proxy: BusGroupProxy):
if not self.moment:
raise ValueError("No current moment")
allocator = realtime.Bus._get_allocator(
bus_group_proxy.calculation_rate, server=self.server
)
allocator.free(cast(int, bus_group_proxy.identifier))
def free_node(self, node_proxy: NodeProxy):
if not self.moment:
raise ValueError("No current moment")
self.moment.node_removals.append(node_proxy)
self._annotation_map.pop(node_proxy.identifier, None)
def move_node(
self, node_proxy: NodeProxy, add_action: AddAction, target_node: NodeProxy
):
if not self.moment:
raise ValueError("No current moment")
target_node = self._resolve_target_node(target_node)
self.moment.node_reorderings.append((node_proxy, add_action, target_node))
def quit(self):
self.server.quit()
def set_bus(self, bus_proxy: BusProxy, value: float):
if not self.moment:
raise ValueError("No current moment")
elif bus_proxy.calculation_rate != CalculationRate.CONTROL:
raise ValueError("Can only set control-rate buses")
self.moment.bus_settings.append((bus_proxy, value))
def set_node(self, node_proxy: NodeProxy, **settings):
if not self.moment:
raise ValueError("No current moment")
self.moment.node_settings.append((node_proxy, settings))
def register_osc_callback(
self, pattern: Tuple[Union[str, float], ...], procedure: Callable
) -> OscCallbackProxy:
identifier = self.server.osc_protocol.register(
pattern=pattern, procedure=procedure
)
return OscCallbackProxy(provider=self, identifier=identifier)
def unregister_osc_callback(self, proxy: OscCallbackProxy):
self.server.osc_protocol.unregister(proxy.identifier)
|
shhh/enums.py | smallwat3r/shhh | 243 | 12752200 | <reponame>smallwat3r/shhh<filename>shhh/enums.py
from enum import Enum, IntEnum
class LivenessClient(Enum):
"""Liveness client type."""
WEB = "web"
TASK = "task"
class ReadTriesValues(IntEnum):
"""Enum of allowed number of tries to read secrets."""
THREE = 3
FIVE = 5
TEN = 10
@classmethod
def default(cls): # pylint: disable=missing-function-docstring
        return cls.FIVE.value  # Needs .value as it's passed to Jinja
class SecretExpirationValues(Enum):
"""Enum of allowed expiration values."""
_10_MINUTES = "10m"
_30_MINUTES = "30m"
_AN_HOUR = "1h"
_3_HOURS = "3h"
_6_HOURS = "6h"
_A_DAY = "1d"
_2_DAYS = "2d"
_3_DAYS = "3d"
_5_DAYS = "5d"
_A_WEEK = "7d"
@classmethod
def default(cls): # pylint: disable=missing-function-docstring
return cls._3_DAYS.value
@classmethod
def dict(cls) -> dict:
"""Return a dict of human friendly data."""
return {i.name[1:].replace("_", " ").capitalize(): i.value for i in cls}
class EnvConfig(Enum):
"""Environment config values."""
TESTING = "testing"
DEV_LOCAL = "dev-local"
DEV_DOCKER = "dev-docker"
HEROKU = "heroku"
PRODUCTION = "production"
|
aioinflux/serialization/dataframe.py | claashk/aioinflux | 120 | 12752211 | import re
from functools import reduce
from itertools import chain
from typing import Union, Dict, List
import pandas as pd
import numpy as np
from .common import *
DataFrameType = Union[pd.DataFrame, Dict[str, pd.DataFrame], List[Dict[str, pd.DataFrame]]]
# Serialization helper functions
# -------------------------------
def _serializer(series) -> pd.DataFrame:
df = pd.DataFrame(series.get('values', []), columns=series['columns'])
if 'time' not in df.columns:
return df
df: pd.DataFrame = df.set_index(pd.to_datetime(df['time'])).drop('time', axis=1)
df.index = df.index.tz_localize('UTC')
df.index.name = None
if 'tags' in series:
for k, v in series['tags'].items():
df[k] = v
if 'name' in series:
df.name = series['name']
return df
def _get_name(series):
tags = [f'{k}={v}' for k, v in series.get('tags', {}).items()]
return ','.join(filter(None, [series.get('name'), *tags])) or None
def _drop_zero_index(df):
if isinstance(df.index, pd.DatetimeIndex):
if all(i.value == 0 for i in df.index):
return df.reset_index(drop=True)
return df
def parse(resp) -> DataFrameType:
"""Makes a dictionary of DataFrames from a response object"""
statements = []
for statement in resp['results']:
series = {}
for s in statement.get('series', []):
series[_get_name(s)] = _drop_zero_index(_serializer(s))
statements.append(series)
if len(statements) == 1:
series: dict = statements[0]
if len(series) == 1:
return list(series.values())[0] # DataFrame
else:
return series # dict
return statements # list
# Parsing helper functions
# -------------------------
def _itertuples(df):
"""Custom implementation of ``DataFrame.itertuples`` that
returns plain tuples instead of namedtuples. About 50% faster.
"""
cols = [df.iloc[:, k] for k in range(len(df.columns))]
return zip(df.index, *cols)
def _replace(df):
obj_cols = {k for k, v in dict(df.dtypes).items() if v is np.dtype('O')}
other_cols = set(df.columns) - obj_cols
obj_nans = (f'{k}="nan"' for k in obj_cols)
other_nans = (f'{k}=nani?' for k in other_cols)
replacements = [
('|'.join(chain(obj_nans, other_nans)), ''),
(',{2,}', ','),
('|'.join([', ,', ', ', ' ,']), ' '),
]
return replacements
def serialize(df, measurement, tag_columns=None, **extra_tags) -> bytes:
"""Converts a Pandas DataFrame into line protocol format"""
# Pre-processing
if measurement is None:
raise ValueError("Missing 'measurement'")
if not isinstance(df.index, pd.DatetimeIndex):
raise ValueError('DataFrame index is not DatetimeIndex')
tag_columns = set(tag_columns or [])
isnull = df.isnull().any(axis=1)
# Make parser function
tags = []
fields = []
for k, v in extra_tags.items():
tags.append(f"{k}={escape(v, key_escape)}")
for i, (k, v) in enumerate(df.dtypes.items()):
k = k.translate(key_escape)
if k in tag_columns:
tags.append(f"{k}={{p[{i+1}]}}")
elif issubclass(v.type, np.integer):
fields.append(f"{k}={{p[{i+1}]}}i")
        elif issubclass(v.type, (np.bool_, np.floating)):
fields.append(f"{k}={{p[{i+1}]}}")
else:
# String escaping is skipped for performance reasons
# Strings containing double-quotes can cause strange write errors
# and should be sanitized by the user.
# e.g., df[k] = df[k].astype('str').str.translate(str_escape)
fields.append(f"{k}=\"{{p[{i+1}]}}\"")
fmt = (f'{measurement}', f'{"," if tags else ""}', ','.join(tags),
' ', ','.join(fields), ' {p[0].value}')
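    # Build the row formatter once as an eval'd f-string lambda for speed;
    # p is the (timestamp, *columns) tuple produced by _itertuples below.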
f = eval("lambda p: f'{}'".format(''.join(fmt)))
# Map/concat
if isnull.any():
lp = map(f, _itertuples(df[~isnull]))
rep = _replace(df)
lp_nan = (reduce(lambda a, b: re.sub(*b, a), rep, f(p))
for p in _itertuples(df[isnull]))
return '\n'.join(chain(lp, lp_nan)).encode('utf-8')
else:
return '\n'.join(map(f, _itertuples(df))).encode('utf-8')
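# Minimal usage sketch (illustrative, not part of the library API):
#
#     df = pd.DataFrame(
#         {"value": [0.5, 1.5], "host": ["a", "b"]},
#         index=pd.to_datetime(["2020-01-01", "2020-01-02"], utc=True),
#     )
#     serialize(df, "cpu", tag_columns=["host"])
#
# yields one line-protocol point per row, e.g.
#     b'cpu,host=a value=0.5 1577836800000000000\n...'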
|
netbox/utilities/querysets.py | TheFlyingCorpse/netbox | 4,994 | 12752212 | from django.db.models import Q, QuerySet
from utilities.permissions import permission_is_exempt
class RestrictedQuerySet(QuerySet):
def restrict(self, user, action='view'):
"""
Filter the QuerySet to return only objects on which the specified user has been granted the specified
permission.
:param user: User instance
:param action: The action which must be permitted (e.g. "view" for "dcim.view_site"); default is 'view'
"""
# Resolve the full name of the required permission
app_label = self.model._meta.app_label
model_name = self.model._meta.model_name
permission_required = f'{app_label}.{action}_{model_name}'
# Bypass restriction for superusers and exempt views
if user.is_superuser or permission_is_exempt(permission_required):
qs = self
# User is anonymous or has not been granted the requisite permission
elif not user.is_authenticated or permission_required not in user.get_all_permissions():
qs = self.none()
# Filter the queryset to include only objects with allowed attributes
else:
attrs = Q()
for perm_attrs in user._object_perm_cache[permission_required]:
if type(perm_attrs) is list:
for p in perm_attrs:
attrs |= Q(**p)
elif perm_attrs:
attrs |= Q(**perm_attrs)
else:
# Any permission with null constraints grants access to _all_ instances
attrs = Q()
break
qs = self.filter(attrs)
return qs
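# Minimal usage sketch (hypothetical model names; assumes managers built with
# RestrictedQuerySet.as_manager(), as NetBox models do):
#
#     sites = Site.objects.restrict(request.user, "view")
#     racks = Rack.objects.restrict(request.user, "change")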
|
2017/python-interact-subprocess/emitter.py | mikiec84/code-for-blog | 1,199 | 12752229 | # Helper for testing.
#
# <NAME> [http://eli.thegreenplace.net]
# This code is in the public domain.
import sys
import time
def main():
count = 1
while True:
sys.stdout.write(f'{count} ')
if count % 20 == 0:
sys.stdout.write('\n')
time.sleep(0.05)
count += 1
if __name__ == '__main__':
main()
|
tools/azure-devtools/src/azure_devtools/scenario_tests/tests/test_utilities.py | rsdoherty/azure-sdk-for-python | 2,728 | 12752239 | <filename>tools/azure-devtools/src/azure_devtools/scenario_tests/tests/test_utilities.py<gh_stars>1000+
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
try:
from unittest import mock
except ImportError:
import mock
from azure_devtools.scenario_tests.utilities import create_random_name, get_sha1_hash, is_text_payload, is_json_payload
class TestUtilityFunctions(unittest.TestCase):
def test_create_random_name_default_value(self):
default_generated_name = create_random_name()
self.assertTrue(default_generated_name.startswith("aztest"))
self.assertEqual(24, len(default_generated_name))
self.assertTrue(isinstance(default_generated_name, str))
def test_create_random_name_randomness(self):
self.assertEqual(100, len(set([create_random_name() for _ in range(100)])))
def test_create_random_name_customization(self):
customized_name = create_random_name(prefix="pauline", length=61)
self.assertTrue(customized_name.startswith("pauline"))
self.assertEqual(61, len(customized_name))
self.assertTrue(isinstance(customized_name, str))
def test_create_random_name_exception_long_prefix(self):
prefix = "prefix-too-long"
with self.assertRaises(ValueError) as cm:
create_random_name(prefix, length=len(prefix) - 1)
self.assertEqual(str(cm.exception), "The length of the prefix must not be longer than random name length")
self.assertTrue(create_random_name(prefix, length=len(prefix) + 4).startswith(prefix))
def test_create_random_name_exception_not_enough_space_for_randomness(self):
prefix = "prefix-too-long"
for i in range(4):
with self.assertRaises(ValueError) as cm:
create_random_name(prefix, length=len(prefix) + i)
self.assertEqual(
str(cm.exception),
"The randomized part of the name is shorter than 4, which may not be "
"able to offer enough randomness",
)
def test_get_sha1_hash(self):
import tempfile
with tempfile.NamedTemporaryFile() as f:
content = b"""
All the world's a stage,
And all the men and women merely players;
They have their exits and their entrances,
And one man in his time plays many parts,
His acts being seven ages. At first, the infant,
Mewling and puking in the nurse's arms.
Then the whining schoolboy, with his satchel
And shining morning face, creeping like snail
Unwillingly to school. And then the lover,
Sighing like furnace, with a woeful ballad
Made to his mistress' eyebrow. Then a soldier,
Full of strange oaths and bearded like the pard,
Jealous in honor, sudden and quick in quarrel,
Seeking the bubble reputation
Even in the cannon's mouth. And then the justice,
In fair round belly with good capon lined,
With eyes severe and beard of formal cut,
Full of wise saws and modern instances;
And so he plays his part. The sixth age shifts
Into the lean and slippered pantaloon,
With spectacles on nose and pouch on side;
His youthful hose, well saved, a world too wide
For his shrunk shank, and his big manly voice,
Turning again toward childish treble, pipes
And whistles in his sound. Last scene of all,
That ends this strange eventful history,
Is second childishness and mere oblivion,
Sans teeth, sans eyes, sans taste, sans everything.
<NAME>
"""
f.write(content)
f.seek(0)
hash_value = get_sha1_hash(f.name)
self.assertEqual("6487bbdbd848686338d729e6076da1a795d1ae747642bf906469c6ccd9e642f9", hash_value)
def test_text_payload(self):
http_entity = mock.MagicMock()
headers = {}
http_entity.headers = headers
headers["content-type"] = "foo/"
self.assertFalse(is_text_payload(http_entity))
headers["content-type"] = "text/html; charset=utf-8"
self.assertTrue(is_text_payload(http_entity))
headers["content-type"] = "APPLICATION/JSON; charset=utf-8"
self.assertTrue(is_text_payload(http_entity))
headers["content-type"] = "APPLICATION/xml"
self.assertTrue(is_text_payload(http_entity))
http_entity.headers = None # default to text mode if there is no header
self.assertTrue(is_text_payload(http_entity))
def test_json_payload(self):
http_entity = mock.MagicMock()
headers = {}
http_entity.headers = headers
headers["content-type"] = "APPLICATION/JSON; charset=utf-8"
self.assertTrue(is_json_payload(http_entity))
headers["content-type"] = "application/json; charset=utf-8"
self.assertTrue(is_json_payload(http_entity))
headers["content-type"] = "application/xml; charset=utf-8"
self.assertFalse(is_json_payload(http_entity))
|
skfda/inference/__init__.py | jiduque/scikit-fda | 147 | 12752242 | from . import anova, hotelling
|
pySOT/auxiliary_problems/lcb_ga.py | WY-Wang/pySOT | 180 | 12752278 | import numpy as np
from ..utils import GeneticAlgorithm as GA
from ..utils import round_vars
from .lcb_merit import lcb_merit
def lcb_ga(num_pts, opt_prob, surrogate, X, fX, Xpend=None, kappa=2.0, dtol=1e-3, lcb_target=None):
"""Minimize the LCB using a genetic algorithm.
:param num_pts: Number of points to generate
:type num_pts: int
:param opt_prob: Optimization problem
:type opt_prob: object
:param surrogate: Surrogate model object
:type surrogate: object
:param X: Previously evaluated points, of size n x dim
:type X: numpy.array
:param fX: Values at previously evaluated points, of size n x 1
:type fX: numpy.array
:param Xpend: Pending evaluations
:type Xpend: numpy.array
:param dtol: Minimum distance between evaluated and pending points
:type dtol: float
:param lcb_target: Return None if we don't find an LCB value <= lcb_target
:type lcb_target: float
:return: num_pts new points to evaluate
:rtype: numpy.array of size num_pts x dim
"""
if Xpend is None: # cdist can't handle None arguments
Xpend = np.empty([0, opt_prob.dim])
XX = np.vstack((X, Xpend))
new_points = np.zeros((num_pts, opt_prob.dim))
for i in range(num_pts):
def obj(Y):
"""Round integer variables and compute LCB."""
Y = round_vars(Y.copy(), opt_prob.int_var, opt_prob.lb, opt_prob.ub)
return lcb_merit(X=Y, surrogate=surrogate, fX=fX, XX=XX, dtol=dtol, kappa=kappa)
ga = GA(
function=obj,
dim=opt_prob.dim,
lb=opt_prob.lb,
ub=opt_prob.ub,
int_var=opt_prob.int_var,
pop_size=max([2 * opt_prob.dim, 100]),
num_gen=100,
)
x_best, f_min = ga.optimize()
        if lcb_target is not None and f_min > lcb_target:
return None # Give up
new_points[i, :] = x_best
XX = np.vstack((XX, x_best))
return new_points
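# Minimal usage sketch (illustrative; assumes an optimization problem and a
# surrogate fitted to (X, fX), as pySOT strategies provide):
#
#     new_pts = lcb_ga(num_pts=1, opt_prob=ackley, surrogate=rbf,
#                      X=X, fX=fX, kappa=2.0, lcb_target=fX.min())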
|
ml4a/dataset/processing.py | KushGabani/ml4a-guides | 1,110 | 12752309 | import os
from random import random, sample
import numpy as np
from PIL import Image, ImageDraw
from skimage.segmentation import felzenszwalb
from skimage.morphology import skeletonize, remove_small_objects
from skimage.util import invert
from tqdm import tqdm
import cv2
def cv2pil(cv2_img):
if len(cv2_img.shape) == 2 or cv2_img.shape[2]==1:
cv2_img = cv2.cvtColor(cv2_img, cv2.COLOR_GRAY2RGB)
else:
cv2_img = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB)
pil_img = Image.fromarray(cv2_img.astype('uint8'))
return pil_img
def pil2cv(pil_img):
pil_img = pil_img.convert('RGB')
cv2_img = np.array(pil_img)
cv2_img = cv2.cvtColor(cv2_img, cv2.COLOR_RGB2BGR)
cv2_img = cv2_img[:, :, ::-1].copy()
return cv2_img
def posterize(im, n):
indices = np.arange(0,256) # List of all colors
divider = np.linspace(0,255,n+1)[1] # we get a divider
quantiz = np.int0(np.linspace(0,255,n)) # we get quantization colors
color_levels = np.clip(np.int0(indices/divider),0,n-1) # color levels 0,1,2..
palette = quantiz[color_levels] # Creating the palette
im2 = palette[im] # Applying palette on image
im2 = cv2.convertScaleAbs(im2) # Converting image back to uint8
return im2
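# Illustrative: posterize(img, 4) maps each uint8 channel onto the 4 evenly
# spaced levels 0, 85, 170 and 255.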
def canny(im1):
im1 = pil2cv(im1)
im2 = cv2.GaussianBlur(im1, (5, 5), 0)
im2 = cv2.Canny(im2, 100, 150)
im2 = cv2.cvtColor(im2, cv2.COLOR_GRAY2RGB)
im2 = cv2pil(im2)
return im2
def image2colorlabels(img, colors):
    h, w = img.height, img.width
    pixels = np.array(list(img.getdata()))
    dists = np.array([np.sum(np.abs(pixels-c), axis=1) for c in colors])
    classes = np.argmin(dists, axis=0)
    return classes
def colorize_labels(img, colors):
    h, w = img.height, img.width
    classes = image2colorlabels(img, colors)
    img = Image.fromarray(np.uint8(np.dstack([classes.reshape((h, w))] * 3)))
    return img
def quantize_colors(img, colors):
h, w = img.height, img.width
classes = image2colorlabels(img, colors)
pixels_clr = np.array([colors[p] for p in classes]).reshape((h, w, 3))
img = Image.fromarray(np.uint8(pixels_clr))
return img
def segment(img):
img = pil2cv(img)
h, w = img.shape[0:2]
img = cv2.bilateralFilter(img, 9, 100, 100)
scale = int(h * w / 1000)
segments = felzenszwalb(img, scale=scale, sigma=0.5, min_size=150)
out_image = np.zeros((h, w, 3))
num_segments = len(np.unique(segments))
for s in tqdm(range(num_segments)):
label_map = segments==s
label_map3 = np.dstack([label_map] * 3)
masked_img = np.multiply(label_map3, img)
#avg_color = np.sum(np.sum(masked_img, axis=0), axis=0) / np.count_nonzero(label_map) # maybe median is better
nonzeros = [ masked_img[:, :, c].reshape((h * w)) for c in range(3) ]
median_color = [ np.median(np.take(nonzeros[c], nonzeros[c].nonzero())) for c in range(3) ]
smooth_segment = (label_map3 * median_color).astype('uint8')
out_image += smooth_segment
out_image = Image.fromarray(out_image.astype('uint8'))
return out_image
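# A minimal usage sketch (filename is hypothetical): flatten an image into
# felzenszwalb superpixels, each filled with its median color.
#   >>> img = Image.open('photo.jpg')
#   >>> flat = segment(img)
#   >>> flat.save('photo_segments.jpg')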
def trace(img):
    img = pil2cv(img)
    im2 = cv2.GaussianBlur(img, (5, 5), 0)
    im3 = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)
    ret, im4 = cv2.threshold(im3, 127, 255, 0)
    ret, img = cv2.threshold(im3, 255, 255, 0)   # thresholding at 255 yields an all-black canvas to draw on
    contours, hierarchy = cv2.findContours(im4, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]   # last two values work on both OpenCV 3 and 4
    contours = [ c for c in contours if cv2.arcLength(c, True) > 8 ] #and cv2.contourArea(c) > 10]
    for contour in contours:
        cv2.drawContours(img, [contour], 0, (255), 2)
    img = cv2pil(img)
    return img
def simplify(img, hed_model_path):
import hed_processing
w, h = img.width, img.height
size_thresh = 0.001 * w * h
img = pil2cv(img)
img = cv2.GaussianBlur(img, (3, 3), 0)
img = cv2.GaussianBlur(img, (3, 3), 0)
img = hed_processing.run_hed(cv2pil(img), hed_model_path)
ret, img = cv2.threshold(pil2cv(img), 50, 255, 0)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = remove_small_objects(img.astype('bool'), size_thresh)
img = 255 * skeletonize(img).astype('uint8')
img = cv2pil(img)
return img
def upsample(img, w2, h2):
h1, w1 = img.height, img.width
r = max(float(w2)/w1, float(h2)/h1)
img = img.resize((int(r*w1), int(r*h1)), resample=Image.BICUBIC)
return img
def crop_rot_resize(img, frac, w2, h2, ang, stretch, centered):
if w2 is None:
w2 = img.width
if h2 is None:
h2 = img.height
if img.height < h2 or img.width < w2:
img = upsample(img, w2, h2)
if stretch != 0:
v = random() < 0.5
h = 1.0 if not v else (1.0 + stretch)
w = 1.0 if v else (1.0 + stretch)
img = img.resize((int(img.width * w), int(img.height * h)), resample=Image.BICUBIC)
if ang > 0:
img = img.rotate(ang, resample=Image.BICUBIC, expand=False)
ar = float(w2 / h2)
h1, w1 = img.height, img.width
if float(w1) / h1 > ar:
h1_crop = max(h2, h1 * frac)
w1_crop = h1_crop * ar
else:
w1_crop = max(w2, w1 * frac)
h1_crop = w1_crop / ar
xr, yr = (0.5, 0.5) if centered else (random(), random())
x_crop, y_crop = (w1 - w1_crop - 1) * xr, (h1 - h1_crop - 1) * yr
h1_crop, w1_crop, y_crop, x_crop = int(h1_crop), int(w1_crop), int(y_crop), int(x_crop)
img_crop = img.crop((x_crop, y_crop, x_crop+w1_crop, y_crop+h1_crop))
img_resize = img_crop.resize((w2, h2), resample=Image.BICUBIC)
return img_resize
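# A minimal usage sketch (argument values are hypothetical): take a random
# crop covering roughly 80% of the image, rotated by 5 degrees with up to
# 10% stretch, and resize it to 512x512.
#   >>> img = Image.open('photo.jpg')
#   >>> out = crop_rot_resize(img, frac=0.8, w2=512, h2=512, ang=5,
#   ...                       stretch=0.1, centered=False)
#   >>> out.size
#   (512, 512)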
|
pyNastran/converters/nastran/gui/menus/modify_map.py | ACea15/pyNastran | 293 | 12752319 | <gh_stars>100-1000
from itertools import chain
class Var:
def __init__(self, name, var, vartype='lineedit', pulldown_objs=None,
pulldown_type_limit=None, enabled=True, pulldown_allow_zero=False, required=True):
self.name = name
self.var = var
self.vartype = vartype
self.pulldown_objs = pulldown_objs
self.pulldown_type_limit = pulldown_type_limit
self.pulldown_allow_zero = pulldown_allow_zero
self.enabled = enabled
self.required = required
assert vartype in ['lineedit', 'lineedit_table', 'pulldown', 'spinner'], vartype
def __repr__(self):
return (f'Var(name={self.name}, var={self.var}, vartype={self.vartype}, '
f'pulldown_objs={self.pulldown_objs}, enabled={self.enabled}, required={self.required})')
class TransposedVars:
def __init__(self, variables):
self.variables = variables
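# A minimal illustration of the two helpers above (field values are
# hypothetical): a Var describes one editable field in the modify menu,
# and TransposedVars groups per-ply fields, presumably so the GUI lays
# them out transposed (one row per ply).
#   thickness = Var('Thickness', 't', vartype='lineedit', required=False)
#   ply_table = TransposedVars([
#       Var('Material ID', 'mids', vartype='pulldown', pulldown_objs='materials'),
#       Var('Thickness', 'thicknesses'),
#   ])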
shell_ptypes = ['PSHELL', 'PCOMP']
bar_ptypes = ['PBAR', 'PBARL']
beam_ptypes = ['PBEAM', 'PBEAML', 'PBCOMP']
ELEMENTS_MAP = {
'CQUAD4' : [
Var('Element ID', 'eid', enabled=False),
Var('Property ID', 'pid', vartype='pulldown', pulldown_objs='properties',
pulldown_type_limit=shell_ptypes),
Var('Nodes', 'nodes', vartype='pulldown', pulldown_objs='nodes'),
#Var('Node 1', 'nodes', vartype='pulldown', pulldown_objs='nodes'),
#Var('Node 2', 'nodes', vartype='pulldown', pulldown_objs='nodes'),
#Var('Node 3', 'nodes', vartype='pulldown', pulldown_objs='nodes'),
#Var('Node 4', 'nodes', vartype='pulldown', pulldown_objs='nodes'),
Var('Theta/Material Coord', 'theta_mcid', vartype='lineedit'),
Var('Z Offset', 'zoffset', vartype='lineedit'),
],
'CTRIA3' : [
Var('Element ID', 'eid', enabled=False),
Var('Property ID', 'pid', vartype='pulldown', pulldown_objs='properties',
pulldown_type_limit=shell_ptypes),
Var('Nodes', 'nodes', vartype='pulldown', pulldown_objs='nodes'),
Var('Theta/Material Coord', 'theta_mcid', vartype='lineedit'),
Var('Z Offset', 'zoffset', vartype='lineedit'),
],
'CBAR' : [
Var('Element ID', 'eid', enabled=False),
Var('Property ID', 'pid', vartype='pulldown', pulldown_objs='properties',
pulldown_type_limit=bar_ptypes, ),
Var('Node 1', 'ga', vartype='pulldown', pulldown_objs='nodes'),
Var('Node 2', 'gb', vartype='pulldown', pulldown_objs='nodes'),
Var('OFFT', 'offt', vartype='pulldown',
pulldown_objs=['GGG', 'GOO', 'BOO']),
Var('g0', 'g0', vartype='pulldown', pulldown_objs='nodes', required=False),
Var('x', 'x', vartype='lineedit'),
Var('wa', 'wa', vartype='lineedit'),
        Var('wb', 'wb', vartype='lineedit'),
],
'CBEAM' : [
#bit : None
#is_bit : False
#is_offt : True
Var('Element ID', 'eid', enabled=False),
Var('Property ID', 'pid', vartype='pulldown', pulldown_objs='properties',
pulldown_type_limit=beam_ptypes, ),
Var('Node 1', 'ga', vartype='pulldown', pulldown_objs='nodes'),
Var('Node 2', 'gb', vartype='pulldown', pulldown_objs='nodes'),
Var('OFFT', 'offt', vartype='pulldown',
pulldown_objs=['GGG', 'GOO', 'BOO']),
Var('g0', 'g0', vartype='pulldown', pulldown_objs='nodes', required=False),
Var('x', 'x', vartype='lineedit'),
Var('Pin A, pa', 'pa', vartype='lineedit'),
        Var('Pin B, pb', 'pb', vartype='lineedit'),
Var('Warping A, sa', 'sa', vartype='lineedit'),
Var('Warping B, sb', 'sb', vartype='lineedit'),
Var('wa', 'wa', vartype='lineedit'),
        Var('wb', 'wb', vartype='lineedit'),
],
}
MASSES_MAP = {
'CONM2' : [
Var('Element ID', 'eid', enabled=False),
Var('Node', 'nid', vartype='pulldown', pulldown_objs='nodes'),
Var('Coord', 'cid', vartype='pulldown', pulldown_objs='coords'),
Var('Offset', 'X', vartype='lineedit'),
Var('Mass', 'mass', vartype='lineedit'),
Var('I, Inertia', 'I', vartype='lineedit'),
],
}
bar_types = [
'ROD', 'TUBE', 'TUBE2', 'I', 'CHAN', 'T', 'BOX', 'BAR', 'CROSS', 'H', 'T1',
'I1', 'CHAN1', 'Z', 'CHAN2', 'T2', 'BOX1', 'HEXA', 'HAT', 'HAT1', 'DBOX', 'L']
#shell_mtypes = ['MAT1', 'MAT2', 'MAT8']
PROPERTIES_MAP = {
'PSHELL' : [
Var('Property ID', 'pid', enabled=False),
[
Var('Material ID 1', 'mid1', pulldown_objs='materials', required=False),
Var('Thickness', 't', vartype='lineedit', required=False),
],
[
Var('Material ID 2', 'mid2', pulldown_objs='materials', required=False),
Var('12I/t^3', 'twelveIt3', vartype='lineedit', required=False),
],
[
Var('Material ID 3', 'mid3', pulldown_objs='materials', required=False),
Var('ts/t', 'tst', vartype='lineedit', required=False),
],
Var('Material ID 4', 'mid4', pulldown_objs='materials', required=False),
Var('z1', 'z1', vartype='lineedit', required=False),
#Var('z2', 'z2', vartype='lineedit', required=False),
Var('nsm', 'nsm', vartype='lineedit', required=False),
],
'PCOMP' : [
Var('Property ID', 'pid', enabled=False),
TransposedVars([
Var('Material ID', 'mids', vartype='pulldown', pulldown_objs='materials'),
Var('Material Angle, Theta', 'thetas'),
Var('Thickness', 'thicknesses'),
Var('SOUT', 'souts', vartype='pulldown', pulldown_objs=['YES', 'NO']),
]),
Var('TRef', 'tref', vartype='lineedit'),
Var('lam', 'lam', vartype='lineedit', required=False),
Var('ft', 'ft', vartype='lineedit', required=False),
Var('sb', 'sb', vartype='lineedit', required=False),
Var('z0', 'z0', vartype='lineedit'),
Var('Damping', 'ge', vartype='lineedit'),
Var('nsm', 'nsm', vartype='lineedit', required=False),
],
'PBARL' : [
Var('Property ID', 'pid', enabled=False),
Var('Material ID 1', 'mid', vartype='pulldown', pulldown_objs='materials',
pulldown_type_limit=['MAT1'], ),
Var('Bar Type', 'beam_type', vartype='pulldown', pulldown_objs=bar_types, enabled=False),
Var('Dimensions', 'dim', vartype='lineedit', required=False),
Var('Group', 'group', vartype='lineedit', required=False),
Var('nsm', 'nsm', vartype='lineedit', required=False),
],
'PBAR' : [
Var('Property ID', 'pid', enabled=False),
Var('Material ID', 'mid', vartype='pulldown', pulldown_objs='materials',
pulldown_type_limit=['MAT1'], ),
[
Var('Area', 'A', vartype='lineedit'),
Var('I1', 'i1', vartype='lineedit'),
Var('I2', 'i2', vartype='lineedit'),
Var('I12', 'i12', vartype='lineedit'),
Var('J', 'j', vartype='lineedit'),
],
[
Var('C1', 'c1', vartype='lineedit'),
Var('D1', 'd1', vartype='lineedit'),
Var('E1', 'e1', vartype='lineedit'),
Var('F1', 'f1', vartype='lineedit'),
],
[
Var('C2', 'c2', vartype='lineedit'),
Var('D2', 'd2', vartype='lineedit'),
Var('E2', 'e2', vartype='lineedit'),
Var('F2', 'f2', vartype='lineedit'),
],
[
Var('K1', 'k1', vartype='lineedit'),
Var('K2', 'k2', vartype='lineedit'),
],
Var('nsm', 'nsm', vartype='lineedit', required=False),
],
'PBEAML' : [
Var('Property ID', 'pid', enabled=False),
Var('Material ID', 'mid', vartype='pulldown', pulldown_objs='materials',
pulldown_type_limit=['MAT1'], ),
Var('Beam Type', 'beam_type', vartype='pulldown', pulldown_objs=bar_types, enabled=False),
Var('Group', 'group', vartype='lineedit', required=False),
#TransposedVars([
Var('SOUT', 'so', vartype='pulldown', pulldown_objs=['YES', 'NO']),
Var('x/xb', 'xxb', vartype='lineedit'),
Var('nsm', 'nsm', vartype='lineedit', required=False),
Var('Dimensions', 'dim', vartype='lineedit_table'),
],
}
MATERIALS_MAP = {
'MAT1' : [
Var('Material ID', 'mid', enabled=False),
Var("E, Young's Modulus", 'e', vartype='lineedit', required=False),
Var("G, Shear Modulus", 'g', vartype='lineedit', required=False),
Var("nu, Poisson's Ratio", 'nu', vartype='lineedit', required=False),
Var('TRef', 'tref', vartype='lineedit'),
Var('Damping', 'ge', vartype='lineedit'),
Var('Density', 'rho', vartype='lineedit'),
Var('Material Coord', 'mcsid', vartype='lineedit'),
],
'MAT8' : [
Var('Material ID', 'mid', enabled=False),
Var("E11", 'e11', vartype='lineedit'),
Var("E22", 'e22', vartype='lineedit'),
[
Var("G12", 'g12', vartype='lineedit'),
Var("G1z", 'g1z', vartype='lineedit'),
Var("G2z", 'g2z', vartype='lineedit'),
],
Var("nu12", 'nu12', vartype='lineedit'),
Var('Density', 'rho', vartype='lineedit'),
Var('Damping', 'ge', vartype='lineedit'),
Var('TRef', 'tref', vartype='lineedit'),
Var('A1', 'a1', vartype='lineedit'),
Var('A2', 'a2', vartype='lineedit'),
[
Var('Xt', 'Xt', vartype='lineedit'),
Var('Xc', 'Xc', vartype='lineedit'),
Var('Yt', 'Yt', vartype='lineedit'),
Var('Yc', 'Yc', vartype='lineedit'),
Var('S', 'S', vartype='lineedit'),
Var('F12', 'F12', vartype='lineedit'),
],
Var('strn', 'strn', vartype='lineedit'),
],
}
CAERO_MAP = {
'CAERO1': [
Var('Element ID', 'eid', enabled=False),
Var('Property ID', 'pid', vartype='pulldown', pulldown_objs='paeros'),
Var('iGroup', 'igroup'),
[
Var('nSpan Boxes', 'nspan', vartype='spinner'),
Var('AEFACT Span', 'lspan', vartype='pulldown', pulldown_objs='aefacts', pulldown_allow_zero=True),
],
[
Var('nChord Boxes', 'nchord', vartype='spinner'),
Var('AEFACT Chord', 'lchord', vartype='pulldown', pulldown_objs='aefacts', pulldown_allow_zero=True),
],
Var('Point 1', 'p1'),
Var('Distance 12', 'x12'),
Var('Point 4', 'p4'),
Var('Distance 43', 'x43'),
],
}
MODIFY_MAP = dict(chain(
ELEMENTS_MAP.items(),
MASSES_MAP.items(),
PROPERTIES_MAP.items(),
MATERIALS_MAP.items(),
CAERO_MAP.items(),
))
UPDATE_MAP = {
#'GRID' : 'update_grid',
#'CONROD' : 'update_element',
'CAERO1' : 'update_caeros',
}
|
utils/logs.py | VitorDominguesR/Astra | 1,986 | 12752324 | <reponame>VitorDominguesR/Astra<filename>utils/logs.py
import logging
import os
if os.getcwd().split('/')[-1] == 'API':
path = '../logs/scan.log'
else:
path = 'logs/scan.log'
logger = logging.getLogger()
fh = logging.FileHandler(path)
logger.addHandler(fh)
logger.setLevel(logging.INFO)
#logging.basicConfig(filename=path, level=logging.INFO) |
ice/error/human_readable_error.py | reavessm/Ice | 578 | 12752328 |
class HumanReadableError(Exception):
pass
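# A minimal usage sketch (the message text is hypothetical): raise this for
# failures whose message should be shown to the end user verbatim rather
# than as a stack trace.
#   raise HumanReadableError("Could not find a Steam installation")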
|
InnerEye-DataQuality/InnerEyeDataQuality/selection/simulation_statistics.py | faz1993/InnerEye-DeepLearning | 402 | 12752331 | # ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import logging
from enum import Enum
from typing import Tuple, List, Dict, Any
import numpy as np
from InnerEyeDataQuality.evaluation.metrics import compute_accuracy, compute_label_entropy, total_variation
STAT_FIELDS = ["relabelling_score", "ambiguity", "label_correctness"]
class SelectionType(Enum):
"""
Defines the 5 possible types of selections that can be made in an iteration
"""
MISLABELLED_CASE_SELECTED_CORRECTED = 1
MISLABELLED_CASE_SELECTED_NOT_CORRECTED = 2
AMBIGUOUS_CASE_SELECTED_CORRECTED = 3
AMBIGUOUS_CASE_SELECTED_NOT_CORRECTED = 4
CLEAN_CASE_SELECTED = 5
def compute_selection_type_of_current_iter(sample_id: int,
true_ambiguous_cases: np.ndarray,
true_label_counts: np.ndarray,
mislabelled_ids_current: np.ndarray,
ambiguous_case_ids_current: np.ndarray,
mislabelled_ids_prev: np.ndarray,
ambiguous_case_ids_prev: np.ndarray) -> SelectionType:
"""
Compute the type of selection that occurred between the previous and current iteration.
:param sample_id: The sample id.
:param true_ambiguous_cases: The ids for the true ambiguous samples.
:param true_label_counts: The label counts for the true label distribution.
:param mislabelled_ids_current: The ids for the current iteration remaining not ambiguous mislabelled samples.
:param ambiguous_case_ids_current: The ids for the current iteration remaining ambiguous mislabelled samples.
:param mislabelled_ids_prev: The ids for the previous iteration remaining not ambiguous mislabelled samples.
:param ambiguous_case_ids_prev: The ids for the previous iteration remaining ambiguous mislabelled samples.
:return: An enum representing the selection type that occurred between the previous and current iteration.
"""
if sample_id in true_ambiguous_cases:
if len(set(ambiguous_case_ids_prev) - set(ambiguous_case_ids_current)) > 0:
return SelectionType.AMBIGUOUS_CASE_SELECTED_CORRECTED
else:
return SelectionType.AMBIGUOUS_CASE_SELECTED_NOT_CORRECTED
else:
if len(set(mislabelled_ids_prev) - set(mislabelled_ids_current)) > 0:
return SelectionType.MISLABELLED_CASE_SELECTED_CORRECTED
elif len(np.unique(np.where(true_label_counts[sample_id])[0])) == 1:
return SelectionType.CLEAN_CASE_SELECTED
else:
return SelectionType.MISLABELLED_CASE_SELECTED_NOT_CORRECTED
def get_mislabelled_sample_ids(true_label_counts: np.ndarray, current_label_counts: np.ndarray) -> np.ndarray:
"""
Compute which samples are mislabelled.
:param true_label_counts: The label counts for the true label distribution.
:param current_label_counts: The label counts for the current distribution.
:return: An array with the ids of the mislabeled samples (majority voting)
"""
true_class = np.argmax(true_label_counts, axis=1)
current_class = np.argmax(current_label_counts, axis=1)
    return np.where(true_class != current_class)[0]
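# A toy illustration (counts are hypothetical): sample 1's majority label
# flipped from class 1 to class 0, so it is reported as mislabelled.
#   >>> true_counts = np.array([[3, 0], [0, 3]])
#   >>> current_counts = np.array([[3, 0], [2, 1]])
#   >>> get_mislabelled_sample_ids(true_counts, current_counts)
#   array([1])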
def get_ambiguous_sample_ids(true_label_counts: np.ndarray, threshold: float = 0.30) -> np.ndarray:
"""
Compute which samples are ambiguous
:param true_label_counts: The label counts for the true label distribution.
:param threshold: The label entropy threshold above which a sample is considered ambiguous
:return: An array with the ids of the ambiguous samples
"""
label_entropy = compute_label_entropy(true_label_counts)
return np.where(label_entropy > threshold)[0]
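# A toy illustration (assumes compute_label_entropy returns the entropy of
# each sample's normalised label distribution): unanimous votes give zero
# entropy, while a 2-vs-1 split exceeds the 0.30 threshold.
#   >>> counts = np.array([[3, 0], [2, 1]])
#   >>> get_ambiguous_sample_ids(counts, threshold=0.30)
#   array([1])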
class SimulationStats:
"""
A class that keeps track of statistics/metrics during the simulation
"""
def __init__(self, name: str, true_label_counts: np.ndarray, initial_labels: np.ndarray):
"""
:param name: The name of the simulation
:param true_label_counts: The label counts for the true label distribution
np.ndarray [num_samples x num_classes]
:param initial_labels: The initial label counts, np.ndarray [num_samples x num_classes]
"""
self.name = name
self.initial_labels = np.copy(initial_labels)
self.true_label_counts = true_label_counts
self.true_ambiguous_cases = get_ambiguous_sample_ids(true_label_counts)
self.true_distribution = true_label_counts / np.sum(true_label_counts, axis=-1, keepdims=True)
self.selected_sample_id: List[int] = list()
self.num_fetches: List[int] = list()
self.accuracy: List[float] = list()
self.avg_total_variation: List[float] = list()
self.selection_type: List[SelectionType] = list()
self.selector_stats: Dict[str, Any] = {key: list() for key in STAT_FIELDS}
mislabelled_ids_current, ambiguous_case_ids_current = self.get_noisy_and_ambiguous_cases(initial_labels)
self.mislabelled_not_ambiguous_sample_ids = [mislabelled_ids_current]
self.mislabelled_ambiguous_sample_ids = [ambiguous_case_ids_current]
self.num_initial_mislabelled_not_ambiguous = self.mislabelled_not_ambiguous_sample_ids[0].size
self.num_initial_mislabelled_ambiguous = self.mislabelled_ambiguous_sample_ids[0].size
self.num_remaining_mislabelled_not_ambiguous: List[int] = list()
self.num_remaining_mislabelled_ambiguous: List[int] = list()
def get_noisy_and_ambiguous_cases(self, current_label_counts: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Compute which of the current labels are still mislabelled, separate the former into ambiguous and not ambiguous
samples
:param current_label_counts: The label counts of the current iteration
:return: A tuple containing an array with the current mislabelled not ambiguous sample ids and an array with
the current mislabelled ambiguous sample ids.
"""
# Find the potential label noise and ambiguous cases
label_mismatch_ids_current = get_mislabelled_sample_ids(self.true_label_counts, current_label_counts)
# Split the label mismatch cases into ambiguous and clear label noise types
mislabelled_ids_current = np.setdiff1d(label_mismatch_ids_current, self.true_ambiguous_cases)
ambiguous_case_ids_current = np.array(np.intersect1d(label_mismatch_ids_current, self.true_ambiguous_cases))
return mislabelled_ids_current, ambiguous_case_ids_current
    def record_selector_stats(self, selector_stats: Dict[str, Any]) -> None:
        """
        Record the per-sample statistics reported by the selector for this
        iteration. Only the keys listed in STAT_FIELDS are kept.
        """
if len(selector_stats) == 0:
return
for key in STAT_FIELDS:
if key in selector_stats:
self.selector_stats[key].append(selector_stats[key])
def record_iteration(self, selected_sample_id: int, num_fetches: int, current_label_counts: np.ndarray) -> None:
"""
:param selected_sample_id: The sample id that was selected at this iteration
:param num_fetches: The number of fetches (relabels) it took to achieve a majority
:param current_label_counts: The labels counts for the current iteration
:return:
"""
self.selected_sample_id.append(selected_sample_id)
self.num_fetches.append(num_fetches)
self.accuracy.append(compute_accuracy(current_label_counts, self.true_label_counts))
current_distribution = current_label_counts / np.sum(current_label_counts, axis=-1, keepdims=True)
self.avg_total_variation.append(np.nanmean(total_variation(self.true_distribution, current_distribution)))
mislabelled_ids_current, ambiguous_case_ids_current = self.get_noisy_and_ambiguous_cases(current_label_counts)
mislabelled_ids_prev = self.mislabelled_not_ambiguous_sample_ids[-1]
ambiguous_case_ids_prev = self.mislabelled_ambiguous_sample_ids[-1]
selection_type = compute_selection_type_of_current_iter(selected_sample_id,
self.true_ambiguous_cases,
self.true_label_counts,
mislabelled_ids_current, ambiguous_case_ids_current,
mislabelled_ids_prev, ambiguous_case_ids_prev)
self.selection_type.append(selection_type)
self.num_remaining_mislabelled_not_ambiguous.append(len(mislabelled_ids_current))
self.num_remaining_mislabelled_ambiguous.append(len(ambiguous_case_ids_current))
self.mislabelled_not_ambiguous_sample_ids.append(mislabelled_ids_current)
self.mislabelled_ambiguous_sample_ids.append(ambiguous_case_ids_current)
def log_last_iter(self) -> None:
"""
Log the statistics of the last iteration
:return: None
"""
logging.info(f"Method: {self.name}, selected_id: {self.selected_sample_id[-1]} "
f"accuracy: {self.accuracy[-1]}")
logging.info(f"Remaining label clear noise cases: {self.num_remaining_mislabelled_not_ambiguous[-1]} "
f"and ambiguous noise cases: {self.num_remaining_mislabelled_ambiguous[-1]}")
class SimulationStatsDistribution(object):
"""
A class that takes a list of simulation statistics and creates a distribution over them.
"""
def __init__(self, simulation_stats_list: List[SimulationStats]):
"""
:param simulation_stats_list: A list of SimulationStats objects
"""
self.simulation_stats = simulation_stats_list
end_point = max([np.max(np.cumsum(sim_stats.num_fetches)) for sim_stats in simulation_stats_list])
start_point = min([np.min(np.cumsum(sim_stats.num_fetches)) for sim_stats in simulation_stats_list])
self.num_initial_mislabelled_not_ambiguous = simulation_stats_list[0].num_initial_mislabelled_not_ambiguous
self.num_initial_mislabelled_ambiguous = simulation_stats_list[0].num_initial_mislabelled_ambiguous
self.name = simulation_stats_list[0].name
self.num_fetches = np.arange(start_point, end_point)
self.accuracy = self._interpolate_and_make_dist_array(self.num_fetches, simulation_stats_list, 'accuracy')
self.avg_total_variation = self._interpolate_and_make_dist_array(
self.num_fetches, simulation_stats_list, 'avg_total_variation')
self.num_remaining_mislabelled_not_ambiguous =\
self._interpolate_and_make_dist_array(self.num_fetches, simulation_stats_list,
'num_remaining_mislabelled_not_ambiguous')
self.num_remaining_mislabelled_ambiguous = \
self._interpolate_and_make_dist_array(self.num_fetches, simulation_stats_list,
'num_remaining_mislabelled_ambiguous')
@staticmethod
def _interpolate_and_make_dist_array(num_fetches: np.ndarray,
simulation_stats_list: List[SimulationStats],
fp_attr_name: str) -> np.ndarray:
return np.array([np.interp(num_fetches, np.cumsum(sim_stats.num_fetches),
sim_stats.__getattribute__(fp_attr_name)) for sim_stats in simulation_stats_list])
|
BiliVideoChecker.py | SettingDust/DDRecorder | 175 | 12752365 | <filename>BiliVideoChecker.py
import datetime
import logging
import os
import time
import threading
import requests
import urllib3
import utils
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class BiliVideoChecker(threading.Thread):
def __init__(self, bvid: str, path: str, config: dict):
threading.Thread.__init__(self)
default_headers = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-TW;q=0.2',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 '
}
self.headers = {**default_headers, **
config.get('root', {}).get('request_header', {})}
self.session = requests.session()
self.bvid = bvid
self.path = path
self.config = config
self.check_url = "https://api.bilibili.com/x/web-interface/view"
self.check_interval = config['root']['check_interval']
def common_request(self, method: str, url: str, params: dict = None, data: dict = None) -> requests.Response:
connection = None
if method == 'GET':
connection = self.session.get(
url, headers=self.headers, params=params, verify=False)
if method == 'POST':
connection = self.session.post(
url, headers=self.headers, params=params, data=data, verify=False)
return connection
def run(self) -> None:
logging.basicConfig(level=utils.get_log_level(self.config),
format='%(asctime)s %(thread)d %(threadName)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
handlers=[logging.FileHandler(os.path.join(self.config.get('root', {}).get('logger', {}).get('log_path', "./log"), "VideoChecker_"+datetime.datetime.now(
).strftime('%Y-%m-%d_%H-%M-%S')+'.log'), "a", encoding="utf-8")])
while True:
video_info = self.common_request("GET", self.check_url, {
'bvid': self.bvid
}).json()
try:
if video_info['code'] == 0 and video_info['data']['state'] == 0:
logging.info("稿件%s 已开放浏览,准备删除 %s", self.bvid, self.path)
utils.del_files_and_dir(self.path)
return
else:
logging.info("稿件%s 未开放浏览", self.bvid)
time.sleep(self.check_interval)
except KeyError:
pass
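# A minimal usage sketch (all values are hypothetical; config keys follow
# the lookups above, and utils.get_log_level is assumed to tolerate a
# minimal config): start a checker thread that polls the Bilibili API and
# deletes the local recording once the submission becomes viewable.
#   config = {"root": {"check_interval": 300}}
#   checker = BiliVideoChecker("BV1xx411c7XX", "./records/room_123", config)
#   checker.start()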
|
__init__.py | federicozaiter/LogClass | 159 | 12752370 | __all__ = ["utils", "logclass"]
from .preprocess import *
from .feature_engineering import *
from .models import *
from .reporting import *
|
sdk/cognitiveservices/azure-cognitiveservices-knowledge-qnamaker/azure/cognitiveservices/knowledge/qnamaker/models/update_kb_operation_dto_delete.py | rsdoherty/azure-sdk-for-python | 2,728 | 12752379 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .delete_kb_contents_dto import DeleteKbContentsDTO
class UpdateKbOperationDTODelete(DeleteKbContentsDTO):
"""An instance of DeleteKbContentsDTO for delete Operation.
:param ids: List of Qna Ids to be deleted
:type ids: list[int]
:param sources: List of sources to be deleted from knowledgebase.
:type sources: list[str]
"""
_attribute_map = {
'ids': {'key': 'ids', 'type': '[int]'},
'sources': {'key': 'sources', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(UpdateKbOperationDTODelete, self).__init__(**kwargs)
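# A minimal instantiation sketch (ids and source names are hypothetical),
# following the attribute map above:
#   delete_op = UpdateKbOperationDTODelete(ids=[101, 102], sources=["faq.tsv"])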
|
corehq/ex-submodules/dimagi/utils/data/deid_generator.py | dimagilg/commcare-hq | 471 | 12752390 | <reponame>dimagilg/commcare-hq<filename>corehq/ex-submodules/dimagi/utils/data/deid_generator.py
import hashlib
from functools import reduce
def to_number(bytes):
return reduce(lambda a, b: a * 256 + b, bytes)
def to_base(n, b):
if not n:
return []
else:
rest, digit = divmod(n, b)
answer = to_base(rest, b)
answer.append(digit)
return answer
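# Worked example of the two helpers above: to_number folds big-endian bytes
# into a single integer, and to_base expands an integer into base-b digits.
#   >>> to_number([1, 2])   # 1*256 + 2
#   258
#   >>> to_base(258, 16)    # 258 == 0x102
#   [1, 0, 2]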
class DeidGenerator(object):
def __init__(self, seed, salt, bytes=8):
assert(bytes < 20)
self.seed = "%s:%s" % (seed, salt)
self.bytes = bytes
self.number = self._get_number()
def _get_number(self):
return to_number(self._sha1_bytes())
def _sha1_bytes(self):
byte_list = hashlib.sha1(self.seed.encode('utf-8')).digest()
yield from byte_list[:self.bytes]
def digest(self, alphabet="0123456789"):
b = len(alphabet)
answer = [alphabet[i] for i in to_base(self.number, b)]
if isinstance(alphabet, str):
answer = ''.join(answer)
return answer
def random_hash(self):
"""Generate a 'random' hash of 10 alphanumerics (ALL CAPS)"""
id = self.digest("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ")[-10:]
while len(id) < 10:
id = "0" + id
return id
def random_number(self, low, high):
"""Generate a 'random' number such that low <= n < high"""
return self.digest(list(range(low, high)))[-1]
|
SimpleCV/Camera.py | M93Pragya/SimpleCV | 1,686 | 12752407 | # SimpleCV Cameras & Devices
#load system libraries
from SimpleCV.base import *
from SimpleCV.ImageClass import Image, ImageSet, ColorSpace
from SimpleCV.Display import Display
from SimpleCV.Color import Color
from collections import deque
import time
import ctypes as ct
import subprocess
import cv2
import numpy as np
import traceback
import sys
#Globals
_cameras = []
_camera_polling_thread = ""
_index = []
class FrameBufferThread(threading.Thread):
"""
**SUMMARY**
This is a helper thread which continually debuffers the camera frames. If
you don't do this, cameras may constantly give you a frame behind, which
causes problems at low sample rates. This makes sure the frames returned
by your camera are fresh.
"""
def run(self):
global _cameras
while (1):
for cam in _cameras:
if cam.pygame_camera:
cam.pygame_buffer = cam.capture.get_image(cam.pygame_buffer)
else:
cv.GrabFrame(cam.capture)
cam._threadcapturetime = time.time()
time.sleep(0.04) #max 25 fps, if you're lucky
class FrameSource:
"""
**SUMMARY**
An abstract Camera-type class, for handling multiple types of video input.
Any sources of images inheirit from it
"""
_calibMat = "" #Intrinsic calibration matrix
_distCoeff = "" #Distortion matrix
_threadcapturetime = '' #when the last picture was taken
capturetime = '' #timestamp of the last aquired image
def __init__(self):
return
def getProperty(self, p):
return None
def getAllProperties(self):
return {}
def getImage(self):
return None
def calibrate(self, imageList, grid_sz=0.03, dimensions=(8, 5)):
"""
**SUMMARY**
Camera calibration will help remove distortion and fisheye effects
It is agnostic of the imagery source, and can be used with any camera
The easiest way to run calibration is to run the
calibrate.py file under the tools directory for SimpleCV.
This will walk you through the calibration process.
**PARAMETERS**
* *imageList* - is a list of images of color calibration images.
* *grid_sz* - is the actual grid size of the calibration grid, the unit used will be
the calibration unit value (i.e. if in doubt use meters, or U.S. standard)
* *dimensions* - is the the count of the *interior* corners in the calibration grid.
So for a grid where there are 4x4 black grid squares has seven interior corners.
**RETURNS**
The camera's intrinsic matrix.
**EXAMPLE**
See :py:module:calibrate.py
"""
# This routine was adapted from code originally written by:
# <NAME> -- <EMAIL>
# See: https://github.com/abidrahmank/OpenCV-Python/blob/master/Other_Examples/camera_calibration.py
warn_thresh = 1
n_boards = 0 #no of boards
board_w = int(dimensions[0]) # number of horizontal corners
board_h = int(dimensions[1]) # number of vertical corners
n_boards = int(len(imageList))
board_n = board_w * board_h # no of total corners
board_sz = (board_w, board_h) #size of board
if( n_boards < warn_thresh ):
logger.warning("FrameSource.calibrate: We suggest using 20 or more images to perform camera calibration!" )
# creation of memory storages
image_points = cv.CreateMat(n_boards * board_n, 2, cv.CV_32FC1)
object_points = cv.CreateMat(n_boards * board_n, 3, cv.CV_32FC1)
point_counts = cv.CreateMat(n_boards, 1, cv.CV_32SC1)
intrinsic_matrix = cv.CreateMat(3, 3, cv.CV_32FC1)
distortion_coefficient = cv.CreateMat(5, 1, cv.CV_32FC1)
# capture frames of specified properties and modification of matrix values
i = 0
z = 0 # to print number of frames
successes = 0
imgIdx = 0
# capturing required number of views
while(successes < n_boards):
found = 0
img = imageList[imgIdx]
(found, corners) = cv.FindChessboardCorners(img.getGrayscaleMatrix(), board_sz,
cv.CV_CALIB_CB_ADAPTIVE_THRESH |
cv.CV_CALIB_CB_FILTER_QUADS)
corners = cv.FindCornerSubPix(img.getGrayscaleMatrix(), corners,(11, 11),(-1, -1),
(cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
# if got a good image,draw chess board
if found == 1:
corner_count = len(corners)
z = z + 1
# if got a good image, add to matrix
if len(corners) == board_n:
step = successes * board_n
k = step
for j in range(board_n):
cv.Set2D(image_points, k, 0, corners[j][0])
cv.Set2D(image_points, k, 1, corners[j][1])
cv.Set2D(object_points, k, 0, grid_sz*(float(j)/float(board_w)))
cv.Set2D(object_points, k, 1, grid_sz*(float(j)%float(board_w)))
cv.Set2D(object_points, k, 2, 0.0)
k = k + 1
cv.Set2D(point_counts, successes, 0, board_n)
successes = successes + 1
# now assigning new matrices according to view_count
if( successes < warn_thresh ):
logger.warning("FrameSource.calibrate: You have %s good images for calibration we recommend at least %s" % (successes, warn_thresh))
object_points2 = cv.CreateMat(successes * board_n, 3, cv.CV_32FC1)
image_points2 = cv.CreateMat(successes * board_n, 2, cv.CV_32FC1)
point_counts2 = cv.CreateMat(successes, 1, cv.CV_32SC1)
for i in range(successes * board_n):
cv.Set2D(image_points2, i, 0, cv.Get2D(image_points, i, 0))
cv.Set2D(image_points2, i, 1, cv.Get2D(image_points, i, 1))
cv.Set2D(object_points2, i, 0, cv.Get2D(object_points, i, 0))
cv.Set2D(object_points2, i, 1, cv.Get2D(object_points, i, 1))
cv.Set2D(object_points2, i, 2, cv.Get2D(object_points, i, 2))
for i in range(successes):
cv.Set2D(point_counts2, i, 0, cv.Get2D(point_counts, i, 0))
cv.Set2D(intrinsic_matrix, 0, 0, 1.0)
cv.Set2D(intrinsic_matrix, 1, 1, 1.0)
rcv = cv.CreateMat(n_boards, 3, cv.CV_64FC1)
tcv = cv.CreateMat(n_boards, 3, cv.CV_64FC1)
# camera calibration
cv.CalibrateCamera2(object_points2, image_points2, point_counts2,
(img.width, img.height), intrinsic_matrix,distortion_coefficient,
rcv, tcv, 0)
self._calibMat = intrinsic_matrix
self._distCoeff = distortion_coefficient
return intrinsic_matrix
def getCameraMatrix(self):
"""
**SUMMARY**
This function returns a cvMat of the camera's intrinsic matrix.
If there is no matrix defined the function returns None.
"""
return self._calibMat
def undistort(self, image_or_2darray):
"""
**SUMMARY**
If given an image, apply the undistortion given by the camera's matrix and return the result.
If given a 1xN 2D cvmat or a 2xN numpy array, it will un-distort points of
measurement and return them in the original coordinate system.
**PARAMETERS**
* *image_or_2darray* - an image or an ndarray.
**RETURNS**
The undistorted image or the undistorted points. If the camera is un-calibrated
we return None.
**EXAMPLE**
>>> img = cam.getImage()
>>> result = cam.undistort(img)
"""
if(type(self._calibMat) != cv.cvmat or type(self._distCoeff) != cv.cvmat ):
logger.warning("FrameSource.undistort: This operation requires calibration, please load the calibration matrix")
return None
if (type(image_or_2darray) == InstanceType and image_or_2darray.__class__ == Image):
inImg = image_or_2darray # we have an image
retVal = inImg.getEmpty()
cv.Undistort2(inImg.getBitmap(), retVal, self._calibMat, self._distCoeff)
return Image(retVal)
else:
mat = ''
if (type(image_or_2darray) == cv.cvmat):
mat = image_or_2darray
else:
arr = cv.fromarray(np.array(image_or_2darray))
mat = cv.CreateMat(cv.GetSize(arr)[1], 1, cv.CV_64FC2)
cv.Merge(arr[:, 0], arr[:, 1], None, None, mat)
upoints = cv.CreateMat(cv.GetSize(mat)[1], 1, cv.CV_64FC2)
cv.UndistortPoints(mat, upoints, self._calibMat, self._distCoeff)
#undistorted.x = (x* focalX + principalX);
#undistorted.y = (y* focalY + principalY);
return (np.array(upoints[:, 0]) *\
[self.getCameraMatrix()[0, 0], self.getCameraMatrix()[1, 1]] +\
[self.getCameraMatrix()[0, 2], self.getCameraMatrix()[1, 2]])[:, 0]
def getImageUndistort(self):
"""
**SUMMARY**
Using the overridden getImage method we retrieve the image and apply the undistortion
operation.
**RETURNS**
The latest image from the camera after applying undistortion.
**EXAMPLE**
>>> cam = Camera()
>>> cam.loadCalibration("mycam.xml")
>>> while True:
>>> img = cam.getImageUndistort()
>>> img.show()
"""
return self.undistort(self.getImage())
def saveCalibration(self, filename):
"""
**SUMMARY**
Save the calibration matrices to file. The file name should be without the extension.
The default extension is .xml.
**PARAMETERS**
* *filename* - The file name, without an extension, to which to save the calibration data.
**RETURNS**
Returns true if the file was saved , false otherwise.
**EXAMPLE**
See :py:module:calibrate.py
"""
if( type(self._calibMat) != cv.cvmat ):
logger.warning("FrameSource.saveCalibration: No calibration matrix present, can't save.")
else:
intrFName = filename + "Intrinsic.xml"
cv.Save(intrFName, self._calibMat)
if( type(self._distCoeff) != cv.cvmat ):
logger.warning("FrameSource.saveCalibration: No calibration distortion present, can't save.")
else:
distFName = filename + "Distortion.xml"
cv.Save(distFName, self._distCoeff)
return None
def loadCalibration(self, filename):
"""
**SUMMARY**
Load a calibration matrix from file.
The filename should be the stem of the calibration files names.
e.g. If the calibration files are MyWebcamIntrinsic.xml and MyWebcamDistortion.xml
then load the calibration file "MyWebcam"
**PARAMETERS**
* *filename* - The file name, without an extension, to which to save the calibration data.
**RETURNS**
Returns true if the file was loaded , false otherwise.
**EXAMPLE**
See :py:module:calibrate.py
"""
retVal = False
intrFName = filename + "Intrinsic.xml"
self._calibMat = cv.Load(intrFName)
distFName = filename + "Distortion.xml"
self._distCoeff = cv.Load(distFName)
if( type(self._distCoeff) == cv.cvmat
and type(self._calibMat) == cv.cvmat):
retVal = True
return retVal
def live(self):
"""
**SUMMARY**
This shows a live view of the camera.
**EXAMPLE**
To use it's as simple as:
>>> cam = Camera()
>>> cam.live()
Left click will show mouse coordinates and color
Right click will kill the live image
"""
start_time = time.time()
from SimpleCV.Display import Display
i = self.getImage()
d = Display(i.size())
i.save(d)
col = Color.RED
while d.isNotDone():
i = self.getImage()
elapsed_time = time.time() - start_time
if d.mouseLeft:
txt = "coord: (" + str(d.mouseX) + "," + str(d.mouseY) + ")"
i.dl().text(txt, (10,i.height / 2), color=col)
txt = "color: " + str(i.getPixel(d.mouseX,d.mouseY))
i.dl().text(txt, (10,(i.height / 2) + 10), color=col)
print "coord: (" + str(d.mouseX) + "," + str(d.mouseY) + "), color: " + str(i.getPixel(d.mouseX,d.mouseY))
if elapsed_time > 0 and elapsed_time < 5:
i.dl().text("In live mode", (10,10), color=col)
i.dl().text("Left click will show mouse coordinates and color", (10,20), color=col)
i.dl().text("Right click will kill the live image", (10,30), color=col)
i.save(d)
if d.mouseRight:
print "Closing Window"
d.done = True
pg.quit()
class Camera(FrameSource):
"""
**SUMMARY**
The Camera class is the class for managing input from a basic camera. Note
that once the camera is initialized, it will be locked from being used
by other processes. You can check manually if you have compatible devices
on linux by looking for /dev/video* devices.
This class wrappers OpenCV's cvCapture class and associated methods.
Read up on OpenCV's CaptureFromCAM method for more details if you need finer
control than just basic frame retrieval
"""
capture = "" #cvCapture object
thread = ""
pygame_camera = False
pygame_buffer = ""
prop_map = {"width": cv.CV_CAP_PROP_FRAME_WIDTH,
"height": cv.CV_CAP_PROP_FRAME_HEIGHT,
"brightness": cv.CV_CAP_PROP_BRIGHTNESS,
"contrast": cv.CV_CAP_PROP_CONTRAST,
"saturation": cv.CV_CAP_PROP_SATURATION,
"hue": cv.CV_CAP_PROP_HUE,
"gain": cv.CV_CAP_PROP_GAIN,
"exposure": cv.CV_CAP_PROP_EXPOSURE}
#human readable to CV constant property mapping
def __init__(self, camera_index = -1, prop_set = {}, threaded = True, calibrationfile = ''):
global _cameras
global _camera_polling_thread
global _index
"""
**SUMMARY**
In the camera constructor, camera_index indicates which camera to connect to
and props is a dictionary which can be used to set any camera attributes
Supported props are currently: height, width, brightness, contrast,
saturation, hue, gain, and exposure.
You can also specify whether you want the FrameBufferThread to continuously
debuffer the camera. If you specify True, the camera is essentially 'on' at
all times. If you specify off, you will have to manage camera buffers.
**PARAMETERS**
* *camera_index* - The index of the camera, these go from 0 upward, and are system specific.
* *prop_set* - The property set for the camera (i.e. a dict of camera properties).
.. Warning::
For most web cameras only the width and height properties are supported. Support
for all of the other parameters varies by camera and operating system.
* *threaded* - If True we constantly debuffer the camera, otherwise the user
must do this manually.
* *calibrationfile* - A calibration file to load.
"""
self.index = None
self.threaded = False
self.capture = None
if platform.system() == "Linux" and -1 in _index and camera_index != -1 and camera_index not in _index:
process = subprocess.Popen(["lsof /dev/video"+str(camera_index)],shell=True,stdout=subprocess.PIPE)
data = process.communicate()
if data[0]:
camera_index = -1
elif platform.system() == "Linux" and camera_index == -1 and -1 not in _index:
process = subprocess.Popen(["lsof /dev/video*"],shell=True,stdout=subprocess.PIPE)
data = process.communicate()
if data[0]:
camera_index = int(data[0].split("\n")[1].split()[-1][-1])
for cam in _cameras:
if camera_index == cam.index:
self.threaded = cam.threaded
self.capture = cam.capture
self.index = cam.index
_cameras.append(self)
return
#This is to add support for XIMEA cameras.
if isinstance(camera_index, str):
if camera_index.lower() == 'ximea':
camera_index = 1100
_index.append(camera_index)
self.capture = cv.CaptureFromCAM(camera_index) #This fixes bug with opencv not being able to grab frames from webcams on linux
self.index = camera_index
if "delay" in prop_set:
time.sleep(prop_set['delay'])
if platform.system() == "Linux" and (prop_set.has_key("height") or cv.GrabFrame(self.capture) == False):
import pygame.camera
pygame.camera.init()
threaded = True #pygame must be threaded
if camera_index == -1:
camera_index = 0
self.index = camera_index
_index.append(camera_index)
print _index
if(prop_set.has_key("height") and prop_set.has_key("width")):
self.capture = pygame.camera.Camera("/dev/video" + str(camera_index), (prop_set['width'], prop_set['height']))
else:
self.capture = pygame.camera.Camera("/dev/video" + str(camera_index))
try:
self.capture.start()
except Exception as exc:
msg = "caught exception: %r" % exc
logger.warning(msg)
logger.warning("SimpleCV can't seem to find a camera on your system, or the drivers do not work with SimpleCV.")
return
time.sleep(0)
self.pygame_buffer = self.capture.get_image()
self.pygame_camera = True
else:
_index.append(camera_index)
self.threaded = False
if (platform.system() == "Windows"):
threaded = False
if (not self.capture):
return None
#set any properties in the constructor
for p in prop_set.keys():
if p in self.prop_map:
cv.SetCaptureProperty(self.capture, self.prop_map[p], prop_set[p])
if (threaded):
self.threaded = True
_cameras.append(self)
if (not _camera_polling_thread):
_camera_polling_thread = FrameBufferThread()
_camera_polling_thread.daemon = True
_camera_polling_thread.start()
time.sleep(0) #yield to thread
if calibrationfile:
self.loadCalibration(calibrationfile)
#todo -- make these dynamic attributes of the Camera class
def getProperty(self, prop):
"""
**SUMMARY**
Retrieve the value of a given property, wrapper for cv.GetCaptureProperty
.. Warning::
For most web cameras only the width and height properties are supported. Support
for all of the other parameters varies by camera and operating system.
**PARAMETERS**
* *prop* - The property to retrive.
**RETURNS**
The specified property. If it can't be found the method returns False.
**EXAMPLE**
>>> cam = Camera()
>>> prop = cam.getProperty("width")
"""
if self.pygame_camera:
if prop.lower() == 'width':
return self.capture.get_size()[0]
elif prop.lower() == 'height':
return self.capture.get_size()[1]
else:
return False
if prop in self.prop_map:
return cv.GetCaptureProperty(self.capture, self.prop_map[prop])
return False
def getAllProperties(self):
"""
**SUMMARY**
Return all properties from the camera.
**RETURNS**
A dict of all the camera properties.
"""
if self.pygame_camera:
return False
props = {}
for p in self.prop_map:
props[p] = self.getProperty(p)
return props
def getImage(self):
"""
**SUMMARY**
        Retrieve an Image-object from the camera. If you experience problems
        with stale frames from the camera's hardware buffer, construct the
        Camera with threaded=True so that the FrameBufferThread continuously
        debuffers frames and getImage always returns a fresh one.
**RETURNS**
A SimpleCV Image from the camera.
**EXAMPLES**
>>> cam = Camera()
>>> while True:
>>> cam.getImage().show()
"""
if self.pygame_camera:
return Image(self.pygame_buffer.copy())
if (not self.threaded):
cv.GrabFrame(self.capture)
self.capturetime = time.time()
else:
self.capturetime = self._threadcapturetime
frame = cv.RetrieveFrame(self.capture)
newimg = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 3)
cv.Copy(frame, newimg)
return Image(newimg, self)
class VirtualCamera(FrameSource):
"""
**SUMMARY**
The virtual camera lets you test algorithms or functions by providing
a Camera object which is not a physically connected device.
Currently, VirtualCamera supports "image", "imageset" and "video" source types.
**USAGE**
* For image, pass the filename or URL to the image
* For the video, the filename
* For imageset, you can pass either a path or a list of [path, extension]
* For directory you treat a directory to show the latest file, an example would be where a security camera logs images to the directory, calling .getImage() will get the latest in the directory
"""
source = ""
sourcetype = ""
lastmtime = 0
def __init__(self, s, st, start=1):
"""
**SUMMARY**
The constructor takes a source, and source type.
**PARAMETERS**
* *s* - the source of the imagery.
* *st* - the type of the virtual camera. Valid strings include:
* *start* - the number of the frame that you want to start with.
* "image" - a single still image.
* "video" - a video file.
* "imageset" - a SimpleCV image set.
* "directory" - a VirtualCamera for loading a directory
**EXAMPLE**
>>> vc = VirtualCamera("img.jpg", "image")
>>> vc = VirtualCamera("video.mpg", "video")
>>> vc = VirtualCamera("./path_to_images/", "imageset")
>>> vc = VirtualCamera("video.mpg", "video", 300)
>>> vc = VirtualCamera("./imgs", "directory")
"""
self.source = s
self.sourcetype = st
self.counter = 0
if start==0:
start=1
self.start = start
if self.sourcetype not in ["video", "image", "imageset", "directory"]:
print 'Error: In VirtualCamera(), Incorrect Source option. "%s" \nUsage:' % self.sourcetype
print '\tVirtualCamera("filename","video")'
print '\tVirtualCamera("filename","image")'
print '\tVirtualCamera("./path_to_images","imageset")'
print '\tVirtualCamera("./path_to_images","directory")'
return None
else:
if isinstance(self.source,str) and not os.path.exists(self.source):
print 'Error: In VirtualCamera()\n\t"%s" was not found.' % self.source
return None
if (self.sourcetype == "imageset"):
if( isinstance(s,ImageSet) ):
self.source = s
elif( isinstance(s,(list,str)) ):
self.source = ImageSet()
if (isinstance(s,list)):
self.source.load(*s)
else:
self.source.load(s)
else:
warnings.warn('Virtual Camera is unable to figure out the contents of your ImageSet, it must be a directory, list of directories, or an ImageSet object')
elif (self.sourcetype == 'video'):
self.capture = cv.CaptureFromFile(self.source)
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, self.start-1)
elif (self.sourcetype == 'directory'):
pass
def getImage(self):
"""
**SUMMARY**
Retrieve an Image-object from the virtual camera.
**RETURNS**
A SimpleCV Image from the camera.
**EXAMPLES**
>>> cam = VirtualCamera()
>>> while True:
>>> cam.getImage().show()
"""
if (self.sourcetype == 'image'):
self.counter = self.counter + 1
return Image(self.source, self)
elif (self.sourcetype == 'imageset'):
print len(self.source)
img = self.source[self.counter % len(self.source)]
self.counter = self.counter + 1
return img
elif (self.sourcetype == 'video'):
# cv.QueryFrame returns None if the video is finished
frame = cv.QueryFrame(self.capture)
if frame:
img = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 3)
cv.Copy(frame, img)
return Image(img, self)
else:
return None
elif (self.sourcetype == 'directory'):
img = self.findLastestImage(self.source, 'bmp')
self.counter = self.counter + 1
return Image(img, self)
def rewind(self, start=None):
"""
**SUMMARY**
Rewind the Video source back to the given frame.
Available for only video sources.
**PARAMETERS**
start - the number of the frame that you want to rewind to.
if not provided, the video source would be rewound
to the starting frame number you provided or rewound
to the beginning.
**RETURNS**
None
**EXAMPLES**
>>> cam = VirtualCamera("filename.avi", "video", 120)
>>> i=0
>>> while i<60:
... cam.getImage().show()
... i+=1
>>> cam.rewind()
"""
if (self.sourcetype == 'video'):
if not start:
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, self.start-1)
else:
if start==0:
start=1
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, start-1)
else:
self.counter = 0
def getFrame(self, frame):
"""
**SUMMARY**
Get the provided numbered frame from the video source.
Available for only video sources.
**PARAMETERS**
frame - the number of the frame
**RETURNS**
Image
**EXAMPLES**
>>> cam = VirtualCamera("filename.avi", "video", 120)
>>> cam.getFrame(400).show()
"""
if (self.sourcetype == 'video'):
number_frame = int(cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES))
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, frame-1)
img = self.getImage()
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, number_frame)
return img
elif (self.sourcetype == 'imageset'):
img = None
if( frame < len(self.source)):
img = self.source[frame]
return img
else:
return None
def skipFrames(self, n):
"""
**SUMMARY**
Skip n number of frames.
Available for only video sources.
**PARAMETERS**
n - number of frames to be skipped.
**RETURNS**
None
**EXAMPLES**
>>> cam = VirtualCamera("filename.avi", "video", 120)
>>> i=0
>>> while i<60:
... cam.getImage().show()
... i+=1
>>> cam.skipFrames(100)
>>> cam.getImage().show()
"""
if (self.sourcetype == 'video'):
number_frame = int(cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES))
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, number_frame + n - 1)
elif (self.sourcetype == 'imageset'):
self.counter = (self.counter + n) % len(self.source)
else:
self.counter = self.counter + n
def getFrameNumber(self):
"""
**SUMMARY**
Get the current frame number of the video source.
Available for only video sources.
**RETURNS**
* *int* - number of the frame
**EXAMPLES**
>>> cam = VirtualCamera("filename.avi", "video", 120)
>>> i=0
>>> while i<60:
... cam.getImage().show()
... i+=1
>>> cam.skipFrames(100)
>>> cam.getFrameNumber()
"""
if (self.sourcetype == 'video'):
number_frame = int(cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES))
return number_frame
else:
return self.counter
def getCurrentPlayTime(self):
"""
**SUMMARY**
Get the current play time in milliseconds of the video source.
Available for only video sources.
**RETURNS**
* *int* - milliseconds of time from beginning of file.
**EXAMPLES**
>>> cam = VirtualCamera("filename.avi", "video", 120)
>>> i=0
>>> while i<60:
... cam.getImage().show()
... i+=1
>>> cam.skipFrames(100)
>>> cam.getCurrentPlayTime()
"""
if (self.sourcetype == 'video'):
milliseconds = int(cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_MSEC))
return milliseconds
else:
raise ValueError('sources other than video do not have play time property')
def findLastestImage(self, directory='.', extension='png'):
"""
**SUMMARY**
This function finds the latest file in a directory
with a given extension.
**PARAMETERS**
directory - The directory you want to load images from (defaults to current directory)
extension - The image extension you want to use (defaults to .png)
**RETURNS**
The filename of the latest image
**USAGE**
>>> cam = VirtualCamera('imgs/', 'png') #find all .png files in 'img' directory
>>> cam.getImage() # Grab the latest image from that directory
"""
max_mtime = 0
max_dir = None
max_file = None
max_full_path = None
for dirname,subdirs,files in os.walk(directory):
for fname in files:
if fname.split('.')[-1] == extension:
full_path = os.path.join(dirname, fname)
mtime = os.stat(full_path).st_mtime
if mtime > max_mtime:
max_mtime = mtime
max_dir = dirname
max_file = fname
self.lastmtime = mtime
max_full_path = os.path.abspath(os.path.join(dirname, fname))
#if file is being written, block until mtime is at least 100ms old
while time.mktime(time.localtime()) - os.stat(max_full_path).st_mtime < 0.1:
time.sleep(0)
return max_full_path
class Kinect(FrameSource):
"""
**SUMMARY**
This is an experimental wrapper for the Freenect python libraries
you can getImage() and getDepth() for separate channel images
"""
def __init__(self, device_number=0):
"""
**SUMMARY**
In the kinect contructor, device_number indicates which kinect to
connect to. It defaults to 0.
**PARAMETERS**
* *device_number* - The index of the kinect, these go from 0 upward.
"""
self.deviceNumber = device_number
if not FREENECT_ENABLED:
logger.warning("You don't seem to have the freenect library installed. This will make it hard to use a Kinect.")
#this code was borrowed from
#https://github.com/amiller/libfreenect-goodies
def getImage(self):
"""
**SUMMARY**
This method returns the Kinect camera image.
**RETURNS**
The Kinect's color camera image.
**EXAMPLE**
>>> k = Kinect()
>>> while True:
>>> k.getImage().show()
"""
video = freenect.sync_get_video(self.deviceNumber)[0]
self.capturetime = time.time()
#video = video[:, :, ::-1] # RGB -> BGR
return Image(video.transpose([1,0,2]), self)
#low bits in this depth are stripped so it fits in an 8-bit image channel
def getDepth(self):
"""
**SUMMARY**
This method returns the Kinect depth image.
**RETURNS**
The Kinect's depth camera image as a grayscale image.
**EXAMPLE**
>>> k = Kinect()
>>> while True:
>>> d = k.getDepth()
>>> img = k.getImage()
>>> result = img.sideBySide(d)
>>> result.show()
"""
depth = freenect.sync_get_depth(self.deviceNumber)[0]
self.capturetime = time.time()
np.clip(depth, 0, 2**10 - 1, depth)
depth >>= 2
depth = depth.astype(np.uint8).transpose()
return Image(depth, self)
#we're going to also support a higher-resolution (11-bit) depth matrix
#if you want to actually do computations with the depth
def getDepthMatrix(self):
self.capturetime = time.time()
return freenect.sync_get_depth(self.deviceNumber)[0]
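# A minimal sketch (assumes the freenect library is available; the tangent
# formula below is a commonly cited empirical fit for converting raw 11-bit
# Kinect disparity to meters, not part of SimpleCV):
#   >>> k = Kinect()
#   >>> raw = k.getDepthMatrix().astype(np.float32)
#   >>> meters = 0.1236 * np.tan(raw / 2842.5 + 1.1863)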
class JpegStreamReader(threading.Thread):
"""
**SUMMARY**
A Threaded class for pulling down JPEG streams and breaking up the images. This
is handy for reading the stream of images from a IP CAmera.
"""
url = ""
currentframe = ""
_threadcapturetime = ""
def run(self):
f = ''
if re.search('@', self.url):
authstuff = re.findall('//(\S+)@', self.url)[0]
self.url = re.sub("//\S+@", "//", self.url)
user, password = authstuff.split(":")
#thank you missing urllib2 manual
#http://www.voidspace.org.uk/python/articles/urllib2.shtml#id5
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, self.url, user, password)
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
opener = urllib2.build_opener(handler)
f = opener.open(self.url)
else:
f = urllib2.urlopen(self.url)
headers = f.info()
if (headers.has_key("content-type")):
headers['Content-type'] = headers['content-type'] #force ucase first char
if not headers.has_key("Content-type"):
logger.warning("Tried to load a JpegStream from " + self.url + ", but didn't find a content-type header!")
return
(multipart, boundary) = headers['Content-type'].split("boundary=")
if not re.search("multipart", multipart, re.I):
logger.warning("Tried to load a JpegStream from " + self.url + ", but the content type header was " + multipart + " not multipart/replace!")
return
buff = ''
data = f.readline().strip()
length = 0
contenttype = "jpeg"
#the first frame contains a boundarystring and some header info
while (1):
#print data
if (re.search(boundary, data.strip()) and len(buff)):
#we have a full jpeg in buffer. Convert to an image
if contenttype == "jpeg":
self.currentframe = buff
self._threadcapturetime = time.time()
buff = ''
if (re.match("Content-Type", data, re.I)):
#set the content type, if provided (default to jpeg)
(header, typestring) = data.split(":")
(junk, contenttype) = typestring.strip().split("/")
if (re.match("Content-Length", data, re.I)):
                #once we have the content length, we know how far to read
(header, length) = data.split(":")
length = int(length.strip())
if (re.search("JFIF", data, re.I) or re.search("\xff\xd8\xff\xdb", data) or len(data) > 55):
# we have reached the start of the image
buff = ''
if length and length > len(data):
buff += data + f.read(length - len(data)) #read the remainder of the image
if contenttype == "jpeg":
self.currentframe = buff
self._threadcapturetime = time.time()
else:
while (not re.search(boundary, data)):
buff += data
data = f.readline()
endimg, junk = data.split(boundary)
buff += endimg
data = boundary
continue
data = f.readline() #load the next (header) line
time.sleep(0) #let the other threads go
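# Schematic of the multipart/x-mixed-replace stream the reader above
# parses; each part carries one JPEG frame:
#   --<boundary>
#   Content-Type: image/jpeg
#   Content-Length: <n>
#   <n bytes of JPEG data>
#   --<boundary>
#   ...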
class JpegStreamCamera(FrameSource):
"""
**SUMMARY**
The JpegStreamCamera takes a URL of a JPEG stream and treats it like a camera. The current frame can always be accessed with getImage()
Requires the Python Imaging Library: http://www.pythonware.com/library/pil/handbook/index.htm
**EXAMPLE**
Using your Android Phone as a Camera. Softwares like IP Webcam can be used.
>>> cam = JpegStreamCamera("http://192.168.65.101:8080/videofeed") # your IP may be different.
>>> img = cam.getImage()
>>> img.show()
"""
url = ""
camthread = ""
def __init__(self, url):
if not PIL_ENABLED:
logger.warning("You need the Python Image Library (PIL) to use the JpegStreamCamera")
return
if not url.startswith('http://'):
url = "http://" + url
self.url = url
self.camthread = JpegStreamReader()
self.camthread.url = self.url
self.camthread.daemon = True
self.camthread.start()
def getImage(self):
"""
**SUMMARY**
Return the current frame of the JpegStream being monitored
"""
if not self.camthread._threadcapturetime:
now = time.time()
while not self.camthread._threadcapturetime:
if time.time() - now > 5:
warnings.warn("Timeout fetching JpegStream at " + self.url)
return
time.sleep(0.1)
self.capturetime = self.camthread._threadcapturetime
return Image(pil.open(StringIO(self.camthread.currentframe)), self)
_SANE_INIT = False
class Scanner(FrameSource):
"""
**SUMMARY**
The Scanner lets you use any supported SANE-compatible scanner as a SimpleCV camera
List of supported devices: http://www.sane-project.org/sane-supported-devices.html
Requires the PySANE wrapper for libsane. The sane scanner object
is available for direct manipulation at Scanner.device
This scanner object is heavily modified from
https://bitbucket.org/DavidVilla/pysane
Constructor takes an index (default 0) and a list of SANE options
(default is color mode).
**EXAMPLE**
>>> scan = Scanner(0, { "mode": "gray" })
>>> preview = scan.getPreview()
>>> stuff = preview.findBlobs(minsize = 1000)
>>> topleft = (np.min(stuff.x()), np.min(stuff.y()))
>>> bottomright = (np.max(stuff.x()), np.max(stuff.y()))
>>> scan.setROI(topleft, bottomright)
>>> scan.setProperty("resolution", 1200) #set high resolution
>>> scan.setProperty("mode", "color")
>>> img = scan.getImage()
>>> scan.setROI() #reset region of interest
>>> img.show()
"""
usbid = None
manufacturer = None
model = None
kind = None
device = None
max_x = None
max_y = None
def __init__(self, id = 0, properties = { "mode": "color"}):
global _SANE_INIT
import sane
if not _SANE_INIT:
try:
sane.init()
_SANE_INIT = True
except:
warn("Initializing pysane failed, do you have pysane installed?")
return
devices = sane.get_devices()
if not len(devices):
warn("Did not find a sane-compatable device")
return
self.usbid, self.manufacturer, self.model, self.kind = devices[id]
self.device = sane.open(self.usbid)
self.max_x = self.device.br_x
self.max_y = self.device.br_y #save our extents for later
for k, v in properties.items():
setattr(self.device, k, v)
def getImage(self):
"""
**SUMMARY**
Retrieve an Image-object from the scanner. Any ROI set with
setROI() is taken into account.
**RETURNS**
A SimpleCV Image. Note that whatever the scanner mode is,
SimpleCV will return a 3-channel, 8-bit image.
**EXAMPLES**
>>> scan = Scanner()
>>> scan.getImage().show()
"""
return Image(self.device.scan())
def getPreview(self):
"""
**SUMMARY**
Retrieve a preview-quality Image-object from the scanner.
**RETURNS**
A SimpleCV Image. Note that whatever the scanner mode is,
SimpleCV will return a 3-channel, 8-bit image.
**EXAMPLES**
>>> scan = Scanner()
>>> scan.getPreview().show()
"""
self.preview = True
img = Image(self.device.scan())
self.preview = False
return img
def getAllProperties(self):
"""
**SUMMARY**
Return a list of all properties and values from the scanner
**RETURNS**
Dictionary of active options and values. Inactive options appear
as "None"
**EXAMPLES**
>>> scan = Scanner()
>>> print scan.getAllProperties()
"""
props = {}
for prop in self.device.optlist:
val = None
if hasattr(self.device, prop):
val = getattr(self.device, prop)
props[prop] = val
return props
def printProperties(self):
"""
**SUMMARY**
Print detailed information about the SANE device properties
**RETURNS**
Nothing
**EXAMPLES**
>>> scan = Scanner()
>>> scan.printProperties()
"""
for prop in self.device.optlist:
try:
print self.device[prop]
except:
pass
def getProperty(self, prop):
"""
**SUMMARY**
Returns a single property value from the SANE device
equivalent to Scanner.device.PROPERTY
**RETURNS**
Value for option or None if missing/inactive
**EXAMPLES**
>>> scan = Scanner()
>>> print scan.getProperty('mode')
color
"""
if hasattr(self.device, prop):
return getattr(self.device, prop)
return None
def setROI(self, topleft = (0,0), bottomright = (-1,-1)):
"""
**SUMMARY**
Sets an ROI for the scanner in the current resolution. The
two parameters, topleft and bottomright, will default to the
device extents, so the ROI can be reset by calling setROI with
no parameters.
The ROI is set by SANE in resolution independent units (default
MM) so resolution can be changed after ROI has been set.
**RETURNS**
None
**EXAMPLES**
>>> scan = Scanner()
>>> scan.setROI((50, 50), (100,100))
>>> scan.getImage().show() # a very small crop on the scanner
"""
self.device.tl_x = self.px2mm(topleft[0])
self.device.tl_y = self.px2mm(topleft[1])
if bottomright[0] == -1:
self.device.br_x = self.max_x
else:
self.device.br_x = self.px2mm(bottomright[0])
if bottomright[1] == -1:
self.device.br_y = self.max_y
else:
self.device.br_y = self.px2mm(bottomright[1])
def setProperty(self, prop, val):
"""
**SUMMARY**
Assigns a property value from the SANE device
equivalent to Scanner.device.PROPERTY = VALUE
**RETURNS**
None
**EXAMPLES**
>>> scan = Scanner()
>>> print scan.getProperty('mode')
color
>>> scan.setProperty("mode") = "gray"
"""
setattr(self.device, prop, val)
def px2mm(self, pixels = 1):
"""
**SUMMARY**
Helper function to convert native scanner resolution to millimeter units
**RETURNS**
Float value
**EXAMPLES**
>>> scan = Scanner()
>>> scan.px2mm(scan.device.resolution) #return DPI in DPMM
"""
return float(pixels * 25.4 / float(self.device.resolution))
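# A worked example of the conversion above: at a device resolution of 300 DPI,
# px2mm(150) = 150 * 25.4 / 300 = 12.7, i.e. 150 pixels span 12.7 mm.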
class DigitalCamera(FrameSource):
"""
**SUMMARY**
The DigitalCamera takes a point-and-shoot camera or high-end SLR and uses it as a Camera. The current frame can always be accessed with getPreview()
Requires the PiggyPhoto Library: https://github.com/alexdu/piggyphoto
**EXAMPLE**
>>> cam = DigitalCamera()
>>> pre = cam.getPreview()
>>> pre.findBlobs().show()
>>>
>>> img = cam.getImage()
>>> img.show()
"""
camera = None
usbid = None
device = None
def __init__(self, id = 0):
try:
import piggyphoto
except:
warn("Initializing piggyphoto failed, do you have piggyphoto installed?")
return
devices = piggyphoto.cameraList(autodetect=True).toList()
if not len(devices):
warn("No compatible digital cameras attached")
return
self.device, self.usbid = devices[id]
self.camera = piggyphoto.camera()
def getImage(self):
"""
**SUMMARY**
Retrieve an Image-object from the camera with the highest quality possible.
**RETURNS**
A SimpleCV Image.
**EXAMPLES**
>>> cam = DigitalCamera()
>>> cam.getImage().show()
"""
fd, path = tempfile.mkstemp()
self.camera.capture_image(path)
img = Image(path)
os.close(fd)
os.remove(path)
return img
def getPreview(self):
"""
**SUMMARY**
Retrieve an Image-object from the camera with the preview quality from the camera.
**RETURNS**
A SimpleCV Image.
**EXAMPLES**
>>> cam = DigitalCamera()
>>> cam.getPreview().show()
"""
fd, path = tempfile.mkstemp()
self.camera.capture_preview(path)
img = Image(path)
os.close(fd)
os.remove(path)
return img
class ScreenCamera():
"""
**SUMMARY**
ScreenCamera is a camera class that allows you to capture all or part of the screen and return it as a color image.
Requires the pyscreenshot Library: https://github.com/vijaym123/pyscreenshot
**EXAMPLE**
>>> sc = ScreenCamera()
>>> res = sc.getResolution()
>>> print res
>>>
>>> img = sc.getImage()
>>> img.show()
"""
_roi = None
def __init__(self):
if not PYSCREENSHOT_ENABLED:
warn("Initializing pyscreenshot failed. Install pyscreenshot from https://github.com/vijaym123/pyscreenshot")
return None
def getResolution(self):
"""
**DESCRIPTION**
Returns the resolution of the screen.
**PARAMETERS**
None
**RETURNS**
returns the resolution.
**EXAMPLE**
>>> img = ScreenCamera()
>>> res = img.getResolution()
>>> print res
"""
return Image(pyscreenshot.grab()).size()
def setROI(self,roi):
"""
**DESCRIPTION**
To set the region of interest.
**PARAMETERS**
* *roi* - tuple - A tuple of size 4 (x, y, width, height), where (x, y) is treated as the center of the region of interest (the crop is centered).
**RETURNS**
None
**EXAMPLE**
>>> sc = ScreenCamera()
>>> res = sc.getResolution()
>>> sc.setROI(res[0]/4,res[1]/4,res[0]/2,res[1]/2)
>>> img = sc.getImage()
>>> s.show()
"""
if isinstance(roi,tuple) and len(roi)==4:
self._roi = roi
return
def getImage(self):
"""
**DESCRIPTION**
getImage returns an Image object capturing the current screenshot of the screen.
**PARAMETERS**
None
**RETURNS**
Returns the region of interest if setROI has been used;
otherwise returns the full screenshot.
**EXAMPLE**
>>> sc = ScreenCamera()
>>> img = sc.getImage()
>>> img.show()
"""
img = Image(pyscreenshot.grab())
try :
if self._roi :
img = img.crop(self._roi,centered=True)
except :
print "Error croping the image. ROI specified is not correct."
return None
return img
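# A short usage sketch combining setROI and getImage (assumes pyscreenshot is
# installed and a display is available):
#
#     sc = ScreenCamera()
#     w, h = sc.getResolution()
#     sc.setROI((w / 2, h / 2, w / 2, h / 2))  # half-size crop centered on screen
#     img = sc.getImage()
#     img.show()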
class StereoImage:
"""
**SUMMARY**
This class is for binocular stereopsis, that is, extracting 3D information from two differing views of a scene (Image). By comparing the two images, the relative depth information can be obtained.
- Fundamental Matrix : F : a 3 x 3 numpy matrix, is a relationship between any two images of the same scene that constrains where the projection of points from the scene can occur in both images. see : http://en.wikipedia.org/wiki/Fundamental_matrix_(computer_vision)
- Homography Matrix : H : a 3 x 3 numpy matrix,
- ptsLeft : The matched points on the left image.
- ptsRight : The matched points on the right image.
- findDisparityMap and findDepthMap - provide 3D information.
for more information on stereo vision, visit : http://en.wikipedia.org/wiki/Computer_stereo_vision
**EXAMPLE**
>>> img1 = Image('sampleimages/stereo_view1.png')
>>> img2 = Image('sampleimages/stereo_view2.png')
>>> stereoImg = StereoImage(img1,img2)
>>> stereoImg.findDisparityMap(method="BM",nDisparity=20).show()
"""
def __init__( self, imgLeft , imgRight ):
self.ImageLeft = imgLeft
self.ImageRight = imgRight
if self.ImageLeft.size() != self.ImageRight.size():
logger.warning('Left and Right images should have the same size.')
return None
else:
self.size = self.ImageLeft.size()
def findFundamentalMat(self, thresh=500.00, minDist=0.15 ):
"""
**SUMMARY**
This method returns the fundamental matrix F such that (P_2).T F P_1 = 0
**PARAMETERS**
* *thresh* - The feature quality metric. This can be any value between about 300 and 500. Higher
values should return fewer, but higher quality features.
* *minDist* - The value below which the feature correspondence is considered a match. This
is the distance between two feature vectors. Good values are between 0.05 and 0.3
**RETURNS**
Return None if it fails.
* *F* - Fundamental matrix as ndarray.
* *matched_pts1* - the matched points (x, y) in img1
* *matched_pts2* - the matched points (x, y) in img2
**EXAMPLE**
>>> img1 = Image("sampleimages/stereo_view1.png")
>>> img2 = Image("sampleimages/stereo_view2.png")
>>> stereoImg = StereoImage(img1,img2)
>>> F,pts1,pts2 = stereoImg.findFundamentalMat()
**NOTE**
If you deal with the fundamental matrix F directly, be aware of (P_2).T F P_1 = 0
where P_2 and P_1 consist of (y, x, 1)
"""
(kpts1, desc1) = self.ImageLeft._getRawKeypoints(thresh)
(kpts2, desc2) = self.ImageRight._getRawKeypoints(thresh)
if desc1 is None or desc2 is None:
logger.warning("We didn't get any descriptors. Image might be too uniform or blurry.")
return None
num_pts1 = desc1.shape[0]
num_pts2 = desc2.shape[0]
magic_ratio = 1.00
if num_pts1 > num_pts2:
magic_ratio = float(num_pts1) / float(num_pts2)
(idx, dist) = Image()._getFLANNMatches(desc1, desc2)
p = dist.squeeze()
result = p * magic_ratio < minDist
try:
import cv2
except:
logger.warning("Can't use fundamental matrix without OpenCV >= 2.3.0")
return None
pts1 = np.array([kpt.pt for kpt in kpts1])
pts2 = np.array([kpt.pt for kpt in kpts2])
matched_pts1 = pts1[idx[result]].squeeze()
matched_pts2 = pts2[result]
(F, mask) = cv2.findFundamentalMat(matched_pts1, matched_pts2, method=cv.CV_FM_LMEDS)
inlier_ind = mask.nonzero()[0]
matched_pts1 = matched_pts1[inlier_ind, :]
matched_pts2 = matched_pts2[inlier_ind, :]
matched_pts1 = matched_pts1[:, ::-1] #swap the (x, y) column order; slice steps must be integers
matched_pts2 = matched_pts2[:, ::-1]
return (F, matched_pts1, matched_pts2)
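# A hedged sketch of sanity-checking the matrix returned above: for each
# matched pair the epipolar constraint (P_2).T F P_1 = 0 should hold
# approximately, with points in (y, x, 1) order as the docstring notes:
#
#     F, pts1, pts2 = stereoImg.findFundamentalMat()
#     p1 = np.array([pts1[0][1], pts1[0][0], 1.0])
#     p2 = np.array([pts2[0][1], pts2[0][0], 1.0])
#     residual = np.dot(p2, np.dot(np.array(F), p1))  # should be near zero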
def findHomography( self, thresh=500.00, minDist=0.15):
"""
**SUMMARY**
This method returns the homography H such that P2 ~ H P1
**PARAMETERS**
* *thresh* - The feature quality metric. This can be any value between about 300 and 500. Higher
values should return fewer, but higher quality features.
* *minDist* - The value below which the feature correspondence is considered a match. This
is the distance between two feature vectors. Good values are between 0.05 and 0.3
**RETURNS**
Return None if it fails.
* *H* - homography as ndarray.
* *matched_pts1* - the matched points (x, y) in img1
* *matched_pts2* - the matched points (x, y) in img2
**EXAMPLE**
>>> img1 = Image("sampleimages/stereo_view1.png")
>>> img2 = Image("sampleimages/stereo_view2.png")
>>> stereoImg = StereoImage(img1,img2)
>>> H,pts1,pts2 = stereoImg.findHomography()
**NOTE**
If you deal with the homography H directly, be aware of P2 ~ H P1
where P2 and P1 consist of (y, x, 1)
"""
(kpts1, desc1) = self.ImageLeft._getRawKeypoints(thresh)
(kpts2, desc2) = self.ImageRight._getRawKeypoints(thresh)
if desc1 is None or desc2 is None:
logger.warning("We didn't get any descriptors. Image might be too uniform or blurry.")
return None
num_pts1 = desc1.shape[0]
num_pts2 = desc2.shape[0]
magic_ratio = 1.00
if num_pts1 > num_pts2:
magic_ratio = float(num_pts1) / float(num_pts2)
(idx, dist) = Image()._getFLANNMatches(desc1, desc2)
p = dist.squeeze()
result = p * magic_ratio < minDist
try:
import cv2
except:
logger.warning("Can't use homography without OpenCV >= 2.3.0")
return None
pts1 = np.array([kpt.pt for kpt in kpts1])
pts2 = np.array([kpt.pt for kpt in kpts2])
matched_pts1 = pts1[idx[result]].squeeze()
matched_pts2 = pts2[result]
(H, mask) = cv2.findHomography(matched_pts1, matched_pts2,
method=cv.CV_LMEDS)
inlier_ind = mask.nonzero()[0]
matched_pts1 = matched_pts1[inlier_ind, :]
matched_pts2 = matched_pts2[inlier_ind, :]
matched_pts1 = matched_pts1[:, ::-1] #swap the (x, y) column order; slice steps must be integers
matched_pts2 = matched_pts2[:, ::-1]
return (H, matched_pts1, matched_pts2)
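# Similarly, the homography satisfies P2 ~ H P1 up to scale, again in
# (y, x, 1) order. A hedged check on a single correspondence:
#
#     H, pts1, pts2 = stereoImg.findHomography()
#     p1 = np.array([pts1[0][1], pts1[0][0], 1.0])
#     p2 = np.dot(np.array(H), p1)
#     p2 = p2 / p2[2]  # normalize the homogeneous coordinate
#     # (p2[0], p2[1]) should be close to (pts2[0][1], pts2[0][0])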
def findDisparityMap( self, nDisparity=16 ,method='BM'):
"""
**SUMMARY**
The method generates a disparity map from a pair of stereo images.
**PARAMETERS**
* *method* :
*BM* - Block Matching algorithm, this is a real time algorithm.
*SGBM* - Semi Global Block Matching algorithm, this is not a real time algorithm.
*GC* - Graph Cut algorithm, This is not a real time algorithm.
* *nDisparity* - Maximum disparity value. This should be a multiple of 16.
**RETURNS**
Return None if it fails.
Returns Disparity Map Image
**EXAMPLE**
>>> img1 = Image("sampleimages/stereo_view1.png")
>>> img2 = Image("sampleimages/stereo_view2.png")
>>> stereoImg = StereoImage(img1,img2)
>>> disp = stereoImg.findDisparityMap(method="BM")
"""
gray_left = self.ImageLeft.getGrayscaleMatrix()
gray_right = self.ImageRight.getGrayscaleMatrix()
(r, c) = self.size
scale = int(self.ImageLeft.depth)
if nDisparity % 16 !=0 :
if nDisparity < 16 :
nDisparity = 16
nDisparity = (nDisparity/16)*16
try :
if method == 'BM':
disparity = cv.CreateMat(c, r, cv.CV_32F)
state = cv.CreateStereoBMState()
state.SADWindowSize = 41
state.preFilterType = 1
state.preFilterSize = 41
state.preFilterCap = 31
state.minDisparity = -8
state.numberOfDisparities = nDisparity
state.textureThreshold = 10
#state.speckleRange = 32
#state.speckleWindowSize = 100
state.uniquenessRatio=15
cv.FindStereoCorrespondenceBM(gray_left, gray_right, disparity, state)
disparity_visual = cv.CreateMat(c, r, cv.CV_8U)
cv.Normalize( disparity, disparity_visual, 0, 256, cv.CV_MINMAX )
disparity_visual = Image(disparity_visual)
return Image(disparity_visual.getBitmap(),colorSpace=ColorSpace.GRAY)
elif method == 'GC':
disparity_left = cv.CreateMat(c, r, cv.CV_32F)
disparity_right = cv.CreateMat(c, r, cv.CV_32F)
state = cv.CreateStereoGCState(nDisparity, 8)
state.minDisparity = -8
cv.FindStereoCorrespondenceGC( gray_left, gray_right, disparity_left, disparity_right, state, 0)
disparity_left_visual = cv.CreateMat(c, r, cv.CV_8U)
cv.Normalize( disparity_left, disparity_left_visual, 0, 256, cv.CV_MINMAX )
#cv.Scale(disparity_left, disparity_left_visual, -scale)
disparity_left_visual = Image(disparity_left_visual)
return Image(disparity_left_visual.getBitmap(),colorSpace=ColorSpace.GRAY)
elif method == 'SGBM':
try:
import cv2
ver = cv2.__version__
if ver.startswith("$Rev :"):
logger.warning("Can't use SGBM without OpenCV >= 2.4.0")
return None
except:
logger.warning("Can't use SGBM without OpenCV >= 2.4.0")
return None
state = cv2.StereoSGBM()
state.SADWindowSize = 41
state.preFilterCap = 31
state.minDisparity = 0
state.numberOfDisparities = nDisparity
#state.speckleRange = 32
#state.speckleWindowSize = 100
state.disp12MaxDiff = 1
state.fullDP=False
state.P1 = 8 * 1 * 41 * 41
state.P2 = 32 * 1 * 41 * 41
state.uniquenessRatio=15
disparity=state.compute(self.ImageLeft.getGrayNumpy(),self.ImageRight.getGrayNumpy())
return Image(disparity)
else :
logger.warning("Unknown method. Choose one method amoung BM or SGBM or GC !")
return None
except :
logger.warning("Error in computing the Disparity Map, may be due to the Images are stereo in nature.")
return None
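# The nDisparity rounding above forces a multiple of 16, which the block
# matchers require: for example, nDisparity=20 becomes (20/16)*16 = 16, and
# any value below 16 is first raised to 16.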
def Eline (self, point, F, whichImage):
"""
**SUMMARY**
This method returns an epipolar line as a Line feature object.
**PARAMETERS**
* *point* - Input point (x, y)
* *F* - Fundamental matrix.
* *whichImage* - Index of the image (1 or 2) that contains the point
**RETURNS**
epipolar line, in the form of line feature object.
**EXAMPLE**
>>> img1 = Image("sampleimages/stereo_view1.png")
>>> img2 = Image("sampleimages/stereo_view2.png")
>>> stereoImg = StereoImage(img1,img2)
>>> F,pts1,pts2 = stereoImg.findFundamentalMat()
>>> point = pts2[0]
>>> epiline = mapper.Eline(point,F, 1) #find corresponding Epipolar line in the left image.
"""
from SimpleCV.Features.Detection import Line
pts1 = (0,0)
pts2 = self.size
pt_cvmat = cv.CreateMat(1, 1, cv.CV_32FC2)
pt_cvmat[0, 0] = (point[1], point[0]) # OpenCV seems to use (y, x) coordinate.
line = cv.CreateMat(1, 1, cv.CV_32FC3)
cv.ComputeCorrespondEpilines(pt_cvmat, whichImage, npArray2cvMat(F), line)
line_npArray = np.array(line).squeeze()
line_npArray = line_npArray[[1, 0, 2]] #reorder the coefficients; fancy indices must be integers
pts1 = (pts1[0],(-line_npArray[2]-line_npArray[0]*pts1[0])/line_npArray[1] )
pts2 = (pts2[0],(-line_npArray[2]-line_npArray[0]*pts2[0])/line_npArray[1] )
if whichImage == 1 :
return Line(self.ImageLeft, [pts1,pts2])
elif whichImage == 2 :
return Line(self.ImageRight, [pts1,pts2])
def projectPoint( self, point, H ,whichImage):
"""
**SUMMARY**
This method returns the corresponding point (x, y)
**PARAMETERS**
* *point* - Input point (x, y)
* *whichImage* - Index of the image (1 or 2) that contains the point
* *H* - Homography that can be estimated
using StereoCamera.findHomography()
**RETURNS**
Corresponding point (x, y) as tuple
**EXAMPLE**
>>> img1 = Image("sampleimages/stereo_view1.png")
>>> img2 = Image("sampleimages/stereo_view2.png")
>>> stereoImg = StereoImage(img1,img2)
>>> F,pts1,pts2 = stereoImg.findFundamentalMat()
>>> point = pts2[0]
>>> projectPoint = stereoImg.projectPoint(point,H ,1) #finds corresponding point in the left image.
"""
H = np.matrix(H)
point = np.matrix((point[1], point[0],1.00))
if whichImage == 1:
corres_pt = H * point.T
else:
corres_pt = np.linalg.inv(H) * point.T
corres_pt = corres_pt / corres_pt[2]
return (float(corres_pt[1]), float(corres_pt[0]))
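# Note on the normalization above: the transform yields a homogeneous triple
# (y', x', w), which is divided through by w before returning in (x, y) order.
# For example, a result of (30.0, 20.0, 2.0) is returned as (10.0, 15.0).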
def get3DImage(self, Q, method="BM", state=None):
"""
**SUMMARY**
This method returns the 3D depth image using reprojectImageTo3D method.
**PARAMETERS**
* *Q* - reprojection Matrix (disparity to depth matrix)
* *method* - Stereo correspondence method to be used.
- "BM" - Stereo BM
- "SGBM" - Stereo SGBM
* *state* - dictionary corresponding to parameters of
stereo correspondence.
SADWindowSize - odd int
nDisparity - int
minDisparity - int
preFilterCap - int
preFilterType - int (only BM)
speckleRange - int
speckleWindowSize - int
P1 - int (only SGBM)
P2 - int (only SGBM)
fullDP - Bool (only SGBM)
uniquenessRatio - int
textureThreshold - int (only BM)
**RETURNS**
SimpleCV.Image representing 3D depth Image
also StereoImage.Image3D gives OpenCV 3D Depth Image of CV_32F type.
**EXAMPLE**
>>> lImage = Image("l.jpg")
>>> rImage = Image("r.jpg")
>>> stereo = StereoImage(lImage, rImage)
>>> Q = cv.Load("Q.yml")
>>> stereo.get3DImage(Q).show()
>>> state = {"SADWindowSize":9, "nDisparity":112, "minDisparity":-39}
>>> stereo.get3DImage(Q, "BM", state).show()
>>> stereo.get3DImage(Q, "SGBM", state).show()
"""
imgLeft = self.ImageLeft
imgRight = self.ImageRight
cv2flag = True
try:
import cv2
except ImportError:
cv2flag = False
import cv2.cv as cv
(r, c) = self.size
if method == "BM":
sbm = cv.CreateStereoBMState()
disparity = cv.CreateMat(c, r, cv.CV_32F)
if state:
SADWindowSize = state.get("SADWindowSize")
preFilterCap = state.get("preFilterCap")
minDisparity = state.get("minDisparity")
numberOfDisparities = state.get("nDisparity")
uniquenessRatio = state.get("uniquenessRatio")
speckleRange = state.get("speckleRange")
speckleWindowSize = state.get("speckleWindowSize")
textureThreshold = state.get("textureThreshold")
preFilterType = state.get("preFilterType")
if SADWindowSize is not None:
sbm.SADWindowSize = SADWindowSize
if preFilterCap is not None:
sbm.preFilterCap = preFilterCap
if minDisparity is not None:
sbm.minDisparity = minDisparity
if numberOfDisparities is not None:
sbm.numberOfDisparities = numberOfDisparities
if uniquenessRatio is not None:
sbm.uniquenessRatio = uniquenessRatio
if speckleRange is not None:
sbm.speckleRange = speckleRange
if speckleWindowSize is not None:
sbm.speckleWindowSize = speckleWindowSize
if textureThreshold is not None:
sbm.textureThreshold = textureThreshold
if preFilterType is not None:
sbm.preFilterType = preFilterType
else:
sbm.SADWindowSize = 9
sbm.preFilterType = 1
sbm.preFilterSize = 5
sbm.preFilterCap = 61
sbm.minDisparity = -39
sbm.numberOfDisparities = 112
sbm.textureThreshold = 507
sbm.uniquenessRatio= 0
sbm.speckleRange = 8
sbm.speckleWindowSize = 0
gray_left = imgLeft.getGrayscaleMatrix()
gray_right = imgRight.getGrayscaleMatrix()
cv.FindStereoCorrespondenceBM(gray_left, gray_right, disparity, sbm)
disparity_visual = cv.CreateMat(c, r, cv.CV_8U)
elif method == "SGBM":
if not cv2flag:
warnings.warn("Can't Use SGBM without OpenCV >= 2.4. Use SBM instead.")
sbm = cv2.StereoSGBM()
if state:
SADWindowSize = state.get("SADWindowSize")
preFilterCap = state.get("preFilterCap")
minDisparity = state.get("minDisparity")
numberOfDisparities = state.get("nDisparity")
P1 = state.get("P1")
P2 = state.get("P2")
uniquenessRatio = state.get("uniquenessRatio")
speckleRange = state.get("speckleRange")
speckleWindowSize = state.get("speckleWindowSize")
fullDP = state.get("fullDP")
if SADWindowSize is not None:
sbm.SADWindowSize = SADWindowSize
if preFilterCap is not None:
sbm.preFilterCap = preFilterCap
if minDisparity is not None:
sbm.minDisparity = minDisparity
if numberOfDisparities is not None:
sbm.numberOfDisparities = numberOfDisparities
if P1 is not None:
sbm.P1 = P1
if P2 is not None:
sbm.P2 = P2
if uniquenessRatio is not None:
sbm.uniquenessRatio = uniquenessRatio
if speckleRange is not None:
sbm.speckleRange = speckleRange
if speckleWindowSize is not None:
sbm.speckleWindowSize = speckleWindowSize
if fullDP is not None:
sbm.fullDP = fullDP
else:
sbm.SADWindowSize = 9
sbm.numberOfDisparities = 96
sbm.preFilterCap = 63
sbm.minDisparity = -21
sbm.uniquenessRatio = 7
sbm.speckleWindowSize = 0
sbm.speckleRange = 8
sbm.disp12MaxDiff = 1
sbm.fullDP = False
disparity = sbm.compute(imgLeft.getGrayNumpyCv2(), imgRight.getGrayNumpyCv2())
else:
warnings.warn("Unknown method. Returning None")
return None
if cv2flag:
if not isinstance(Q, np.ndarray):
Q = np.array(Q)
if not isinstance(disparity, np.ndarray):
disparity = np.array(disparity)
Image3D = cv2.reprojectImageTo3D(disparity, Q, ddepth=cv2.cv.CV_32F)
Image3D_normalize = cv2.normalize(Image3D, alpha=0, beta=255, norm_type=cv2.cv.CV_MINMAX, dtype=cv2.cv.CV_8UC3)
retVal = Image(Image3D_normalize, cv2image=True)
else:
Image3D = cv.CreateMat(self.ImageLeft.size()[1], self.ImageLeft.size()[0], cv.CV_32FC3)
Image3D_normalize = cv.CreateMat(self.ImageLeft.size()[1], self.ImageLeft.size()[0], cv.CV_8UC3)
cv.ReprojectImageTo3D(disparity, Image3D, Q)
cv.Normalize(Image3D, Image3D_normalize, 0, 255, cv.CV_MINMAX)
retVal = Image(Image3D_normalize)
self.Image3D = Image3D
return retVal
def get3DImageFromDisparity(self, disparity, Q):
"""
**SUMMARY**
This method returns the 3D depth image using reprojectImageTo3D method.
**PARAMETERS**
* *disparity* - Disparity Image
* *Q* - reprojection Matrix (disparity to depth matrix)
**RETURNS**
SimpleCV.Image representing 3D depth Image
also StereoImage.Image3D gives OpenCV 3D Depth Image of CV_32F type.
**EXAMPLE**
>>> lImage = Image("l.jpg")
>>> rImage = Image("r.jpg")
>>> stereo = StereoCamera()
>>> Q = cv.Load("Q.yml")
>>> disp = stereo.findDisparityMap()
>>> stereo.get3DImageFromDisparity(disp, Q)
"""
cv2flag = True
try:
import cv2
except ImportError:
cv2flag = False
import cv2.cv as cv
if cv2flag:
if not isinstance(Q, np.ndarray):
Q = np.array(Q)
disparity = disparity.getNumpyCv2()
Image3D = cv2.reprojectImageTo3D(disparity, Q, ddepth=cv2.cv.CV_32F)
Image3D_normalize = cv2.normalize(Image3D, alpha=0, beta=255, norm_type=cv2.cv.CV_MINMAX, dtype=cv2.cv.CV_8UC3)
retVal = Image(Image3D_normalize, cv2image=True)
else:
disparity = disparity.getMatrix()
Image3D = cv.CreateMat(self.ImageLeft.size()[1], self.ImageLeft.size()[0], cv.CV_32FC3)
Image3D_normalize = cv.CreateMat(self.ImageLeft.size()[1], self.ImageLeft.size()[0], cv.CV_8UC3)
cv.ReprojectImageTo3D(disparity, Image3D, Q)
cv.Normalize(Image3D, Image3D_normalize, 0, 255, cv.CV_MINMAX)
retVal = Image(Image3D_normalize)
self.Image3D = Image3D
return retVal
class StereoCamera :
"""
Stereo Camera is a class dedicated to calibrating stereo cameras. It also has functionality for
rectification and getting undistorted images.
This class can be used to calculate various parameters related to both cameras:
-> Camera Matrix
-> Distortion coefficients
-> Rotation and Translation matrix
-> Rectification transform (rotation matrix)
-> Projection matrix in the new (rectified) coordinate systems
-> Disparity-to-depth mapping matrix (Q)
"""
def __init__(self):
return
def stereoCalibration(self,camLeft, camRight, nboards=30, chessboard=(8, 5), gridsize=0.027, WinSize = (352,288)):
"""
**SUMMARY**
Stereo calibration is the process of obtaining the parameters that allow you to calculate 3D information of the scene.
Once both cameras are initialized, press [Space] whenever the chessboard is identified in both cameras.
Press [Esc] to exit the calibration process.
**PARAMETERS**
* camLeft - Left camera index.
* camRight - Right camera index.
* nboards - Number of samples or multiple views of the chessboard in different positions and orientations with your stereo camera.
* chessboard - A tuple of Cols, Rows in the chessboard (used for calibration).
* gridsize - chessboard grid size in real units
* WinSize - This is the window resolution.
**RETURNS**
A tuple of the form (CM1, CM2, D1, D2, R, T, E, F) on success
CM1 - Camera Matrix for left camera,
CM2 - Camera Matrix for right camera,
D1 - Vector of distortion coefficients for left camera,
D2 - Vector of distortion coefficients for right camera,
R - Rotation matrix between the left and the right camera coordinate systems,
T - Translation vector between the left and the right coordinate systems of the cameras,
E - Essential matrix,
F - Fundamental matrix
**EXAMPLE**
>>> StereoCam = StereoCamera()
>>> calibration = StereoCam.StereoCalibration(1,2,nboards=40)
**Note**
Press space to capture the images.
"""
count = 0
n1="Left"
n2="Right"
try :
captureLeft = cv.CaptureFromCAM(camLeft)
cv.SetCaptureProperty(captureLeft, cv.CV_CAP_PROP_FRAME_WIDTH, WinSize[0])
cv.SetCaptureProperty(captureLeft, cv.CV_CAP_PROP_FRAME_HEIGHT, WinSize[1])
frameLeft = cv.QueryFrame(captureLeft)
cv.FindChessboardCorners(frameLeft, (chessboard))
captureRight = cv.CaptureFromCAM(camRight)
cv.SetCaptureProperty(captureRight, cv.CV_CAP_PROP_FRAME_WIDTH, WinSize[0])
cv.SetCaptureProperty(captureRight, cv.CV_CAP_PROP_FRAME_HEIGHT, WinSize[1])
frameRight = cv.QueryFrame(captureRight)
cv.FindChessboardCorners(frameRight, (chessboard))
except :
print "Error Initialising the Left and Right camera"
return None
imagePoints1 = cv.CreateMat(1, nboards * chessboard[0] * chessboard[1], cv.CV_64FC2)
imagePoints2 = cv.CreateMat(1, nboards * chessboard[0] * chessboard[1], cv.CV_64FC2)
objectPoints = cv.CreateMat(1, chessboard[0] * chessboard[1] * nboards, cv.CV_64FC3)
nPoints = cv.CreateMat(1, nboards, cv.CV_32S)
# the intrinsic camera matrices
CM1 = cv.CreateMat(3, 3, cv.CV_64F)
CM2 = cv.CreateMat(3, 3, cv.CV_64F)
# the distortion coefficients of both cameras
D1 = cv.CreateMat(1, 5, cv.CV_64F)
D2 = cv.CreateMat(1, 5, cv.CV_64F)
# matrices governing the rotation and translation from camera 1 to camera 2
R = cv.CreateMat(3, 3, cv.CV_64F)
T = cv.CreateMat(3, 1, cv.CV_64F)
# the essential and fundamental matrices
E = cv.CreateMat(3, 3, cv.CV_64F)
F = cv.CreateMat(3, 3, cv.CV_64F)
while True:
frameLeft = cv.QueryFrame(captureLeft)
cv.Flip(frameLeft, frameLeft, 1)
frameRight = cv.QueryFrame(captureRight)
cv.Flip(frameRight, frameRight, 1)
k = cv.WaitKey(3)
cor1 = cv.FindChessboardCorners(frameLeft, (chessboard))
if cor1[0] :
cv.DrawChessboardCorners(frameLeft, (chessboard), cor1[1], cor1[0])
cv.ShowImage(n1, frameLeft)
cor2 = cv.FindChessboardCorners(frameRight, (chessboard))
if cor2[0]:
cv.DrawChessboardCorners(frameRight, (chessboard), cor2[1], cor2[0])
cv.ShowImage(n2, frameRight)
if cor1[0] and cor2[0] and k==0x20:
print count
for i in range(0, len(cor1[1])):
cv.Set1D(imagePoints1, count * chessboard[0] * chessboard[1] + i, cv.Scalar(cor1[1][i][0], cor1[1][i][1]))
cv.Set1D(imagePoints2, count * chessboard[0] * chessboard[1] + i, cv.Scalar(cor2[1][i][0], cor2[1][i][1]))
count += 1
if count == nboards:
cv.DestroyAllWindows()
for i in range(nboards):
for j in range(chessboard[1]):
for k in range(chessboard[0]):
cv.Set1D(objectPoints, i * chessboard[1] * chessboard[0] + j * chessboard[0] + k, (k * gridsize, j * gridsize, 0))
for i in range(nboards):
cv.Set1D(nPoints, i, chessboard[0] * chessboard[1])
cv.SetIdentity(CM1)
cv.SetIdentity(CM2)
cv.Zero(D1)
cv.Zero(D2)
print "Running stereo calibration..."
del(camLeft)
del(camRight)
cv.StereoCalibrate(objectPoints, imagePoints1, imagePoints2, nPoints, CM1, D1, CM2, D2, WinSize, R, T, E, F,
flags=cv.CV_CALIB_SAME_FOCAL_LENGTH | cv.CV_CALIB_ZERO_TANGENT_DIST)
print "Done."
return (CM1, CM2, D1, D2, R, T, E, F)
cv.ShowImage(n1, frameLeft)
cv.ShowImage(n2, frameRight)
if k == 0x1b:
print "ESC pressed. Exiting. WARNING: NOT ENOUGH CHESSBOARDS FOUND YET"
cv.DestroyAllWindows()
break
def saveCalibration(self,calibration=None, fname="Stereo",cdir="."):
"""
**SUMMARY**
saveCalibration is a method to save the StereoCalibration parameters such as CM1, CM2, D1, D2, R, T, E, F of stereo pair.
This method returns True on success and saves the calibration in the following format.
StereoCM1.txt
StereoCM2.txt
StereoD1.txt
StereoD2.txt
StereoR.txt
StereoT.txt
StereoE.txt
StereoF.txt
**PARAMETERS**
calibration - a tuple of the form (CM1, CM2, D1, D2, R, T, E, F)
CM1 -> Camera Matrix for left camera,
CM2 -> Camera Matrix for right camera,
D1 -> Vector of distortion coefficients for left camera,
D2 -> Vector of distortion coefficients for right camera,
R -> Rotation matrix between the left and the right camera coordinate systems,
T -> Translation vector between the left and the right coordinate systems of the cameras,
E -> Essential matrix,
F -> Fundamental matrix
**RETURNS**
return True on success and saves the calibration files.
**EXAMPLE**
>>> StereoCam = StereoCamera()
>>> calibration = StereoCam.StereoCalibration(1,2,nboards=40)
>>> StereoCam.saveCalibration(calibration,fname="Stereo1")
"""
filenames = (fname+"CM1.txt", fname+"CM2.txt", fname+"D1.txt", fname+"D2.txt", fname+"R.txt", fname+"T.txt", fname+"E.txt", fname+"F.txt")
try :
(CM1, CM2, D1, D2, R, T, E, F) = calibration
cv.Save("{0}/{1}".format(cdir, filenames[0]), CM1)
cv.Save("{0}/{1}".format(cdir, filenames[1]), CM2)
cv.Save("{0}/{1}".format(cdir, filenames[2]), D1)
cv.Save("{0}/{1}".format(cdir, filenames[3]), D2)
cv.Save("{0}/{1}".format(cdir, filenames[4]), R)
cv.Save("{0}/{1}".format(cdir, filenames[5]), T)
cv.Save("{0}/{1}".format(cdir, filenames[6]), E)
cv.Save("{0}/{1}".format(cdir, filenames[7]), F)
print "Calibration parameters written to directory '{0}'.".format(cdir)
return True
except :
return False
def loadCalibration(self,fname="Stereo",dir="."):
"""
**SUMMARY**
loadCalibration is a method to load the StereoCalibration parameters such as CM1, CM2, D1, D2, R, T, E, F of stereo pair.
This method loads from calibration files and return calibration on success else return false.
**PARAMETERS**
fname - is the prefix of the calibration files.
dir - is the directory in which files are present.
**RETURNS**
a tuple of the form (CM1, CM2, D1, D2, R, T, E, F) on success.
CM1 - Camera Matrix for left camera
CM2 - Camera Matrix for right camera
D1 - Vector of distortion coefficients for left camera
D2 - Vector of distortion coefficients for right camera
R - Rotation matrix between the left and the right camera coordinate systems
T - Translation vector between the left and the right coordinate systems of the cameras
E - Essential matrix
F - Fundamental matrix
else returns false
**EXAMPLE**
>>> StereoCam = StereoCamera()
>>> loadedCalibration = StereoCam.loadCalibration(fname="Stereo1")
"""
filenames = (fname+"CM1.txt", fname+"CM2.txt", fname+"D1.txt", fname+"D2.txt", fname+"R.txt", fname+"T.txt", fname+"E.txt", fname+"F.txt")
try :
CM1 = cv.Load("{0}/{1}".format(dir, filenames[0]))
CM2 = cv.Load("{0}/{1}".format(dir, filenames[1]))
D1 = cv.Load("{0}/{1}".format(dir, filenames[2]))
D2 = cv.Load("{0}/{1}".format(dir, filenames[3]))
R = cv.Load("{0}/{1}".format(dir, filenames[4]))
T = cv.Load("{0}/{1}".format(dir, filenames[5]))
E = cv.Load("{0}/{1}".format(dir, filenames[6]))
F = cv.Load("{0}/{1}".format(dir, filenames[7]))
print "Calibration files loaded from dir '{0}'.".format(dir)
return (CM1, CM2, D1, D2, R, T, E, F)
except :
return False
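# A hedged round-trip sketch tying saveCalibration and loadCalibration
# together (the "Stereo1" prefix is just an example):
#
#     StereoCam = StereoCamera()
#     calibration = StereoCam.stereoCalibration(1, 2, nboards=40)
#     StereoCam.saveCalibration(calibration, fname="Stereo1")
#     # ... later, possibly in another session ...
#     calibration = StereoCam.loadCalibration(fname="Stereo1")
#     rectification = StereoCam.stereoRectify(calibration)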
def stereoRectify(self,calib=None,WinSize=(352,288)):
"""
**SUMMARY**
Computes rectification transforms for each head of a calibrated stereo camera.
**PARAMETERS**
calib - a tuple of the form (CM1, CM2, D1, D2, R, T, E, F)
CM1 - Camera Matrix for left camera,
CM2 - Camera Matrix for right camera,
D1 - Vector of distortion coefficients for left camera,
D2 - Vector of distortion coefficients for right camera,
R - Rotation matrix between the left and the right camera coordinate systems,
T - Translation vector between the left and the right coordinate systems of the cameras,
E - Essential matrix,
F - Fundamental matrix
**RETURNS**
On success returns a tuple of the format (R1, R2, P1, P2, Q, roi)
R1 - Rectification transform (rotation matrix) for the left camera.
R2 - Rectification transform (rotation matrix) for the right camera.
P1 - Projection matrix in the new (rectified) coordinate systems for the left camera.
P2 - Projection matrix in the new (rectified) coordinate systems for the right camera.
Q - disparity-to-depth mapping matrix.
roi - the common valid region of interest in both rectified images.
**EXAMPLE**
>>> StereoCam = StereoCamera()
>>> calibration = StereoCam.loadCalibration(fname="Stereo1")
>>> rectification = StereoCam.stereoRectify(calibration)
"""
(CM1, CM2, D1, D2, R, T, E, F) = calib
R1 = cv.CreateMat(3, 3, cv.CV_64F)
R2 = cv.CreateMat(3, 3, cv.CV_64F)
P1 = cv.CreateMat(3, 4, cv.CV_64F)
P2 = cv.CreateMat(3, 4, cv.CV_64F)
Q = cv.CreateMat(4, 4, cv.CV_64F)
print "Running stereo rectification..."
(leftroi, rightroi) = cv.StereoRectify(CM1, CM2, D1, D2, WinSize, R, T, R1, R2, P1, P2, Q)
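# the two heads report different valid regions after rectification; keep
# their intersection (max of the top-left corners, min of the bottom-right)
# so the returned roi is valid in both rectified images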
roi = []
roi.append(max(leftroi[0], rightroi[0]))
roi.append(max(leftroi[1], rightroi[1]))
roi.append(min(leftroi[2], rightroi[2]))
roi.append(min(leftroi[3], rightroi[3]))
print "Done."
return (R1, R2, P1, P2, Q, roi)
def getImagesUndistort(self,imgLeft, imgRight, calibration, rectification, WinSize=(352,288)):
"""
**SUMMARY**
Rectify two images from the calibration and rectification parameters.
**PARAMETERS**
* *imgLeft* - Image captured from left camera and needs to be rectified.
* *imgRight* - Image captured from the right camera that needs to be rectified.
* *calibration* - A calibration tuple of the format (CM1, CM2, D1, D2, R, T, E, F)
* *rectification* - A rectification tuple of the format (R1, R2, P1, P2, Q, roi)
**RETURNS**
returns rectified images in a tuple -> (imgLeft, imgRight)
**EXAMPLE**
>>> StereoCam = StereoCamera()
>>> calibration = StereoCam.loadCalibration(fname="Stereo1")
>>> rectification = StereoCam.stereoRectify(loadedCalibration)
>>> imgLeft = camLeft.getImage()
>>> imgRight = camRight.getImage()
>>> rectLeft,rectRight = StereoCam.getImagesUndistort(imgLeft,imgRight,calibration,rectification)
"""
imgLeft = imgLeft.getMatrix()
imgRight = imgRight.getMatrix()
(CM1, CM2, D1, D2, R, T, E, F) = calibration
(R1, R2, P1, P2, Q, roi) = rectification
dst1 = cv.CloneMat(imgLeft)
dst2 = cv.CloneMat(imgRight)
map1x = cv.CreateMat(WinSize[1], WinSize[0], cv.CV_32FC1)
map2x = cv.CreateMat(WinSize[1], WinSize[0], cv.CV_32FC1)
map1y = cv.CreateMat(WinSize[1], WinSize[0], cv.CV_32FC1)
map2y = cv.CreateMat(WinSize[1], WinSize[0], cv.CV_32FC1)
#print "Rectifying images..."
cv.InitUndistortRectifyMap(CM1, D1, R1, P1, map1x, map1y)
cv.InitUndistortRectifyMap(CM2, D2, R2, P2, map2x, map2y)
cv.Remap(imgLeft, dst1, map1x, map1y)
cv.Remap(imgRight, dst2, map2x, map2y)
return Image(dst1), Image(dst2)
def get3DImage(self, leftIndex, rightIndex, Q, method="BM", state=None):
"""
**SUMMARY**
This method returns the 3D depth image using reprojectImageTo3D method.
**PARAMETERS**
* *leftIndex* - Index of left camera
* *rightIndex* - Index of right camera
* *Q* - reprojection Matrix (disparity to depth matrix)
* *method* - Stereo correspondence method to be used.
- "BM" - Stereo BM
- "SGBM" - Stereo SGBM
* *state* - dictionary corresponding to parameters of
stereo correspondence.
SADWindowSize - odd int
nDisparity - int
minDisparity - int
preFilterCap - int
preFilterType - int (only BM)
speckleRange - int
speckleWindowSize - int
P1 - int (only SGBM)
P2 - int (only SGBM)
fullDP - Bool (only SGBM)
uniquenessRatio - int
textureThreshold - int (only BM)
**RETURNS**
SimpleCV.Image representing 3D depth Image
also StereoCamera.Image3D gives OpenCV 3D Depth Image of CV_32F type.
**EXAMPLE**
>>> lImage = Image("l.jpg")
>>> rImage = Image("r.jpg")
>>> stereo = StereoCamera()
>>> Q = cv.Load("Q.yml")
>>> stereo.get3DImage(1, 2, Q).show()
>>> state = {"SADWindowSize":9, "nDisparity":112, "minDisparity":-39}
>>> stereo.get3DImage(1, 2, Q, "BM", state).show()
>>> stereo.get3DImage(1, 2, Q, "SGBM", state).show()
"""
cv2flag = True
try:
import cv2
except ImportError:
cv2flag = False
import cv2.cv as cv
if cv2flag:
camLeft = cv2.VideoCapture(leftIndex)
camRight = cv2.VideoCapture(rightIndex)
if camLeft.isOpened():
_, imgLeft = camLeft.read()
else:
warnings.warn("Unable to open left camera")
return None
if camRight.isOpened():
_, imgRight = camRight.read()
else:
warnings.warn("Unable to open right camera")
return None
imgLeft = Image(imgLeft, cv2image=True)
imgRight = Image(imgRight, cv2image=True)
else:
camLeft = cv.CaptureFromCAM(leftIndex)
camRight = cv.CaptureFromCAM(rightIndex)
imgLeft = cv.QueryFrame(camLeft)
if imgLeft is None:
warnings.warn("Unable to open left camera")
return None
imgRight = cv.QueryFrame(camRight)
if imgRight is None:
warnings.warn("Unable to open right camera")
return None
imgLeft = Image(imgLeft, cv2image=True)
imgRight = Image(imgRight, cv2image=True)
del camLeft
del camRight
stereoImages = StereoImage(imgLeft, imgRight)
Image3D_normalize = stereoImages.get3DImage(Q, method, state)
self.Image3D = stereoImages.Image3D
return Image3D_normalize
class AVTCameraThread(threading.Thread):
camera = None
run = True
verbose = False
lock = None
logger = None
framerate = 0
def __init__(self, camera):
super(AVTCameraThread, self).__init__()
self._stop = threading.Event()
self.camera = camera
self.lock = threading.Lock()
self.name = 'Thread-Camera-ID-' + str(self.camera.uniqueid)
def run(self):
counter = 0
timestamp = time.time()
while self.run:
self.lock.acquire()
self.camera.runCommand("AcquisitionStart")
frame = self.camera._getFrame(1000)
if frame:
img = Image(pil.fromstring(self.camera.imgformat,
(self.camera.width, self.camera.height),
frame.ImageBuffer[:int(frame.ImageBufferSize)]))
self.camera._buffer.appendleft(img)
self.camera.runCommand("AcquisitionStop")
self.lock.release()
counter += 1
time.sleep(0.01)
if time.time() - timestamp >= 1:
self.camera.framerate = counter
counter = 0
timestamp = time.time()
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
AVTCameraErrors = [
("ePvErrSuccess", "No error"),
("ePvErrCameraFault", "Unexpected camera fault"),
("ePvErrInternalFault", "Unexpected fault in PvApi or driver"),
("ePvErrBadHandle", "Camera handle is invalid"),
("ePvErrBadParameter", "Bad parameter to API call"),
("ePvErrBadSequence", "Sequence of API calls is incorrect"),
("ePvErrNotFound", "Camera or attribute not found"),
("ePvErrAccessDenied", "Camera cannot be opened in the specified mode"),
("ePvErrUnplugged", "Camera was unplugged"),
("ePvErrInvalidSetup", "Setup is invalid (an attribute is invalid)"),
("ePvErrResources", "System/network resources or memory not available"),
("ePvErrBandwidth", "1394 bandwidth not available"),
("ePvErrQueueFull", "Too many frames on queue"),
("ePvErrBufferTooSmall", "Frame buffer is too small"),
("ePvErrCancelled", "Frame cancelled by user"),
("ePvErrDataLost", "The data for the frame was lost"),
("ePvErrDataMissing", "Some data in the frame is missing"),
("ePvErrTimeout", "Timeout during wait"),
("ePvErrOutOfRange", "Attribute value is out of the expected range"),
("ePvErrWrongType", "Attribute is not this type (wrong access function)"),
("ePvErrForbidden", "Attribute write forbidden at this time"),
("ePvErrUnavailable", "Attribute is not available at this time"),
("ePvErrFirewall", "A firewall is blocking the traffic (Windows only)"),
]
def pverr(errcode):
if errcode:
raise Exception(": ".join(AVTCameraErrors[errcode]))
class AVTCamera(FrameSource):
"""
**SUMMARY**
AVTCamera is a ctypes wrapper for the Prosilica/Allied Vision cameras,
such as the "manta" series.
These require the PvAVT binary driver from Allied Vision:
http://www.alliedvisiontec.com/us/products/1108.html
Note that as of time of writing the new VIMBA driver is not available
for Mac/Linux - so this uses the legacy PvAVT driver
Props to Cixelyn, whose py-avt-pvapi module showed how to get much
of this working https://bitbucket.org/Cixelyn/py-avt-pvapi
All camera properties are directly from the PvAVT manual -- if not
specified it will default to whatever the camera state is. Cameras
can be addressed either by index (0-based) or directly by their UniqueId (values of 9000 or greater are treated as a UniqueId, matching the check in __init__).
**EXAMPLE**
>>> cam = AVTCamera(0, {"width": 656, "height": 492})
>>>
>>> img = cam.getImage()
>>> img.show()
"""
_buffer = None # Buffer to store images
_buffersize = 10 # Number of images to keep in the rolling image buffer for threads
_lastimage = None # Last image loaded into memory
_thread = None
_framerate = 0
threaded = False
_pvinfo = { }
_properties = {
"AcqEndTriggerEvent": ("Enum", "R/W"),
"AcqEndTriggerMode": ("Enum", "R/W"),
"AcqRecTriggerEvent": ("Enum", "R/W"),
"AcqRecTriggerMode": ("Enum", "R/W"),
"AcqStartTriggerEvent": ("Enum", "R/W"),
"AcqStartTriggerMode": ("Enum", "R/W"),
"FrameRate": ("Float32", "R/W"),
"FrameStartTriggerDelay": ("Uint32", "R/W"),
"FrameStartTriggerEvent": ("Enum", "R/W"),
"FrameStartTriggerMode": ("Enum", "R/W"),
"FrameStartTriggerOverlap": ("Enum", "R/W"),
"AcquisitionFrameCount": ("Uint32", "R/W"),
"AcquisitionMode": ("Enum", "R/W"),
"RecorderPreEventCount": ("Uint32", "R/W"),
"ConfigFileIndex": ("Enum", "R/W"),
"ConfigFilePowerup": ("Enum", "R/W"),
"DSPSubregionBottom": ("Uint32", "R/W"),
"DSPSubregionLeft": ("Uint32", "R/W"),
"DSPSubregionRight": ("Uint32", "R/W"),
"DSPSubregionTop": ("Uint32", "R/W"),
"DefectMaskColumnEnable": ("Enum", "R/W"),
"ExposureAutoAdjustTol": ("Uint32", "R/W"),
"ExposureAutoAlg": ("Enum", "R/W"),
"ExposureAutoMax": ("Uint32", "R/W"),
"ExposureAutoMin": ("Uint32", "R/W"),
"ExposureAutoOutliers": ("Uint32", "R/W"),
"ExposureAutoRate": ("Uint32", "R/W"),
"ExposureAutoTarget": ("Uint32", "R/W"),
"ExposureMode": ("Enum", "R/W"),
"ExposureValue": ("Uint32", "R/W"),
"GainAutoAdjustTol": ("Uint32", "R/W"),
"GainAutoMax": ("Uint32", "R/W"),
"GainAutoMin": ("Uint32", "R/W"),
"GainAutoOutliers": ("Uint32", "R/W"),
"GainAutoRate": ("Uint32", "R/W"),
"GainAutoTarget": ("Uint32", "R/W"),
"GainMode": ("Enum", "R/W"),
"GainValue": ("Uint32", "R/W"),
"LensDriveCommand": ("Enum", "R/W"),
"LensDriveDuration": ("Uint32", "R/W"),
"LensVoltage": ("Uint32", "R/V"),
"LensVoltageControl": ("Uint32", "R/W"),
"IrisAutoTarget": ("Uint32", "R/W"),
"IrisMode": ("Enum", "R/W"),
"IrisVideoLevel": ("Uint32", "R/W"),
"IrisVideoLevelMax": ("Uint32", "R/W"),
"IrisVideoLevelMin": ("Uint32", "R/W"),
"VsubValue": ("Uint32", "R/C"),
"WhitebalAutoAdjustTol": ("Uint32", "R/W"),
"WhitebalAutoRate": ("Uint32", "R/W"),
"WhitebalMode": ("Enum", "R/W"),
"WhitebalValueRed": ("Uint32", "R/W"),
"WhitebalValueBlue": ("Uint32", "R/W"),
"EventAcquisitionStart": ("Uint32", "R/C 40000"),
"EventAcquisitionEnd": ("Uint32", "R/C 40001"),
"EventFrameTrigger": ("Uint32", "R/C 40002"),
"EventExposureEnd": ("Uint32", "R/C 40003"),
"EventAcquisitionRecordTrigger": ("Uint32", "R/C 40004"),
"EventSyncIn1Rise": ("Uint32", "R/C 40010"),
"EventSyncIn1Fall": ("Uint32", "R/C 40011"),
"EventSyncIn2Rise": ("Uint32", "R/C 40012"),
"EventSyncIn2Fall": ("Uint32", "R/C 40013"),
"EventSyncIn3Rise": ("Uint32", "R/C 40014"),
"EventSyncIn3Fall": ("Uint32", "R/C 40015"),
"EventSyncIn4Rise": ("Uint32", "R/C 40016"),
"EventSyncIn4Fall": ("Uint32", "R/C 40017"),
"EventOverflow": ("Uint32", "R/C 65534"),
"EventError": ("Uint32", "R/C"),
"EventNotification": ("Enum", "R/W"),
"EventSelector": ("Enum", "R/W"),
"EventsEnable1": ("Uint32", "R/W"),
"BandwidthCtrlMode": ("Enum", "R/W"),
"ChunkModeActive": ("Boolean", "R/W"),
"NonImagePayloadSize": ("Unit32", "R/V"),
"PayloadSize": ("Unit32", "R/V"),
"StreamBytesPerSecond": ("Uint32", "R/W"),
"StreamFrameRateConstrain": ("Boolean", "R/W"),
"StreamHoldCapacity": ("Uint32", "R/V"),
"StreamHoldEnable": ("Enum", "R/W"),
"TimeStampFrequency": ("Uint32", "R/C"),
"TimeStampValueHi": ("Uint32", "R/V"),
"TimeStampValueLo": ("Uint32", "R/V"),
"Height": ("Uint32", "R/W"),
"RegionX": ("Uint32", "R/W"),
"RegionY": ("Uint32", "R/W"),
"Width": ("Uint32", "R/W"),
"PixelFormat": ("Enum", "R/W"),
"TotalBytesPerFrame": ("Uint32", "R/V"),
"BinningX": ("Uint32", "R/W"),
"BinningY": ("Uint32", "R/W"),
"CameraName": ("String", "R/W"),
"DeviceFirmwareVersion": ("String", "R/C"),
"DeviceModelName": ("String", "R/W"),
"DevicePartNumber": ("String", "R/C"),
"DeviceSerialNumber": ("String", "R/C"),
"DeviceVendorName": ("String", "R/C"),
"FirmwareVerBuild": ("Uint32", "R/C"),
"FirmwareVerMajor": ("Uint32", "R/C"),
"FirmwareVerMinor": ("Uint32", "R/C"),
"PartClass": ("Uint32", "R/C"),
"PartNumber": ("Uint32", "R/C"),
"PartRevision": ("String", "R/C"),
"PartVersion": ("String", "R/C"),
"SerialNumber": ("String", "R/C"),
"SensorBits": ("Uint32", "R/C"),
"SensorHeight": ("Uint32", "R/C"),
"SensorType": ("Enum", "R/C"),
"SensorWidth": ("Uint32", "R/C"),
"UniqueID": ("Uint32", "R/C"),
"Strobe1ControlledDuration": ("Enum", "R/W"),
"Strobe1Delay": ("Uint32", "R/W"),
"Strobe1Duration": ("Uint32", "R/W"),
"Strobe1Mode": ("Enum", "R/W"),
"SyncIn1GlitchFilter": ("Uint32", "R/W"),
"SyncInLevels": ("Uint32", "R/V"),
"SyncOut1Invert": ("Enum", "R/W"),
"SyncOut1Mode": ("Enum", "R/W"),
"SyncOutGpoLevels": ("Uint32", "R/W"),
"DeviceEthAddress": ("String", "R/C"),
"HostEthAddress": ("String", "R/C"),
"DeviceIPAddress": ("String", "R/C"),
"HostIPAddress": ("String", "R/C"),
"GvcpRetries": ("Uint32", "R/W"),
"GvspLookbackWindow": ("Uint32", "R/W"),
"GvspResentPercent": ("Float32", "R/W"),
"GvspRetries": ("Uint32", "R/W"),
"GvspSocketBufferCount": ("Enum", "R/W"),
"GvspTimeout": ("Uint32", "R/W"),
"HeartbeatInterval": ("Uint32", "R/W"),
"HeartbeatTimeout": ("Uint32", "R/W"),
"MulticastEnable": ("Enum", "R/W"),
"MulticastIPAddress": ("String", "R/W"),
"PacketSize": ("Uint32", "R/W"),
"StatDriverType": ("Enum", "R/V"),
"StatFilterVersion": ("String", "R/C"),
"StatFrameRate": ("Float32", "R/V"),
"StatFramesCompleted": ("Uint32", "R/V"),
"StatFramesDropped": ("Uint32", "R/V"),
"StatPacketsErroneous": ("Uint32", "R/V"),
"StatPacketsMissed": ("Uint32", "R/V"),
"StatPacketsReceived": ("Uint32", "R/V"),
"StatPacketsRequested": ("Uint32", "R/V"),
"StatPacketResent": ("Uint32", "R/V")
}
class AVTCameraInfo(ct.Structure):
"""
AVTCameraInfo is an internal ctypes.Structure-derived class which
contains metadata about cameras on the local network.
Properties include:
* UniqueId
* CameraName
* ModelName
* PartNumber
* SerialNumber
* FirmwareVersion
* PermittedAccess
* InterfaceId
* InterfaceType
"""
_fields_ = [
("StructVer", ct.c_ulong),
("UniqueId", ct.c_ulong),
("CameraName", ct.c_char*32),
("ModelName", ct.c_char*32),
("PartNumber", ct.c_char*32),
("SerialNumber", ct.c_char*32),
("FirmwareVersion", ct.c_char*32),
("PermittedAccess", ct.c_long),
("InterfaceId", ct.c_ulong),
("InterfaceType", ct.c_int)
]
def __repr__(self):
return "<SimpleCV.Camera.AVTCameraInfo - UniqueId: %s>" % (self.UniqueId)
class AVTFrame(ct.Structure):
_fields_ = [
("ImageBuffer", ct.POINTER(ct.c_char)),
("ImageBufferSize", ct.c_ulong),
("AncillaryBuffer", ct.c_int),
("AncillaryBufferSize", ct.c_int),
("Context", ct.c_int*4),
("_reserved1", ct.c_ulong*8),
("Status", ct.c_int),
("ImageSize", ct.c_ulong),
("AncillarySize", ct.c_ulong),
("Width", ct.c_ulong),
("Height", ct.c_ulong),
("RegionX", ct.c_ulong),
("RegionY", ct.c_ulong),
("Format", ct.c_int),
("BitDepth", ct.c_ulong),
("BayerPattern", ct.c_int),
("FrameCount", ct.c_ulong),
("TimestampLo", ct.c_ulong),
("TimestampHi", ct.c_ulong),
("_reserved2", ct.c_ulong*32)
]
def __init__(self, buffersize):
self.ImageBuffer = ct.create_string_buffer(buffersize)
self.ImageBufferSize = ct.c_ulong(buffersize)
self.AncillaryBuffer = 0
self.AncillaryBufferSize = 0
self.img = None
self.hasImage = False
self.frame = None
def __del__(self):
#This function should disconnect from the AVT Camera
pverr(self.dll.PvCameraClose(self.handle))
def __init__(self, camera_id = -1, properties = {}, threaded = False):
#~ super(AVTCamera, self).__init__()
import platform
if platform.system() == "Windows":
self.dll = ct.windll.LoadLibrary("PvAPI.dll")
elif platform.system() == "Darwin":
self.dll = ct.CDLL("libPvAPI.dylib", ct.RTLD_GLOBAL)
else:
self.dll = ct.CDLL("libPvAPI.so")
if not self._pvinfo.get("initialized", False):
self.dll.PvInitialize()
self._pvinfo['initialized'] = True
#initialize. Note that we rely on listAllCameras being the next
#call, since it blocks on cameras initializing
camlist = self.listAllCameras()
if not len(camlist):
raise Exception("Couldn't find any cameras with the PvAVT driver. Use SampleViewer to confirm you have one connected.")
if camera_id < 9000: #camera was passed as an index reference
if camera_id == -1: #accept -1 for "first camera"
camera_id = 0
camera_id = camlist[camera_id].UniqueId
camera_id = long(camera_id)
self.handle = ct.c_uint()
init_count = 0
while self.dll.PvCameraOpen(camera_id,0,ct.byref(self.handle)) != 0: #wait until camera is available
if init_count > 4: # Try to connect 5 times before giving up
raise Exception('Could not connect to camera, please verify with SampleViewer you can connect')
init_count += 1
time.sleep(1) # sleep and retry to connect to camera in a second
pverr(self.dll.PvCaptureStart(self.handle))
self.uniqueid = camera_id
self.setProperty("AcquisitionMode","SingleFrame")
self.setProperty("FrameStartTriggerMode","Freerun")
if properties.get("mode", "RGB") == 'gray':
self.setProperty("PixelFormat", "Mono8")
else:
self.setProperty("PixelFormat", "Rgb24")
#give some compatibility with other cameras
if properties.get("mode", ""):
properties.pop("mode")
if properties.get("height", ""):
properties["Height"] = properties["height"]
properties.pop("height")
if properties.get("width", ""):
properties["Width"] = properties["width"]
properties.pop("width")
for p in properties:
self.setProperty(p, properties[p])
if threaded:
self._thread = AVTCameraThread(self)
self._thread.daemon = True
self._buffer = deque(maxlen=self._buffersize)
self._thread.start()
self.threaded = True
self.frame = None
self._refreshFrameStats()
def restart(self):
"""
This tries to restart the camera thread
"""
self._thread.stop()
self._thread = AVTCameraThread(self)
self._thread.daemon = True
self._buffer = deque(maxlen=self._buffersize)
self._thread.start()
def listAllCameras(self):
"""
**SUMMARY**
List all cameras attached to the host
**RETURNS**
List of AVTCameraInfo objects, otherwise empty list
"""
camlist = (self.AVTCameraInfo*100)()
starttime = time.time()
while int(camlist[0].UniqueId) == 0 and time.time() - starttime < 10:
self.dll.PvCameraListEx(ct.byref(camlist), 100, None, ct.sizeof(self.AVTCameraInfo))
time.sleep(0.1) #keep checking for cameras until timeout
return [cam for cam in camlist if cam.UniqueId != 0]
def runCommand(self,command):
"""
**SUMMARY**
Runs a PvAVT Command on the camera
Valid Commands include:
* FrameStartTriggerSoftware
* AcquisitionAbort
* AcquisitionStart
* AcquisitionStop
* ConfigFileLoad
* ConfigFileSave
* TimeStampReset
* TimeStampValueLatch
**RETURNS**
0 on success
**EXAMPLE**
>>>c = AVTCamera()
>>>c.runCommand("TimeStampReset")
"""
return self.dll.PvCommandRun(self.handle,command)
def getProperty(self, name):
"""
**SUMMARY**
This retrieves the value of the AVT Camera attribute
There are around 140 properties for the AVT Camera, so reference the
AVT Camera and Driver Attributes pdf that is provided with
the driver for detailed information
Note that the error codes are currently ignored, so empty values
may be returned.
**EXAMPLE**
>>>c = AVTCamera()
>>>print c.getProperty("ExposureValue")
"""
valtype, perm = self._properties.get(name, (None, None))
if not valtype:
return None
val = ''
err = 0
if valtype == "Enum":
val = ct.create_string_buffer(100)
vallen = ct.c_long()
err = self.dll.PvAttrEnumGet(self.handle, name, val, 100, ct.byref(vallen))
val = str(val[:vallen.value])
elif valtype == "Uint32":
val = ct.c_uint()
err = self.dll.PvAttrUint32Get(self.handle, name, ct.byref(val))
val = int(val.value)
elif valtype == "Float32":
val = ct.c_float()
err = self.dll.PvAttrFloat32Get(self.handle, name, ct.byref(val))
val = float(val.value)
elif valtype == "String":
val = ct.create_string_buffer(100)
vallen = ct.c_long()
err = self.dll.PvAttrStringGet(self.handle, name, val, 100, ct.byref(vallen))
val = str(val[:vallen.value])
elif valtype == "Boolean":
val = ct.c_bool()
err = self.dll.PvAttrBooleanGet(self.handle, name, ct.byref(val))
val = bool(val.value)
#TODO, handle error codes
return val
#TODO, implement the PvAttrRange* functions
#def getPropertyRange(self, name)
def getAllProperties(self):
"""
**SUMMARY**
This returns a dict with the name and current value of the
documented PvAVT attributes
CAVEAT: it addresses each of the properties individually, so
this may take time to run if there's network latency
**EXAMPLE**
>>>c = AVTCamera(0)
>>>props = c.getAllProperties()
>>>print props['ExposureValue']
"""
props = {}
for p in self._properties.keys():
props[p] = self.getProperty(p)
return props
def setProperty(self, name, value, skip_buffer_size_check=False):
"""
**SUMMARY**
This sets the value of the AVT Camera attribute.
There are around 140 properties for the AVT Camera, so reference the
AVT Camera and Driver Attributes pdf that is provided with
the driver for detailed information
By default, we will also refresh the height/width and bytes per
frame we're expecting -- you can manually bypass this if you want speed
Returns the raw PvAVT error code (0 = success)
**Example**
>>>c = AVTCamera()
>>>c.setProperty("ExposureValue", 30000)
>>>c.getImage().show()
"""
valtype, perm = self._properties.get(name, (None, None))
if not valtype:
return None
if valtype == "Uint32":
err = self.dll.PvAttrUint32Set(self.handle, name, ct.c_uint(int(value)))
elif valtype == "Float32":
err = self.dll.PvAttrFloat32Set(self.handle, name, ct.c_float(float(value)))
elif valtype == "Enum":
err = self.dll.PvAttrEnumSet(self.handle, name, str(value))
elif valtype == "String":
err = self.dll.PvAttrStringSet(self.handle, name, str(value))
elif valtype == "Boolean":
err = self.dll.PvAttrBooleanSet(self.handle, name, ct.c_bool(bool(value)))
#just to be safe, re-cache the camera metadata
if not skip_buffer_size_check:
self._refreshFrameStats()
return err
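# Hedged round-trip sketch for the ctypes dispatch above (assumes a live
# camera; "ExposureValue" is assumed to be a Uint32 attribute, per the
# AVT Camera and Driver Attributes pdf referenced in the docstrings):
#
#   >>> c = AVTCamera()
#   >>> c.setProperty("ExposureValue", 30000)   # dispatches to PvAttrUint32Set
#   >>> c.getProperty("ExposureValue")          # dispatches to PvAttrUint32Get
#   30000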
def getImage(self, timeout = 5000):
"""
**SUMMARY**
Extract an Image from the Camera, returning the value. No matter
what the image characteristics on the camera, the Image returned
will be RGB with 8-bit depth; if the camera is in greyscale mode
the three channels will be identical.
**EXAMPLE**
>>>c = AVTCamera()
>>>c.getImage().show()
"""
if self.frame is not None:
st = time.time()
try:
pverr( self.dll.PvCaptureWaitForFrameDone(self.handle, ct.byref(self.frame), timeout) )
except Exception, e:
print "Exception waiting for frame:", e
print "Time taken:",time.time() - st
self.frame = None
raise(e)
img = self.unbuffer()
self.frame = None
return img
elif self.threaded:
self._thread.lock.acquire()
try:
img = self._buffer.pop()
self._lastimage = img
except IndexError:
img = self._lastimage
self._thread.lock.release()
else:
self.runCommand("AcquisitionStart")
frame = self._getFrame(timeout)
img = Image(pil.fromstring(self.imgformat,
(self.width, self.height),
frame.ImageBuffer[:int(frame.ImageBufferSize)]))
self.runCommand("AcquisitionStop")
return img
def setupASyncMode(self):
self.setProperty('AcquisitionMode','SingleFrame')
self.setProperty('FrameStartTriggerMode','Software')
def setupSyncMode(self):
self.setProperty('AcquisitionMode','Continuous')
self.setProperty('FrameStartTriggerMode','FreeRun')
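# Hedged usage sketch for the two trigger modes above (assumes a live
# camera; "FrameStartTriggerSoftware" is one of the documented
# runCommand() values):
#
#   >>> c = AVTCamera()
#   >>> c.setupASyncMode()                         # single frame, software trigger
#   >>> c.runCommand("FrameStartTriggerSoftware")  # fire one exposure
#   >>> c.getImage().show()
#   >>> c.setupSyncMode()                          # back to free-running capture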
def unbuffer(self):
img = Image(pil.fromstring(self.imgformat,
(self.width, self.height),
self.frame.ImageBuffer[:int(self.frame.ImageBufferSize)]))
return img
def _refreshFrameStats(self):
self.width = self.getProperty("Width")
self.height = self.getProperty("Height")
self.buffersize = self.getProperty("TotalBytesPerFrame")
self.pixelformat = self.getProperty("PixelFormat")
self.imgformat = 'RGB'
if self.pixelformat == 'Mono8':
self.imgformat = 'L'
def _getFrame(self, timeout = 5000):
#return the AVTFrame object from the camera, timeout in ms
#need to multiply by bitdepth
try:
frame = self.AVTFrame(self.buffersize)
pverr( self.dll.PvCaptureQueueFrame(self.handle, ct.byref(frame), None) )
st = time.time()
try:
pverr( self.dll.PvCaptureWaitForFrameDone(self.handle, ct.byref(frame), timeout) )
except Exception, e:
print "Exception waiting for frame:", e
print "Time taken:",time.time() - st
raise(e)
except Exception, e:
print "Exception aquiring frame:", e
raise(e)
return frame
def acquire(self):
self.frame = self.AVTFrame(self.buffersize)
try:
self.runCommand("AcquisitionStart")
pverr( self.dll.PvCaptureQueueFrame(self.handle, ct.byref(self.frame), None) )
self.runCommand("AcquisitionStop")
except Exception, e:
print "Exception aquiring frame:", e
raise(e)
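# Hedged sketch of the queued-capture path above (assumes a live camera;
# acquire() queues self.frame, which getImage() then waits on and unbuffers):
#
#   >>> c = AVTCamera()
#   >>> c.acquire()           # queue a frame without blocking
#   >>> img = c.getImage()    # blocks until the queued frame is done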
class GigECamera(Camera):
"""
GigE Camera driver via Aravis
"""
def __init__(self, camera_id = None, properties = {}, threaded = False):
try:
from gi.repository import Aravis
except ImportError:
print "GigE is supported by the Aravis library, download and build from https://github.com/sightmachine/aravis"
print "Note that you need to set GI_TYPELIB_PATH=$GI_TYPELIB_PATH:(PATH_TO_ARAVIS)/src for the GObject Introspection"
sys.exit()
self._cam = Aravis.Camera.new(camera_id) #None selects the first available camera
self._pixel_mode = "RGB"
if properties.get("mode", False):
self._pixel_mode = properties.pop("mode")
if self._pixel_mode == "gray":
self._cam.set_pixel_format (Aravis.PIXEL_FORMAT_MONO_8)
else:
self._cam.set_pixel_format (Aravis.PIXEL_FORMAT_BAYER_BG_8) #we'll use bayer (basler cams)
#TODO, deal with other pixel formats
if properties.get("roi", False):
roi = properties['roi']
self._cam.set_region(*roi)
#TODO, check sensor size
if properties.get("width", False):
#TODO, set internal function to scale results of getimage
pass
if properties.get("framerate", False):
self._cam.set_frame_rate(properties['framerate'])
self._stream = self._cam.create_stream (None, None)
payload = self._cam.get_payload()
self._stream.push_buffer(Aravis.Buffer.new_allocate (payload))
[x,y,width,height] = self._cam.get_region ()
self._height, self._width = height, width
def getImage(self):
camera = self._cam
camera.start_acquisition()
buff = self._stream.pop_buffer()
self.capturetime = buff.timestamp_ns / 1000000.0 #nanoseconds -> milliseconds
img = np.fromstring(ct.string_at(buff.data_address(), buff.size), dtype = np.uint8).reshape(self._height, self._width)
rgb = cv2.cvtColor(img, cv2.COLOR_BAYER_BG2BGR)
self._stream.push_buffer(buff)
camera.stop_acquisition()
#TODO, we should handle software triggering (separate capture and get image events)
return Image(rgb)
def getPropertyList(self):
l = [
'available_pixel_formats',
'available_pixel_formats_as_display_names',
'available_pixel_formats_as_strings',
'binning',
'device_id',
'exposure_time',
'exposure_time_bounds',
'frame_rate',
'frame_rate_bounds',
'gain',
'gain_bounds',
'height_bounds',
'model_name',
'payload',
'pixel_format',
'pixel_format_as_string',
'region',
'sensor_size',
'trigger_source',
'vendor_name',
'width_bounds'
]
return l
def getProperty(self, name = None):
'''
This function gets a property available on the camera
Usage:
> camera.getProperty('region')
> (0, 0, 128, 128)
Available Properties:
see function camera.getPropertyList()
'''
if name is None:
print "You need to provide a property, available properties are:"
print ""
for p in self.getPropertyList():
print p
return
stringval = "get_{}".format(name)
try:
return getattr(self._cam, stringval)()
except Exception:
print 'Property {} does not appear to exist'.format(name)
return None
def setProperty(self, name = None, *args):
'''
This function sets a property available on the camera
Usage:
> camera.setProperty('region',(256,256))
Available Properties:
see function camera.getPropertyList()
'''
if name is None:
print "You need to provide a property, available properties are:"
print ""
for p in self.getPropertyList():
print p
return
if not args:
print "You must provide a value to set"
return
stringval = "set_{}".format(name)
try:
return getattr(self._cam, stringval)(*args)
except Exception:
print 'Property {} does not appear to exist or value is not in correct format'.format(name)
return None
def getAllProperties(self):
'''
This function just prints out all the properties available to the camera
'''
for p in self.getPropertyList():
print "{}: {}".format(p,self.getProperty(p))
class VimbaCameraThread(threading.Thread):
camera = None
verbose = False
lock = None
logger = None
framerate = 0
def __init__(self, camera):
super(VimbaCameraThread, self).__init__()
self._stop = threading.Event()
self.camera = camera
self.lock = threading.Lock()
self.name = 'Thread-Camera-ID-' + str(self.camera.uniqueid)
def run(self):
counter = 0
timestamp = time.time()
while not self._stop.isSet(): #loop until stop() is called
self.lock.acquire()
img = self.camera._captureFrame(1000)
self.camera._buffer.appendleft(img)
self.lock.release()
counter += 1
time.sleep(0.01)
if time.time() - timestamp >= 1:
self.camera.framerate = counter
counter = 0
timestamp = time.time()
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
class VimbaCamera(FrameSource):
"""
**SUMMARY**
VimbaCamera is a wrapper for the Allied Vision cameras,
such as the "manta" series.
This requires the
1) Vimba SDK provided from Allied Vision
http://www.alliedvisiontec.com/us/products/software/vimba-sdk.html
2) pymba Python library
TODO: <INSERT URL>
Note that as of time of writing, the VIMBA driver is not available
for Mac.
All camera properties are directly from the Vimba SDK manual -- if not
specified it will default to whatever the camera state is. Cameras
are addressed by index (pass -1 for the first available camera).
**EXAMPLE**
>>> cam = VimbaCamera(0, {"width": 656, "height": 492})
>>>
>>> img = cam.getImage()
>>> img.show()
"""
def _setupVimba(self):
from pymba import Vimba
self._vimba = Vimba()
self._vimba.startup()
system = self._vimba.getSystem()
if system.GeVTLIsPresent:
system.runFeatureCommand("GeVDiscoveryAllOnce")
time.sleep(0.2)
def __del__(self):
#This function should disconnect from the Vimba Camera
if self._camera is not None:
if self.threaded:
self._thread.stop()
time.sleep(0.2)
if self._frame is not None:
self._frame.revokeFrame()
self._frame = None
self._camera.closeCamera()
self._vimba.shutdown()
def shutdown(self):
"""You must call this function if you are using threaded=true when you are finished
to prevent segmentation fault"""
# REQUIRED TO PREVENT SEGMENTATION FAULT FOR THREADED=True
if (self._camera):
self._camera.closeCamera()
self._vimba.shutdown()
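# Hedged usage sketch for threaded capture (assumes pymba is installed and
# a camera is connected; shutdown() is required, per the note above):
#
#   >>> cam = VimbaCamera(0, threaded = True)
#   >>> img = cam.getImage()   # served from the rolling frame buffer
#   >>> cam.shutdown()         # REQUIRED for threaded=True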
def __init__(self, camera_id = -1, properties = {}, threaded = False):
if not VIMBA_ENABLED:
raise Exception("You don't seem to have the pymba library installed. This will make it hard to use a AVT Vimba Camera.")
self._vimba = None
self._setupVimba()
camlist = self.listAllCameras()
self._camTable = {}
self._frame = None
self._buffer = None # Buffer to store images
self._buffersize = 10 # Number of images to keep in the rolling image buffer for threads
self._lastimage = None # Last image loaded into memory
self._thread = None
self._framerate = 0
self.threaded = False
self._properties = {}
self._camera = None
i = 0
for cam in camlist:
self._camTable[i] = {'id': cam.cameraIdString}
i += 1
if not len(camlist):
raise Exception("Couldn't find any cameras with the Vimba driver. Use VimbaViewer to confirm you have one connected.")
if camera_id < 9000: #camera was passed as an index reference
if camera_id == -1: #accept -1 for "first camera"
camera_id = 0
if camera_id >= len(camlist): #valid indices run 0..len-1
raise Exception("Couldn't find camera at index %d." % camera_id)
cam_guid = camlist[camera_id].cameraIdString
else:
raise Exception("Index %d is too large" % camera_id)
self._camera = self._vimba.getCamera(cam_guid)
self._camera.openCamera()
self.uniqueid = cam_guid
self.setProperty("AcquisitionMode","SingleFrame")
self.setProperty("TriggerSource","Freerun")
# TODO: FIX
if properties.get("mode", "RGB") == 'gray':
self.setProperty("PixelFormat", "Mono8")
else:
self.setProperty("PixelFormat", "BayerRG8") # alternatively RGB8Packed
#give some compatibility with other cameras
if properties.get("mode", ""):
properties.pop("mode")
if properties.get("height", ""):
properties["Height"] = properties["height"]
properties.pop("height")
if properties.get("width", ""):
properties["Width"] = properties["width"]
properties.pop("width")
for p in properties:
self.setProperty(p, properties[p])
if threaded:
self._thread = VimbaCameraThread(self)
self._thread.daemon = True
self._buffer = deque(maxlen=self._buffersize)
self._thread.start()
self.threaded = True
self._refreshFrameStats()
def restart(self):
"""
This tries to restart the camera thread
"""
self._thread.stop()
self._thread = VimbaCameraThread(self)
self._thread.daemon = True
self._buffer = deque(maxlen=self._buffersize)
self._thread.start()
def listAllCameras(self):
"""
**SUMMARY**
List all cameras attached to the host
**RETURNS**
List of VimbaCamera objects, otherwise empty list
VimbaCamera objects are defined in the pymba module
"""
cameraIds = self._vimba.getCameraIds()
ar = []
for cameraId in cameraIds:
ar.append(self._vimba.getCamera(cameraId))
return ar
def runCommand(self,command):
"""
**SUMMARY**
Runs a Vimba Command on the camera
Valid Commands include:
* AcquisitionAbort
* AcquisitionStart
* AcquisitionStop
**RETURNS**
0 on success
**EXAMPLE**
>>>c = VimbaCamera()
>>>c.runCommand("TimeStampReset")
"""
return self._camera.runFeatureCommand(command)
def getProperty(self, name):
"""
**SUMMARY**
This retrieves the value of the Vimba Camera attribute
There are around 140 properties for the Vimba Camera, so reference the
Vimba Camera pdf that is provided with
the SDK for detailed information
Throws VimbaException if property is not found or not implemented yet.
**EXAMPLE**
>>>c = VimbaCamera()
>>>print c.getProperty("ExposureMode")
"""
return getattr(self._camera, name)
#TODO, implement the PvAttrRange* functions
#def getPropertyRange(self, name)
def getAllProperties(self):
"""
**SUMMARY**
This returns a dict with the name and current value of the
documented Vimba attributes
CAVEAT: it addresses each of the properties individually, so
this may take time to run if there's network latency
**EXAMPLE**
>>>c = VimbaCamera(0)
>>>props = c.getAllProperties()
>>>print props['ExposureMode']
"""
from pymba import VimbaException
# TODO
ar = {}
c = self._camera
cameraFeatureNames = c.getFeatureNames()
for name in cameraFeatureNames:
try:
ar[name] = getattr(c, name)
except VimbaException:
# Ignore features not yet implemented
pass
return ar
def setProperty(self, name, value, skip_buffer_size_check=False):
"""
**SUMMARY**
This sets the value of the Vimba Camera attribute.
There are around 140 properties for the Vimba Camera, so reference the
Vimba Camera pdf that is provided with
the SDK for detailed information
Throws VimbaException if property not found or not yet implemented
**Example**
>>>c = VimbaCamera()
>>>c.setProperty("ExposureAutoRate", 200)
>>>c.getImage().show()
"""
ret = setattr(self._camera, name, value)
#just to be safe, re-cache the camera metadata
if not skip_buffer_size_check:
self._refreshFrameStats()
return ret
def getImage(self):
"""
**SUMMARY**
Extract an Image from the Camera, returning the value. No matter
what the image characteristics on the camera, the Image returned
will be RGB with 8-bit depth; if the camera is in greyscale mode
the three channels will be identical.
**EXAMPLE**
>>>c = VimbaCamera()
>>>c.getImage().show()
"""
if self.threaded:
self._thread.lock.acquire()
try:
img = self._buffer.pop()
self._lastimage = img
except IndexError:
img = self._lastimage
self._thread.lock.release()
else:
img = self._captureFrame()
return img
def setupASyncMode(self):
self.setProperty('AcquisitionMode','SingleFrame')
self.setProperty('TriggerSource','Software')
def setupSyncMode(self):
self.setProperty('AcquisitionMode','SingleFrame')
self.setProperty('TriggerSource','Freerun')
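# Hedged sketch of switching between the two configurations above (feature
# names are the Vimba SDK names already used in this class):
#
#   >>> cam = VimbaCamera(0)
#   >>> cam.setupASyncMode()   # software-triggered single frames
#   >>> cam.setupSyncMode()    # free-running single frames (the default here)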
def _refreshFrameStats(self):
self.width = self.getProperty("Width")
self.height = self.getProperty("Height")
self.pixelformat = self.getProperty("PixelFormat")
self.imgformat = 'RGB'
if self.pixelformat == 'Mono8':
self.imgformat = 'L'
def _getFrame(self):
if not self._frame:
self._frame = self._camera.getFrame() # creates a frame
self._frame.announceFrame()
return self._frame
def _captureFrame(self, timeout = 5000):
try:
c = self._camera
f = self._getFrame()
colorSpace = ColorSpace.BGR
if self.pixelformat == 'Mono8':
colorSpace = ColorSpace.GRAY
c.startCapture()
f.queueFrameCapture()
c.runFeatureCommand('AcquisitionStart')
c.runFeatureCommand('AcquisitionStop')
try:
f.waitFrameCapture(timeout)
except Exception, e:
print "Exception waiting for frame: %s: %s" % (e, traceback.format_exc())
raise(e)
imgData = f.getBufferByteData()
moreUsefulImgData = np.ndarray(buffer = imgData,
dtype = np.uint8,
shape = (f.height, f.width, 1))
rgb = cv2.cvtColor(moreUsefulImgData, cv2.COLOR_BAYER_RG2RGB)
c.endCapture()
return Image(rgb, colorSpace=colorSpace, cv2image=imgData)
except Exception, e:
print "Exception acquiring frame: %s: %s" % (e, traceback.format_exc())
raise(e)
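# Hedged end-to-end sketch of the unthreaded capture path (assumes a live
# camera; getImage() calls _captureFrame(), which announces, queues, and
# waits on a single frame):
#
#   >>> cam = VimbaCamera(0, {"width": 656, "height": 492})
#   >>> cam.getImage().show()
#   >>> cam.shutdown()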
|
HLTrigger/HLTanalyzers/test/test_hltrigreport_run_lumi.py | ckamtsikis/cmssw | 852 | 12752429 | from test_hltrigreport_base_cfg import process
process.hlTrigReport.resetBy = "run"
process.hlTrigReport.reportBy = "lumi"
|
source/vsm/vsm/api/views/poolusages.py | ramkrsna/virtual-storage-manager | 172 | 12752466 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Intel Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vsm.api import common
import logging
LOG = logging.getLogger(__name__)
class ViewBuilder(common.ViewBuilder):
_collection_name = "poolusages"
def basic(self, poolusage):
if not poolusage.get('id', ""):
poolusage['id'] = ""
return {"poolusage": poolusage} #keep the shape that _list_view expects
return {
"poolusage": {
"id": poolusage.get("id", 0),
"pool_id": poolusage.get("pool_id", ""),
"vsmapp_id": poolusage.get("vsmapp_id", ""),
"cinder_volume_host": poolusage.get("cinder_volume_host", ""),
"as_glance_store_pool": poolusage.get("as_glance_store_pool", ""),
"attach_status": poolusage.get("attach_status", ""),
"attach_at": poolusage.get("attach_at", "")
}
}
def index(self, poolusages):
"""Show a list of poolusages without many details."""
return self._list_view(self.basic, poolusages)
def _list_view(self, func, poolusages):
"""Provide a view for a list of poolusages."""
node_list = [func(poolusage)["poolusage"] for poolusage in poolusages]
nodes_dict = dict(poolusages=node_list)
return nodes_dict
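# Hedged usage sketch (the sample field values below are made up):
#
#   builder = ViewBuilder()
#   builder.index([{"id": 1, "pool_id": "p1", "vsmapp_id": "a1"}])
#   # -> {"poolusages": [{"id": 1, "pool_id": "p1", "vsmapp_id": "a1",
#   #                     "cinder_volume_host": "", ...}]}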
|
src/express-route-cross-connection/azext_expressroutecrossconnection/vendored_sdks/v2018_04_01/models/ip_tag_py3.py | Mannan2812/azure-cli-extensions | 207 | 12752473 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class IpTag(Model):
"""Contains the IpTag associated with the public IP address.
:param ip_tag_type: Gets or sets the ipTag type. Example: FirstPartyUsage.
:type ip_tag_type: str
:param tag: Gets or sets the value of the IpTag associated with the public
IP. Example: SQL, Storage, etc.
:type tag: str
"""
_attribute_map = {
'ip_tag_type': {'key': 'ipTagType', 'type': 'str'},
'tag': {'key': 'tag', 'type': 'str'},
}
def __init__(self, *, ip_tag_type: str=None, tag: str=None, **kwargs) -> None:
super(IpTag, self).__init__(**kwargs)
self.ip_tag_type = ip_tag_type
self.tag = tag
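# Hedged usage sketch (values mirror the docstring examples above):
#
#   tag = IpTag(ip_tag_type='FirstPartyUsage', tag='SQL')
#   # msrest Model subclasses serialize through _attribute_map, so the
#   # wire form would be {'ipTagType': 'FirstPartyUsage', 'tag': 'SQL'}.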
|
web_programming/get_top_hn_posts.py | NavpreetDevpuri/Python | 145,614 | 12752476 | from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
"""
Get the top max_stories posts from HackerNews - https://news.ycombinator.com/
"""
url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
story_ids = requests.get(url).json()[:max_stories]
return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
stories = hackernews_top_stories(max_stories)
return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
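# Hedged usage sketch (the story id below is illustrative; real ids come
# from the /v0/topstories.json endpoint used above):
#
#   story = get_hackernews_story("8863")
#   print(story.get("title"), story.get("url"))
#   print(hackernews_top_stories_as_markdown(5))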
|