| content (stringlengths 7 to 928k) | avg_line_length (float64 3.5 to 33.8k) | max_line_length (int64 6 to 139k) | alphanum_fraction (float64 0.08 to 0.96) | licenses (sequence) | repository_name (stringlengths 7 to 104) | path (stringlengths 4 to 230) | size (int64 7 to 928k) | lang (stringclasses 1) |
|---|---|---|---|---|---|---|---|---|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import sys
import unittest
from shutil import copyfile, copytree
from tempfile import TemporaryDirectory
import jmespath
import pytest
from parameterized import parameterized
from tests.helm_template_generator import render_chart
class PodTemplateFileTest(unittest.TestCase):
@classmethod
@pytest.fixture(autouse=True, scope="class")
def isolate_chart(cls):
with TemporaryDirectory() as tmp_dir:
cls.temp_chart_dir = tmp_dir + "/chart"
copytree(sys.path[0], cls.temp_chart_dir)
copyfile(
cls.temp_chart_dir + "/files/pod-template-file.kubernetes-helm-yaml",
cls.temp_chart_dir + "/templates/pod-template-file.yaml",
)
yield
def test_should_work(self):
docs = render_chart(
values={},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert jmespath.search("spec.containers[0].image", docs[0]) is not None
assert "base" == jmespath.search("spec.containers[0].name", docs[0])
def test_should_add_an_init_container_if_git_sync_is_true(self):
docs = render_chart(
values={
"images": {
"gitSync": {
"repository": "test-registry/test-repo",
"tag": "test-tag",
"pullPolicy": "Always",
}
},
"dags": {
"gitSync": {
"enabled": True,
"containerName": "git-sync-test",
"wait": 66,
"maxFailures": 70,
"subPath": "path1/path2",
"dest": "test-dest",
"root": "/git-root",
"rev": "HEAD",
"depth": 1,
"repo": "https://github.com/apache/airflow.git",
"branch": "test-branch",
"sshKeySecret": None,
"credentialsSecret": None,
"knownHosts": None,
}
},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert {
"name": "git-sync-test",
"securityContext": {"runAsUser": 65533},
"image": "test-registry/test-repo:test-tag",
"imagePullPolicy": "Always",
"env": [
{"name": "GIT_SYNC_REV", "value": "HEAD"},
{"name": "GIT_SYNC_BRANCH", "value": "test-branch"},
{"name": "GIT_SYNC_REPO", "value": "https://github.com/apache/airflow.git"},
{"name": "GIT_SYNC_DEPTH", "value": "1"},
{"name": "GIT_SYNC_ROOT", "value": "/git-root"},
{"name": "GIT_SYNC_DEST", "value": "test-dest"},
{"name": "GIT_SYNC_ADD_USER", "value": "true"},
{"name": "GIT_SYNC_WAIT", "value": "66"},
{"name": "GIT_SYNC_MAX_SYNC_FAILURES", "value": "70"},
{"name": "GIT_SYNC_ONE_TIME", "value": "true"},
],
"volumeMounts": [{"mountPath": "/git-root", "name": "dags"}],
} == jmespath.search("spec.initContainers[0]", docs[0])
def test_should_not_add_init_container_if_dag_persistence_is_true(self):
docs = render_chart(
values={
"dags": {
"persistence": {"enabled": True},
"gitSync": {"enabled": True},
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert jmespath.search("spec.initContainers", docs[0]) is None
@parameterized.expand(
[
({"gitSync": {"enabled": True}},),
({"persistence": {"enabled": True}},),
(
{
"gitSync": {"enabled": True},
"persistence": {"enabled": True},
},
),
]
)
def test_dags_mount(self, dag_values):
docs = render_chart(
values={"dags": dag_values},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"mountPath": "/opt/airflow/dags", "name": "dags", "readOnly": True} in jmespath.search(
"spec.containers[0].volumeMounts", docs[0]
)
def test_dags_mount_with_gitsync_and_persistence(self):
docs = render_chart(
values={
"dags": {
"gitSync": {"enabled": True},
"persistence": {"enabled": True},
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"mountPath": "/opt/airflow/dags", "name": "dags", "readOnly": True} in jmespath.search(
"spec.containers[0].volumeMounts", docs[0]
)
def test_validate_if_ssh_params_are_added(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"containerName": "git-sync-test",
"sshKeySecret": "ssh-secret",
"knownHosts": None,
"branch": "test-branch",
}
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "GIT_SSH_KEY_FILE", "value": "/etc/git-secret/ssh"} in jmespath.search(
"spec.initContainers[0].env", docs[0]
)
assert {"name": "GIT_SYNC_SSH", "value": "true"} in jmespath.search(
"spec.initContainers[0].env", docs[0]
)
assert {"name": "GIT_KNOWN_HOSTS", "value": "false"} in jmespath.search(
"spec.initContainers[0].env", docs[0]
)
assert {
"name": "git-sync-ssh-key",
"secret": {"secretName": "ssh-secret", "defaultMode": 288},
} in jmespath.search("spec.volumes", docs[0])
def test_validate_if_ssh_known_hosts_are_added(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"containerName": "git-sync-test",
"sshKeySecret": "ssh-secret",
"knownHosts": "github.com ssh-rsa AAAABdummy",
"branch": "test-branch",
}
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "GIT_KNOWN_HOSTS", "value": "true"} in jmespath.search(
"spec.initContainers[0].env", docs[0]
)
assert {
"name": "git-sync-known-hosts",
"configMap": {"defaultMode": 288, "name": "RELEASE-NAME-airflow-config"},
} in jmespath.search("spec.volumes", docs[0])
assert {
"name": "git-sync-known-hosts",
"mountPath": "/etc/git-secret/known_hosts",
"subPath": "known_hosts",
} in jmespath.search("spec.containers[0].volumeMounts", docs[0])
def test_should_set_username_and_pass_env_variables(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"credentialsSecret": "user-pass-secret",
"sshKeySecret": None,
}
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"name": "GIT_SYNC_USERNAME",
"valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_USERNAME"}},
} in jmespath.search("spec.initContainers[0].env", docs[0])
assert {
"name": "GIT_SYNC_PASSWORD",
"valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_PASSWORD"}},
} in jmespath.search("spec.initContainers[0].env", docs[0])
def test_should_set_the_dags_volume_claim_correctly_when_using_an_existing_claim(self):
docs = render_chart(
values={"dags": {"persistence": {"enabled": True, "existingClaim": "test-claim"}}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "dags", "persistentVolumeClaim": {"claimName": "test-claim"}} in jmespath.search(
"spec.volumes", docs[0]
)
def test_should_use_empty_dir_for_gitsync_without_persistence(self):
docs = render_chart(
values={"dags": {"gitSync": {"enabled": True}}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "dags", "emptyDir": {}} in jmespath.search("spec.volumes", docs[0])
@parameterized.expand(
[
({"enabled": False}, {"emptyDir": {}}),
({"enabled": True}, {"persistentVolumeClaim": {"claimName": "RELEASE-NAME-logs"}}),
(
{"enabled": True, "existingClaim": "test-claim"},
{"persistentVolumeClaim": {"claimName": "test-claim"}},
),
]
)
def test_logs_persistence_changes_volume(self, log_persistence_values, expected):
docs = render_chart(
values={"logs": {"persistence": log_persistence_values}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "logs", **expected} in jmespath.search("spec.volumes", docs[0])
def test_should_set_a_custom_image_in_pod_template(self):
docs = render_chart(
values={"images": {"pod_template": {"repository": "dummy_image", "tag": "latest"}}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert "dummy_image:latest" == jmespath.search("spec.containers[0].image", docs[0])
assert "base" == jmespath.search("spec.containers[0].name", docs[0])
def test_mount_airflow_cfg(self):
docs = render_chart(
values={},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert {'configMap': {'name': 'RELEASE-NAME-airflow-config'}, 'name': 'config'} == jmespath.search(
"spec.volumes[1]", docs[0]
)
assert {
'name': 'config',
'mountPath': '/opt/airflow/airflow.cfg',
'subPath': 'airflow.cfg',
'readOnly': True,
} == jmespath.search("spec.containers[0].volumeMounts[1]", docs[0])
def test_should_create_valid_affinity_and_node_selector(self):
docs = render_chart(
values={
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{"key": "foo", "operator": "In", "values": ["true"]},
]
}
]
}
}
},
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"nodeSelector": {"diskType": "ssd"},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert "foo" == jmespath.search(
"spec.affinity.nodeAffinity."
"requiredDuringSchedulingIgnoredDuringExecution."
"nodeSelectorTerms[0]."
"matchExpressions[0]."
"key",
docs[0],
)
assert "ssd" == jmespath.search(
"spec.nodeSelector.diskType",
docs[0],
)
assert "dynamic-pods" == jmespath.search(
"spec.tolerations[0].key",
docs[0],
)
def test_should_add_fsgroup_to_the_pod_template(self):
docs = render_chart(
values={"gid": 5000},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
self.assertEqual(5000, jmespath.search("spec.securityContext.fsGroup", docs[0]))
def test_should_create_valid_volume_mount_and_volume(self):
docs = render_chart(
values={
"workers": {
"extraVolumes": [{"name": "test-volume", "emptyDir": {}}],
"extraVolumeMounts": [{"name": "test-volume", "mountPath": "/opt/test"}],
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert "test-volume" == jmespath.search(
"spec.volumes[2].name",
docs[0],
)
assert "test-volume" == jmespath.search(
"spec.containers[0].volumeMounts[2].name",
docs[0],
)
def test_should_add_env_for_gitsync(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"env": [{"name": "FOO", "value": "bar"}],
}
},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "FOO", "value": "bar"} in jmespath.search("spec.initContainers[0].env", docs[0])
def test_no_airflow_local_settings_by_default(self):
docs = render_chart(show_only=["templates/pod-template-file.yaml"], chart_dir=self.temp_chart_dir)
volume_mounts = jmespath.search("spec.containers[0].volumeMounts", docs[0])
assert "airflow_local_settings.py" not in str(volume_mounts)
def test_airflow_local_settings(self):
docs = render_chart(
values={"airflowLocalSettings": "# Well hello!"},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"name": "config",
"mountPath": "/opt/airflow/config/airflow_local_settings.py",
"subPath": "airflow_local_settings.py",
"readOnly": True,
} in jmespath.search("spec.containers[0].volumeMounts", docs[0])
def test_airflow_pod_annotations(self):
docs = render_chart(
values={"airflowPodAnnotations": {"my_annotation": "annotated!"}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
annotations = jmespath.search("metadata.annotations", docs[0])
assert "my_annotation" in annotations
assert "annotated!" in annotations["my_annotation"]
| 38.553488 | 107 | 0.519725 | ["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"] | MatrixManAtYrService/airflow | chart/tests/test_pod_template_file.py | 16,578 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-tuning a Convolutional Network for Mobile GPU
==================================================
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, `Eddie Yan <https://github.com/eqy>`_
Auto-tuning for a specific device is critical for getting the best
performance. This is a tutorial about how to tune a whole convolutional
network.
The operator implementation for Mobile GPU in TVM is written in template form.
The template has many tunable knobs (tile factor, vectorization, unrolling, etc).
We will tune all convolution, depthwise convolution and dense operators
in the neural network. After tuning, we produce a log file which stores
the best knob values for all required operators. When the TVM compiler compiles
these operators, it will query this log file to get the best knob values.
We also released pre-tuned parameters for some arm devices. You can go to
`Mobile GPU Benchmark <https://github.com/apache/tvm/wiki/Benchmark#mobile-gpu>`_
to see the results.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
######################################################################
# Install dependencies
# --------------------
# To use the autotvm package in tvm, we need to install some extra dependencies.
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user psutil xgboost tornado cloudpickle
#
# To make TVM run faster during tuning, it is recommended to use Cython
# as the FFI of TVM. In the root directory of TVM, execute
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user cython
# sudo make cython3
#
# Now return to python code. Import packages.
import os
import numpy as np
import tvm
from tvm import relay, autotvm
import tvm.relay.testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib.utils import tempdir
import tvm.contrib.graph_executor as runtime
#################################################################
# Define network
# --------------
# First we need to define the network in relay frontend API.
# We can load some pre-defined network from :code:`relay.testing`.
# We can also load models from MXNet, ONNX and TensorFlow.
def get_network(name, batch_size):
"""Get the symbol definition and random weight of a network"""
input_shape = (batch_size, 3, 224, 224)
output_shape = (batch_size, 1000)
if "resnet" in name:
n_layer = int(name.split("-")[1])
mod, params = relay.testing.resnet.get_workload(
num_layers=n_layer, batch_size=batch_size, dtype=dtype
)
elif "vgg" in name:
n_layer = int(name.split("-")[1])
mod, params = relay.testing.vgg.get_workload(
num_layers=n_layer, batch_size=batch_size, dtype=dtype
)
elif name == "mobilenet":
mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "squeezenet_v1.1":
mod, params = relay.testing.squeezenet.get_workload(
batch_size=batch_size, version="1.1", dtype=dtype
)
elif name == "inception_v3":
input_shape = (batch_size, 3, 299, 299)
mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "mxnet":
# an example for mxnet model
from mxnet.gluon.model_zoo.vision import get_model
block = get_model("resnet18_v1", pretrained=True)
mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
net = mod["main"]
net = relay.Function(
net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
)
mod = tvm.IRModule.from_expr(net)
else:
raise ValueError("Unsupported network: " + name)
return mod, params, input_shape, output_shape
#################################################################
# .. _tutorials-autotvm-start-rpc-tracker:
#################################################################
# Start RPC Tracker
# -----------------
# TVM uses an RPC session to communicate with ARM boards.
# During tuning, the tuner will send the generated code to the board and
# measure the speed of code on the board.
#
# To scale up the tuning, TVM uses RPC Tracker to manage distributed devices.
# The RPC Tracker is a centralized controller node. We can register all devices to
# the tracker. For example, if we have 10 phones, we can register all of them
# to the tracker, and run 10 measurements in parallel, accelerating the tuning process.
#
# To start an RPC tracker, run this command on the host machine. The tracker is
# required during the whole tuning process, so we need to open a new terminal for
# this command:
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190
#
# The expected output is
#
# .. code-block:: bash
#
# INFO:RPCTracker:bind to 0.0.0.0:9190
#################################################################
# Register Devices to RPC Tracker
# -----------------------------------
# Now we can register our devices to the tracker. The first step is to
# build the TVM runtime for the ARM devices.
#
# * For Linux:
# Follow this section :ref:`build-tvm-runtime-on-device` to build
# the TVM runtime on the device. Then register the device to tracker by
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=rk3399
#
# (replace :code:`[HOST_IP]` with the IP address of your host machine)
#
# * For Android:
# Follow this `readme page <https://github.com/apache/tvm/tree/main/apps/android_rpc>`_ to
# install TVM RPC APK on the android device. Make sure you can pass the android RPC test.
# Then you have already registered your device. During tuning, you have to go to the developer
# options and enable "Keep screen awake during charging", and charge your phone to keep it stable.
#
# After registering devices, we can confirm it by querying rpc_tracker
#
# .. code-block:: bash
#
# python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190
#
# For example, if we have 2 Huawei mate10 pro, 11 Raspberry Pi 3B and 2 rk3399,
# the output can be
#
# .. code-block:: bash
#
# Queue Status
# ----------------------------------
# key total free pending
# ----------------------------------
# mate10pro 2 2 0
# rk3399 2 2 0
# rpi3b 11 11 0
# ----------------------------------
#
# You can register multiple devices to the tracker to accelerate the measurement in tuning.
###########################################
# Set Tuning Options
# ------------------
# Before tuning, we should apply some configurations. Here I use an RK3399 board
# as an example. In your setting, you should modify the target and device_key accordingly.
# Set :code:`use_android` to True if you use an Android phone.
#### DEVICE CONFIG ####
# Replace "aarch64-linux-gnu" with the correct target of your board.
# This target host is used for cross compilation. You can query it by :code:`gcc -v` on your device.
target = tvm.target.Target("opencl -device=mali", host="llvm -mtriple=aarch64-linux-gnu")
# Also replace this with the device key in your tracker
device_key = "rk3399"
# Set this to True if you use android phone
use_android = False
#### TUNING OPTION ####
network = "resnet-18"
log_file = "%s.%s.log" % (device_key, network)
dtype = "float32"
tuning_option = {
"log_filename": log_file,
"tuner": "xgb",
"n_trial": 1000,
"early_stopping": 450,
"measure_option": autotvm.measure_option(
builder=autotvm.LocalBuilder(build_func="ndk" if use_android else "default"),
runner=autotvm.RPCRunner(
device_key,
host="127.0.0.1",
port=9190,
number=10,
timeout=5,
),
),
}
####################################################################
#
# .. note:: How to set tuning options
#
# In general, the default values provided here work well.
# If you have enough time budget, you can set :code:`n_trial` and :code:`early_stopping` to larger
# values, which makes the tuning run longer.
# If your device runs very slowly or your conv2d operators have many GFLOPs, consider setting a
# larger timeout.
#
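#
# For a quick smoke test you could shrink these options before launching, for example
# (illustrative values only, not part of the original tutorial; they will not produce
# well-tuned kernels):
#
# .. code-block:: python
#
#   tuning_option.update({"n_trial": 20, "early_stopping": None})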
###################################################################
# Begin Tuning
# ------------
# Now we can extract tuning tasks from the network and begin tuning.
# Here, we provide a simple utility function to tune a list of tasks.
# This function is just an initial implementation which tunes them in sequential order.
# We will introduce a more sophisticated tuning scheduler in the future.
# You can skip the implementation of this function for this tutorial.
def tune_tasks(
tasks,
measure_option,
tuner="xgb",
n_trial=1000,
early_stopping=None,
log_filename="tuning.log",
use_transfer_learning=True,
):
# create tmp log file
tmp_log_file = log_filename + ".tmp"
if os.path.exists(tmp_log_file):
os.remove(tmp_log_file)
for i, tsk in enumerate(reversed(tasks)):
prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
# create tuner
if tuner == "xgb" or tuner == "xgb-rank":
tuner_obj = XGBTuner(tsk, loss_type="rank")
elif tuner == "ga":
tuner_obj = GATuner(tsk, pop_size=50)
elif tuner == "random":
tuner_obj = RandomTuner(tsk)
elif tuner == "gridsearch":
tuner_obj = GridSearchTuner(tsk)
else:
raise ValueError("Invalid tuner: " + tuner)
if use_transfer_learning:
if os.path.isfile(tmp_log_file):
tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
# do tuning
tsk_trial = min(n_trial, len(tsk.config_space))
tuner_obj.tune(
n_trial=tsk_trial,
early_stopping=early_stopping,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(tsk_trial, prefix=prefix),
autotvm.callback.log_to_file(tmp_log_file),
],
)
# pick best records to a cache file
autotvm.record.pick_best(tmp_log_file, log_filename)
os.remove(tmp_log_file)
########################################################################
# Finally, we launch tuning jobs and evaluate the end-to-end performance.
def tune_and_evaluate(tuning_opt):
# extract workloads from relay program
print("Extract tasks...")
mod, params, input_shape, _ = get_network(network, batch_size=1)
tasks = autotvm.task.extract_from_program(
mod["main"],
target=target,
params=params,
ops=(relay.op.get("nn.conv2d"),),
)
# run tuning tasks
print("Tuning...")
tune_tasks(tasks, **tuning_opt)
# compile kernels with history best records
with autotvm.apply_history_best(log_file):
print("Compile...")
with tvm.transform.PassContext(opt_level=3):
lib = relay.build_module.build(mod, target=target, params=params)
# export library
tmp = tempdir()
if use_android:
from tvm.contrib import ndk
filename = "net.so"
lib.export_library(tmp.relpath(filename), ndk.create_shared)
else:
filename = "net.tar"
lib.export_library(tmp.relpath(filename))
# upload module to device
print("Upload...")
remote = autotvm.measure.request_remote(device_key, "127.0.0.1", 9190, timeout=10000)
remote.upload(tmp.relpath(filename))
rlib = remote.load_module(filename)
# upload parameters to device
dev = remote.device(str(target), 0)
module = runtime.GraphModule(rlib["default"](dev))
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module.set_input("data", data_tvm)
# evaluate
print("Evaluate inference time cost...")
ftimer = module.module.time_evaluator("run", dev, number=1, repeat=30)
prof_res = np.array(ftimer().results) * 1000 # convert to millisecond
print(
"Mean inference time (std dev): %.2f ms (%.2f ms)"
% (np.mean(prof_res), np.std(prof_res))
)
# We do not run the tuning in our webpage server since it takes too long.
# Uncomment the following line to run it by yourself.
# tune_and_evaluate(tuning_option)
######################################################################
# Sample Output
# -------------
# The tuning needs to compile many programs and extract features from them,
# so a high-performance CPU is recommended.
# One sample output is listed below. It takes about 3 hours on a 32T AMD Ryzen Threadripper.
#
# .. code-block:: bash
#
# Extract tasks...
# Tuning...
# [Task 1/17] Current/Best: 25.30/ 39.12 GFLOPS | Progress: (992/1000) | 751.22 s Done.
# [Task 2/17] Current/Best: 40.70/ 45.50 GFLOPS | Progress: (736/1000) | 545.46 s Done.
# [Task 3/17] Current/Best: 38.83/ 42.35 GFLOPS | Progress: (992/1000) | 1549.85 s Done.
# [Task 4/17] Current/Best: 23.31/ 31.02 GFLOPS | Progress: (640/1000) | 1059.31 s Done.
# [Task 5/17] Current/Best: 0.06/ 2.34 GFLOPS | Progress: (544/1000) | 305.45 s Done.
# [Task 6/17] Current/Best: 10.97/ 17.20 GFLOPS | Progress: (992/1000) | 1050.00 s Done.
# [Task 7/17] Current/Best: 8.98/ 10.94 GFLOPS | Progress: (928/1000) | 421.36 s Done.
# [Task 8/17] Current/Best: 4.48/ 14.86 GFLOPS | Progress: (704/1000) | 582.60 s Done.
# [Task 9/17] Current/Best: 10.30/ 25.99 GFLOPS | Progress: (864/1000) | 899.85 s Done.
# [Task 10/17] Current/Best: 11.73/ 12.52 GFLOPS | Progress: (608/1000) | 304.85 s Done.
# [Task 11/17] Current/Best: 15.26/ 18.68 GFLOPS | Progress: (800/1000) | 747.52 s Done.
# [Task 12/17] Current/Best: 17.48/ 26.71 GFLOPS | Progress: (1000/1000) | 1166.40 s Done.
# [Task 13/17] Current/Best: 0.96/ 11.43 GFLOPS | Progress: (960/1000) | 611.65 s Done.
# [Task 14/17] Current/Best: 17.88/ 20.22 GFLOPS | Progress: (672/1000) | 670.29 s Done.
# [Task 15/17] Current/Best: 11.62/ 13.98 GFLOPS | Progress: (736/1000) | 449.25 s Done.
# [Task 16/17] Current/Best: 19.90/ 23.83 GFLOPS | Progress: (608/1000) | 708.64 s Done.
# [Task 17/17] Current/Best: 17.98/ 22.75 GFLOPS | Progress: (736/1000) | 1122.60 s Done.
# Compile...
# Upload...
# Evaluate inference time cost...
# Mean inference time (std dev): 128.05 ms (7.74 ms)
#
######################################################################
#
# .. note:: **Experiencing Difficulties?**
#
# The auto tuning module is error-prone. If you always see " 0.00/ 0.00 GFLOPS",
# then there must be something wrong.
#
# First, make sure you set the correct configuration of your device.
# Then, you can print debug information by adding these lines in the beginning
# of the script. It will print every measurement result, where you can find useful
# error messages.
#
# .. code-block:: python
#
# import logging
# logging.getLogger('autotvm').setLevel(logging.DEBUG)
#
# Finally, always feel free to ask our community for help on https://discuss.tvm.apache.org
| 38.790476 | 100 | 0.631169 | ["Apache-2.0"] | HemiMin/tvm | tutorials/autotvm/tune_relay_mobile_gpu.py | 16,292 | Python |
from __future__ import absolute_import
import abc
class Database(object):
__metaclass__ = abc.ABCMeta
FIELD_FILE_SHA1 = 'file_sha1'
FIELD_SONG_ID = 'song_id'
FIELD_SONGNAME = 'song_name'
FIELD_OFFSET = 'offset'
FIELD_HASH = 'hash'
# Name of your Database subclass, this is used in configuration
# to refer to your class
type = None
def __init__(self):
super(Database, self).__init__()
def before_fork(self):
"""
Called before the database instance is given to the new process
"""
pass
def after_fork(self):
"""
Called after the database instance has been given to the new process
This will be called in the new process.
"""
pass
def setup(self):
"""
Called on creation or shortly afterwards.
"""
pass
@abc.abstractmethod
def empty(self):
"""
Called when the database should be cleared of all data.
"""
pass
@abc.abstractmethod
def delete_unfingerprinted_songs(self):
"""
Called to remove any song entries that do not have any fingerprints
associated with them.
"""
pass
@abc.abstractmethod
def get_num_songs(self):
"""
        Returns the number of songs in the database.
"""
pass
@abc.abstractmethod
def get_num_fingerprints(self):
"""
Returns the number of fingerprints in the database.
"""
pass
@abc.abstractmethod
def set_song_fingerprinted(self, sid):
"""
Sets a specific song as having all fingerprints in the database.
sid: Song identifier
"""
pass
@abc.abstractmethod
def get_songs(self):
"""
Returns all fully fingerprinted songs in the database.
"""
pass
@abc.abstractmethod
def get_song_by_id(self, sid):
"""
Return a song by its identifier
sid: Song identifier
"""
pass
@abc.abstractmethod
def insert(self, hash, sid, offset):
"""
Inserts a single fingerprint into the database.
hash: Part of a sha1 hash, in hexadecimal format
        sid: Song identifier this fingerprint is of
offset: The offset this hash is from
"""
pass
@abc.abstractmethod
def insert_song(self, song_name):
"""
Inserts a song name into the database, returns the new
identifier of the song.
song_name: The name of the song.
"""
pass
@abc.abstractmethod
def query(self, hash):
"""
Returns all matching fingerprint entries associated with
the given hash as parameter.
hash: Part of a sha1 hash, in hexadecimal format
"""
pass
@abc.abstractmethod
def get_iterable_kv_pairs(self):
"""
Returns all fingerprints in the database.
"""
pass
@abc.abstractmethod
def insert_hashes(self, sid, hashes):
"""
Insert a multitude of fingerprints.
sid: Song identifier the fingerprints belong to
hashes: A sequence of tuples in the format (hash, offset)
- hash: Part of a sha1 hash, in hexadecimal format
- offset: Offset this hash was created from/at.
"""
pass
@abc.abstractmethod
def return_matches(self, hashes):
"""
Searches the database for pairs of (hash, offset) values.
hashes: A sequence of tuples in the format (hash, offset)
- hash: Part of a sha1 hash, in hexadecimal format
- offset: Offset this hash was created from/at.
Returns a sequence of (sid, offset_difference) tuples.
sid: Song identifier
offset_difference: (offset - database_offset)
"""
pass
def get_database(database_type=None):
# Default to using the mysql database
database_type = database_type or "mysql"
# Lower all the input.
database_type = database_type.lower()
for db_cls in Database.__subclasses__():
if db_cls.type == database_type:
return db_cls
raise TypeError("Unsupported database type supplied.")
# Import our default database handler
# MySQL
# import dejavu.database_sql
# SQLite
import dejavu.database_sqlite
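# A minimal, illustrative sketch (not part of dejavu) of how a backend plugs into
# get_database(): subclass Database and set a unique `type` so the lookup over
# Database.__subclasses__() can find it. The abstract methods are omitted, so this
# class only demonstrates the lookup, not a working backend.
class _SketchMemoryDatabase(Database):
    type = "memory"
def _sketch_lookup_example():
    # get_database() returns the class itself; callers instantiate it afterwards.
    assert get_database("memory") is _SketchMemoryDatabase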
| 24.461111 | 76 | 0.605042 | ["MIT"] | KundanGaira/dejavu | dejavu/database.py | 4,403 | Python |
import pytest
from unittest.mock import patch
from collections import Counter
from bigchaindb.core import Bigchain
from bigchaindb.exceptions import CriticalDuplicateVote
from bigchaindb.voting import Voting, INVALID, VALID, UNDECIDED
################################################################################
# Tests for checking vote eligibility
def test_partition_eligible_votes():
class TestVoting(Voting):
@classmethod
def verify_vote_signature(cls, vote):
if vote['node_pubkey'] == 'invalid sig':
return False
if vote['node_pubkey'] == 'value error':
raise ValueError()
return True
voters = ['valid', 'invalid sig', 'value error', 'not in set']
votes = [{'node_pubkey': k} for k in voters]
el, inel = TestVoting.partition_eligible_votes(votes, voters[:-1])
assert el == [votes[0]]
assert inel == votes[1:]
################################################################################
# Test vote counting
def test_count_votes():
class TestVoting(Voting):
@classmethod
def verify_vote_schema(cls, vote):
return vote['node_pubkey'] != 'malformed'
voters = (['says invalid', 'malformed'] +
['kosher' + str(i) for i in range(10)])
votes = [Bigchain(v).vote('block', 'a', True) for v in voters]
votes[0]['vote']['is_block_valid'] = False
# Incorrect previous block subtracts from n_valid and adds to n_invalid
votes[-1]['vote']['previous_block'] = 'z'
by_voter = dict(enumerate(votes))
assert TestVoting.count_votes(by_voter) == {
'counts': {
'n_valid': 9, # 9 kosher votes
'n_invalid': 3, # 1 invalid, 1 malformed, 1 rogue prev block
},
'malformed': [votes[1]],
'previous_block': 'a',
'other_previous_block': {'z': 1},
}
def test_must_agree_prev_block():
class TestVoting(Voting):
@classmethod
def verify_vote_schema(cls, vote):
return True
voters = 'abcd'
votes = [Bigchain(v).vote('block', 'a', True) for v in voters]
votes[0]['vote']['previous_block'] = 'b'
votes[1]['vote']['previous_block'] = 'c'
by_voter = dict(enumerate(votes))
assert TestVoting.count_votes(by_voter) == {
'counts': {
'n_valid': 2,
'n_invalid': 2,
},
'previous_block': 'a',
'other_previous_block': {'b': 1, 'c': 1},
'malformed': [],
}
################################################################################
# Tests for vote decision making
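# Each entry lists, for a given n_voters, the smallest n_valid that decides the
# block VALID (a strict majority, n_voters // 2 + 1) and the smallest n_invalid
# that decides it INVALID (enough votes that a valid majority becomes unreachable).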
DECISION_TESTS = [
{'n_voters': 1, 'n_valid': 1, 'n_invalid': 1},
{'n_voters': 2, 'n_valid': 2, 'n_invalid': 1},
{'n_voters': 3, 'n_valid': 2, 'n_invalid': 2},
{'n_voters': 4, 'n_valid': 3, 'n_invalid': 2},
{'n_voters': 5, 'n_valid': 3, 'n_invalid': 3},
{'n_voters': 6, 'n_valid': 4, 'n_invalid': 3},
{'n_voters': 7, 'n_valid': 4, 'n_invalid': 4},
{'n_voters': 8, 'n_valid': 5, 'n_invalid': 4}
]
@pytest.mark.parametrize('kwargs', DECISION_TESTS)
def test_decide_votes_valid(kwargs):
kwargs = kwargs.copy()
kwargs['n_invalid'] = 0
assert Voting.decide_votes(**kwargs) == VALID
kwargs['n_valid'] -= 1
assert Voting.decide_votes(**kwargs) == UNDECIDED
@pytest.mark.parametrize('kwargs', DECISION_TESTS)
def test_decide_votes_invalid(kwargs):
kwargs = kwargs.copy()
kwargs['n_valid'] = 0
assert Voting.decide_votes(**kwargs) == INVALID
kwargs['n_invalid'] -= 1
assert Voting.decide_votes(**kwargs) == UNDECIDED
################################################################################
# Actions - test state transitions
@pytest.mark.parametrize('n_voters', range(8))
def test_vote_actions(n_voters):
"""* Legal transitions are UNDECIDED -> [VALID|INVALID] only
* Block is never left UNDECIDED after voting
    * Accommodates rogues on previous block / invalid schema
"""
class TestVoting(Voting):
@classmethod
def verify_vote_schema(cls, vote):
return type(vote['vote']['is_block_valid']) == bool
@classmethod
def verify_vote_signature(cls, vote):
return True
keyring = 'abcdefghijklmnopqrstuvwxyz'[:n_voters]
block = {'id': 'block', 'block': {'voters': keyring}}
state = UNDECIDED
todo = [(state, [], [])]
def branch(p, r):
todo.append((state, votes, votes + [{
'node_pubkey': keyring[len(votes)],
'vote': {'previous_block': p, 'is_block_valid': r}
}]))
while todo:
prev_state, prev_votes, votes = todo.pop(0)
results = Counter(v['vote']['is_block_valid'] for v in votes)
prev_blocks = Counter(v['vote']['previous_block'] for v in votes)
majority = n_voters // 2 + 1
honest = (len(votes) == majority and len(prev_blocks) == 1 and
not results['lol'] and len(results) == 1)
closed = len(votes) == n_voters
# Test legal transition
if votes:
state = TestVoting.block_election(block, votes, keyring)['status']
assert prev_state in [state, UNDECIDED]
# Test that decision has been reached
if honest or closed:
assert state != UNDECIDED or n_voters == 0
if closed:
continue
        # Can accommodate more votes, add them to the todo list.
# This vote is the good case
branch('A', True)
# This vote disagrees on previous block
branch('B', True)
# This vote says the block is invalid
branch('A', False)
# This vote is invalid
branch('A', 'lol')
################################################################################
# Tests for vote signature
def test_verify_vote_signature_passes(b):
vote = b.vote('block', 'a', True)
assert Voting.verify_vote_signature(vote)
vote['signature'] = ''
assert not Voting.verify_vote_signature(vote)
################################################################################
# Tests for vote schema
def test_verify_vote_schema(b):
vote = b.vote('b' * 64, 'a' * 64, True)
assert Voting.verify_vote_schema(vote)
vote = b.vote('b' * 64, 'a', True)
assert not Voting.verify_vote_schema(vote)
vote = b.vote('b', 'a' * 64, True)
assert not Voting.verify_vote_schema(vote)
################################################################################
# block_election tests
def test_block_election(b):
class TestVoting(Voting):
@classmethod
def verify_vote_signature(cls, vote):
return True
@classmethod
def verify_vote_schema(cls, vote):
return True
keyring = 'abc'
block = {'id': 'xyz', 'block': {'voters': 'ab'}}
votes = [{
'node_pubkey': c,
'vote': {'is_block_valid': True, 'previous_block': 'a'}
} for c in 'abc']
assert TestVoting.block_election(block, votes, keyring) == {
'status': VALID,
'block_id': 'xyz',
'counts': {'n_valid': 2, 'n_invalid': 0},
'ineligible': [votes[-1]],
'malformed': [],
'previous_block': 'a',
'other_previous_block': {},
}
@patch('bigchaindb.voting.Voting.verify_vote_signature', return_value=True)
def test_duplicate_vote_throws_critical_error(b):
keyring = 'abc'
block = {'id': 'xyz', 'block': {'voters': 'ab'}}
votes = [{
'node_pubkey': c,
'vote': {'is_block_valid': True, 'previous_block': 'a'}
} for c in 'aabc']
with pytest.raises(CriticalDuplicateVote):
Voting.block_election(block, votes, keyring)
| 31.084677 | 80 | 0.558827 | ["Apache-2.0"] | RiddleAndCode/bigchaindb | tests/test_voting.py | 7,709 | Python |
#!/usr/bin/env python
# ref: Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun"Deep Residual Learning for Image Recognition"
# https://arxiv.org/pdf/1512.03385v1.pdf
#
ResNetConfig={
"16":[ "16", "small", 1, 1, 1, 1],
"18":[ "18", "small", 2, 2, 2, 2],
"34":[ "34", "small", 3, 4, 6, 3],
"50":[ "50", "large", 3, 4, 6, 3],
"101":["101", "large", 3, 4, 23, 3],
"152":["152", "large", 3, 8, 36, 3]
}
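# Each entry is [name, block type, n2, n3, n4, n5]: "small" selects the basic
# residual block built from two 3x3 convolutions, "large" selects the 1x1-3x3-1x1
# bottleneck block, and the four counts give the number of residual blocks in
# stages res2 through res5 (see genRes2..genRes5 below).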
def genDataLayer(train_val, number):
layer_str = '''name: "Resnet{number}"
layer {{
name: "data"
type: "Data"
top: "data"
top: "label"
data_param {{
source: "examples/imagenet/ilsvrc12_train_lmdb"
backend: LMDB
batch_size: 32
}}
transform_param {{
crop_size: 224
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
mirror: true
}}
include: {{ phase: TRAIN }}
}}
layer {{
name: "data"
type: "Data"
top: "data"
top: "label"
data_param {{
source: "examples/imagenet/ilsvrc12_val_lmdb"
backend: LMDB
batch_size: 32
}}
transform_param {{
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
crop_size: 224
mirror: false
}}
include: {{ phase: TEST }}
}}'''.format(number=number)
train_val += layer_str
return train_val, "data"
def genConvLayer(train_val, name, bottom, kernel_size, num_output, stride, pad, bias_term=False,filler="msra"):
layer_str = '''
layer {{
name: "{name}"
type: "Convolution"
bottom: "{bottom}"
top: "{top}"
convolution_param {{
num_output: {num_output}
kernel_size: {kernel_size}
stride: {stride}
pad: {pad}
weight_filler {{
type: "{weight_filler_type}"
std: 0.010
}}'''.format(name=name, top=name, bottom=bottom, kernel_size=kernel_size,
num_output=num_output, pad=pad, stride=stride, weight_filler_type=filler)
if bias_term:
layer_str = layer_str + \
'''
bias_filler {
type: "constant"
value: 0
}
}
}'''
else :
layer_str = layer_str + \
'''
bias_term: false
}
}'''
train_val += layer_str
return train_val, name
def genBNLayer(train_val, name, bottom, top=None):
top = name if top is None else top
layer_str = '''
layer {{
name: "{name}"
type: "BatchNorm"
bottom: "{bottom}"
top: "{top}"
batch_norm_param {{
moving_average_fraction: 0.9
eps: 0.0001
scale_bias: true
}}
}}'''.format(name=name, top=top, bottom=bottom)
train_val += layer_str
return train_val, top
# def genScaleLayer(train_val, name, bottom):
# layer_str = '''
# layer {{
# name: "{name}"
# type: "Scale"
# top: "{top}"
# bottom: "{bottom}"
# scale_param {{
# bias_term: true # TODO
# }}
# }}'''.format(name=name, top=bottom, bottom=bottom)
# train_val += layer_str
# return train_val, bottom
def genActivationLayer(train_val, name, bottom, type="ReLU"):
layer_str = '''
layer {{
name: "{name}"
type: "{type}"
bottom: "{bottom}"
top: "{top}"
}}'''.format(name=name, top=bottom, bottom=bottom, type=type)
train_val += layer_str
return train_val, bottom
def genConvBnLayer(train_val, name, bottom, kernel_size, num_output, stride, pad, filler="msra"):
train_val, last_top = genConvLayer(train_val=train_val, name=name, bottom=bottom,
kernel_size=kernel_size, num_output=num_output, stride=stride, pad=pad, bias_term=False,filler=filler)
train_val, last_top = genBNLayer(train_val=train_val, name="{name}_bn".format(name=name), bottom=last_top)
return train_val, last_top
def genConvBnReluLayer(train_val, name, bottom, kernel_size, num_output, stride, pad, filler="msra", activation_type="ReLU"):
train_val, last_top = genConvBnLayer(train_val=train_val, name=name, bottom=bottom,
kernel_size=kernel_size, num_output=num_output, stride=stride, pad=pad, filler=filler)
train_val, last_top = genActivationLayer(train_val=train_val, name="{name}_relu".format(name=name), bottom=last_top, type=activation_type)
return train_val, last_top
def genBnReluLayer(train_val, name, bottom, activation_type="ReLU"):
train_val, last_top = genBNLayer(train_val=train_val, name="{name}bn".format(name=name), bottom=bottom, top="{name}bn".format(name=name))
train_val, last_top = genActivationLayer(train_val=train_val, name="{name}relu".format(name=name), bottom=last_top, type=activation_type)
return train_val, last_top
def genPoolLayer(train_val, name, bottom, kernel_size, stride, pool_type):
layer_str = '''
layer {{
name: "{name}"
type: "Pooling"
bottom: "{bottom}"
top: "{top}"
pooling_param {{
pool: {pool_type}
kernel_size: {kernel_size}
stride: {stride}
}}
}}'''.format(name=name, top=name, bottom=bottom, pool_type=pool_type, kernel_size=kernel_size, stride=stride)
train_val += layer_str
return train_val, name
def genFCLayer(train_val, name, bottom, num_output, filler="gaussian"):
layer_str = '''
layer {{
name: "{name}"
type: "InnerProduct"
bottom: "{bottom}"
top: "{top}"
inner_product_param {{
num_output: {num_output}
weight_filler {{
type: "{weight_filler_type}"
std: 0.01
}}
bias_filler {{
type: "constant"
value: 0
}}
}}
}}'''.format(name=name, top=name, bottom=bottom, num_output=num_output, weight_filler_type=filler)
train_val += layer_str
return train_val, name
def genEltwiseLayer(train_val, name, bottom_1, bottom_2, operation="SUM"):
layer_str = '''
layer {{
name: "{name}"
type: "Eltwise"
bottom: "{bottom_1}"
bottom: "{bottom_2}"
top: "{top}"
eltwise_param {{
operation: {operation}
}}
}}'''.format(name=name, top=name, bottom_1=bottom_1, bottom_2=bottom_2, operation=operation)
train_val += layer_str
return train_val, name
def genSoftmaxLossLayer(train_val, name, bottom_1, bottom_2="label"):
layer_str = '''
layer {{
name: "{name}"
type: "SoftmaxWithLoss"
bottom: "{bottom_1}"
bottom: "{bottom_2}"
top: "{top}"
}}'''.format(name=name, top=name, bottom_1=bottom_1, bottom_2=bottom_2)
train_val += layer_str
return train_val, name
def genAccuracyLayer(train_val, name, bottom_1, bottom_2="label", k=1):
layer_str='''
layer {{
type: "Accuracy"
name: "{name}"
bottom: "{bottom_1}"
bottom: "{bottom_2}"
top: "{top}"
accuracy_param {{
top_k: {k}
}}
include {{ phase: TEST }}
}}'''.format(name=name, top=name, bottom_1=bottom_1, bottom_2=bottom_2, k=k)
train_val += layer_str
return train_val, name
def addComment(train_val, comment):
train_val += "\n#\n# {comment}\n#".format(comment=comment)
return train_val
def digit_to_char(digit):
return chr(ord('A') + digit)
def str_base(number, base):
if number < 0:
return '-' + str_base(-number, base)
(d, m) = divmod(number, base)
if d > 0:
return str_base(d, base) + digit_to_char(m)
return (digit_to_char(m).lower())
def genRes2(train_val, last_top, small, i, fix_dim):
# prefix="res2.{i}_".format(i=str_base(i-1, 26))
prefix = "res2.{i}.".format(i=str(i))
branch_str=""
res_last_top=last_top
branch_last_top=last_top
if small:
branch_str, branch_last_top = genConvBnReluLayer(train_val=branch_str, name='{}conv1'.format(prefix), bottom=branch_last_top,
kernel_size=3, num_output=64, stride=1, pad=1)
branch_str, branch_last_top = genConvBnLayer(train_val=branch_str, name='{}conv2'.format(prefix), bottom=branch_last_top,
kernel_size=3, num_output=64, stride=1, pad=1)
else:
branch_str, branch_last_top = genConvBnReluLayer(train_val=branch_str, name='{}conv1'.format(prefix), bottom=branch_last_top,
kernel_size=1, num_output=64, stride=1, pad=0)
branch_str, branch_last_top = genConvBnReluLayer(train_val=branch_str, name='{}conv2'.format(prefix), bottom=branch_last_top,
kernel_size=3, num_output=64, stride=1, pad=1)
branch_str, branch_last_top = genConvBnLayer(train_val=branch_str, name='{}conv3'.format(prefix), bottom=branch_last_top,
kernel_size=1, num_output=256, stride=1, pad=0)
if small==False:
branch_str, res_last_top = genConvBnLayer(train_val=branch_str, name='{}skipConv'.format(prefix), bottom=res_last_top,
kernel_size=1, num_output=64 if small else 256, stride=1, pad=0)
branch_str, last_top = genEltwiseLayer(train_val=branch_str, name='{}eltwise'.format(prefix),
bottom_1=branch_last_top, bottom_2=res_last_top, operation="SUM")
branch_str, last_top = genActivationLayer(train_val=branch_str, name="{}relu".format(prefix), bottom=last_top)
train_val += branch_str
return train_val, last_top
def genRes3(train_val, last_top, small, i, fix_dim):
# prefix="res3{i}_".format(i=str_base(i-1, 26))
prefix="res3.{i}.".format(i=str(i))
branch_str=""
res_last_top=last_top
branch_last_top=last_top
if small:
branch_str, branch_last_top = genConvBnReluLayer(train_val=branch_str, name='{}conv1'.format(prefix), bottom=branch_last_top,
kernel_size=3, num_output=128, stride=2 if i==1 else 1, pad=1)
branch_str, branch_last_top = genConvBnLayer(train_val=branch_str, name='{}conv2'.format(prefix), bottom=branch_last_top,
kernel_size=3, num_output=128, stride=1, pad=1)
else:
branch_str, branch_last_top = genConvBnReluLayer(train_val=branch_str, name='{}conv1'.format(prefix), bottom=branch_last_top,
kernel_size=1, num_output=128, stride=2 if i==1 else 1, pad=0)
branch_str, branch_last_top = genConvBnReluLayer(train_val=branch_str, name='{}conv2'.format(prefix), bottom=branch_last_top,
kernel_size=3, num_output=128, stride=1, pad=1)
branch_str, branch_last_top = genConvBnLayer(train_val=branch_str, name='{}conv3'.format(prefix), bottom=branch_last_top,
kernel_size=1, num_output=512, stride=1, pad=0)
if fix_dim:
branch_str, res_last_top = genConvBnLayer(train_val=branch_str, name='{}skipConv'.format(prefix), bottom=res_last_top,
kernel_size=1, num_output=128 if small else 512, stride=2, pad=0)
branch_str, last_top = genEltwiseLayer(train_val=branch_str, name='{}eltwise'.format(prefix),
bottom_1=branch_last_top, bottom_2=res_last_top, operation="SUM")
branch_str, last_top = genActivationLayer(train_val=branch_str, name="{}relu".format(prefix), bottom=last_top)
train_val += branch_str
return train_val, last_top
def genRes4(train_val, last_top, small, i, fix_dim):
# prefix="res4{i}_".format(i=str_base(i-1, 26))
prefix="res4.{i}.".format(i=str(i))
branch_str=""
res_last_top=last_top
branch_last_top=last_top
if small:
branch_str, branch_last_top = genConvBnReluLayer(train_val=branch_str, name='{}conv1'.format(prefix), bottom=branch_last_top,
kernel_size=3, num_output=256, stride=2 if i==1 else 1, pad=1)
branch_str, branch_last_top = genConvBnLayer(train_val=branch_str, name='{}conv2'.format(prefix), bottom=branch_last_top,
kernel_size=3, num_output=256, stride=1, pad=1)
else:
branch_str, branch_last_top = genConvBnReluLayer(train_val=branch_str, name='{}conv1'.format(prefix), bottom=branch_last_top,
kernel_size=1, num_output=256, stride=2 if i==1 else 1, pad=0)
branch_str, branch_last_top = genConvBnReluLayer(train_val=branch_str, name='{}conv2'.format(prefix), bottom=branch_last_top,
kernel_size=3, num_output=256, stride=1, pad=1)
branch_str, branch_last_top = genConvBnLayer(train_val=branch_str, name='{}conv3'.format(prefix), bottom=branch_last_top,
kernel_size=1, num_output=1024, stride=1, pad=0)
if fix_dim:
branch_str, res_last_top = genConvBnLayer(train_val=branch_str, name='{}skipConv'.format(prefix), bottom=res_last_top,
kernel_size=1, num_output=256 if small else 1024, stride=2, pad=0)
branch_str, last_top = genEltwiseLayer(train_val=branch_str, name='{}eltwise'.format(prefix),
bottom_1=branch_last_top, bottom_2=res_last_top, operation="SUM")
branch_str, last_top = genActivationLayer(train_val=branch_str, name="{}relu".format(prefix), bottom=last_top)
train_val += branch_str
return train_val, last_top
def genRes5(train_val, last_top, small, i, fix_dim):
# prefix="res5{i}_".format(i=str_base(i-1, 26))
prefix="res5.{i}.".format(i=str(i))
branch_str=""
res_last_top=last_top
branch_last_top=last_top
if small:
branch_str, branch_last_top = genConvBnReluLayer(train_val=branch_str, name='{}conv1'.format(prefix), bottom=branch_last_top,
kernel_size=3, num_output=512, stride=2 if i==1 else 1, pad=1)
branch_str, branch_last_top = genConvBnLayer(train_val=branch_str, name='{}conv2'.format(prefix), bottom=branch_last_top,
kernel_size=3, num_output=512, stride=1, pad=1)
else:
branch_str, branch_last_top = genConvBnReluLayer(train_val=branch_str, name='{}conv1'.format(prefix), bottom=branch_last_top,
kernel_size=1, num_output=512, stride=2 if i==1 else 1, pad=0)
branch_str, branch_last_top = genConvBnReluLayer(train_val=branch_str, name='{}conv2'.format(prefix), bottom=branch_last_top,
kernel_size=3, num_output=512, stride=1, pad=1)
branch_str, branch_last_top = genConvBnLayer(train_val=branch_str, name='{}conv3'.format(prefix), bottom=branch_last_top,
kernel_size=1, num_output=2048, stride=1, pad=0)
if fix_dim:
branch_str, res_last_top = genConvBnLayer(train_val=branch_str, name='{}skipConv'.format(prefix), bottom=res_last_top,
kernel_size=1, num_output=512 if small else 2048, stride=2, pad=0)
branch_str, last_top = genEltwiseLayer(train_val=branch_str, name='{}eltwise'.format(prefix),
bottom_1=branch_last_top, bottom_2=res_last_top, operation="SUM")
branch_str, last_top = genActivationLayer(train_val=branch_str, name="{}relu".format(prefix), bottom=last_top)
train_val += branch_str
return train_val, last_top
def genTrainVal(network):
train_val = ""
train_val, last_top = genDataLayer(train_val=train_val, number=network[0])
train_val = addComment(train_val=train_val, comment="Res1")
train_val, last_top = genConvBnReluLayer(train_val=train_val, name="conv1", bottom="data", kernel_size=7, num_output=64, stride=2, pad=3)
train_val, last_top = genPoolLayer(train_val=train_val, name="pool1", bottom=last_top, kernel_size=3, stride=2, pool_type="MAX")
train_val = addComment(train_val=train_val, comment="ResBlock2")
for i in xrange(1, network[2]+1):
        train_val, last_top = genRes2(train_val=train_val, last_top=last_top, small=network[1] == "small", i=i, fix_dim=False)
train_val = addComment(train_val=train_val, comment="ResBlock3")
for i in xrange(1, network[3]+1):
        train_val, last_top = genRes3(train_val=train_val, last_top=last_top, small=network[1] == "small", i=i, fix_dim=i==1)
train_val = addComment(train_val=train_val, comment="ResBlock4")
for i in xrange(1, network[4]+1):
        train_val, last_top = genRes4(train_val=train_val, last_top=last_top, small=network[1] == "small", i=i, fix_dim=i==1)
train_val = addComment(train_val=train_val, comment="ResBlock5")
for i in xrange(1, network[5]+1):
        train_val, last_top = genRes5(train_val=train_val, last_top=last_top, small=network[1] == "small", i=i, fix_dim=i==1)
train_val, last_top = genPoolLayer(train_val=train_val, name="pool2", bottom=last_top, kernel_size=7, stride=1, pool_type="AVE")
train_val, last_top = genFCLayer (train_val=train_val, name="fc", bottom=last_top, num_output=1000, filler='msra')
fc_top = last_top
train_val, last_top = genSoftmaxLossLayer(train_val=train_val, name="loss", bottom_1=fc_top)
train_val, last_top = genAccuracyLayer(train_val=train_val, name="accuracy/top-1", bottom_1=fc_top, k=1)
train_val, last_top = genAccuracyLayer(train_val=train_val, name="accuracy/top-5", bottom_1=fc_top, k=5)
return train_val
def main():
for net in ResNetConfig.keys():
network_str = genTrainVal(ResNetConfig[net])
        with open("./models/train_val_{}.prototxt".format(net), 'w') as fp:
            fp.write(network_str)
if __name__ == '__main__':
main()
| 37.737931 | 142 | 0.688109 | ["BSD-2-Clause"] | 103yiran/caffe | models/resnet18/ResNet_Generator.py | 16,416 | Python |
import gym
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.obs_dict_replay_buffer import ObsDictRelabelingBuffer
from rlkit.exploration_strategies.base import \
PolicyWrappedWithExplorationStrategy
from rlkit.exploration_strategies.gaussian_and_epislon import \
GaussianAndEpislonStrategy
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import GoalConditionedPathCollector
from rlkit.torch.her.her import HERTrainer
from rlkit.torch.networks import ConcatMlp, TanhMlpPolicy
# from rlkit.torch.td3.td3 import TD3
from rlkit.demos.td3_bc import TD3BCTrainer
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
# from multiworld.envs.mujoco.sawyer_xyz.sawyer_push_multiobj_subset import SawyerMultiobjectEnv
# from multiworld.envs.mujoco.sawyer_xyz.sawyer_reach import SawyerReachXYZEnv
from multiworld.core.image_env import ImageEnv
from multiworld.envs.real_world.sawyer.sawyer_reaching import SawyerReachXYZEnv
# from sawyer_control.envs.sawyer_reaching import SawyerReachXYZEnv
from rlkit.launchers.arglauncher import run_variants
import rlkit.misc.hyperparameter as hyp
# from rlkit.launchers.experiments.ashvin.rfeatures.rfeatures_trainer import TimePredictionTrainer
from rlkit.launchers.experiments.ashvin.rfeatures.rfeatures_rl import encoder_wrapped_td3bc_experiment
if __name__ == "__main__":
variant = dict(
env_class=SawyerReachXYZEnv,
env_kwargs=dict(
action_mode="position",
max_speed = 0.05,
camera="sawyer_head"
),
# algo_kwargs=dict(
# num_epochs=3000,
# max_path_length=20,
# batch_size=128,
# num_eval_steps_per_epoch=1000,
# num_expl_steps_per_train_loop=1000,
# num_trains_per_train_loop=1000,
# min_num_steps_before_training=1000,
# ),
algo_kwargs=dict(
num_epochs=500,
max_path_length=100,
batch_size=128,
num_eval_steps_per_epoch=500,
num_expl_steps_per_train_loop=500,
num_trains_per_train_loop=500,
min_num_steps_before_training=0,
),
model_kwargs=dict(
decoder_distribution='gaussian_identity_variance',
input_channels=3,
imsize=224,
architecture=dict(
hidden_sizes=[200, 200],
),
delta_features=True,
pretrained_features=False,
),
trainer_kwargs=dict(
discount=0.99,
demo_path="/home/lerrel/ros_ws/src/railrl-private/demo_v4_processed.npy",
add_demo_latents=False, # already done
bc_num_pretrain_steps=1000,
rl_weight=0.0,
bc_weight=1.0,
weight_decay=0.001,
),
replay_buffer_kwargs=dict(
max_size=100000,
fraction_goals_rollout_goals=1.0,
fraction_goals_env_goals=0.0,
),
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[32, 32],
),
save_video=True,
dump_video_kwargs=dict(
save_period=1,
# imsize=(3, 500, 300),
),
desired_trajectory="demo_v4.npy",
snapshot_mode="all",
)
search_space = {
'seedid': range(1),
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(encoder_wrapped_td3bc_experiment, variants, run_id=1)
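    # Note (illustrative, not from the original script): the sweeper expands the
    # cross-product of `search_space` over the defaults in `variant`, so widening
    # 'seedid' to range(3) would queue three seeded copies of this configuration
    # through run_variants.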
| 34.719626 | 102 | 0.676716 | ["MIT"] | Asap7772/railrl_evalsawyer | experiments/ashvin/rfeatures/sawyer/door/bc3.py | 3,715 | Python |
import os
import platform
import re
import sys
from contextlib import suppress
from pathlib import Path
from loguru import logger
from sc2 import wsl
BASEDIR = {
"Windows": "C:/Program Files (x86)/StarCraft II",
"WSL1": "/mnt/c/Program Files (x86)/StarCraft II",
"WSL2": "/mnt/c/Program Files (x86)/StarCraft II",
"Darwin": "/Applications/StarCraft II",
"Linux": "~/StarCraftII",
"WineLinux": "~/.wine/drive_c/Program Files (x86)/StarCraft II",
}
USERPATH = {
"Windows": "Documents\\StarCraft II\\ExecuteInfo.txt",
"WSL1": "Documents/StarCraft II/ExecuteInfo.txt",
"WSL2": "Documents/StarCraft II/ExecuteInfo.txt",
"Darwin": "Library/Application Support/Blizzard/StarCraft II/ExecuteInfo.txt",
"Linux": None,
"WineLinux": None,
}
BINPATH = {
"Windows": "SC2_x64.exe",
"WSL1": "SC2_x64.exe",
"WSL2": "SC2_x64.exe",
"Darwin": "SC2.app/Contents/MacOS/SC2",
"Linux": "SC2_x64",
"WineLinux": "SC2_x64.exe",
}
CWD = {
"Windows": "Support64",
"WSL1": "Support64",
"WSL2": "Support64",
"Darwin": None,
"Linux": None,
"WineLinux": "Support64",
}
def platform_detect():
pf = os.environ.get("SC2PF", platform.system())
if pf == "Linux":
return wsl.detect() or pf
return pf
PF = platform_detect()
def get_home():
"""Get home directory of user, using Windows home directory for WSL."""
if PF in {"WSL1", "WSL2"}:
return wsl.get_wsl_home() or Path.home().expanduser()
return Path.home().expanduser()
def get_user_sc2_install():
"""Attempts to find a user's SC2 install if their OS has ExecuteInfo.txt"""
if USERPATH[PF]:
einfo = str(get_home() / Path(USERPATH[PF]))
if os.path.isfile(einfo):
with open(einfo) as f:
content = f.read()
if content:
base = re.search(r" = (.*)Versions", content).group(1)
if PF in {"WSL1", "WSL2"}:
base = str(wsl.win_path_to_wsl_path(base))
if os.path.exists(base):
return base
return None
def get_env():
# TODO: Linux env conf from: https://github.com/deepmind/pysc2/blob/master/pysc2/run_configs/platforms.py
return None
def get_runner_args(cwd):
if "WINE" in os.environ:
runner_file = Path(os.environ.get("WINE"))
runner_file = runner_file if runner_file.is_file() else runner_file / "wine"
"""
TODO Is converting linux path really necessary?
That would convert
'/home/burny/Games/battlenet/drive_c/Program Files (x86)/StarCraft II/Support64'
to
'Z:\\home\\burny\\Games\\battlenet\\drive_c\\Program Files (x86)\\StarCraft II\\Support64'
"""
return [runner_file, "start", "/d", cwd, "/unix"]
return []
def latest_executeble(versions_dir, base_build=None):
latest = None
if base_build is not None:
with suppress(ValueError):
latest = (
int(base_build[4:]),
max(p for p in versions_dir.iterdir() if p.is_dir() and p.name.startswith(str(base_build))),
)
if base_build is None or latest is None:
latest = max((int(p.name[4:]), p) for p in versions_dir.iterdir() if p.is_dir() and p.name.startswith("Base"))
version, path = latest
if version < 55958:
logger.critical("Your SC2 binary is too old. Upgrade to 3.16.1 or newer.")
sys.exit(1)
return path / BINPATH[PF]
class _MetaPaths(type):
""""Lazily loads paths to allow importing the library even if SC2 isn't installed."""
# pylint: disable=C0203
def __setup(self):
if PF not in BASEDIR:
logger.critical(f"Unsupported platform '{PF}'")
sys.exit(1)
try:
base = os.environ.get("SC2PATH") or get_user_sc2_install() or BASEDIR[PF]
self.BASE = Path(base).expanduser()
self.EXECUTABLE = latest_executeble(self.BASE / "Versions")
self.CWD = self.BASE / CWD[PF] if CWD[PF] else None
self.REPLAYS = self.BASE / "Replays"
if (self.BASE / "maps").exists():
self.MAPS = self.BASE / "maps"
else:
self.MAPS = self.BASE / "Maps"
except FileNotFoundError as e:
logger.critical(f"SC2 installation not found: File '{e.filename}' does not exist.")
sys.exit(1)
# pylint: disable=C0203
def __getattr__(self, attr):
# pylint: disable=E1120
self.__setup()
return getattr(self, attr)
class Paths(metaclass=_MetaPaths):
"""Paths for SC2 folders, lazily loaded using the above metaclass."""
| 29.740506 | 118 | 0.604597 | [
"MIT"
] | Sc2-AI-Cup/example-bot-marinerush | sc2/paths.py | 4,699 | Python |
# http://www.pythonchallenge.com/pc/def/peak.html
import requests, pickle
input_url = "http://www.pythonchallenge.com/pc/def/banner.p"
obj = requests.get(input_url)
banner = pickle.loads(obj.content)  # use the raw bytes so pickle receives a str, not unicode
final = []
for index, value in enumerate(banner):
    for j in value:
        final.append(j[0] * j[1])  # each (char, count) pair expands to a run of characters
final.append('\n')
print "".join(final)
| 19.578947 | 60 | 0.672043 | [
"MIT"
] | yarabarla/python-challenge | 5.py | 372 | Python |
SUMMARIES = {
'git-adjust': 'Amend any commit, not just the last',
'git-copy': 'Copy a git branch locally and remotely',
'git-delete': 'Delete one or more branches locally and remotely',
'git-gitz': 'Print information about the gitz git commands',
'git-go': 'Go to a specific location in the current repo',
'git-infer': 'Commit changes with an auto-generated message',
'git-multi-pick': 'Cherry-pick multiple commits, with an optional squash',
'git-new': 'Create and push new branches',
'git-permute': 'Reorder and delete commits in the current branch',
'git-rename': 'Rename a git branch locally and remotely',
'git-rot': 'Rotate through branches in a Git repository',
'git-split': 'Split a range of commits into many single-file commits',
'git-st': 'Colorful, compact git status',
'git-stripe': 'Push a sequence of commit IDs to a remote repository',
'git-update': 'Update branches from a reference branch',
'git-when': 'When did each file change (date, commit, message)?',
}
| 54.842105 | 78 | 0.68714 | [
"MIT"
] | rec/gitz | gitz/program/summaries.py | 1,042 | Python |
# ABC082b
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
s = input()[:-1]
t = input()[:-1]
# s rearranged into its smallest form must be strictly smaller than t rearranged into its largest form
print('Yes' if sorted(s) < sorted(t, reverse=True) else 'No')
| 19.333333 | 61 | 0.66092 | [
"Unlicense"
] | VolgaKurvar/AtCoder | ABC082/ABC082b.py | 174 | Python |
import argparse
import os
import os.path as osp
import shutil
import tempfile
import json
import pdb
import numpy as np
import pickle
import pandas as pd
import mmcv
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, load_checkpoint
from mmdet.apis import init_dist
from mmdet.core import lvis_eval, results2json, wrap_fp16_model
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
from mmdet.core import build_assigner
from utils import filter_logits_by_gt
TEMP_DATASET_SIZE = 5000
def single_gpu_test(model, data_loader, show=False, cfg=None, index=0, img_meta=None):
model.eval()
results = []
logits_list = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
class_instances = pickle.load(open('train_instances_list.p', 'rb'))
normalized_classes = np.zeros(1231)
for i, c in enumerate(class_instances):
if c:
normalized_classes[i] = 1/np.sqrt(c)
for i, data in enumerate(data_loader):
# if i < TEMP_DATASET_SIZE*index:
# continue
if i >= TEMP_DATASET_SIZE*(index+1): # temporary condition for testing
break
with torch.no_grad():
bbox_results, det_bboxes, det_labels, scores = model(return_loss=False, rescale=not show, **data, img_id=i, norm_cls=normalized_classes)
det_bboxes = det_bboxes.detach().cpu()
det_labels = det_labels.detach().cpu()
scores = scores.detach().cpu()
# save original logits:
# filename = data['img_meta'][0].data[0][0]['filename'].split('/')[-1] # get the file name, e.g: '000000397133.jpg'
# with open(f'test_logits/logits_per_img/{filename}.p', 'wb') as outfile:
# pickle.dump(scores, outfile)
results.append(bbox_results)
logits_list.append((det_bboxes, det_labels, scores))
if show:
model.module.show_result(data, bbox_results)
batch_size = data['img'][0].size(0)
for _ in range(batch_size):
prog_bar.update()
    return results, logits_list  # also return the classification logits and labels
def multi_gpu_test(model, data_loader, tmpdir=None):
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
results.append(result)
if rank == 0:
batch_size = data['img'][0].size(0)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
results = collect_results(results, len(dataset), tmpdir)
return results
def collect_results(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test detector')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file')
parser.add_argument(
'--json_out',
help='output result file name without extension',
type=str)
parser.add_argument(
'--eval',
type=str,
nargs='+',
choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
help='eval types')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--tmpdir', help='tmp dir for writing some results')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--tau', type=float, default=0.0)
parser.add_argument('--data_index', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def reweight_cls(model, tauuu):
if tauuu == 0:
return model
model_dict = model.state_dict()
def pnorm(weights, tau):
normB = torch.norm(weights, 2, 1)
ws = weights.clone()
for i in range(0, weights.shape[0]):
ws[i] = ws[i] / torch.pow(normB[i], tau)
return ws
reweight_set = ['bbox_head.fc_cls.weight']
tau = tauuu
for k in reweight_set:
weight = model_dict[k] # ([1231, 1024])
weight = pnorm(weight, tau)
model_dict[k].copy_(weight)
print('Reweight param {:<30} with tau={}'.format(k, tau))
return model
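# Illustrative note (not in the original source): pnorm rescales each classifier
# weight row w_i to w_i / ||w_i||_2^tau, so tau=0 leaves the weights unchanged and
# tau=1 fully normalises the rows, e.g.
#   model = reweight_cls(model, 0.7)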
def logits_process(logits):
"""
    Get the logits as a tuple of softmax logits, bounding boxes and labels.
    Output: three matrices (plus the per-detection proposal numbers):
    logits_mat in size (dataset, 300, 1231) - top 300 logits for each image.
    bboxes_mat in size (dataset, 300, 4) - top 300 bboxes for each image.
    labels_mat in size (dataset, 300, 1) - corresponding labels. 300 for each image.
"""
# all_bboxes_logits = []
# for image in logits:
# image_bboxes_logits = []
# for i, bbox in enumerate(image[0]):
# bboxes_logits_dict = dict() # image[0] = tensor including 300 bboxes
# index = int(bbox[5].item()) # bbox[6] specifies the relevant line in the logits matrix
# logits_vector = image[1][index]
# bboxes_logits_dict['bbox'] = bbox[:4]
# bboxes_logits_dict['score'] = bbox[4]
# bboxes_logits_dict['logits'] = logits_vector
# image_bboxes_logits.append(bboxes_logits_dict)
# all_bboxes_logits.append(image_bboxes_logits)
# for idx in range(len(dataset)):
# img_id = dataset.img_ids[idx]
logits_mat = np.zeros((TEMP_DATASET_SIZE, 300, 1231))
bboxes_mat = np.zeros((TEMP_DATASET_SIZE, 300, 4))
labels_mat = np.zeros((TEMP_DATASET_SIZE, 300))
proposal_num = np.zeros((TEMP_DATASET_SIZE, 300, 1))
for i, image in enumerate(logits):
for j, bbox in enumerate(image[0]): # image[0] = tensor including 300 bboxes
# bboxes_logits_dict = dict()
index = int(bbox[5].item()) # bbox[5] specifies the relevant line in the logits matrix
logits_vector = image[2][index] # image[2] includes the scores
# bbox_arr = np.array(bbox[:4])
bboxes_mat[i][j][:] = bbox[:4]
logits_mat[i][j] = np.array(logits_vector)
# added this to compute proposal numbers
proposal_num[i][j] = bbox[-1]
labels_mat[i] = image[1] # image[1] includes the labels
return bboxes_mat, labels_mat, logits_mat, proposal_num
def main():
args = parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.data_index % 2)
assert args.out or args.show or args.json_out, \
('Please specify at least one operation (save or show the results) '
'with the argument "--out" or "--show" or "--json_out"')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
if args.json_out is not None and args.json_out.endswith('.json'):
args.json_out = args.json_out[:-5]
cfg = mmcv.Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test) # original - test | changed to test_with_train_data
data_loader = build_dataloader(
dataset,
imgs_per_gpu=1,
workers_per_gpu=0, # cfg.data.workers_per_gpu
dist=distributed,
shuffle=False)
# save gt boxes and labels for learning nms
# for i, data in enumerate(data_loader):
# img_id = dataset.img_infos[i]['id']
# gt = dataset.get_ann_info(i)
# gt_boxes = gt['bboxes']
# gt_labels = gt['labels']
# filename = f'test_logits/learning_nms_data/{i}/gt_boxes.p' # file name for new directory
# os.makedirs(os.path.dirname(filename), exist_ok=True)
# with open(f'test_logits/learning_nms_data/{i}/gt_boxes.p', 'wb') as outfile: # possible to include img_id
# pickle.dump(gt_boxes, outfile)
# with open(f'test_logits/learning_nms_data/{i}/gt_labels.p', 'wb') as outfile:
# pickle.dump(gt_boxes, outfile)
#
# # filename = dataset.img_infos[i]['filename']
# # with open(f'test_gt/{filename}.p', 'wb') as outfile:
# # pickle.dump(gt_labels, outfile)
# save gt instances per class
# instances_list = np.zeros(1231)
# for i, data in enumerate(data_loader): # original script in test_lvis_tnorm.py
# gt = dataset.get_ann_info(i)
# print(i)
# for label in gt['labels']:
# instances_list[label] += 1
# with open('train_instances_list.p', 'wb') as outfile:
# pickle.dump(instances_list, outfile)
# build the model and load checkpoint
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
model = reweight_cls(model, args.tau)
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs, logits = single_gpu_test(model, data_loader, args.show, cfg, args.data_index)
else:
model = MMDistributedDataParallel(model.cuda())
outputs = multi_gpu_test(model, data_loader, args.tmpdir)
# save outputs as csv:
# pd.DataFrame(outputs).to_csv("original_outputs_full.csv")
# preprocess logits and save them on json file
# otp = np.asarray(outputs) # temp
# df = pd.DataFrame(otp)
# df.to_csv('otp.csv', index=False)
bboxes_mat, labels_mat, logits_mat, proposal_num = logits_process(logits)
# save labels, boxes and logits
# with open('test_logits/dragon_test_bboxes_mat.p', 'wb') as outfile:
# pickle.dump(bboxes_mat, outfile)
# with open('test_logits/dragon_labels_mat.p', 'wb') as outfile:
# pickle.dump(labels_mat, outfile)
# with open('logits_mat1.p', 'wb') as outfile:
# pickle.dump(logits_mat[:1000], outfile)
# with open('logits_mat2.p', 'wb') as outfile:
# pickle.dump(logits_mat[1000:2000], outfile)
# with open('logits_mat3.p', 'wb') as outfile:
# pickle.dump(logits_mat[2000:3000], outfile)
# with open('logits_mat4.p', 'wb') as outfile:
# pickle.dump(logits_mat[3000:4000], outfile)
# with open('logits_mat5.p', 'wb') as outfile:
# pickle.dump(logits_mat[4000:], outfile)
# filter detections by iou with gt (for dragon training)
gt_list = []
results_per_image = []
for i, data in enumerate(data_loader): # original script in test_lvis_tnorm.py
# if i < TEMP_DATASET_SIZE*args.data_index:
# continue
if i >= TEMP_DATASET_SIZE: # temporary condition for testing
break
print(i)
img_id = dataset.img_infos[i]['id']
gt = dataset.get_ann_info(i)
gt_dict = dict()
gt_dict['id'] = img_id
gt_dict['bboxes'] = gt['bboxes']
gt_dict['labels'] = gt['labels']
gt_list.append(gt_dict)
# filter logits according to equivalent ground truth.
# after filtering, for each image we get a list in length of classes and detections belongs to this class.
results = filter_logits_by_gt(bboxes_mat[i], logits_mat[i], gt_list[i], proposal_num[i], i)
results_per_image.append(results)
with open(f'dragon_bboxes_logits_map24.p', 'wb') as outfile:
pickle.dump(results_per_image, outfile)
print('saved')
# evaluation:
rank, _ = get_dist_info()
if args.out and rank == 0:
print('\nwriting results to {}'.format(args.out))
mmcv.dump(outputs, args.out)
eval_types = args.eval
if eval_types:
print('Starting evaluate {}'.format(' and '.join(eval_types)))
if eval_types == ['proposal_fast']:
result_file = args.out
lvis_eval(result_file, eval_types, dataset.lvis)
else:
if not isinstance(outputs[0], dict):
result_files = results2json(dataset, outputs, args.out, args.data_index)
lvis_eval(result_files, eval_types, dataset.lvis, max_dets=300)
else:
for name in outputs[0]:
print('\nEvaluating {}'.format(name))
outputs_ = [out[name] for out in outputs]
result_file = args.out + '.{}'.format(name)
result_files = results2json(dataset, outputs_,
result_file)
lvis_eval(result_files, eval_types, dataset.lvis)
# Save predictions in the COCO json format
if args.json_out and rank == 0:
if not isinstance(outputs[0], dict):
results2json(dataset, outputs, args.json_out)
else:
for name in outputs[0]:
outputs_ = [out[name] for out in outputs]
result_file = args.json_out + '.{}'.format(name)
results2json(dataset, outputs_, result_file)
if __name__ == '__main__':
main()
| 39.072139 | 148 | 0.625199 | [
"Apache-2.0"
] | ydiller/BalancedGroupSoftmax | tools/test_lvis.py | 15,707 | Python |
from __future__ import absolute_import
from __future__ import print_function
from typing import Any, Iterable
from optparse import make_option
import logging
import sys
from django.core.management.base import BaseCommand, CommandParser
from zerver.lib import utils
from zerver.models import UserMessage, get_user_profile_by_email
from django.db import models
class Command(BaseCommand):
help = """Sets user message flags. Used internally by actions.py. Marks all
Expects a comma-delimited list of user message ids via stdin, and an EOF to terminate."""
def add_arguments(self, parser):
# type: (CommandParser) -> None
parser.add_argument('-r', '--for-real',
dest='for_real',
action='store_true',
default=False,
help="Actually change message flags. Default is a dry run.")
parser.add_argument('-f', '--flag',
dest='flag',
type=str,
help="The flag to add of remove")
parser.add_argument('-o', '--op',
dest='op',
type=str,
help="The operation to do: 'add' or 'remove'")
parser.add_argument('-u', '--until',
dest='all_until',
type=str,
help="Mark all messages <= specific usermessage id")
parser.add_argument('-m', '--email',
dest='email',
type=str,
help="Email to set messages for")
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
if not options["flag"] or not options["op"] or not options["email"]:
print("Please specify an operation, a flag and an email")
exit(1)
op = options['op']
flag = getattr(UserMessage.flags, options['flag'])
all_until = options['all_until']
email = options['email']
user_profile = get_user_profile_by_email(email)
if all_until:
filt = models.Q(id__lte=all_until)
else:
filt = models.Q(message__id__in=[mid.strip() for mid in sys.stdin.read().split(',')])
mids = [m.id for m in
UserMessage.objects.filter(filt, user_profile=user_profile).order_by('-id')]
if options["for_real"]:
sys.stdin.close()
sys.stdout.close()
sys.stderr.close()
def do_update(batch):
# type: (Iterable[int]) -> None
msgs = UserMessage.objects.filter(id__in=batch)
if op == 'add':
msgs.update(flags=models.F('flags').bitor(flag))
elif op == 'remove':
msgs.update(flags=models.F('flags').bitand(~flag))
if not options["for_real"]:
logging.info("Updating %s by %s %s" % (mids, op, flag))
logging.info("Dry run completed. Run with --for-real to change message flags.")
exit(1)
utils.run_in_batches(mids, 400, do_update, sleep_time=3)
exit(0)
| 36.224719 | 97 | 0.537531 | [
"Apache-2.0"
] | dehnert/zulip | zerver/management/commands/set_message_flags.py | 3,224 | Python |
# coding: utf-8
import errno
import os
import random
import re
from contextlib import contextmanager
import h5py
import numpy as np
import time
import yaml
from datetime import datetime
def write_yaml_file(yaml_dict, file_yaml):
path_yaml = os.path.dirname(file_yaml)
if not os.path.isdir(path_yaml):
os.makedirs(path_yaml)
with open(file_yaml, 'w') as stream:
yaml.dump(yaml_dict, stream, default_flow_style=False)
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def find_file(path, reg):
"""
    path: directory to walk through
    reg: regular expression that matching file names must satisfy
"""
FileLst = []
try:
lst = os.walk(path)
for root, dirs, files in lst:
for name in files:
try:
m = re.match(reg, name)
except Exception as e:
continue
if m:
FileLst.append(os.path.join(root, name))
except Exception as e:
print(str(e))
return sorted(FileLst)
def path_replace_ymd(path, ymd):
"""
    path: path template; the %YYYY, %MM, %DD and %JJJ placeholders in it are replaced with the given date
    ymd: yyyymmdd (20180101)
    """
    # convert to a datetime object
ymd = datetime.strptime(ymd, '%Y%m%d')
yy = ymd.strftime('%Y')
mm = ymd.strftime('%m')
dd = ymd.strftime('%d')
jj = ymd.strftime('%j')
path = path.replace('%YYYY', yy)
path = path.replace('%MM', mm)
path = path.replace('%DD', dd)
path = path.replace('%JJJ', jj)
return path
def is_none(*args):
"""
    Check whether any of the passed-in arguments is None
:param args:
:return:
"""
has_none = False
for arg in args:
if arg is None:
has_none = True
return has_none
def copy_attrs_h5py(pre_object, out_object):
"""
    Copy the attributes of a file, dataset or group
    :param pre_object: dataset or group whose attributes are copied
    :param out_object: dataset or group that receives the attributes
:return:
"""
for akey in list(pre_object.attrs.keys()):
out_object.attrs[akey] = pre_object.attrs[akey]
def read_dataset_hdf5(file_path, set_name):
"""
    Read an hdf5 file and return a numpy multidimensional array
    :param file_path: (unicode) file path
    :param set_name: (str or list) dataset name(s)
    :return: if set_name is a string, returns a numpy.ndarray
             if set_name is a list, returns a dict whose keys are the dataset names
             and whose values are numpy.ndarray
"""
if isinstance(set_name, str):
if os.path.isfile(file_path):
file_h5py = h5py.File(file_path, 'r')
data = file_h5py.get(set_name)[:]
dataset = np.array(data)
file_h5py.close()
return dataset
else:
raise ValueError('value error: file_path')
elif isinstance(set_name, list):
datasets = {}
if os.path.isfile(file_path):
file_h5py = h5py.File(file_path, 'r')
for name in set_name:
data = file_h5py.get(name)[:]
dataset = np.array(data)
datasets[name] = dataset
file_h5py.close()
return datasets
else:
raise ValueError('value error: file_path')
else:
raise ValueError('value error: set_name')
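# Usage sketch (illustrative only; the file and dataset names below are hypothetical):
#   lat = read_dataset_hdf5(u'/data/sample.hdf5', 'Latitude')
#   both = read_dataset_hdf5(u'/data/sample.hdf5', ['Latitude', 'Longitude'])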
def attrs2dict(attrs):
"""
    Convert an HDF5 attrs object into a dict
:return:
"""
d = {}
for k, v in list(attrs.items()):
d[k] = v
return d
@contextmanager
def progress_lock(max_wait_time=5):
try:
sleep_time = 0
lock = "progress.lock"
while True:
if os.path.isfile(lock):
if sleep_time > max_wait_time:
try:
os.remove(lock)
break
except:
continue
else:
random_number = random.random() * 0.1
sleep_time += random_number
time.sleep(random_number)
else:
break
with open(lock, "w"):
pass
yield
finally:
try:
os.remove(lock)
except:
pass
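# Usage sketch (illustrative only): serialise access to a shared progress file;
# the helper called inside the block is hypothetical.
#   with progress_lock():
#       update_progress_file()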
def write_txt(in_file, head, bodys, keylens=8):
"""
    description: wangpeng add 20180615 (write or update a txt file)
    :in_file  path of the file to write
    :head     header line of the file
    :bodys    body lines of the file
    :keylens  length of the first-column key used when updating the file
"""
allLines = []
DICT_D = {}
FilePath = os.path.dirname(in_file)
if not os.path.exists(FilePath):
os.makedirs(FilePath)
if os.path.isfile(in_file) and os.path.getsize(in_file) != 0:
fp = open(in_file, 'r')
fp.readline()
Lines = fp.readlines()
fp.close()
        # use the dict keys to keep times unique while loading the existing data
        for Line in Lines:
            DICT_D[Line[:keylens]] = Line[keylens:]
        # add or update entries
        for Line in bodys:
            DICT_D[Line[:keylens]] = Line[keylens:]
        # sort by time
newLines = sorted(
iter(DICT_D.items()), key=lambda d: d[0], reverse=False)
for i in range(len(newLines)):
allLines.append(str(newLines[i][0]) + str(newLines[i][1]))
fp = open(in_file, 'w')
fp.write(head)
fp.writelines(allLines)
fp.close()
else:
fp = open(in_file, 'w')
fp.write(head)
fp.writelines(bodys)
fp.close()
def str_format(string, values):
"""
    Format a string by replacing %key placeholders with the given values
    :param string:(str) "DCC: %sat_sensor_Projection_%ymd (resolution %resolution degrees)"
    :param values:(dict) {"sat_sensor": sat_sensor, "resolution": str(resolution), "ymd": ymd}
    :return: DCC: FY3D+MERSI_Projection_201712 (resolution 1 degree)
"""
if not isinstance(string, str):
return
for k, v in values.items():
string = string.replace("%" + str(k), str(v))
return string
def get_files_by_ymd(dir_path, time_start, time_end, ext=None, pattern_ymd=None):
"""
    :param dir_path: directory to search
    :param time_start: start time
    :param time_end: end time
    :param ext: file extension, e.g. '.hdf5'
    :param pattern_ymd: regex used to extract the time, e.g. r".*(\d{8})_(\d{4})_"
:return: list
"""
files_found = []
if pattern_ymd is not None:
pattern = pattern_ymd
else:
pattern = r".*(\d{8})"
for root, dirs, files in os.walk(dir_path):
for file_name in files:
if ext is not None:
if '.' not in ext:
ext = '.' + ext
if os.path.splitext(file_name)[1].lower() != ext.lower():
continue
re_result = re.match(pattern, file_name)
if re_result is not None:
time_file = ''.join(re_result.groups())
else:
continue
if int(time_start) <= int(time_file) <= int(time_end):
files_found.append(os.path.join(root, file_name))
files_found.sort()
return files_found
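# Usage sketch (illustrative only; the directory below is hypothetical):
#   files = get_files_by_ymd('/data/FY3D', '20180101', '20180131', ext='.hdf5',
#                            pattern_ymd=r".*(\d{8})_(\d{4})_")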
class ReadOrbitCrossFile(object):
"""
    Reader for satellite orbit cross-over prediction files in several formats
"""
@staticmethod
def read_cross_file(in_file, file_type):
"""
:param in_file:
:param file_type:
:return:
"""
data = {
'ymdhms1': None,
'ymdhms2': None,
'lon1': None,
'lat1': None,
'lon2': None,
'lat2': None,
            'fix_name': None  # only present for fixed-site (leo_fix) files
}
if not os.path.isfile(in_file):
print('***WARNING***File is not exist: {}'.format(in_file))
return data
# with open(in_file, 'r') as fp:
# lines_10 = fp.readlines()[0: 10]
#
# count = 0
# for line in lines_10:
# print count, line.split()
# count += 1
if file_type == 'leo_area':
data_raw = np.loadtxt(in_file, skiprows=10, dtype={
'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7'),
'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4')})
if data_raw.size != 0:
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d3']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d4']
data['lon1'] = data_raw['d5']
data['lat2'] = data_raw['d6']
data['lon2'] = data_raw['d7']
elif file_type == 'leo_leo':
data_raw = np.loadtxt(in_file, skiprows=10, dtype={
'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9'),
'formats': ('S8', 'S8', 'f4', 'f4', 'S8', 'f4', 'f4', 'f4', 'f4')})
if data_raw.size != 0:
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d5']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d3']
data['lon1'] = data_raw['d4']
data['lat2'] = data_raw['d6']
data['lon2'] = data_raw['d7']
elif file_type == 'leo_fix':
            # data
data_raw = np.loadtxt(in_file, skiprows=10, dtype={
'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8',),
'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4', 'f4')})
if data_raw.size != 0:
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d2']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d6']
data['lon1'] = data_raw['d7']
data['lat2'] = data_raw['d4']
data['lon2'] = data_raw['d5']
data['fix_name'] = data_raw['d3']
elif file_type == 'geo_leo':
            # info
data_raw = np.loadtxt(in_file, skiprows=10, dtype={
'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7'),
'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4')})
if data_raw.size != 0:
ymd = data_raw['d1']
hms1 = data_raw['d2']
hms2 = data_raw['d3']
ymdhms1 = list(map(ymdhms2date, ymd, hms1))
ymdhms2 = list(map(ymdhms2date, ymd, hms2))
data['ymdhms1'] = ymdhms1
data['ymdhms2'] = ymdhms2
data['lat1'] = data_raw['d4']
data['lon1'] = data_raw['d5']
data['lat2'] = data_raw['d6']
data['lon2'] = data_raw['d7']
else:
raise KeyError('Cant handle this file type: {}'.format(file_type))
return data
def ymdhms2date(ymd, hms):
"""
ymd = 20180101
hms = 04:04:04
"""
ymdhms = ymd + hms
return datetime.strptime(ymdhms, '%Y%m%d%H:%M:%S')
def CombineTimeList(TimeList):
    # merge overlapping time ranges in the list into new, combined ranges
    newTimeList = []
    # default sort, ascending
    TimeList.sort()
    # track the range currently being merged
stime = TimeList[0][0]
etime = TimeList[0][1]
for i in range(1, len(TimeList), 1):
if TimeList[i][1] <= etime:
continue
elif TimeList[i][0] <= etime <= TimeList[i][1]:
etime = TimeList[i][1]
elif TimeList[i][0] > etime:
newTimeList.append([stime, etime])
stime = TimeList[i][0]
etime = TimeList[i][1]
newTimeList.append([stime, etime])
return newTimeList
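# Illustrative example (not part of the original module): overlapping ranges are
# merged after sorting, e.g.
#   CombineTimeList([[1, 5], [4, 8], [10, 12]])  ->  [[1, 8], [10, 12]]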
def get_files_by_date(dir_path, time_start=None, time_end=None, ext=None, pattern=None):
"""
    :param dir_path: directory to search
    :param time_start: start time
    :param time_end: end time
    :param ext: file extension, e.g. '.hdf5'
    :param pattern: regex used to extract the time
:return: list
"""
files_found = []
for root, dirs, files in os.walk(dir_path):
for file_name in files:
if ext is not None:
if '.' not in ext:
ext = '.' + ext
if os.path.splitext(file_name)[1].lower() != ext.lower():
continue
if pattern is not None:
re_result = re.match(pattern, file_name)
if re_result is None:
continue
if time_start is not None:
time_file = ''.join(re_result.groups())
if not int(time_start) <= int(time_file) <= int(time_end):
continue
files_found.append(os.path.join(root, file_name))
files_found.sort()
return files_found
if __name__ == '__main__':
pass
path_out_map = str_format('/abc/%YYYY%MM%DD', {
'YYYY': '20180101',
'MM': '01',
'DD': '01',
})
print(path_out_map)
# path1 = "E:/projects/ocrs/cfg/global.cfg"
# path2 = "E:/projects/ocrs/cfg/FY3B+MERSI.yaml"
# c = Config(path1)
# c = Config(path2)
# print c.error
# l = c.__dict__.keys()
# l = sorted(l)
# for k in l:
# print k, ":", c.__dict__[k]
# print k
# ################# test ReadOrbitCrossFile ################
# LEO_AREA
# leo_area_name = r'C:\Users\wangpeng\Desktop\tmp\cross\AQUA_australia_LEO_AREA_20171221.txt'
# read_data = ReadOrbitCrossFile.read_cross_file(leo_area_name, 'leo_area')
# LEO_LEO
# leo_leo_name = r'C:\Users\wangpeng\Desktop\tmp\cross\FENGYUN-3D_NPP_LEO_LEO_20180901.txt'
# read_data = ReadOrbitCrossFile.read_cross_file(leo_leo_name, 'leo_leo')
# LEO_FIX
# leo_fix_name = r'C:\Users\wangpeng\Desktop\tmp\cross\AQUA_FIX_LEO_FIX_20181101.txt'
# read_data = ReadOrbitCrossFile.read_cross_file(leo_fix_name, 'leo_fix')
# GEO_LEO
# geo_leo_name = r'C:\Users\wangpeng\Desktop\tmp\cross\FENGYUN-2F_METOP-A_GEO_LEO20181101.txt'
# read_data = ReadOrbitCrossFile.read_cross_file(geo_leo_name, 'geo_leo')
# keys = read_data.keys()
# keys.sort()
# for data_name in keys:
# print data_name, type(read_data[data_name]), read_data[data_name]
| 29.332645 | 98 | 0.520885 | [
"Apache-2.0"
] | NingAnMe/snow_cover_of_remote_sensing | lib/pb_io.py | 14,837 | Python |
def hello_world(context):
context.logger.info("hello world")
| 21.666667 | 38 | 0.738462 | [
"Apache-2.0"
] | AlonMaor14/mlrun | tests/system/runtimes/assets/kubejob_function.py | 65 | Python |
import os
import sys
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..'))
import tensorflow as tf
slim = tf.contrib.slim
def disc_net_64(img1, img2, target_dim, scope="DISC", reuse=False):
nets_dict = dict()
nets_dict['input1'] = img1
nets_dict['input2'] = img2
with tf.variable_scope(scope, reuse=reuse):
with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_regularizer=slim.l2_regularizer(0.00004)):
with slim.arg_scope([slim.conv2d], weights_initializer=tf.contrib.slim.variance_scaling_initializer(), stride=2, padding='SAME', activation_fn=tf.nn.relu) :
with slim.arg_scope([slim.fully_connected], biases_initializer=tf.zeros_initializer()):
nets_dict['concat'] = tf.concat([nets_dict['input1'], nets_dict['input2']], axis=3)
nets_dict['conv2d0'] = slim.conv2d(nets_dict['concat'], 32, [4, 4], scope='conv2d_0')
nets_dict['conv2d1'] = slim.conv2d(nets_dict['conv2d0'], 32, [4, 4], scope='conv2d_1')
nets_dict['conv2d2'] = slim.conv2d(nets_dict['conv2d1'], 64, [4, 4], scope='conv2d_2')
nets_dict['conv2d3'] = slim.conv2d(nets_dict['conv2d2'], 64, [4, 4], scope='conv2d_3')
n = tf.reshape(nets_dict['conv2d3'], [-1, 4*4*64])
nets_dict['fc0'] = slim.fully_connected(n, 256, activation_fn=tf.nn.relu, scope = "output_fc0")
nets_dict['output'] = slim.fully_connected(nets_dict['fc0'], target_dim, activation_fn=None, scope = "output_fc1")
return nets_dict
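# Usage sketch (illustrative; the 64x64x3 input shape is an assumption consistent
# with the 4*4*64 reshape above, i.e. four stride-2 convolutions from 64x64):
#   img_a = tf.placeholder(tf.float32, [None, 64, 64, 3])
#   img_b = tf.placeholder(tf.float32, [None, 64, 64, 3])
#   nets = disc_net_64(img_a, img_b, target_dim=1)
#   logits = nets['output']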
| 62.730769 | 168 | 0.633967 | [
"MIT"
] | zhuxinqimac/DisentanglementICML19 | Dsprites_exp/VAE-VC/local_nets.py | 1,631 | Python |
#!/usr/bin/env python3
# purpose: Mimics a simple telnet daemon's login prompts and records the output
# starts a tcp listener on port and address with variables defined below
# author: Raresteak
# date: 6 October 2021
# version: 3
import datetime
import socket
HOST = '127.0.0.1'
PORT = 2323
FILE = "stn-results.json"
fh = open(FILE, "a")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen()
while True:
conn, addr = s.accept()
with conn:
timeNow = datetime.datetime.now()
conn.send(b'Warning: Telnet is not a secure protocol, and it is recommended to use Stelnet.\n\nLogin authentication\n\n\nUsername: ')
username = ""
while True:
data = conn.recv(1024)
if not data:
break
else:
try:
username = data.decode("utf-8").rstrip()
except UnicodeDecodeError:
username = "cancelledInput"
conn.send(b'Password: ')
password = ""
while True:
data = conn.recv(1024)
if not data:
break
else:
try:
password = data.decode("utf-8").rstrip()
except UnicodeDecodeError:
password = "cancelledInput"
conn.sendall(b'\b \b')
break
break
output = str("{ \"time\": \""
+ timeNow.strftime('%Y-%m-%dT%H:%M:%S')
+ "\", \"src.ip\": \"" + addr[0]
+ "\", \"username\": \"" + username
+ "\", \"password\": \"" + password + "\" }")
print(output)
fh.write(output + "\n")
| 37.472727 | 145 | 0.447841 | [
"BSD-2-Clause"
] | raresteak/simple-telnet-deception | simple-telnet-deception.py | 2,061 | Python |
import unittest
import filecmp
import datetime
from utils import in_tst_dir, in_tst_output_dir
import xlwt as xlwt
ezxf = xlwt.easyxf
def write_xls(file_name, sheet_name, headings, data, heading_xf, data_xfs):
book = xlwt.Workbook()
sheet = book.add_sheet(sheet_name)
rowx = 0
for colx, value in enumerate(headings):
sheet.write(rowx, colx, value, heading_xf)
sheet.set_panes_frozen(True) # frozen headings instead of split panes
sheet.set_horz_split_pos(rowx+1) # in general, freeze after last heading row
sheet.set_remove_splits(True) # if user does unfreeze, don't leave a split there
for row in data:
rowx += 1
for colx, value in enumerate(row):
sheet.write(rowx, colx, value, data_xfs[colx])
book.save(file_name)
EXAMPLE_XLS = 'xlwt_easyxf_simple_demo.xls'
class TestUnicode0(unittest.TestCase):
def create_example_xls(self, filename):
mkd = datetime.date
hdngs = ['Date', 'Stock Code', 'Quantity', 'Unit Price', 'Value', 'Message']
kinds = 'date text int price money text'.split()
data = [
[mkd(2007, 7, 1), 'ABC', 1000, 1.234567, 1234.57, ''],
[mkd(2007, 12, 31), 'XYZ', -100, 4.654321, -465.43, 'Goods returned'],
] + [
[mkd(2008, 6, 30), 'PQRCD', 100, 2.345678, 234.57, ''],
] * 100
heading_xf = ezxf('font: bold on; align: wrap on, vert centre, horiz center')
kind_to_xf_map = {
'date': ezxf(num_format_str='yyyy-mm-dd'),
'int': ezxf(num_format_str='#,##0'),
'money': ezxf('font: italic on; pattern: pattern solid, fore-colour grey25',
num_format_str='$#,##0.00'),
'price': ezxf(num_format_str='#0.000000'),
'text': ezxf(),
}
data_xfs = [kind_to_xf_map[k] for k in kinds]
write_xls(filename, 'Demo', hdngs, data, heading_xf, data_xfs)
def test_example_xls(self):
self.create_example_xls(in_tst_output_dir(EXAMPLE_XLS))
self.assertTrue(filecmp.cmp(in_tst_dir(EXAMPLE_XLS),
in_tst_output_dir(EXAMPLE_XLS),
shallow=False))
| 40.232143 | 88 | 0.59787 | [
"Apache-2.0"
] | 10088/hue | desktop/core/ext-py/xlwt-1.3.0/tests/test_easyxf.py | 2,253 | Python |
#!c:\users\jerem\dev\ipp-core\venv\scripts\python.exe
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
import os
import sys
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
try:
i = sys.argv.index("--" + mode)
del sys.argv[i]
break
except ValueError:
pass
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
sys.exit(1)
elif mode == "2e-numeric":
from f2py2e import main
elif mode == "2e-numarray":
sys.argv.append("-DNUMARRAY")
from f2py2e import main
elif mode == "2e-numpy":
from numpy.f2py import main
else:
sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
sys.exit(1)
main()
| 27.344828 | 67 | 0.641866 | [
"MIT"
] | jeremycward/ipp-core | venv/Scripts/f2py.py | 793 | Python |
import sys
sys.path.append('..')
from utilities import jamfconfig
from utilities import apirequests
from computergroups import computergroups
import xml.etree.ElementTree as etree
jss_api_base_url = jamfconfig.getJSS_API_URL()
#print("JSS API Base URL: {}".format(jss_api_base_url))
def cleanupOutput(inputString):
#print str(inputString)
return inputString.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(u"\u201c", "\"").replace(u"\u201d", "\"")
def getAllPolicies(username, password):
''' List all policies in JSS to screen '''
#print(username)
print "We're Refactored! Getting All JAMF Policies..."
reqStr = jss_api_base_url + '/policies'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r == -1:
return
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
for policy in responseXml.findall('policy'):
policyName = policy.find('name').text
policyID = policy.find('id').text
print 'Policy ID: ' + policyID + ', ' + 'Policy Name: ' + policyName + '\n'
def getPolicybyId(policyid, username, password):
''' Method to search for Policy ID by ID number and return General Policy Information, Scoping Information, and Package Configuration information - send results to Stdout '''
print 'Running refactored getpolicybyid ...'
reqStr = jss_api_base_url + '/policies/id/' + policyid
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
general = responseXml.find('general')
## General Policy Information
name = general.find('name').text
policy_id = general.find('id').text
enabled = general.find('enabled').text
trigger = general.find('trigger').text
frequency = general.find('frequency').text
print '\nGENERAL POLICY INFORMATION: '
print 'Policy Name: ' + str(name)
print 'Policy ID #: ' + str(policy_id)
print 'Policy is Enabled: ' + str(enabled)
print 'Policy Trigger: ' + str(trigger)
print 'Policy Frequency: ' + str(frequency)
## Policy Scope Information
scope = responseXml.find('scope')
allcomputers = scope.find('all_computers').text
groups = scope.find('computer_groups')
comp_groups = []
computers = scope.find('computers')
members = []
## Add Header Row for output for info categories
# headerRow = "Computer Name, JSS ID"
# members += [ headerRow ]
for computer in computers.findall('computer'):
# compID = computer.find('id').text
name = computer.find('name').text
computerInfo = str(name)
computerInfo = cleanupOutput(computerInfo)
#print computerInfo.encode('ascii', 'ignore')
members += [ computerInfo ]
for g in groups.findall('computer_group'):
group_name = g.find('name').text
groupInfo = str(group_name)
comp_groups += [ groupInfo ]
print '\nPOLICY SCOPE INFORMATION:'
print 'Scoped to All Computers: ' + str(allcomputers)
print '\nCOMPUTER GROUPS IN SCOPE: '
print '\n'.join (sorted(comp_groups))
if members:
print '\nADDITIONAL COMPUTERS IN SCOPE: '
print '\n'.join (sorted(members))
print '\nTotal Computers in Scope: ' + str(len(members))
## Package Configuration Information
pkgconfig = responseXml.find('package_configuration')
packages = pkgconfig.find('packages')
pkgheaderRow = "Package Name"
pkglist = []
for pkg in packages.findall('package'):
pkg_name = pkg.find('name').text
pkg_action = pkg.find('action').text
pkgInfo = str(pkg_name) + ', ' + str(pkg_action)
pkgInfo = cleanupOutput(pkgInfo)
pkglist += [ pkgInfo ]
print '\nPACKAGE CONFIGURATION: '
print '\n'.join (sorted(pkglist))
else:
print 'Failed to find policy with ID ' + policyid
def listAllPolicies(username, password):
''' List all policies in JSS - for function use '''
reqStr = jss_api_base_url + '/policies'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r == -1:
return
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
PoliciesList = []
for policy in responseXml.findall('policy'):
policyName = policy.find('name').text
policyID = policy.find('id').text
PoliciesList.append({'name': policyName, 'id': policyID})
return PoliciesList
def listAllPolicyIds(username, password):
''' List all policy IDs in JSS - for function use - returns a list of Policy ID #s '''
reqStr = jss_api_base_url + '/policies'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r == -1:
return
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
PolicyIDList = []
for policy in responseXml.findall('policy'):
policyID = policy.find('id').text
PolicyIDList.append(policyID)
return PolicyIDList
def listPolicyStatusbyId(policyid, username, password):
''' Function to search for Policy ID by ID number and return status results for
use in functions '''
reqStr = jss_api_base_url + '/policies/id/' + policyid + '/subset/General'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
general = responseXml.find('general')
status = general.find('enabled').text
return status
def listPolicyNamebyId(policyid, username, password):
''' Function to search for Policy ID by ID number and return name for
use in functions '''
reqStr = jss_api_base_url + '/policies/id/' + policyid + '/subset/General'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
general = responseXml.find('general')
name = general.find('name').text
return name
def listPolicyScopebyId(policyid, username, password):
''' Function to search for Policy ID by ID number and return scope details as a
dict for use in functions '''
reqStr = jss_api_base_url + '/policies/id/' + policyid + '/subset/Scope'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
scopeData = []
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
scope = responseXml.find('scope')
allcomputers = scope.find('all_computers').text
groups = scope.find('computer_groups')
comp_groups = []
comp_groupIDs = []
computers = scope.find('computers')
members = []
scope_details = {}
for comp in computers.findall('computer'):
if comp.find('name').text:
name = comp.find('name').text
members.append(name)
for g in groups.findall('computer_group'):
if g.find('name').text:
group_name = g.find('name').text
groupID = computergroups.getComputerGroupId(group_name, username, password)
comp_groups.append(group_name)
comp_groupIDs.append(groupID)
scope_details = { "Policy ID: ": policyid, "All computers?: ": allcomputers, "Computer groups: ": comp_groups, "Computer group IDs: ": comp_groupIDs, "Specific computers: ": members }
return scope_details
def listPolicyPackagesbyId(policyid, username, password):
''' Function to search for Policy ID by ID number and return package details as a list
for use in functions '''
reqStr = jss_api_base_url + '/policies/id/' + policyid + '/subset/Packages'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
pkglist = []
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
pkgconfig = responseXml.find('package_configuration')
packages = pkgconfig.find('packages')
if packages.findall('package'):
for pkg in packages.findall('package'):
pkg_name = pkg.find('name').text
pkglist.append(pkg_name)
return pkglist
def listPolicybyId(policyid, username, password):
''' Method to search for Policy ID by ID number and return General Policy Information, Scoping Information, and Package Configuration information - for use in functions '''
reqStr = jss_api_base_url + '/policies/id/' + policyid
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
policyDict = {}
## General Policy Information
general = responseXml.find('general')
polname = general.find('name').text
policy_id = general.find('id').text
enabled = general.find('enabled').text
trigger = general.find('trigger').text
frequency = general.find('frequency').text
## Policy Scope Information
scope = responseXml.find('scope')
allcomputers = scope.find('all_computers').text
groups = scope.find('computer_groups')
comp_groups = []
computers = scope.find('computers')
members = []
for computer in computers.findall('computer'):
name = computer.find('name').text
computerInfo = name.encode("utf-8")
# computerInfo = cleanupOutput(computerInfo)
# members.append(name)
members += [ computerInfo ]
for g in groups.findall('computer_group'):
group_name = g.find('name').text
groupInfo = str(group_name)
comp_groups += [ groupInfo ]
## Package Configuration Information
pkgconfig = responseXml.find('package_configuration')
packages = pkgconfig.find('packages')
pkglist = []
for pkg in packages.findall('package'):
pkg_name = pkg.find('name').text
pkg_action = pkg.find('action').text
pkglist.append({"Package Name": pkg_name, "Package Action": pkg_action})
## Add policy details to policyDict and return
policyDict = { "Policy Name": polname,
"Policy ID": policy_id,
"Policy Enabled": enabled,
"Policy Trigger": trigger,
"Policy Frequency": frequency,
"All Computers in Scope": allcomputers,
"Scoped Computers": members,
"Scoped Computer Groups": comp_groups,
"Package Configuration": pkglist
}
return policyDict
else:
print 'Failed to find policy with ID ' + policyid
| 28.345029 | 185 | 0.702909 | [
"Apache-2.0"
] | eventbrite/britetech-casper-tools | policies/policies_core.py | 9,694 | Python |
from __future__ import annotations
from typing import List, Tuple, Optional
from network_simulator.BloodType import BloodType
from network_simulator.compatibility_markers import OrganType
path_structure = Optional[List[Optional[int]]]
shortest_path_structure = Tuple[path_structure, float]
class Organ:
"""
A class representing a given organ which is available for transplant.
Each organ has a name, a unique ID, lifetime (a maximum out of body duration),
type matching, and a location.
"""
organ_count = 0
def __init__(self, organ_type: OrganType, blood_type: BloodType,
location: int, organ_list: 'OrganList' = None) -> None: # type: ignore
Organ.organ_count = Organ.organ_count + 1
self.organ_id: int = Organ.organ_count
self.organ_type: OrganType = organ_type
self.blood_type: BloodType = blood_type
self.viability: float = Organ.get_viability(self.organ_type)
self.origin_location: int = location
self.current_location: int = location
self.path: path_structure = [location]
if organ_list:
organ_list.add_organ(self)
def move_organ(self, new_location: int, cost: float,
shortest_path: shortest_path_structure) -> None:
"""
This function allows an organ's attributes to be altered to represent it's
transportation across the network. This is intended to be used with
Dijkstra.shortest_path (this will be the source of the cost parameter)
:param int new_location: node id representing the destination location
        :param cost: weight/cost associated with the most efficient path
:param list shortest_path: transit path taken when moving organ
"""
if self.viability < cost:
print('ERROR: organ no longer viable!')
return
path, weight = shortest_path
self.path = path
self.current_location = new_location
self.viability -= cost
@staticmethod
def get_viability(organ_type: OrganType) -> float:
"""
Gets viability rating for each organ individually
Viability is represented by hours an organ can be out of body * 10
:param int organ_type: constant corresponding to an organ type
:return: int viability rating (used in __init__())
"""
viability = {
OrganType.Heart.value: 60,
OrganType.Kidney.value: 300,
OrganType.Liver.value: 120,
OrganType.Lungs.value: 60,
OrganType.Pancreas.value: 120,
OrganType.Intestines.value: 80}
return viability[organ_type.value]
def __str__(self) -> str:
"""
Builds an easily readable string representing an organ
:return: str
"""
return f'Organ:\n' \
f'\tOrgan ID: {"{:05d}".format(self.organ_id)}\n' \
f'\tOrgan type: {OrganType(self.organ_type).name}\n' \
f'\tBlood type: {self.blood_type}\n' \
f'\tViability: {self.viability}\n' \
f'\tOrigin location: {self.origin_location}\n' \
f'\tCurrent location: {self.current_location}\n' \
f'\tTransit path: {self.path}\n'
| 36.477778 | 88 | 0.63905 | [
"MIT"
] | zspatter/Network-Simulation | network_simulator/Organ.py | 3,283 | Python |
from invoke import task
@task
def precheck(ctx):
ctx.run("black .")
ctx.run("pre-commit run -a")
ctx.run("interrogate -c pyproject.toml", pty=True)
@task
def clean(ctx):
ctx.run("python setup.py clean")
ctx.run("rm -rf netcfgbu.egg-info")
ctx.run("rm -rf .pytest_cache .pytest_tmpdir .coverage")
ctx.run("rm -rf htmlcov")
| 20.823529 | 60 | 0.649718 | [
"MIT"
] | andyfcx/demo-beginner-concurrency | tasks.py | 354 | Python |
"""
Example of how to set up a service
"""
from typing import Optional, Any
from orchestrator_service import Message
from orchestrator_service.service import CommandHandlerPostStrategy
from orchestrator_service.service import CommandHandlerStrategy
from orchestrator_service.service import ServiceBlock, ServiceBuilder, Service
class FirstCommand(CommandHandlerStrategy):
"""
Example first command
"""
target_command = 'first_command'
def process(self, msg: Message) -> Message:
print('process 1')
# set to global scope
self.set_to_swap_scope('val', 1)
return msg
class SecondCommand(CommandHandlerStrategy):
"""
Example second command
"""
target_command = 'second_command'
def process(self, msg: Message) -> Message:
print('process 2')
# get value from scope
print(self.get_from_swap_scope('val'))
return msg
class ThirdCommand(CommandHandlerStrategy):
"""
Example third command
"""
target_command = 'third_command'
def process(self, msg: Message) -> Message:
        # example of calling another command from within the current one
command = self.get_service_command(SecondCommand.target_command, # type: CommandHandlerStrategy
is_process=True)
msg = command.process(msg)
return msg
class PPFirstCommand(CommandHandlerPostStrategy):
"""
Example first post process handler
"""
def post_process(self, msg: Message, additional_data: Optional[Any] = None) -> None:
print('post_process 1')
class PPSecondCommand(CommandHandlerPostStrategy):
"""
Example second post process handler
"""
def post_process(self, msg: Message, additional_data: Optional[Any] = None) -> None:
print('post_process 2')
# example builder
example_service_builder = ServiceBuilder(
ServiceBlock(process=FirstCommand(),
post_process=PPFirstCommand()),
ServiceBlock(process=SecondCommand()),
ServiceBlock(process=ThirdCommand),
default_post_process=PPSecondCommand())
class MyService(Service):
"""
Custom service
second use case
"""
_default_command = 'first_command'
service_commands = example_service_builder
if __name__ == '__main__':
service = Service(example_service_builder,
default_command='first_command')
my_service = MyService()
msg_1 = Message(body={'val': 1}, header={'command': 'first_command'})
msg_2 = Message(body={'val': 1}, header={'command': 'second_command'})
msg_3 = Message(body={'val': 1}, header={'command': 'third_command'})
msg_4 = Message(body={'val': 1}, header={})
    # runs correctly when there is both a handler
    # and a post handler and a command is specified
service.handle(msg_1)
# >>> process 1
# >>> post_process 1
# running command with default PP handlers
service.handle(msg_2)
# >>> process 2
# >>> post_process 2
service.handle(msg_3)
# >>> process 3
# >>> post_process 2
# running default command
service.handle(msg_4)
# >>> process 1
# >>> post_process 1
# Run overridden service
my_service.handle(msg_1)
# >>> process 1
# >>> post_process 1
my_service.handle(msg_2)
# >>> process 2
# >>> post_process 2
my_service.handle(msg_3)
# >>> process 3
# >>> post_process 2
my_service.handle(msg_4)
# >>> process 1
# >>> post_process 1
| 27.240602 | 105 | 0.634281 | [
"Unlicense"
] | Shchusia/orchestrator | examples/example_service.py | 3,623 | Python |
"""
Expose top-level symbols that are safe for import *
"""
from __future__ import print_function, division, absolute_import
import re
from . import testing, decorators
from ._version import get_versions
from . import special, types, config
# Re-export typeof
from .special import *
from .pycc.decorators import export, exportmany
# Version
__version__ = get_versions()['version']
# Re-export all type names
from .types import *
# Re export decorators
jit = decorators.jit
autojit = decorators.autojit
njit = decorators.njit
# Re export vectorize decorators
from .npyufunc import vectorize, guvectorize
# Re export from_dtype
from .numpy_support import from_dtype
# Re-export test entrypoint
test = testing.test
# Try initialize cuda
from . import cuda
__all__ = """
jit
autojit
njit
vectorize
guvectorize
export
exportmany
cuda
from_dtype
""".split() + types.__all__ + special.__all__
def _sentry_llvm_version():
"""
Make sure we meet min llvmpy version
"""
import warnings
import llvm
min_version = (0, 12, 6)
    # Only look at the major, minor and bugfix version numbers.
    # Ignore everything else
regex = re.compile(r'(\d+)\.(\d+).(\d+)')
m = regex.match(llvm.__version__)
if m:
ver = tuple(map(int, m.groups()))
if ver < min_version:
msg = ("Numba requires at least version %d.%d.%d of llvmpy.\n"
"Installed version is %s.\n"
"Please update llvmpy." %
(min_version + (llvm.__version__,)))
raise ImportError(msg)
else:
# Not matching?
warnings.warn("llvmpy version format not recognized!")
_sentry_llvm_version()
| 22.223684 | 74 | 0.67614 | [
"BSD-2-Clause"
] | meawoppl/numba | numba/__init__.py | 1,689 | Python |
from string import ascii_lowercase
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from plotly.colors import hex_to_rgb
from src.timeit import timeit
@timeit
def plotOverTime(FSCPData: pd.DataFrame, FSCPDataSteel: pd.DataFrame, config: dict):
# select which lines to plot based on function argument
FSCPsCols, plotFSCP, plotLines = __selectPlotFSCPs(FSCPData, config['showFSCPs'], config['refFuelTop'],
config['n_samples'])
FSCPsCols, plotFSCPSteel, plotLinesSteel = __selectPlotFSCPs(FSCPDataSteel, config['showFSCPs'],
config['refFuelBottom'], config['n_samples'])
# produce figure
fig = __produceFigure(FSCPsCols, plotFSCP, plotFSCPSteel, plotLines, plotLinesSteel, config)
# styling figure
__styling(fig, config)
return {'fig3': fig}
def __selectPlotFSCPs(FSCPData: pd.DataFrame, showFSCPs: dict, refFuel: str, n_samples: int):
FSCPsCols = [None] * len(showFSCPs)
listOfFSCPs = pd.DataFrame(columns=(FSCPData.keys().tolist() + ['plotIndex']))
for index, args in enumerate(showFSCPs):
cols, fuel_x, fuel_y = args
if fuel_x == 'ref': fuel_x = refFuel
addFSCP = FSCPData.query(f"fuel_x=='{fuel_x}' & fuel_y=='{fuel_y}' & year_x==year_y").reset_index(drop=True)
if fuel_x == refFuel: addFSCP.loc[:, 'fuel_x'] = 'ref'
addFSCP.insert(1, 'plotIndex', len(addFSCP) * [index])
FSCPsCols[index] = cols
listOfFSCPs = pd.concat([listOfFSCPs, addFSCP], ignore_index=True)
# year_x == year_y, so we only need one of them from now on
listOfFSCPs['year'] = listOfFSCPs['year_x']
# return FSCPs for scatter plots
plotFSCP = listOfFSCPs[['plotIndex', 'fuel_x', 'fuel_y', 'year', 'fscp', 'fscp_uu', 'fscp_ul']]
# return costs and GHGIs for line plots
plotLines = listOfFSCPs[['plotIndex', 'fuel_x', 'fuel_y', 'year', 'cost_x', 'cost_y', 'ghgi_x', 'ghgi_y']]
# interpolation of plotLines
t = np.linspace(plotLines['year'].min(), plotLines['year'].max(), n_samples)
dtypes = {'year': float, 'cost_x': float, 'cost_y': float, 'ghgi_x': float, 'ghgi_y': float}
allEntries = []
for index in plotLines['plotIndex'].unique():
samples = plotLines.query(f"plotIndex=={index}").reset_index(drop=True).astype(dtypes)
fuel_x = samples.fuel_x.iloc[0]
fuel_y = samples.fuel_y.iloc[0]
new = dict(
plotIndex=n_samples * [int(index)],
fuel_x=n_samples * [fuel_x],
fuel_y=n_samples * [fuel_y],
year=t,
)
tmp = pd.DataFrame(new, columns=plotLines.keys())
tmp.index = np.arange(len(samples), len(tmp) + len(samples))
tmp = tmp.merge(samples, how='outer').sort_values(by=['year']).astype(dtypes)
allEntries.append(tmp.interpolate())
plotLinesInterpolated = pd.concat(allEntries, ignore_index=True)
plotLinesInterpolated['fscp'] = (plotLinesInterpolated['cost_x'] - plotLinesInterpolated['cost_y']) / (
plotLinesInterpolated['ghgi_y'] - plotLinesInterpolated['ghgi_x'])
return FSCPsCols, plotFSCP, plotLinesInterpolated
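# Editor's note (illustrative, numbers hypothetical): the interpolated lines above carry the
# fuel-switching CO2 price FSCP = (cost_x - cost_y) / (ghgi_y - ghgi_x), i.e. the carbon price
# at which switching from fuel x to the cleaner fuel y breaks even:
#
#   >>> cost_x, ghgi_x = 50.0, 0.50    # hypothetical reference fuel: cheap, high GHG intensity
#   >>> cost_y, ghgi_y = 100.0, 0.25   # hypothetical low-carbon fuel: expensive, low GHG intensity
#   >>> (cost_x - cost_y) / (ghgi_y - ghgi_x)
#   200.0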
def __produceFigure(FSCPsCols: list, plotFSCP: pd.DataFrame, plotFSCPSteel: pd.DataFrame,
plotLines: pd.DataFrame, plotLinesSteel: pd.DataFrame, config: dict):
# plot
fig = make_subplots(
rows=2,
cols=2,
subplot_titles=ascii_lowercase,
shared_yaxes=True,
horizontal_spacing=0.025,
vertical_spacing=0.1,
)
# add FSCP traces for heating
traces = __addFSCPTraces(plotFSCP, plotLines, len(FSCPsCols), config['refFuelTop'], config)
for id, trace in traces:
for j, col in enumerate(FSCPsCols[id]):
if j: trace.showlegend = False
fig.add_trace(trace, row=1, col=col)
# add FSCP traces for steel
traces = __addFSCPTraces(plotFSCPSteel, plotLinesSteel, len(FSCPsCols), config['refFuelBottom'], config)
for id, trace in traces:
for j, col in enumerate(FSCPsCols[id]):
trace.showlegend = False
fig.add_trace(trace, row=2, col=col)
    # compute and plot carbon price trajectory
cpTrajData = __computeCPTraj(config['co2price_traj']['years'], config['co2price_traj']['values'], config['n_samples'])
traces = __addCPTraces(cpTrajData, config)
for trace in traces:
for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:
if i or j: trace.showlegend = False
fig.add_trace(trace, row=i + 1, col=j + 1)
# zero y line
for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:
fig.add_hline(0.0, line_width=config['global']['lw_thin'], line_color='black', row=i + 1, col=j + 1)
# add text annotations explaining figure content
annotationStyling = dict(xanchor='center', yanchor='middle', showarrow=False,
bordercolor='black', borderwidth=2, borderpad=3, bgcolor='white')
for i in range(2):
axisNumb = str(i+1) if i else ''
blueTech = config['annotationLabels']['blueTechs'][i]
fig.add_annotation(
x=0.50,
xref=f"x{axisNumb} domain",
y=1.15,
yref=f"y{axisNumb} domain",
text=f"Blue H<sub>2</sub> from {blueTech}",
**annotationStyling
)
for i in range(2):
axisNumb = str(i+2) if i else ''
application = config['annotationLabels']['applications'][i]
fig.add_annotation(
x=-0.17,
xref=f"x{axisNumb} domain",
y=0.5,
yref=f"y{axisNumb} domain",
text=f"{application}",
textangle=-90,
**annotationStyling
)
# add circles on intersects
__addAnnotations(fig, cpTrajData, plotLines, plotLinesSteel, config)
# add arrows in 2025
__addAnnotationArrows(fig, config)
# add legend for annotations
__addAnnotationsLegend(fig, config)
# update axes titles and ranges
fig.update_layout(
xaxis=dict(
title=config['labels']['time'],
range=[config['plotting']['t_min'], config['plotting']['t_max']]
),
xaxis2=dict(
title=config['labels']['time'],
range=[config['plotting']['t_min'], config['plotting']['t_max']]
),
xaxis3=dict(
title=config['labels']['time'],
range=[config['plotting']['t_min'], config['plotting']['t_max']]
),
xaxis4=dict(
title=config['labels']['time'],
range=[config['plotting']['t_min'], config['plotting']['t_max']]
),
yaxis=dict(
title=config['labels']['fscp'],
range=[config['plotting']['fscp_min'], config['plotting']['fscp_max']]
),
yaxis3=dict(
title=config['labels']['fscp_steel'],
range=[config['plotting']['fscp_min'], config['plotting']['fscp_max']]
),
margin_l=180.0,
margin_b=520.0,
)
return fig
def __addAnnotations(fig: go.Figure, cpTrajData: pd.DataFrame, plotLines: pd.DataFrame, plotLinesSteel: pd.DataFrame, config: dict):
traceArgs = [
dict(row=1, col=1, lines=plotLines, anno=config['annotationFuels']['left']),
dict(row=1, col=2, lines=plotLines, anno=config['annotationFuels']['right']),
dict(row=2, col=1, lines=plotLinesSteel, anno=config['annotationFuels']['left']),
dict(row=2, col=2, lines=plotLinesSteel, anno=config['annotationFuels']['right']),
]
for args in traceArgs:
points = __calcPoints(cpTrajData, args['lines'], args['anno'])
data = pd.DataFrame(points).T
fig.add_trace(go.Scatter(
x=data.year,
y=data.fscp,
text=data.index,
mode='markers+text',
marker=dict(symbol='circle-open', size=config['global']['highlight_marker'], line={'width': config['global']['lw_thin']}, color='Black'),
textposition='bottom center',
showlegend=False,
# hovertemplate = f"{name}<br>Carbon price: %{{x:.2f}}±%{{error_x.array:.2f}}<extra></extra>",
), row=args['row'], col=args['col'])
def __calcPoints(cpTrajData: pd.DataFrame, plotLines: pd.DataFrame, fuels: list) -> dict:
points = {}
fuelRef, fuelGreen, fuelBlue = fuels
dropCols = ['plotIndex', 'fuel_x', 'fuel_y', 'cost_x', 'cost_y', 'ghgi_x', 'ghgi_y']
greenLine = plotLines.query(f"fuel_x=='{fuelRef}' & fuel_y=='{fuelGreen}'").drop(columns=dropCols).reset_index(drop=True)
blueLine = plotLines.query(f"fuel_x=='{fuelRef}' & fuel_y=='{fuelBlue}'").drop(columns=dropCols).reset_index(drop=True)
redLine = plotLines.query(f"fuel_x=='{fuelBlue}' & fuel_y=='{fuelGreen}'").drop(columns=dropCols).reset_index(drop=True)
purpleLine = cpTrajData.drop(columns=['name', 'CP_u', 'CP_l'])
for i, line in enumerate([blueLine, greenLine, redLine]):
diffLines = pd.merge(line, purpleLine, on=['year'])
diffLines['delta'] = (diffLines['fscp'] - diffLines['CP']).abs()
points[i+2] = diffLines.nsmallest(1, 'delta').drop(columns=['CP', 'delta']).iloc[0]
diffLines = pd.merge(blueLine, greenLine, on=['year'], suffixes=('', '_right'))
diffLines['delta'] = (diffLines['fscp'] - diffLines['fscp_right']).abs()
points[5] = diffLines.nsmallest(1, 'delta').drop(columns=['fscp_right', 'delta']).iloc[0]
points[6] = redLine.abs().nsmallest(1, 'fscp').iloc[0]
return points
def __addAnnotationArrows(fig: go.Figure, config: dict):
__addArrow(fig, 2025.0, 150.0, 600.0, 1, 1, config)
__addArrow(fig, 2025.5, 150.0, 800.0, 1, 1, config)
fig.add_annotation(text='1', x=2024.5, y=200.0, row=1, col=1, showarrow=False)
__addArrow(fig, 2025.0, 150.0, 300.0, 1, 2, config)
__addArrow(fig, 2025.5, 150.0, 800.0, 1, 2, config)
fig.add_annotation(text='1', x=2024.5, y=200.0, row=1, col=2, showarrow=False)
__addArrow(fig, 2024.5, 90.0, 200.0, 2, 1, config)
fig.add_annotation(text='1', x=2024.0, y=150.0, row=2, col=1, showarrow=False)
__addArrow(fig, 2024.5, 90.0, 200.0, 2, 2, config)
fig.add_annotation(text='1', x=2024.0, y=150.0, row=2, col=2, showarrow=False)
def __addArrow(fig: go.Figure, x: float, y1: float, y2: float, row: int, col: int, config: dict):
xaxes = [['x', 'x2'], ['x3', 'x4']]
yaxes = [['y', 'y2'], ['y3', 'y4']]
for ay, y in [(y1, y2), (y2, y1)]:
fig.add_annotation(
axref=xaxes[row-1][col-1],
xref=xaxes[row-1][col-1],
ayref=yaxes[row-1][col-1],
yref=yaxes[row-1][col-1],
ax=x,
x=x,
ay=ay,
y=y,
arrowcolor='black',
arrowwidth=config['global']['lw_thin'],
#arrowsize=config['global']['highlight_marker_sm'],
arrowhead=2,
showarrow=True,
row=row,
col=col,
)
def __addAnnotationsLegend(fig: go.Figure, config: dict):
y0 = -0.40
fig.add_shape(
type='rect',
x0=0.0,
y0=y0,
x1=0.80,
y1=y0-0.2,
xref='paper',
yref='paper',
line_width=2,
fillcolor='white',
)
fig.add_annotation(
text=f"<b>{config['annotationTexts']['heading1']}:</b><br><br><br><b>{config['annotationTexts']['heading2']}:</b>",
align='left',
xanchor='left',
x=0.0,
yanchor='top',
y=y0,
xref='paper',
yref='paper',
showarrow=False,
)
for i in range(6):
fig.add_annotation(
text=f"{i+1}: "+config['annotationTexts'][f"point{i+1}"],
align='left',
xanchor='left',
x=0.0 + i%3 * 0.22,
yanchor='top',
y=y0-(0.03 if i<3 else 0.13),
xref='paper',
yref='paper',
showarrow=False,
)
def __addFSCPTraces(plotData: pd.DataFrame, plotLines: pd.DataFrame, n_lines: int, refFuel: str, config: dict, sensitivityNG: bool = False):
traces = []
for index in range(n_lines):
thisDataScatter = plotData.query(f"plotIndex=={index}").reset_index(drop=True)
thisDataLine = plotLines.query(f"plotIndex=={index}").reset_index(drop=True)
# styling of individual lines
truncated = (thisDataScatter.loc[0, 'fuel_x'] == 'blue LEB' and thisDataScatter.loc[0, 'fuel_y'] == 'green RE') or \
thisDataScatter.loc[0, 'fuel_x'] == 'blue LEB lowscco2'
dashed = thisDataScatter.loc[0, 'fuel_y'] in ['green pure RE', 'blue LEB lowscco2']
longdashed = thisDataScatter.loc[0, 'fuel_x'] == 'blue LEB lowscco2'
shift = 0
if thisDataScatter.loc[0, 'fuel_y'] == 'green RE':
if thisDataScatter.loc[0, 'fuel_x'] == 'ref':
shift = -1
else:
shift = +1
elif thisDataScatter.loc[0, 'fuel_y'] == 'green pure RE':
shift = +2
thisDataScatter = thisDataScatter.query(f"year<=2035")
thisDataLine = thisDataLine.query(f"year<=2035")
# line properties
fuel_x = thisDataScatter.iloc[thisDataScatter.first_valid_index()]['fuel_x']
fuel_y = thisDataScatter.iloc[0]['fuel_y']
name = f"Fossil→{config['names'][fuel_y]}" if fuel_x == 'ref' else f"{config['names'][fuel_x]}→{config['names'][fuel_y]}"
col = config['fscp_colours'][f"{fuel_x} to {fuel_y}"] if f"{fuel_x} to {fuel_y}" in config['fscp_colours'] else \
config['colours'][fuel_y]
# do not plot awkward red line in sensitivity analysis row 2
if sensitivityNG and fuel_x == 'blue LEB':
continue
# scatter plot
traces.append((index, go.Scatter(
x=thisDataScatter['year'],
y=thisDataScatter['fscp'],
name=name,
legendgroup=0 if fuel_x == 'ref' else 1,
showlegend=False,
mode='markers',
line=dict(color=col, width=config['global']['lw_default'], dash='dot' if dashed else 'solid'),
marker=dict(symbol='x-thin', size=config['global']['highlight_marker_sm'], line={'width': config['global']['lw_thin'], 'color': col}, ),
hovertemplate=f"<b>{name}</b><br>Year: %{{x:d}}<br>FSCP: %{{y:.2f}}±%{{error_y.array:.2f}}<extra></extra>",
)))
# remove unphysical negative FSCPs
if truncated and not sensitivityNG:
thisDataLine = thisDataLine.query(f"(year>=2030 & fscp>0.0) | year>=2040")
# line plot
traces.append((index, go.Scatter(
x=thisDataLine['year'],
y=thisDataLine['fscp'],
legendgroup=0 if fuel_x == 'ref' else 1,
legendgrouptitle=dict(text=f"<b>{config['legendlabels'][0]}:</b>" if fuel_x=='ref' else f"<b>{config['legendlabels'][0]}:</b>"),
name=name,
mode='lines',
line=dict(color=col, width=config['global']['lw_default'], dash='dot' if dashed else 'dash' if longdashed else 'solid'),
)))
# error bars
thisDataScatter = thisDataScatter.query(f"year==[2030,2040,2050]")
thisDataScatter = thisDataScatter.query(f"fscp<={config['plotting']['fscp_max']} and (fscp>0.0 | year > 2040)")
traces.append((index, go.Scatter(
x=thisDataScatter['year'] + shift * 0.1,
y=thisDataScatter['fscp'],
error_y=dict(type='data', array=thisDataScatter['fscp_uu'], arrayminus=thisDataScatter['fscp_ul'],
thickness=config['global']['lw_thin']),
name=name,
legendgroup=0 if fuel_x == 'ref' else 1,
showlegend=False,
mode='markers',
marker=dict(symbol='x-thin', size=0.00001,),
line_color=('rgba({}, {}, {}, {})'.format(*hex_to_rgb(col), .4)),
hovertemplate=f"<b>{name}</b><br>Year: %{{x:d}}<br>FSCP: %{{y:.2f}}±%{{error_y.array:.2f}}<extra></extra>",
)))
return traces
# compute carbon price trajectories
def __computeCPTraj(years: list, values: dict, n_samples: int):
v_mean = []
v_upper = []
v_lower = []
for i, year in enumerate(years):
vals = [v[i] for v in values.values()]
mean = sum(vals)/len(vals)
v_mean.append(mean)
v_upper.append(max(vals)-mean)
v_lower.append(mean-min(vals))
# create data frame with time and cp values
cpData = pd.DataFrame({
'year': years,
'CP': v_mean,
'CP_u': v_upper,
'CP_l': v_lower,
})
# interpolate in between
samples = pd.DataFrame({'year': np.linspace(years[0], years[-1], n_samples)})
dtypes = {'year': float, 'CP': float, 'CP_u': float, 'CP_l': float}
cpData = cpData.merge(samples, how='outer').sort_values(by=['year']).astype(dtypes).interpolate()
# add name to dataframe
cpData['name'] = 'cp'
return cpData
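# Editor's note (illustrative, keys and values hypothetical): __computeCPTraj expects a list of
# years plus a dict of named carbon-price scenarios with one value per year; the scenario mean
# becomes CP and the spread becomes CP_u / CP_l before interpolation onto n_samples points:
#
#   >>> traj = __computeCPTraj([2025, 2050], {'low': [50, 200], 'high': [100, 400]}, n_samples=6)
#   >>> list(traj.columns)   # -> ['year', 'CP', 'CP_u', 'CP_l', 'name']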
# plot traces
def __addCPTraces(cpTrajData: pd.DataFrame, config: dict):
traces = []
name = config['carbon_price_config']['name']
colour = config['carbon_price_config']['colour']
# add main graphs (FSCP and CP)
traces.append(go.Scatter(
name=name,
legendgroup=1,
mode='lines',
x=cpTrajData['year'],
y=cpTrajData['CP'],
line_color=colour,
line_width=config['global']['lw_thin'],
showlegend=True,
hovertemplate=f"<b>{name}</b><br>Time: %{{x:.2f}}<br>Carbon price: %{{y:.2f}}<extra></extra>"
))
data_x = cpTrajData['year']
data_yu = cpTrajData['CP'] + cpTrajData['CP_u']
data_yl = cpTrajData['CP'] - cpTrajData['CP_l']
errorBand = go.Scatter(
name='Uncertainty Range',
legendgroup=1,
x=pd.concat([data_x, data_x[::-1]], ignore_index=True),
y=pd.concat([data_yl, data_yu[::-1]], ignore_index=True),
mode='lines',
marker=dict(color=colour),
fillcolor=("rgba({}, {}, {}, 0.1)".format(*hex_to_rgb(colour))),
fill='toself',
line=dict(width=config['global']['lw_ultrathin']),
showlegend=False,
hoverinfo='skip'
)
traces.append(errorBand)
return traces
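# Editor's note: the uncertainty band above uses a common plotly idiom -- the upper bound and the
# reversed lower bound are concatenated into one closed outline and drawn with fill='toself', so a
# single scatter trace renders the whole shaded range.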
def __styling(fig: go.Figure, config: dict):
# update legend styling
fig.update_layout(
legend=dict(
orientation='h',
xanchor='left',
x=0.0,
yanchor='top',
y=-0.1,
bgcolor='rgba(255,255,255,1.0)',
bordercolor='black',
borderwidth=2,
),
)
# update axis styling
for axis in ['xaxis', 'xaxis2', 'xaxis3', 'xaxis4', 'yaxis', 'yaxis2', 'yaxis3', 'yaxis4']:
update = {axis: dict(
showline=True,
linewidth=2,
linecolor='black',
showgrid=False,
zeroline=False,
mirror=True,
ticks='outside',
)}
fig.update_layout(**update)
# update figure background colour and font colour and type
fig.update_layout(
paper_bgcolor='rgba(255, 255, 255, 1.0)',
plot_bgcolor='rgba(255, 255, 255, 0.0)',
font_color='black',
font_family='Helvetica',
)
# move title annotations
for i, annotation in enumerate(fig['layout']['annotations'][:len(config['subplot_title_positions'])]):
x_pos, y_pos = config['subplot_title_positions'][i]
annotation['xanchor'] = 'left'
annotation['yanchor'] = 'top'
annotation['xref'] = 'paper'
annotation['yref'] = 'paper'
annotation['x'] = x_pos
annotation['y'] = y_pos
annotation['text'] = "<b>{0}</b>".format(annotation['text'])
| 37.185393 | 149 | 0.584177 | [
"MIT"
] | PhilippVerpoort/blue-green-H2 | src/plotting/plots/plotOverTime.py | 19,861 | Python |
suite = {
"mxversion": "5.156.0",
"name": "truffleruby",
"imports": {
"suites": [
{ # Import only the tools suite which depends on truffle, to avoid duplicating import versions.
# We want tools to be reliably available with TruffleRuby, even with "mx build", so this is a static import.
"name": "tools",
"subdir": True,
# version must always be equal to the version of the "sulong" import below
"version": "aacc6652e247841b2bfa9bdba308021049c2e215",
"urls": [
{"url": "https://github.com/oracle/graal.git", "kind": "git"},
{"url": "https://curio.ssw.jku.at/nexus/content/repositories/snapshots", "kind": "binary"},
]
},
{
"name": "sulong",
"subdir": True,
# version must always be equal to the version of the "tools" import above
"version": "aacc6652e247841b2bfa9bdba308021049c2e215",
"urls": [
{"url": "https://github.com/oracle/graal.git", "kind": "git"},
{"url": "https://curio.ssw.jku.at/nexus/content/repositories/snapshots", "kind": "binary"},
]
},
],
},
"licenses": {
"EPL-1.0": {
"name": "Eclipse Public License 1.0",
"url": "https://opensource.org/licenses/EPL-1.0",
},
"BSD-simplified": {
"name": "Simplified BSD License (2-clause BSD license)",
"url": "http://opensource.org/licenses/BSD-2-Clause"
},
"MIT": {
"name": "MIT License",
"url": "http://opensource.org/licenses/MIT"
},
},
"repositories": {
"truffleruby-binary-snapshots": {
"url": "https://curio.ssw.jku.at/nexus/content/repositories/snapshots",
"licenses": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
"BSD-simplified", # MRI
"BSD-new", # Rubinius, FFI
"MIT", # JCodings, minitest, did_you_mean, rake
]
},
},
"libraries": {
# ------------- Libraries -------------
"JONI": {
"maven": {
"groupId": "org.jruby.joni",
"artifactId": "joni",
"version": "2.1.25"
},
"sha1": "5dbb09787a9b8780737b71fbf942235ef59051b9",
"sourceSha1": "505a09064f6e2209616f38724f6d97d8d889aa92",
"license": [
"MIT", # Joni
],
},
"JCODINGS": {
"maven": {
"groupId": "org.jruby.jcodings",
"artifactId": "jcodings",
"version": "1.0.40"
},
"sha1": "2838952e91baa37ac73ed817451268a193ba440a",
"sourceSha1": "0ed89e096c83d540acac00d6ee3ea935b4c905ff",
"license": [
"MIT", # JCodings
],
},
},
"externalProjects": {
"truffleruby-root": {
"type": "ruby",
"path": '.',
"source": [
"lib/json",
"lib/mri",
"lib/truffle",
],
"load_path": ["src/main/ruby/core"],
"test": ["spec", "test"],
"excluded": [
"bench",
"dumps",
"logo",
"mxbuild",
"truffleruby-gem-test-pack",
"lib/ruby",
"test/truffle/ecosystem/blog",
"test/truffle/ecosystem/hello-world",
"test/truffle/ecosystem/rails-app",
]
},
},
"projects": {
# ------------- Projects -------------
"org.truffleruby.annotations": {
"dir": "src/annotations",
"sourceDirs": ["java"],
"javaCompliance": "1.8",
"workingSets": "TruffleRuby",
"checkPackagePrefix": "false",
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
],
},
"org.truffleruby.shared": {
"dir": "src/shared",
"sourceDirs": ["java"],
"dependencies": [
"truffleruby:TRUFFLERUBY-ANNOTATIONS",
"sdk:GRAAL_SDK",
],
"annotationProcessors": [
"TRUFFLERUBY-PROCESSOR",
],
"javaCompliance": "1.8",
"workingSets": "TruffleRuby",
"checkPackagePrefix": "false",
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
],
},
"org.truffleruby.processor": {
"dir": "src/processor",
"sourceDirs": ["java"],
"dependencies": [
"truffleruby:TRUFFLERUBY-ANNOTATIONS",
],
"javaCompliance": "1.8",
"workingSets": "TruffleRuby",
"checkPackagePrefix": "false",
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
],
},
"org.truffleruby.services": {
"dir": "src/services",
"sourceDirs": ["java"],
"dependencies": [
"sdk:GRAAL_SDK",
],
"javaCompliance": "1.8",
"workingSets": "TruffleRuby",
"checkPackagePrefix": "false",
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
],
},
"org.truffleruby": {
"dir": "src/main",
"sourceDirs": ["java"],
"dependencies": [
"truffleruby:TRUFFLERUBY-ANNOTATIONS",
"truffleruby:TRUFFLERUBY-SHARED",
"truffle:TRUFFLE_API",
"truffle:JLINE",
"JONI",
"JCODINGS",
],
"annotationProcessors": [
"truffle:TRUFFLE_DSL_PROCESSOR",
"TRUFFLERUBY-PROCESSOR",
],
"javaCompliance": "1.8",
"checkstyle" : "org.truffleruby",
"workingSets": "TruffleRuby",
"findbugsIgnoresGenerated" : True,
"checkPackagePrefix": "false",
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
"BSD-new", # Rubinius
"BSD-simplified", # MRI
"MIT", # Joni, JCodings
],
"externalProjects": {
"ruby-core" : {
"type": "ruby",
"path": "ruby",
"source": ["core", "post-boot"],
"load_path": ["core"]
}
}
},
"org.truffleruby.launcher": {
"dir": "src/launcher",
"sourceDirs": ["java"],
"dependencies": [
"truffleruby:TRUFFLERUBY-ANNOTATIONS",
"truffleruby:TRUFFLERUBY-SHARED",
"sdk:GRAAL_SDK",
"sdk:LAUNCHER_COMMON",
],
"javaCompliance": "1.8",
"workingSets": "TruffleRuby",
"checkPackagePrefix": "false",
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
],
},
"org.truffleruby.core": {
"class": "ArchiveProject",
"outputDir": "src/main/ruby",
"prefix": "truffleruby",
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
"BSD-new", # Rubinius
],
},
"org.truffleruby.test": {
"dir": "src/test",
"sourceDirs": ["java"],
"dependencies": [
"org.truffleruby",
"org.truffleruby.services",
"truffle:TRUFFLE_TCK",
"mx:JUNIT",
],
"javaCompliance": "1.8",
"checkPackagePrefix": "false",
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
],
},
"org.truffleruby.test-ruby": {
"class": "ArchiveProject",
"outputDir": "src/test/ruby",
"prefix": "src/test/ruby",
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
],
},
"org.truffleruby.cext": {
"native": True,
"dir": "src/main/c",
"buildDependencies": [
"TRUFFLERUBY", # We need truffleruby.jar to run extconf.rb
"org.truffleruby.bin", # bin/truffleruby
"org.truffleruby.sulong-libs", # polyglot.h
],
"output": ".",
"results": [], # Empty results as they overlap with org.truffleruby.lib
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
"BSD-simplified", # MRI
],
},
# Copy the files from SULONG_LIBS to lib/cext/sulong-libs.
# Used by native images, which need a relative path from the Ruby home
# to these libraries to pass to Sulong so it can find them outside GraalVM.
"org.truffleruby.sulong-libs": {
"class": "TruffleRubySulongLibsProject",
"outputDir": "lib/cext/sulong-libs",
"prefix": "lib/cext/sulong-libs",
"buildDependencies": [
"sulong:SULONG_LIBS",
],
},
"org.truffleruby.lib": {
"class": "ArchiveProject",
"dependencies": [
"org.truffleruby.cext",
"org.truffleruby.sulong-libs",
],
"outputDir": "lib",
"prefix": "lib",
"license": [
"EPL-1.0",
"MIT", # minitest, did_you_mean, rake
"BSD-simplified", # MRI
"BSD-new", # Rubinius, FFI and RubySL
],
},
"org.truffleruby.bin": {
"class": "TruffleRubyLauncherProject",
"buildDependencies": [
"TRUFFLERUBY",
"TRUFFLERUBY-LAUNCHER",
"sulong:SULONG",
"tools:CHROMEINSPECTOR",
"tools:TRUFFLE_PROFILER",
],
"outputDir": "bin",
"prefix": "bin",
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
],
},
"org.truffleruby.doc": {
"class": "TruffleRubyDocsProject",
"outputDir": "",
"prefix": "",
},
"org.truffleruby.specs": {
"class": "ArchiveProject",
"prefix": "spec",
"outputDir": "spec",
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
"MIT", # Ruby Specs
],
},
},
"distributions": {
# ------------- Distributions -------------
"TRUFFLERUBY-ANNOTATIONS": {
"dependencies": [
"org.truffleruby.annotations"
],
"description": "TruffleRuby Annotations",
"license": ["EPL-1.0"]
},
# Required to share code between the launcher and the rest,
# since the rest cannot depend on the launcher and the shared code cannot be there.
# This code is loaded twice in different classloaders, therefore any created instances should not be passed around.
"TRUFFLERUBY-SHARED": {
"dependencies": [
"org.truffleruby.shared"
],
"distDependencies": [
"truffleruby:TRUFFLERUBY-ANNOTATIONS",
"sdk:GRAAL_SDK",
],
"description": "TruffleRuby Shared constants and predicates",
"license": ["EPL-1.0"]
},
"TRUFFLERUBY-PROCESSOR": {
"dependencies": [
"org.truffleruby.processor"
],
"distDependencies": [
"truffleruby:TRUFFLERUBY-ANNOTATIONS",
],
"description": "TruffleRuby Annotation Processor",
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
],
},
"TRUFFLERUBY-SERVICES": {
"dependencies": [
"org.truffleruby.services"
],
"distDependencies": [
"sdk:GRAAL_SDK",
],
"description": "TruffleRuby services",
"license": ["EPL-1.0"]
},
"TRUFFLERUBY": {
"mainClass": "org.truffleruby.launcher.RubyLauncher",
"dependencies": [
"org.truffleruby",
"org.truffleruby.core",
],
"distDependencies": [
"truffle:TRUFFLE_API",
"truffle:TRUFFLE_NFI",
"truffleruby:TRUFFLERUBY-ANNOTATIONS",
"truffleruby:TRUFFLERUBY-SHARED",
],
"description": "TruffleRuby",
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
"BSD-new", # Rubinius
"BSD-simplified", # MRI
"MIT", # Joni, JCodings
],
},
"TRUFFLERUBY-LAUNCHER": {
"dependencies": [
"org.truffleruby.launcher"
],
"distDependencies": [
"truffleruby:TRUFFLERUBY-ANNOTATIONS",
"truffleruby:TRUFFLERUBY-SHARED",
"truffleruby:TRUFFLERUBY-SERVICES", # For the file type detection service
"sdk:GRAAL_SDK",
"sdk:LAUNCHER_COMMON",
],
"description": "TruffleRuby Launcher",
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
],
},
# Set of extra files to extract to run Ruby
"TRUFFLERUBY-ZIP": {
"native": True, # Not Java
"relpath": True,
"platformDependent": True, # org.truffleruby.cext, org.truffleruby.bin
"dependencies": [
"org.truffleruby.bin",
"org.truffleruby.lib",
"org.truffleruby.doc",
],
"description": "TruffleRuby libraries, documentation, bin directory",
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
"MIT", # minitest, did_you_mean, rake
"BSD-simplified", # MRI
"BSD-new", # Rubinius, FFI
],
},
"TRUFFLERUBY_GRAALVM_SUPPORT" : {
"native": True,
"platformDependent": True,
"description" : "TruffleRuby support distribution for the GraalVM",
"dependencies" : [
"org.truffleruby.cext",
],
"layout" : {
"./" : [
"file:lib", # contains some results from org.truffleruby.cext
"file:CHANGELOG.md",
"file:README.md",
"file:mx.truffleruby/native-image.properties",
],
"LICENSE_TRUFFLERUBY.md" : "file:LICENCE.md",
"3rd_party_licenses_truffleruby.txt" : "file:3rd_party_licenses.txt",
"bin/" : [
"file:bin/gem",
"file:bin/irb",
"file:bin/rake",
"file:bin/rdoc",
"file:bin/ri",
"file:bin/testrb",
],
"doc/" : [
"file:doc/legal",
"file:doc/user",
],
"src/main/c/openssl/": [
"file:src/main/c/openssl/deprecation.rb",
"file:src/main/c/openssl/extconf.rb",
"file:src/main/c/openssl/*.c",
{
"source_type": "file",
"path": "src/main/c/openssl/*.h",
"exclude": ["src/main/c/openssl/extconf.h"]
},
],
},
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
"MIT", # minitest, did_you_mean, rake
"BSD-simplified", # MRI
"BSD-new", # Rubinius, FFI
],
},
"TRUFFLERUBY-TEST": {
"dependencies": [
"org.truffleruby.test",
"org.truffleruby.test-ruby",
],
"exclude": [
"mx:HAMCREST",
"mx:JUNIT"
],
"distDependencies": [
"TRUFFLERUBY",
"truffle:TRUFFLE_TCK"
],
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
],
},
"TRUFFLERUBY-SPECS": {
"native": True, # Not Java
"relpath": True,
"dependencies": [
"org.truffleruby.specs",
],
"description": "TruffleRuby spec files from ruby/spec",
"license": [
"EPL-1.0", # JRuby (we're choosing EPL out of EPL,GPL,LGPL)
"MIT", # Ruby Specs
],
},
},
}
| 34.466793 | 123 | 0.428265 | [
"EPL-1.0",
"Apache-2.0"
] | Quintasan/truffleruby | mx.truffleruby/suite.py | 18,164 | Python |
import torch
import time
from math import pi
import numpy as np
from ..utils.tensorboard import Tensorboard
from ..utils.output import progress
from .convergence import Convergence
from ..model.deepmod import DeepMoD
from typing import Optional
def train(model: DeepMoD,
data: torch.Tensor,
target: torch.Tensor,
optimizer,
sparsity_scheduler,
log_dir: Optional[str] = None,
max_iterations: int = 10000,
write_iterations: int = 25,
**convergence_kwargs) -> None:
"""[summary]
Args:
model (DeepMoD): [description]
data (torch.Tensor): [description]
target (torch.Tensor): [description]
optimizer ([type]): [description]
sparsity_scheduler ([type]): [description]
log_dir (Optional[str], optional): [description]. Defaults to None.
max_iterations (int, optional): [description]. Defaults to 10000.
"""
start_time = time.time()
board = Tensorboard(log_dir) # initializing tb board
# Training
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, max_iterations + 1):
# ================== Training Model ============================
prediction, time_derivs, thetas = model(data)
MSE = torch.mean((prediction - target)**2, dim=0) # loss per output
Reg = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
        loss = torch.sum(MSE + Reg)
# Optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# ====================== Logging =======================
# We calculate the normalization factor and the l1_norm
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
# Write progress to command line and tensorboard
if iteration % write_iterations == 0:
_ = model.sparse_estimator(thetas, time_derivs) # calculating l1 adjusted coeffs but not setting mask
progress(iteration, start_time, max_iterations, loss.item(),
torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
if model.estimator_coeffs() is None:
estimator_coeff_vectors = [torch.zeros_like(coeff) for coeff in model.constraint_coeffs(sparse=True, scaled=False)] # It doesnt exist before we start sparsity, so we use zeros
else:
estimator_coeff_vectors = model.estimator_coeffs()
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors)
# ================== Validation and sparsity =============
# Updating sparsity and or convergence
sparsity_scheduler(iteration, torch.sum(l1_norm))
if sparsity_scheduler.apply_sparsity is True:
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
print(model.sparsity_masks)
# Checking convergence
convergence(iteration, torch.sum(l1_norm))
if convergence.converged is True:
print('Algorithm converged. Stopping training.')
break
board.close()
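# Editor's note (illustrative sketch, all object names below are hypothetical): a typical call
# builds a DeepMoD model, an optimizer over its parameters and a sparsity scheduler, then trains:
#
#   model = DeepMoD(network, library, estimator, constraint)   # components defined elsewhere
#   optimizer = torch.optim.Adam(model.parameters(), betas=(0.99, 0.99), amsgrad=True)
#   train(model, X_train, y_train, optimizer, scheduler, log_dir='runs/demo', max_iterations=5000)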
def train_auto_split(model: DeepMoD,
data: torch.Tensor,
target: torch.Tensor,
optimizer,
sparsity_scheduler,
split: float = 0.8,
log_dir: Optional[str] = None,
max_iterations: int = 10000,
write_iterations: int = 25,
**convergence_kwargs) -> None:
"""[summary]
Args:
model (DeepMoD): [description]
data (torch.Tensor): [description]
target (torch.Tensor): [description]
optimizer ([type]): [description]
sparsity_scheduler ([type]): [description]
log_dir (Optional[str], optional): [description]. Defaults to None.
max_iterations (int, optional): [description]. Defaults to 10000.
"""
start_time = time.time()
board = Tensorboard(log_dir) # initializing tb board
# Splitting data, assumes data is already randomized
n_train = int(split * data.shape[0])
n_test = data.shape[0] - n_train
data_train, data_test = torch.split(data, [n_train, n_test], dim=0)
target_train, target_test = torch.split(target, [n_train, n_test], dim=0)
# Training
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, max_iterations + 1):
# ================== Training Model ============================
prediction, time_derivs, thetas = model(data_train)
MSE = torch.mean((prediction - target_train)**2, dim=0) # loss per output
Reg = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
loss = torch.sum(MSE + Reg)
# Optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# ====================== Logging =======================
# We calculate the normalization factor and the l1_norm
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
# Validation loss
with torch.no_grad():
prediction_test = model.func_approx(data_test)[0]
MSE_test = torch.mean((prediction_test - target_test)**2, dim=0) # loss per output
# Write progress to command line and tensorboard
if iteration % write_iterations == 0:
_ = model.sparse_estimator(thetas, time_derivs) # calculating l1 adjusted coeffs but not setting mask
estimator_coeff_vectors = model.estimator_coeffs()
progress(iteration, start_time, max_iterations, loss.item(),
torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test)
# ================== Validation and sparsity =============
# Updating sparsity and or convergence
sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)
#sparsity_scheduler(torch.sum(MSE_test), model, optimizer)
if sparsity_scheduler.apply_sparsity is True:
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
print(model.sparsity_masks)
# Checking convergence
convergence(iteration, torch.sum(l1_norm))
if convergence.converged is True:
print('Algorithm converged. Stopping training.')
break
board.close()
def train_auto_split_scaled(model: DeepMoD,
data: torch.Tensor,
target: torch.Tensor,
optimizer,
sparsity_scheduler,
split: float = 0.8,
log_dir: Optional[str] = None,
max_iterations: int = 10000,
write_iterations: int = 25,
**convergence_kwargs) -> None:
"""[summary]
Args:
model (DeepMoD): [description]
data (torch.Tensor): [description]
target (torch.Tensor): [description]
optimizer ([type]): [description]
sparsity_scheduler ([type]): [description]
log_dir (Optional[str], optional): [description]. Defaults to None.
max_iterations (int, optional): [description]. Defaults to 10000.
"""
start_time = time.time()
board = Tensorboard(log_dir) # initializing tb board
# Splitting data, assumes data is already randomized
n_train = int(split * data.shape[0])
n_test = data.shape[0] - n_train
data_train, data_test = torch.split(data, [n_train, n_test], dim=0)
target_train, target_test = torch.split(target, [n_train, n_test], dim=0)
# Training
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, max_iterations + 1):
# ================== Training Model ============================
prediction, time_derivs, thetas = model(data_train)
MSE = torch.mean((prediction - target_train)**2, dim=0) # loss per output
theta_norms = [torch.norm(theta, dim=0) for theta in thetas]
time_deriv_norms = [torch.norm(dt, dim=0) for dt in time_derivs]
normed_thetas = [theta / norm for theta, norm in zip(thetas, theta_norms)]
normed_time_derivs = [dt / norm for dt, norm in zip(time_derivs, time_deriv_norms)]
Reg = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(normed_time_derivs, normed_thetas, model.constraint_coeffs(scaled=True, sparse=True))])
        loss = torch.sum(MSE + Reg)
# Optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# ====================== Logging =======================
# We calculate the normalization factor and the l1_norm
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
# Validation loss
prediction_test, coordinates = model.func_approx(data_test)
time_derivs_test, thetas_test = model.library((prediction_test, coordinates))
MSE_test = torch.mean((prediction_test - target_test)**2, dim=0) # loss per output
Reg_test = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))])
loss_test = torch.sum(MSE_test + Reg_test)
# Write progress to command line and tensorboard
if iteration % write_iterations == 0:
_ = model.sparse_estimator(thetas, time_derivs) # calculating l1 adjusted coeffs but not setting mask
estimator_coeff_vectors = model.estimator_coeffs()
progress(iteration, start_time, max_iterations, loss.item(),
torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test)
# ================== Validation and sparsity =============
# Updating sparsity and or convergence
sparsity_scheduler(loss_test, model, optimizer)
#sparsity_scheduler(torch.sum(MSE_test), model, optimizer)
if sparsity_scheduler.apply_sparsity is True:
with torch.no_grad():
checkpoint = torch.load(sparsity_scheduler.path)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
print(model.sparsity_masks)
# Checking convergence
convergence(iteration, torch.sum(l1_norm))
if convergence.converged is True:
print('Algorithm converged. Stopping training.')
break
board.close()
def train_auto_split_MSE(model: DeepMoD,
data: torch.Tensor,
target: torch.Tensor,
optimizer,
sparsity_scheduler,
split: float = 0.8,
log_dir: Optional[str] = None,
max_iterations: int = 10000,
write_iterations: int = 25,
**convergence_kwargs) -> None:
"""[summary]
Args:
model (DeepMoD): [description]
data (torch.Tensor): [description]
target (torch.Tensor): [description]
optimizer ([type]): [description]
sparsity_scheduler ([type]): [description]
log_dir (Optional[str], optional): [description]. Defaults to None.
max_iterations (int, optional): [description]. Defaults to 10000.
"""
start_time = time.time()
board = Tensorboard(log_dir) # initializing tb board
# Training
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, max_iterations + 1):
# ================== Training Model ============================
prediction, time_derivs, thetas = model(data)
MSE = torch.mean((prediction - target)**2, dim=0) # loss per output
#Reg = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
# for dt, theta, coeff_vector in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
        loss = torch.sum(MSE)
# Optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# ====================== Logging =======================
with torch.no_grad():
# We calculate the normalization factor and the l1_norm
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint.coeff_vectors, dim=1)), dim=0)
# Validation loss
prediction_test = model.func_approx(data)[0]
MSE_test = torch.mean((prediction_test - target)**2, dim=0) # loss per output
# Write progress to command line and tensorboard
if iteration % write_iterations == 0:
_ = model.sparse_estimator(thetas, time_derivs) # calculating l1 adjusted coeffs but not setting mask
estimator_coeff_vectors = model.estimator_coeffs()
progress(iteration, start_time, max_iterations, loss.item(),
torch.sum(MSE).item(), torch.sum(MSE).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, MSE, l1_norm, model.constraint.coeff_vectors, model.constraint.coeff_vectors, estimator_coeff_vectors, MSE_test=MSE_test)
# ================== Validation and sparsity =============
# Updating sparsity and or convergence
sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)
if sparsity_scheduler.apply_sparsity is True:
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
print(model.sparsity_masks)
# Checking convergence
convergence(iteration, torch.sum(l1_norm))
if convergence.converged is True:
print('Algorithm converged. Stopping training.')
break
board.close()
def train_split_full(model: DeepMoD,
data: torch.Tensor,
target: torch.Tensor,
optimizer,
sparsity_scheduler,
test = 'mse',
split: float = 0.8,
log_dir: Optional[str] = None,
max_iterations: int = 10000,
write_iterations: int = 25,
**convergence_kwargs) -> None:
"""[summary]
Args:
model (DeepMoD): [description]
data (torch.Tensor): [description]
target (torch.Tensor): [description]
optimizer ([type]): [description]
sparsity_scheduler ([type]): [description]
log_dir (Optional[str], optional): [description]. Defaults to None.
max_iterations (int, optional): [description]. Defaults to 10000.
"""
start_time = time.time()
board = Tensorboard(log_dir) # initializing tb board
# Splitting data, assumes data is already randomized
n_train = int(split * data.shape[0])
n_test = data.shape[0] - n_train
data_train, data_test = torch.split(data, [n_train, n_test], dim=0)
target_train, target_test = torch.split(target, [n_train, n_test], dim=0)
# Training
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, max_iterations + 1):
# ================== Training Model ============================
prediction, time_derivs, thetas = model(data_train)
MSE = torch.mean((prediction - target_train)**2, dim=0) # loss per output
Reg = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
loss = torch.sum(MSE + Reg)
# Optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
if iteration % write_iterations == 0:
# ================== Validation costs ================
prediction_test, coordinates = model.func_approx(data_test)
time_derivs_test, thetas_test = model.library((prediction_test, coordinates))
with torch.no_grad():
MSE_test = torch.mean((prediction_test - target_test)**2, dim=0) # loss per output
Reg_test = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))])
loss_test = torch.sum(MSE_test + Reg_test)
# ====================== Logging =======================
_ = model.sparse_estimator(thetas, time_derivs) # calculating l1 adjusted coeffs but not setting mask
estimator_coeff_vectors = model.estimator_coeffs()
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
progress(iteration, start_time, max_iterations, loss.item(),
torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test)
# ================== Sparsity update =============
# Updating sparsity and or convergence
#sparsity_scheduler(iteration, l1_norm)
if iteration % write_iterations == 0:
if test == 'mse':
sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)
else:
sparsity_scheduler(iteration, loss_test, model, optimizer)
if sparsity_scheduler.apply_sparsity is True:
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
# ================= Checking convergence
convergence(iteration, torch.sum(l1_norm))
if convergence.converged is True:
print('Algorithm converged. Stopping training.')
break
board.close() | 46.811189 | 246 | 0.60736 | [
"MIT"
] | GJBoth/MultiTaskPINN | src/multitaskpinn/training/.ipynb_checkpoints/training-checkpoint.py | 20,082 | Python |
#!/usr/bin/env python
import paho.mqtt.client as mqtt
import random
from logger import error, info
from message import Message
from pubsub import publish
MQTT_ERR_SUCCESS = 0
# MQTT client wrapper for use with Mainflux.
class MQTT:
# Initialize the class with topics that will be used
# during publish and possibly subscribe.
def __init__(self, topics, client_id='mqtt-client', clean_session=True, qos=0, queue=None):
info('mqtt', 'init')
self.connected = False
self.qos = qos
self.queue = queue
self.subscribe = queue != None
# Handle topics string or slice.
if isinstance(topics, basestring):
topics = topics.split(',')
self.topics = topics
# Add randomness to client_id.
client_id = client_id+'-'+str(random.randint(1000, 9999))
self.client = mqtt.Client(client_id=client_id, clean_session=clean_session)
self.client.on_connect = self.on_connect
self.client.on_disconnect = self.on_disconnect
self.client.on_message = self.on_message
self.client.on_publish = self.on_publish
self.client.on_subscribe = self.on_subscribe
# Connect to the MQTT adapter endpoint and start the internal
# paho-mqtt loop.
def connect(self, host, port, username, password):
info('mqtt', 'connect')
self.client.username_pw_set(username, password)
self.client.connect(host, port=port, keepalive=60)
self.client.loop_start()
# Disconnect the client.
def disconnect(self):
self.connected = False
self.client.loop_stop()
self.client.disconnect()
dur_count = 0.0
dur_total = 0.0
def dur_avg(self):
return self.dur_total / self.dur_count
# The callback for when the client receives a CONNACK response from the server.
def on_connect(self, client, userdata, flags, rc):
info('mqtt', 'on_connect '+str(rc))
if rc == MQTT_ERR_SUCCESS:
self.connected = True
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
if self.subscribe:
subs = []
for topic in self.topics:
info('mqtt', 'subscribe: channels/'+topic+'/messages, qos: '+str(self.qos))
subs.append(('channels/'+topic+'/messages', self.qos))
info('mqtt', 'subscriptions: '+str(subs))
self.client.subscribe(subs)
# When the client disconnects make sure to stop the loop.
def on_disconnect(self, client, userdata, rc):
info('mqtt', 'on_disconnect')
if rc != MQTT_ERR_SUCCESS:
info('mqtt', 'on_disconnect unexpected: '+str(rc))
#self.disconnect()
#self.client.reconnect()
# The callback for when a PUBLISH message is received from the server.
def on_message(self, client, userdata, msg):
info('mqtt', 'on_message:'+msg.topic+': '+str(msg.payload))
try:
if self.queue:
m = Message(msg.topic, msg.payload)
if m.is_valid():
if m.for_device() and not m.get_name() in ['CONNECTED', 'REGISTERED', 'TX']:
self.queue.put(m)
publish(m, channel='inbound')
else:
publish(m, channel='inbound')
except Exception as ex:
error('mqtt', 'on_message: '+str(ex))
# When a message has been published.
def on_publish(self, client, userdata, mid):
info('mqtt', 'on_publish: '+str(mid))
# When a subscription is complete.
    def on_subscribe(self, client, userdata, mid, granted_qos):
        info('mqtt', 'on_subscribe mid: '+str(mid))
# Publish a message to the topic provided on init.
def publish(self, msg=None):
if msg:
info('mqtt', 'publish: '+str(msg))
mid = self.client.publish(msg.topic, payload=msg.payload_str(), qos=self.qos)
self.dur_count += 1
self.dur_total += msg.get_duration()
info('mqtt', 'published '+str(self.dur_count)+' for an avg. duration of '+str(self.dur_avg())+' secs. with '+str(self.dur_total)+' secs. in total')
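# Editor's note: minimal usage sketch; the broker host, credentials and channel ID below are
# hypothetical placeholders, not values taken from the original project.
if __name__ == '__main__':
    example = MQTT(topics='example-channel', client_id='example-client')
    example.connect('localhost', 1883, 'example-user', 'example-password')
    example.publish(Message('channels/example-channel/messages', '{"bn":"example"}'))
    example.disconnect()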
| 37.333333 | 159 | 0.613487 | [
"MIT"
] | dustinengle/smart-mailbox | gateway/kit/mqtt.py | 4,256 | Python |
from .results import Result
class K4AException(Exception):
pass
class K4ATimeoutException(K4AException):
pass
def _verify_error(res: int):
"""
Validate k4a_module result
"""
res = Result(res)
if res == Result.Failed:
raise K4AException()
elif res == Result.Timeout:
raise K4ATimeoutException()
| 16.571429 | 40 | 0.658046 | [
"MIT"
] | 1079931505/ME336-Yellow-Team-SUSTech | deepclaw/driver/sensors/camera/pyk4a_cfg/errors.py | 348 | Python |
#!/usr/bin/env python
# Copyright (c) 2014, OpenCog Foundation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the OpenCog Foundation nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "James Diprose"
import bpy
import mathutils
from mathutils import Matrix, Vector
from math import acos, degrees
from xml.dom.minidom import parseString
from xml.etree.ElementTree import Element, SubElement, Comment, tostring
class LinkRef(object):
def __init__(self, name, link_type):
self.name = name
self.link_type = link_type
def to_xml(self, xml_parent):
link_ref = SubElement(xml_parent, self.link_type)
link_ref.set('link', self.name)
        return link_ref
class Origin(object):
def __init__(self, x, y, z, roll, pitch, yaw):
self.x = x
self.y = y
self.z = z
self.roll = roll
self.pitch = pitch
self.yaw = yaw
def to_xml(self, xml_parent):
origin = SubElement(xml_parent, 'origin')
origin.set('xyz', "{0} {1} {2}".format(self.x, self.y, self.z))
origin.set('rpy', "{0} {1} {2}".format(self.roll, self.pitch, self.yaw))
return origin
class Axis(object):
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def to_xml(self, xml_parent):
axis = SubElement(xml_parent, 'axis')
axis.set('xyz', "{0} {1} {2}".format(self.x, self.y, self.z))
return axis
class Limit(object):
def __init__(self, velocity, effort, lower, upper):
self.velocity = velocity
self.effort = effort
self.lower = lower
self.upper = upper
def to_xml(self, xml_parent):
limit = SubElement(xml_parent, 'limit')
limit.set('velocity', str(self.velocity))
limit.set('effort', str(self.effort))
limit.set('lower', str(self.lower))
limit.set('upper', str(self.upper))
return limit
class Joint(object):
def __init__(self, name, type, parent_link, child_link, origin, axis, limit):
self.name = name
self.type = type
self.parent_link = parent_link
self.child_link = child_link
self.origin = origin
self.axis = axis
self.limit = limit
@staticmethod
def to_link_name(name):
return name.replace('_joint', '_link')
def to_xml(self, xml_parent):
joint = SubElement(xml_parent, 'joint')
joint.set('name', self.name)
joint.set('type', self.type)
self.parent_link.to_xml(joint)
self.child_link.to_xml(joint)
self.origin.to_xml(joint)
self.axis.to_xml(joint)
self.limit.to_xml(joint)
return joint
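# Editor's note (illustrative, names and numbers hypothetical): a joint is assembled from the small
# wrapper classes above and serialised into an existing parent element such as the xacro macro:
#
#   shoulder = Joint('l_shoulder_joint', 'revolute',
#                    LinkRef('l_torso_link', 'parent'), LinkRef('l_shoulder_link', 'child'),
#                    Origin(0, 0.1, 0.2, 0, 0, 0), Axis(1, 0, 0), Limit(1.0, 10.0, -1.57, 1.57))
#   shoulder.to_xml(macro)   # appends <joint name="l_shoulder_joint" type="revolute">...</joint>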
def get_root_bone(bones):
for bone in bones:
if bone.parent is None:
return bone
def get_bone(name, bones):
for bone in bones:
if bone.name == name:
return bone
def is_joint(bone):
return bone.name.endswith('joint')
def to_ros_coord(x, y, z):
return (z, -x, y)
def add_prefix(name):
if name.startswith('l_'):
return name.replace('l_', '${prefix}_')
elif name.startswith('r_'):
return name.replace('r_', '${prefix}_')
return name
class Link(object):
def __init__(self, name):
self.name = name
def to_xml(self, xml_parent):
        link = SubElement(xml_parent, 'link')
link.set('name', self.name)
return link
# joints, links
#if is_root:
# global visited_joints
# global links
# links = []
# visited_joints = {}
#joints = []
# visited_joints[parent_bone.name] = True
def has_been_visited(name, joints):
visited = False
for joint in joints:
if name == joint.name:
visited = True
break
return visited
def generate_links_and_joints(parent_pose_bone, links=[], joints=[]):
    # Lazily create the shared visited-joint map the first time this function runs.
    global visited_joints
    try:
        visited_joints
    except NameError:
        visited_joints = {}
print(len(visited_joints))
visited_joints[parent_pose_bone.name] = True
if is_joint(parent_pose_bone):
parent_edit_bone = parent_pose_bone.bone
for child_pose_bone in parent_pose_bone.children:
child_edit_bone = child_pose_bone.bone
if is_joint(child_pose_bone) and child_pose_bone.name not in visited_joints:
# Parent & child
parent_link = LinkRef(Joint.to_link_name(parent_pose_bone.name), 'parent')
child_link = LinkRef(Joint.to_link_name(child_pose_bone.name), 'child')
# Origin
dX = round(parent_pose_bone.head[0] - child_pose_bone.head[0], 4)
dY = round(parent_pose_bone.head[1] - child_pose_bone.head[1], 4)
dZ = round(parent_pose_bone.head[2] - child_pose_bone.head[2], 4)
point = to_ros_coord(dX, dY, dZ)
#rot = parent_edit_bone.localOrientation.to_euler()
#parent_pose_bone.worldOrientation
#matrix_final = parent_edit_bone.id_data.matrix_world * parent_edit_bone.matrix
#angles = get_bone_rotation(parent_edit_bone)
mat = child_pose_bone.id_data.matrix_world
#print(str((parent_pose_bone.matrix).to_euler()))
print("angle of " + child_pose_bone.name + ": " + str((mat * child_pose_bone.matrix).to_euler()) )
origin = Origin(point[0], point[1], point[2], 0, 0, 0)
axis = Axis(1, 0, 0)
limit = Limit(0, 0, 0, 0)
# Joint
joint = Joint(child_pose_bone.name, 'revolute', parent_link, child_link, origin, axis, limit)
joints.append(joint)
link = Link(Joint.to_link_name(child_pose_bone.name))
links.append(link)
(joints, links) = generate_links_and_joints(child_pose_bone, links, joints)
print("{0} to {1}: ({2}, {3}, {4}, {5})".format(child_pose_bone.name, child_edit_bone.name, point[0], point[1], point[2], parent_pose_bone.vector))
return (joints, links)
def pretty_print(xml_element):
ugly_str = tostring(xml_element, 'utf-8')
mini_dom_str = parseString(ugly_str)
return mini_dom_str.toprettyxml(indent="\t")
rig = bpy.data.objects['Armature']
root_pose_bone = get_root_bone(rig.pose.bones)
print("root_bone: " + str(root_pose_bone))
print("is joint: " + str(is_joint(root_pose_bone)))
robot = Element('robot')
robot.set('xmlns:xacro', 'http://ros.org/wiki/xacro')
macro = SubElement(robot, 'xacro:macro')
macro.set('name', 'blender_generated_urdf')
(joints, links) = generate_links_and_joints(root_pose_bone)
print(len(joints))
bone = get_bone('big_bone', rig.pose.bones )
print("BONE: " + str((bone.id_data.matrix_world * bone.matrix).to_euler()))
for link in links:
link.to_xml(macro)
for joint in joints:
joint.to_xml(macro) | 30.733577 | 163 | 0.644698 | [
"BSD-3-Clause"
] | geni-lab/ros_blender_bridge | src/ros_blender_plugin/urdf_exporter.py | 8,421 | Python |
# -*- coding: utf-8 -*-
"""Cisco Identity Services Engine SXPConnections API wrapper.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
get_next_page,
)
class SxpConnections(object):
"""Identity Services Engine SXPConnections API (version: 3.0.0).
Wraps the Identity Services Engine SXPConnections
API and exposes the API as native Python
methods that return native Python objects.
"""
def __init__(self, session, object_factory, request_validator):
"""Initialize a new SxpConnections
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the Identity Services Engine service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(SxpConnections, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
def get_sxp_connections_by_id(self,
id,
headers=None,
**query_parameters):
"""This API allows the client to get a SXP connection by ID.
Args:
id(basestring): id path parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'Accept' in headers:
check_type(headers.get('Accept'),
basestring, may_be_none=False)
if 'ERS-Media-Type' in headers:
check_type(headers.get('ERS-Media-Type'),
basestring)
if 'X-CSRF-TOKEN' in headers:
check_type(headers.get('X-CSRF-TOKEN'),
basestring)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(id, basestring,
may_be_none=False)
_params = {
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
'id': id,
}
e_url = ('/ers/config/sxpconnections/{id}')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_a5b160a5675039b7ddf3dc960c7968_v3_0_0', _api_response)
def get_by_id(self,
id,
headers=None,
**query_parameters):
"""Alias for `get_sxp_connections_by_id <#ciscoisesdk.
api.v3_0_0.sxp_connections.
SxpConnections.get_sxp_connections_by_id>`_
"""
return self.get_sxp_connections_by_id(
id=id,
headers=headers,
**query_parameters
)
def update_sxp_connections_by_id(self,
id,
description=None,
enabled=None,
ip_address=None,
sxp_mode=None,
sxp_node=None,
sxp_peer=None,
sxp_version=None,
sxp_vpn=None,
headers=None,
payload=None,
active_validation=True,
**query_parameters):
"""This API allows the client to update a SXP connection.
Args:
description(string): description, property of the
request body.
enabled(boolean): enabled, property of the request body.
id(string): id, property of the request body.
ip_address(string): ipAddress, property of the request
body.
sxp_mode(string): sxpMode, property of the request body.
sxp_node(string): sxpNode, property of the request body.
sxp_peer(string): sxpPeer, property of the request body.
sxp_version(string): sxpVersion, property of the request
body.
sxp_vpn(string): sxpVpn, property of the request body.
id(basestring): id path parameter.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
payload(dict): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'Accept' in headers:
check_type(headers.get('Accept'),
basestring, may_be_none=False)
if 'ERS-Media-Type' in headers:
check_type(headers.get('ERS-Media-Type'),
basestring)
if 'X-CSRF-TOKEN' in headers:
check_type(headers.get('X-CSRF-TOKEN'),
basestring)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
is_xml_payload = 'application/xml' in _headers.get('Content-Type', [])
if active_validation and is_xml_payload:
check_type(payload, basestring)
if active_validation and not is_xml_payload:
check_type(payload, dict)
check_type(id, basestring,
may_be_none=False)
_params = {
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
'id': id,
}
if is_xml_payload:
_payload = payload
else:
_tmp_payload = {
'id':
id,
'description':
description,
'sxpPeer':
sxp_peer,
'sxpVpn':
sxp_vpn,
'sxpNode':
sxp_node,
'ipAddress':
ip_address,
'sxpMode':
sxp_mode,
'sxpVersion':
sxp_version,
'enabled':
enabled,
}
_payload = {
'ERSSxpConnection': dict_from_items_with_values(_tmp_payload)
}
_payload.update(payload or {})
_payload = dict_from_items_with_values(_payload)
if active_validation and not is_xml_payload:
self._request_validator('jsd_cab8440e21553c3a807d23d05e5e1aa_v3_0_0')\
.validate(_payload)
e_url = ('/ers/config/sxpconnections/{id}')
endpoint_full_url = apply_path_params(e_url, path_params)
request_params = {'data': _payload} if is_xml_payload else {'json': _payload}
if with_custom_headers:
_api_response = self._session.put(endpoint_full_url, params=_params,
headers=_headers,
**request_params)
else:
_api_response = self._session.put(endpoint_full_url, params=_params,
**request_params)
return self._object_factory('bpm_cab8440e21553c3a807d23d05e5e1aa_v3_0_0', _api_response)
def update_by_id(self,
id,
description=None,
enabled=None,
ip_address=None,
sxp_mode=None,
sxp_node=None,
sxp_peer=None,
sxp_version=None,
sxp_vpn=None,
headers=None,
payload=None,
active_validation=True,
**query_parameters):
"""Alias for `update_sxp_connections_by_id <#ciscoisesdk.
api.v3_0_0.sxp_connections.
SxpConnections.update_sxp_connections_by_id>`_
"""
return self.update_sxp_connections_by_id(
id=id,
description=description,
enabled=enabled,
ip_address=ip_address,
sxp_mode=sxp_mode,
sxp_node=sxp_node,
sxp_peer=sxp_peer,
sxp_version=sxp_version,
sxp_vpn=sxp_vpn,
payload=payload,
active_validation=active_validation,
headers=headers,
**query_parameters
)
def delete_sxp_connections_by_id(self,
id,
headers=None,
**query_parameters):
"""This API deletes a SXP connection.
Args:
id(basestring): id path parameter.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'Accept' in headers:
check_type(headers.get('Accept'),
basestring, may_be_none=False)
if 'ERS-Media-Type' in headers:
check_type(headers.get('ERS-Media-Type'),
basestring)
if 'X-CSRF-TOKEN' in headers:
check_type(headers.get('X-CSRF-TOKEN'),
basestring)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(id, basestring,
may_be_none=False)
_params = {
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
'id': id,
}
e_url = ('/ers/config/sxpconnections/{id}')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
_api_response = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_fb665776b98ba815b52515a6_v3_0_0', _api_response)
def delete_by_id(self,
id,
headers=None,
**query_parameters):
"""Alias for `delete_sxp_connections_by_id <#ciscoisesdk.
api.v3_0_0.sxp_connections.
SxpConnections.delete_sxp_connections_by_id>`_
"""
return self.delete_sxp_connections_by_id(
id=id,
headers=headers,
**query_parameters
)
def get_sxp_connections(self,
filter=None,
filter_type=None,
page=None,
size=None,
sortasc=None,
sortdsc=None,
headers=None,
**query_parameters):
"""This API allows the client to get all the SXP connections.
        Filter: [name, description]. To search resources by
        using the toDate column, follow the format DD-MON-YY
        (Example: 13-SEP-18). Day or Year: GET
        /ers/config/guestuser/?filter=toDate.CONTAINS.13
        Month: GET
        /ers/config/guestuser/?filter=toDate.CONTAINS.SEP
        Date: GET
        /ers/config/guestuser/?filter=toDate.CONTAINS.13-SEP-18
        Sorting: [name, description].
Args:
page(int): page query parameter. Page number.
size(int): size query parameter. Number of objects
returned per page.
sortasc(basestring): sortasc query parameter. sort asc.
sortdsc(basestring): sortdsc query parameter. sort desc.
filter(basestring, list, set, tuple): filter query
parameter. **Simple
filtering** should be available through
the filter query string parameter. The
structure of a filter is a triplet of
field operator and value separated with
dots. More than one filter can be sent.
The logical operator common to ALL
filter criteria will be by default AND,
and can be changed by using the
"filterType=or" query string parameter.
Each resource Data model description
should specify if an attribute is a
filtered field. (Operator:
Description),
(EQ: Equals), (NEQ: Not
Equals), (GT: Greater
                            Than), (LT: Less Than),
(STARTSW: Starts With),
(NSTARTSW: Not Starts With),
(ENDSW: Ends With),
(NENDSW: Not Ends With),
(CONTAINS: Contains),
                            (NCONTAINS: Not Contains).
filter_type(basestring): filterType query parameter. The
logical operator common to ALL filter
criteria will be by default AND, and can
be changed by using the parameter.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'Accept' in headers:
check_type(headers.get('Accept'),
basestring, may_be_none=False)
if 'ERS-Media-Type' in headers:
check_type(headers.get('ERS-Media-Type'),
basestring)
if 'X-CSRF-TOKEN' in headers:
check_type(headers.get('X-CSRF-TOKEN'),
basestring)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(page, (int, basestring, list))
check_type(size, (int, basestring, list))
check_type(sortasc, basestring)
check_type(sortdsc, basestring)
check_type(filter, (basestring, list, set, tuple))
check_type(filter_type, basestring)
_params = {
'page':
page,
'size':
size,
'sortasc':
sortasc,
'sortdsc':
sortdsc,
'filter':
filter,
'filterType':
filter_type,
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
e_url = ('/ers/config/sxpconnections')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_c56dfcff6285f9b882c884873d5d6c1_v3_0_0', _api_response)
def get_all(self,
filter=None,
filter_type=None,
page=None,
size=None,
sortasc=None,
sortdsc=None,
headers=None,
**query_parameters):
"""Alias for `get_sxp_connections <#ciscoisesdk.
api.v3_0_0.sxp_connections.
SxpConnections.get_sxp_connections>`_
"""
return self.get_sxp_connections(
filter=filter,
filter_type=filter_type,
page=page,
size=size,
sortasc=sortasc,
sortdsc=sortdsc,
headers=headers,
**query_parameters
)
def get_sxp_connections_generator(self,
filter=None,
filter_type=None,
page=None,
size=None,
sortasc=None,
sortdsc=None,
headers=None,
**query_parameters):
"""This API allows the client to get all the SXP connections.
        Filter: [name, description]. To search resources by
        using the toDate column, follow the format DD-MON-YY
        (Example: 13-SEP-18). Day or Year: GET
        /ers/config/guestuser/?filter=toDate.CONTAINS.13
        Month: GET
        /ers/config/guestuser/?filter=toDate.CONTAINS.SEP
        Date: GET
        /ers/config/guestuser/?filter=toDate.CONTAINS.13-SEP-18
        Sorting: [name, description].
Args:
page(int): page query parameter. Page number.
size(int): size query parameter. Number of objects
returned per page.
sortasc(basestring): sortasc query parameter. sort asc.
sortdsc(basestring): sortdsc query parameter. sort desc.
filter(basestring, list, set, tuple): filter query
parameter. **Simple
filtering** should be available through
the filter query string parameter. The
structure of a filter is a triplet of
field operator and value separated with
dots. More than one filter can be sent.
The logical operator common to ALL
filter criteria will be by default AND,
and can be changed by using the
"filterType=or" query string parameter.
Each resource Data model description
should specify if an attribute is a
filtered field. (Operator:
Description),
(EQ: Equals), (NEQ: Not
Equals), (GT: Greater
                            Than), (LT: Less Than),
(STARTSW: Starts With),
(NSTARTSW: Not Starts With),
(ENDSW: Ends With),
(NENDSW: Not Ends With),
(CONTAINS: Contains),
                            (NCONTAINS: Not Contains).
filter_type(basestring): filterType query parameter. The
logical operator common to ALL filter
criteria will be by default AND, and can
be changed by using the parameter.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
Generator: A generator object containing the following object.
+ RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
yield from get_next_page(
self.get_sxp_connections, dict(
filter=filter,
filter_type=filter_type,
page=page,
size=size,
sortasc=sortasc,
sortdsc=sortdsc,
headers=headers,
**query_parameters
),
access_next_list=["SearchResult", "nextPage", "href"],
access_resource_list=["SearchResult", "resources"])
def get_all_generator(self,
filter=None,
filter_type=None,
page=None,
size=None,
sortasc=None,
sortdsc=None,
headers=None,
**query_parameters):
"""Alias for `get_sxp_connections_generator <#ciscoisesdk.
api.v3_0_0.sxp_connections.
SxpConnections.get_sxp_connections_generator>`_
"""
yield from get_next_page(
self.get_sxp_connections, dict(
filter=filter,
filter_type=filter_type,
page=page,
size=size,
sortasc=sortasc,
sortdsc=sortdsc,
headers=headers,
**query_parameters
),
access_next_list=["SearchResult", "nextPage", "href"],
access_resource_list=["SearchResult", "resources"])
def create_sxp_connections(self,
description=None,
enabled=None,
ip_address=None,
sxp_mode=None,
sxp_node=None,
sxp_peer=None,
sxp_version=None,
sxp_vpn=None,
headers=None,
payload=None,
active_validation=True,
**query_parameters):
"""This API creates a SXP connection.
Args:
description(string): description, property of the
request body.
enabled(boolean): enabled, property of the request body.
ip_address(string): ipAddress, property of the request
body.
sxp_mode(string): sxpMode, property of the request body.
sxp_node(string): sxpNode, property of the request body.
sxp_peer(string): sxpPeer, property of the request body.
sxp_version(string): sxpVersion, property of the request
body.
sxp_vpn(string): sxpVpn, property of the request body.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
payload(dict): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'Accept' in headers:
check_type(headers.get('Accept'),
basestring, may_be_none=False)
if 'ERS-Media-Type' in headers:
check_type(headers.get('ERS-Media-Type'),
basestring)
if 'X-CSRF-TOKEN' in headers:
check_type(headers.get('X-CSRF-TOKEN'),
basestring)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
is_xml_payload = 'application/xml' in _headers.get('Content-Type', [])
if active_validation and is_xml_payload:
check_type(payload, basestring)
if active_validation and not is_xml_payload:
check_type(payload, dict)
_params = {
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
if is_xml_payload:
_payload = payload
else:
_tmp_payload = {
'description':
description,
'sxpPeer':
sxp_peer,
'sxpVpn':
sxp_vpn,
'sxpNode':
sxp_node,
'ipAddress':
ip_address,
'sxpMode':
sxp_mode,
'sxpVersion':
sxp_version,
'enabled':
enabled,
}
_payload = {
'ERSSxpConnection': dict_from_items_with_values(_tmp_payload)
}
_payload.update(payload or {})
_payload = dict_from_items_with_values(_payload)
if active_validation and not is_xml_payload:
self._request_validator('jsd_c371214c759f791c0a522b9eaf5b5_v3_0_0')\
.validate(_payload)
e_url = ('/ers/config/sxpconnections')
endpoint_full_url = apply_path_params(e_url, path_params)
request_params = {'data': _payload} if is_xml_payload else {'json': _payload}
if with_custom_headers:
_api_response = self._session.post(endpoint_full_url, params=_params,
headers=_headers,
**request_params)
else:
_api_response = self._session.post(endpoint_full_url, params=_params,
**request_params)
return self._object_factory('bpm_c371214c759f791c0a522b9eaf5b5_v3_0_0', _api_response)
def create(self,
description=None,
enabled=None,
ip_address=None,
sxp_mode=None,
sxp_node=None,
sxp_peer=None,
sxp_version=None,
sxp_vpn=None,
headers=None,
payload=None,
active_validation=True,
**query_parameters):
"""Alias for `create_sxp_connections <#ciscoisesdk.
api.v3_0_0.sxp_connections.
SxpConnections.create_sxp_connections>`_
"""
return self.create_sxp_connections(
description=description,
enabled=enabled,
ip_address=ip_address,
sxp_mode=sxp_mode,
sxp_node=sxp_node,
sxp_peer=sxp_peer,
sxp_version=sxp_version,
sxp_vpn=sxp_vpn,
payload=payload,
active_validation=active_validation,
headers=headers,
**query_parameters
)
def get_version(self,
headers=None,
**query_parameters):
"""This API helps to retrieve the version information related to
the SXP connections.
Args:
            headers(dict): Dictionary of HTTP Headers to send with the Request.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'Accept' in headers:
check_type(headers.get('Accept'),
basestring, may_be_none=False)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
_params = {
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
e_url = ('/ers/config/sxpconnections/versioninfo')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_c1ceea62877152f6a4cf7ce709f4d0f8_v3_0_0', _api_response)
def bulk_request_for_sxp_connections(self,
operation_type=None,
resource_media_type=None,
headers=None,
payload=None,
active_validation=True,
**query_parameters):
"""This API allows the client to submit the bulk request.
Args:
operation_type(string): operationType, property of the
request body.
resource_media_type(string): resourceMediaType, property
of the request body.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
payload(dict): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'Accept' in headers:
check_type(headers.get('Accept'),
basestring, may_be_none=False)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
is_xml_payload = 'application/xml' in _headers.get('Content-Type', [])
if active_validation and is_xml_payload:
check_type(payload, basestring)
if active_validation and not is_xml_payload:
check_type(payload, dict)
_params = {
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
if is_xml_payload:
_payload = payload
else:
_tmp_payload = {
'operationType':
operation_type,
'resourceMediaType':
resource_media_type,
}
_payload = {
'ConnectionBulkRequest': dict_from_items_with_values(_tmp_payload)
}
_payload.update(payload or {})
_payload = dict_from_items_with_values(_payload)
if active_validation and not is_xml_payload:
self._request_validator('jsd_e390313557e95aa9b8c2453d6f1de1e8_v3_0_0')\
.validate(_payload)
e_url = ('/ers/config/sxpconnections/bulk/submit')
endpoint_full_url = apply_path_params(e_url, path_params)
request_params = {'data': _payload} if is_xml_payload else {'json': _payload}
if with_custom_headers:
_api_response = self._session.put(endpoint_full_url, params=_params,
headers=_headers,
**request_params)
else:
_api_response = self._session.put(endpoint_full_url, params=_params,
**request_params)
return self._object_factory('bpm_e390313557e95aa9b8c2453d6f1de1e8_v3_0_0', _api_response)
def bulk_request(self,
operation_type=None,
resource_media_type=None,
headers=None,
payload=None,
active_validation=True,
**query_parameters):
"""Alias for `bulk_request_for_sxp_connections <#ciscoisesdk.
api.v3_0_0.sxp_connections.
SxpConnections.bulk_request_for_sxp_connections>`_
"""
return self.bulk_request_for_sxp_connections(
operation_type=operation_type,
resource_media_type=resource_media_type,
payload=payload,
active_validation=active_validation,
headers=headers,
**query_parameters
)
def monitor_bulk_status_sxp_connections(self,
bulkid,
headers=None,
**query_parameters):
"""This API allows the client to monitor the bulk request.
Args:
bulkid(basestring): bulkid path parameter.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'Accept' in headers:
check_type(headers.get('Accept'),
basestring, may_be_none=False)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(bulkid, basestring,
may_be_none=False)
_params = {
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
'bulkid': bulkid,
}
e_url = ('/ers/config/sxpconnections/bulk/{bulkid}')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_c2fb20ca5eb79facdda896457507_v3_0_0', _api_response)
def monitor_bulk_status(self,
bulkid,
headers=None,
**query_parameters):
"""Alias for `monitor_bulk_status_sxp_connections <#ciscoisesdk.
api.v3_0_0.sxp_connections.
SxpConnections.monitor_bulk_status_sxp_connections>`_
"""
return self.monitor_bulk_status_sxp_connections(
bulkid=bulkid,
headers=headers,
**query_parameters
)
| 39.658273 | 128 | 0.549274 | [
"MIT"
] | CiscoISE/ciscoisesdk | ciscoisesdk/api/v3_0_0/sxp_connections.py | 44,100 | Python |
# Copyright 2015, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for export_table_to_gcs."""
import json
import os
import unittest
from bigquery.samples.streaming import run
from tests import CloudBaseTest
class TestStreaming(CloudBaseTest):
def test_stream_row_to_bigquery(self):
with open(
os.path.join(self.resource_path, 'streamrows.json'),
'r') as rows_file:
rows = json.load(rows_file)
for result in run(self.constants['projectId'],
self.constants['datasetId'],
self.constants['newTableId'],
rows,
5):
self.assertIsNotNone(json.loads(result))
if __name__ == '__main__':
unittest.main()
| 30.348837 | 74 | 0.658238 | [
"Apache-2.0"
] | xiaopeng163/python-docs-samples | bigquery/tests/test_streaming.py | 1,305 | Python |
import RPi.GPIO as gpio
import time
class Tank:
def __init__(self, name):
self.name = name
def init(self):
gpio.setmode(gpio.BCM)
gpio.setup(17, gpio.OUT)
gpio.setup(22, gpio.OUT)
gpio.setup(23, gpio.OUT)
gpio.setup(24, gpio.OUT)
    def forward(self, sec=None):
        self.init()
        gpio.output(17, True)  #M1 FWD
        gpio.output(22, False) #M1 REV
        gpio.output(23, True)  #M2 FWD
        gpio.output(24, False) #M2 REV
        # run continuously unless a duration is given (init_test passes one)
        if sec is not None:
            time.sleep(sec)
            gpio.cleanup()
def reverse(self, sec):
self.init()
gpio.output(17, False)
gpio.output(22, True)
gpio.output(23, False)
gpio.output(24, True)
time.sleep(sec)
gpio.cleanup()
def left(self, sec):
self.init()
gpio.output(17, False)
gpio.output(22, True)
gpio.output(23, False)
gpio.output(24, False)
time.sleep(sec)
gpio.cleanup()
def right(self, sec):
self.init()
gpio.output(17, False)
gpio.output(22, False)
gpio.output(23, False)
gpio.output(24, True)
time.sleep(sec)
gpio.cleanup()
def stop(self):
self.init()
gpio.output(17, False)
gpio.output(22, False)
gpio.output(23, False)
gpio.output(24, False)
gpio.cleanup()
def init_test(self):
self.forward(.05)
time.sleep(.1)
self.reverse(.05)
time.sleep(.1)
self.left(.05)
time.sleep(.1)
self.right(.05)
print(f"Initialization Test Passed! {self.name} is ready to roll!")
| 22.253333 | 75 | 0.52846 | [
"MIT"
] | JasonGreenwell/Tank-Platform | tank_standalone.py | 1,669 | Python |
import unittest
from ebird.api.validation import is_subnational1
class IsSubnational1Tests(unittest.TestCase):
"""Tests for the is_subnational1 validation function."""
def test_is_subnational1(self):
self.assertTrue(is_subnational1("US-NV"))
def test_invalid_code_is_not_subnational1(self):
self.assertFalse(is_subnational1("U"))
self.assertFalse(is_subnational1("US-"))
def test_country_is_not_subnational1(self):
self.assertFalse(is_subnational1("US"))
def test_subnational2_is_not_subnational1(self):
self.assertFalse(is_subnational1("US-NV-VMT"))
def test_location_is_not_subnational1(self):
self.assertFalse(is_subnational1("L123456"))
| 30.041667 | 60 | 0.744799 | [
"MIT"
] | ProjectBabbler/ebird-api | tests/validation/test_is_subnational1.py | 721 | Python |
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
from torch.nn import Conv2d, Linear
from sparseml.pytorch.optim import QuantizationModifier
from tests.sparseml.pytorch.helpers import LinearNet, create_optim_sgd
from tests.sparseml.pytorch.optim.test_modifier import ScheduledModifierTest
from tests.sparseml.pytorch.helpers import ( # noqa isort:skip
test_epoch,
test_loss,
test_steps_per_epoch,
)
try:
from torch import quantization as torch_quantization
except Exception:
torch_quantization = None
QUANTIZATION_MODIFIERS = [
lambda: QuantizationModifier(
start_epoch=0.0,
disable_quantization_observer_epoch=2,
freeze_bn_stats_epoch=3.0,
),
lambda: QuantizationModifier(start_epoch=2.0, submodules=["seq"]),
lambda: QuantizationModifier(start_epoch=2.0, submodules=["seq"]),
]
def _is_valid_submodule(module_name, submodule_names):
return module_name in submodule_names or any(
module_name.startswith(name) for name in submodule_names
)
def _is_quantiable_module(module):
if isinstance(module, torch.quantization.FakeQuantize):
return False
return (
len(list(module.children())) > 0
or isinstance(module, Conv2d)
or isinstance(module, Linear)
)
def _test_qat_applied(modifier, model):
# test quantization mods are applied
if not modifier.submodules or modifier.submodules == [""]:
assert hasattr(model, "qconfig") and model.qconfig is not None
submodules = [""]
for module in model.modules():
if _is_quantiable_module(module):
assert hasattr(module, "qconfig") and module.qconfig == model.qconfig
else:
assert not hasattr(model, "qconfig") or model.qconfig is None
submodules = modifier.submodules
# check qconfig propagation
for name, module in model.named_modules():
if _is_valid_submodule(name, submodules) and _is_quantiable_module(module):
assert hasattr(module, "qconfig") and module.qconfig is not None
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
reason="Skipping pytorch tests",
)
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_PYTORCH_QUANT_TESTS", False),
reason="Skipping pytorch torch quantization tests",
)
@pytest.mark.skipif(
torch_quantization is None,
reason="torch quantization not available",
)
@pytest.mark.parametrize("modifier_lambda", QUANTIZATION_MODIFIERS, scope="function")
@pytest.mark.parametrize("model_lambda", [LinearNet], scope="function")
@pytest.mark.parametrize("optim_lambda", [create_optim_sgd], scope="function")
class TestQuantizationModifierImpl(ScheduledModifierTest):
def test_lifecycle(
self,
modifier_lambda,
model_lambda,
optim_lambda,
test_steps_per_epoch, # noqa: F811
):
modifier = modifier_lambda()
model = model_lambda()
optimizer = optim_lambda(model)
self.initialize_helper(modifier, model)
for epoch in range(int(modifier.start_epoch)):
assert not modifier.update_ready(epoch, test_steps_per_epoch)
update_epochs = [modifier.start_epoch]
if modifier.disable_quantization_observer_epoch is not None:
update_epochs.append(modifier.disable_quantization_observer_epoch)
if modifier.freeze_bn_stats_epoch is not None:
update_epochs.append(modifier.freeze_bn_stats_epoch)
for epoch in update_epochs:
assert modifier.update_ready(epoch, test_steps_per_epoch)
# test update ready is still true after start epoch
# even if quantization has not been applied yet
assert modifier.update_ready(modifier.start_epoch + 0.1, test_steps_per_epoch)
# test QAT setup
if modifier.start_epoch > 0:
for module in model.modules():
assert not hasattr(module, "qconfig") or module.qconfig is None
else:
# QAT should be applied
_test_qat_applied(modifier, model)
modifier.scheduled_update(
model, optimizer, modifier.start_epoch, test_steps_per_epoch
)
        # test update ready is False after start epoch is applied, before disable epochs
if (
len(update_epochs) == 1
or min(update_epochs[1:]) <= modifier.start_epoch + 1
):
            # test epochs in 0.1 intervals
            for epoch_interval in range(10):
                epoch = modifier.start_epoch + 0.1 * epoch_interval
                assert not modifier.update_ready(epoch, test_steps_per_epoch)
_test_qat_applied(modifier, model)
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
reason="Skipping pytorch tests",
)
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_PYTORCH_QUANT_TESTS", False),
reason="Skipping pytorch torch quantization tests",
)
@pytest.mark.skipif(
torch_quantization is None,
reason="torch quantization not available",
)
def test_quantization_modifier_yaml():
start_epoch = 0.0
submodules = ["block.0", "block.2"]
model_fuse_fn_name = "fuse_module"
disable_quantization_observer_epoch = 2.0
freeze_bn_stats_epoch = 3.0
yaml_str = """
!QuantizationModifier
start_epoch: {start_epoch}
submodules: {submodules}
model_fuse_fn_name: {model_fuse_fn_name}
disable_quantization_observer_epoch: {disable_quantization_observer_epoch}
freeze_bn_stats_epoch: {freeze_bn_stats_epoch}
""".format(
start_epoch=start_epoch,
submodules=submodules,
model_fuse_fn_name=model_fuse_fn_name,
disable_quantization_observer_epoch=disable_quantization_observer_epoch,
freeze_bn_stats_epoch=freeze_bn_stats_epoch,
)
yaml_modifier = QuantizationModifier.load_obj(
yaml_str
) # type: QuantizationModifier
serialized_modifier = QuantizationModifier.load_obj(
str(yaml_modifier)
) # type: QuantizationModifier
obj_modifier = QuantizationModifier(
start_epoch=start_epoch,
submodules=submodules,
model_fuse_fn_name=model_fuse_fn_name,
disable_quantization_observer_epoch=disable_quantization_observer_epoch,
freeze_bn_stats_epoch=freeze_bn_stats_epoch,
)
assert isinstance(yaml_modifier, QuantizationModifier)
assert (
yaml_modifier.start_epoch
== serialized_modifier.start_epoch
== obj_modifier.start_epoch
)
assert (
sorted(yaml_modifier.submodules)
== sorted(serialized_modifier.submodules)
== sorted(obj_modifier.submodules)
)
assert (
yaml_modifier.model_fuse_fn_name
== serialized_modifier.model_fuse_fn_name
== obj_modifier.model_fuse_fn_name
)
assert (
yaml_modifier.disable_quantization_observer_epoch
== serialized_modifier.disable_quantization_observer_epoch
== obj_modifier.disable_quantization_observer_epoch
)
assert (
yaml_modifier.freeze_bn_stats_epoch
== serialized_modifier.freeze_bn_stats_epoch
== obj_modifier.freeze_bn_stats_epoch
)
| 35.206278 | 87 | 0.703605 | [
"Apache-2.0"
] | bharadwaj1098/sparseml | tests/sparseml/pytorch/optim/test_modifier_quantization.py | 7,851 | Python |
# =============================================================================
# periscope-ps (blipp)
#
# Copyright (c) 2013-2016, Trustees of Indiana University,
# All rights reserved.
#
# This software may be modified and distributed under the terms of the BSD
# license. See the COPYING file for details.
#
# This software was created at the Indiana University Center for Research in
# Extreme Scale Technologies (CREST).
# =============================================================================
import time
from unis_client import UNISInstance
import settings
import pprint
from utils import merge_dicts
from requests.exceptions import ConnectionError
logger = settings.get_logger('conf')
class ServiceConfigure(object):
'''
ServiceConfigure is meant to be a generic class for any service
which registers itself to, and gets configuration from UNIS. It
was originally developed for BLiPP, but BLiPP specific features
should be in the BlippConfigure class which extends
ServiceConfigure.
'''
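    # Typical lifecycle sketch. The nested config keys shown are assumptions
    # inferred from how this module reads them, not a complete schema:
    #
    #   sc = ServiceConfigure(initial_config={
    #       "name": "blipp",
    #       "properties": {"configurations": {
    #           "unis_url": "http://localhost:8888",
    #           "unis_poll_interval": 30}}})
    #   sc.initialize()          # register the node and the service in UNIS
    #   delay = sc.refresh()     # later: re-pull config, returns next poll delay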
def __init__(self, initial_config={}, node_id=None, urn=None):
if not node_id:
node_id = settings.UNIS_ID
self.node_id = node_id
self.urn = urn
self.config = initial_config
self.unis = UNISInstance(self.config)
self.node_setup = False
self.service_setup = False
self.exponential_backoff = int(self.config["properties"]["configurations"]["unis_poll_interval"])
def initialize(self):
try:
r = self._setup_node(self.node_id)
if not self.node_setup:
return
self._setup_service()
except ConnectionError:
return
def refresh(self):
try:
r = self.unis.get("/services/" + self.config["id"])
if not r:
logger.warn('refresh', msg="refresh failed")
logger.warn('refresh', msg="re-enable service")
self._setup_service()
else:
self.config = r
if time.time() * 1e+6 + int(self.config['properties']['configurations']['unis_poll_interval']) * 1e+6 >\
self.config['ts'] + self.config['ttl'] * 1e+6:
self._setup_service()
self.exponential_backoff = int(self.config['properties']['configurations']['unis_poll_interval'])
return self.exponential_backoff
except ConnectionError:
self.exponential_backoff = self.exponential_backoff * 2
return self.exponential_backoff
def _setup_node(self, node_id):
config = self.config
props = self.config["properties"]["configurations"]
logger.debug('_setup_node', config=pprint.pformat(config))
hostname = settings.HOSTNAME
urn = settings.HOST_URN if not self.urn else self.urn
if node_id:
r = self.unis.get("/nodes/" + str(node_id))
if not r:
logger.warn('_setup_node', msg="node id %s not found" % node_id)
r = self.unis.post("/nodes",
data={
"$schema": settings.SCHEMAS["nodes"],
"name": hostname,
"urn": urn,
"id": node_id})
if not node_id:
r = self.unis.get("/nodes?urn=" + urn)
if r and len(r):
r = r[0]
logger.info('_setup_node',
msg="Found node with our URN and id %s" % r["id"])
else:
r = self.unis.post("/nodes",
data={
"$schema": settings.SCHEMAS["nodes"],
"name": hostname,
"urn": urn})
if r:
self.node_id = r["id"]
if r:
if isinstance(r, list):
r = r[0]
config["runningOn"] = {
"href": r["selfRef"],
"rel": "full"}
self.node_setup = True
else:
config["runningOn"] = {"href": ""}
logger.warn('_setup_node', msg="Unable to set up BLiPP node in UNIS at %s" % props["unis_url"])
def _setup_service(self):
config = self.config
props = self.config["properties"]["configurations"]
logger.debug('_setup_service', config=pprint.pformat(config))
r = None
if config.get("id", None):
r = self.unis.get("/services/" + config["id"])
if not r:
logger.warn('_setup_service',
msg="service id not specified or not found "\
"unis instance ...querying for service")
rlist = self.unis.get("/services?name=" + config.get("name", None) +\
"&runningOn.href=" + config["runningOn"]["href"] + "&limit=2")
# loop over the returned services and find one that
# doesn't return 410 see
# https://uisapp2.iu.edu/jira-prd/browse/GEMINI-98
if rlist:
for i in range(len(rlist)):
r = self.unis.get('/services/' + rlist[i]["id"])
if r:
logger.info('_setup_service',
msg="%s service found with id %s" % (config["name"], r["id"]))
break
else:
logger.warn('_setup_service',
msg="no service found by id or querying "\
"...creating new service at %s" % props["unis_url"])
if isinstance(r, dict) and r:
merge_dicts(config, r)
# always update UNIS with the merged config
r = None
if config.get("id", None):
r = self.unis.put("/services/" + config["id"], data=config)
if not r:
r = self.unis.post("/services", data=config)
if r and isinstance(r, dict):
merge_dicts(config, r)
if r:
self.service_setup = True
else:
logger.warn('_setup_service', msg="unable to set up service in UNIS")
def get(self, key, default=None):
try:
return self.config[key]
except KeyError:
return default
def __getitem__(self, key):
'''
This allows an object which is an instance of this class to behave
like a dictionary when queried with [] syntax
'''
return self.config[key]
| 38.835294 | 120 | 0.518025 | [
"BSD-3-Clause"
] | periscope-ps/blipp | blipp/conf.py | 6,602 | Python |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from pyrfume.odorants import from_cids
df1 = pd.read_csv('experiment1_comparisons.csv',
header=0,index_col=0,names=['A','B','Similarity'])
df1_cids = pd.read_csv('experiment1_cids.csv', index_col=0)
df1_cids = df1_cids.applymap(lambda x:x.replace('[','').replace(']','').strip().replace(' ',','))
df1_cids
df1.loc[:, ['A','B']] = df1.loc[:, ['A','B']].applymap(lambda x:df1_cids.loc[x]['Mixture Cids'])
df1.head()
df2 = pd.read_csv('experiment2_comparisons.csv',
header=0,index_col=0,names=['A','B','Similarity'])
df2_cids = pd.read_csv('experiment2_cids.csv', index_col=0)
df2_cids = df2_cids.applymap(lambda x:x.replace('[','').replace(']','').strip().replace(' ',','))
df2_cids
df2.loc[:, ['A','B']] = df2.loc[:, ['A','B']].applymap(lambda x:df2_cids.loc[x]['Mixture Cids'])
df2.head()
df3 = pd.read_csv('experiment3_comparisons.csv',
header=0,index_col=0,names=['A','B','Similarity'])
df3.head()
df = pd.concat([df1, df2, df3])
df.to_csv('behavior-main.csv')
cids1 = df1_cids['Mixture Cids'].apply(str.split, args=(',',)).sum()
cids2 = df2_cids['Mixture Cids'].apply(str.split, args=(',',)).sum()
cids3 = list(df3[['A', 'B']].values.ravel())
cids = cids1 + cids2 + cids3
cids = list(set(map(int, cids)))
molecules_info = from_cids(cids)
pd.DataFrame(molecules_info).set_index('CID').to_csv('molecules-info.csv')
| 32.076923 | 97 | 0.641487 | [
"MIT"
] | CastroLab/pyrfume-data | snitz_2013/main.py | 1,668 | Python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Deals with K-mers and K-mer distribution from reads or genome
"""
from __future__ import print_function
import os.path as op
import sys
import logging
import math
import numpy as np
from collections import defaultdict
from jcvi.graphics.base import (
plt,
asciiplot,
set_human_axis,
savefig,
markup,
panel_labels,
normalize_axes,
set_ticklabels_arial,
write_messages,
)
from jcvi.formats.fasta import Fasta
from jcvi.formats.base import BaseFile, must_open, get_number
from jcvi.utils.cbook import thousands, percentage
from jcvi.assembly.automaton import iter_project
from jcvi.apps.grid import MakeManager
from jcvi.apps.base import OptionParser, ActionDispatcher, sh, need_update, Popen, PIPE
KMERYL, KSOAP, KALLPATHS = range(3)
class KmerSpectrum(BaseFile):
def __init__(self, histfile):
self.load_data(histfile)
def load_data(self, histfile):
self.data = []
self.totalKmers = 0
self.hist = {}
kformat = self.guess_format(histfile)
kformats = ("Meryl", "Soap", "AllPaths")
logging.debug("Guessed format: {0}".format(kformats[kformat]))
fp = open(histfile)
for rowno, row in enumerate(fp):
if row[0] == "#":
continue
if kformat == KSOAP:
K = rowno + 1
counts = int(row.strip())
else: # meryl histogram
K, counts = row.split()[:2]
K, counts = int(K), int(counts)
Kcounts = K * counts
self.totalKmers += Kcounts
self.hist[K] = Kcounts
self.data.append((K, counts))
def guess_format(self, histfile):
# Guess the format of the Kmer histogram
fp = open(histfile)
for row in fp:
if row.startswith("# 1:"):
return KALLPATHS
if len(row.split()) == 1:
return KSOAP
return KMERYL
def get_xy(self, vmin=1, vmax=100):
self.counts = sorted((a, b) for a, b in self.hist.items() if vmin <= a <= vmax)
return zip(*self.counts)
def analyze(self, ploidy=2, K=23, covmax=1000000):
"""
Analyze Kmer spectrum, calculations derived from
allpathslg/src/kmers/KmerSpectra.cc
"""
from math import sqrt
data = self.data
kf_ceil = max(K for (K, c) in data)
if kf_ceil > covmax:
exceeds = sum(1 for (K, c) in data if K > covmax)
logging.debug(
"A total of {0} distinct K-mers appear > "
"{1} times. Ignored ...".format(exceeds, covmax)
)
kf_ceil = covmax
nkf = kf_ceil + 1
a = [0] * nkf
for kf, c in data:
if kf > kf_ceil:
continue
a[kf] = c
ndk = a # number of distinct kmers
nk = [k * c for k, c in enumerate(a)] # number of kmers
cndk = [0] * nkf # cumulative number of distinct kmers
cnk = [0] * nkf # cumulative number of kmers
for kf in range(1, nkf):
cndk[kf] = cndk[kf - 1] + 0.5 * (ndk[kf - 1] + ndk[kf])
cnk[kf] = cnk[kf - 1] + 0.5 * (nk[kf - 1] + nk[kf])
# Separate kmer spectrum in 5 regions based on the kf
# 1 ... kf_min1 : bad kmers with low frequency
# kf_min1 ... kf_min2 : good kmers CN = 1/2 (SNPs)
# kf_min2 ... kf_min3 : good kmers CN = 1
# kf_min3 ... kf_hi : good kmers CN > 1 (repetitive)
# kf_hi ... inf : bad kmers with high frequency
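        # Once these boundaries are known, the estimates computed below reduce to
        #   C_k (kmer coverage) = nk_good_uniq / ndk_good_uniq
        #   genome size         = (nk_total - nk_bad_low_kf - nk_bad_high_kf) / C_k
        #   base coverage       = nk_total / genome size
        # e.g. 1.0e9 good unique kmers over 2.0e7 distinct ones give C_k = 50,
        # so 1.05e9 usable kmers imply a ~21 Mb genome at roughly 50x coverage.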
# min1: find first minimum
_kf_min1 = 10
while _kf_min1 - 1 >= 2 and nk[_kf_min1 - 1] < nk[_kf_min1]:
_kf_min1 -= 1
while _kf_min1 <= kf_ceil and nk[_kf_min1 + 1] < nk[_kf_min1]:
_kf_min1 += 1
# max2: find absolute maximum mx2 above first minimum min1
_kf_max2 = _kf_min1
for kf in range(_kf_min1 + 1, int(0.8 * kf_ceil)):
if nk[kf] > nk[_kf_max2]:
_kf_max2 = kf
# max2: resetting max2 for cases of very high polymorphism
if ploidy == 2:
            ndk_half = ndk[_kf_max2 // 2]
ndk_double = ndk[_kf_max2 * 2]
if ndk_double > ndk_half:
_kf_max2 *= 2
# max1: SNPs local maximum max1 as half global maximum max2
        _kf_max1 = _kf_max2 // 2  # floor division: kf cutoffs are reused as list indices
# min2: SNPs local minimum min2 between max1 and max2
_kf_min2 = (
_kf_max1
* (2 * ndk[_kf_max1] + ndk[_kf_max2])
            // (ndk[_kf_max1] + ndk[_kf_max2])
)
# min1: refine between min1 and max2/2
for kf in range(_kf_min1 + 1, _kf_max1):
if nk[kf] < nk[_kf_min1]:
_kf_min1 = kf
# min3: not a minimum, really. upper edge of main peak
        _kf_min3 = _kf_max2 * 3 // 2
print("kfs:", _kf_min1, _kf_max1, _kf_min2, _kf_max2, _kf_min3, file=sys.stderr)
self.min1 = _kf_min1
self.max1 = _kf_max1
self.min2 = _kf_min2
self.max2 = _kf_max2
self.min3 = _kf_min3
# Define maximum kf above which we neglect data
_kf_hi = (
_kf_max2 * sqrt(4 * ndk[2 * _kf_max2] * _kf_max2)
if 2 * _kf_max2 < len(ndk)
else _kf_max2 * sqrt(4 * ndk[len(ndk) - 1] * _kf_max2)
)
_kf_hi = int(_kf_hi)
if _kf_hi > kf_ceil:
_kf_hi = kf_ceil
_nk_total = cnk[len(cnk) - 1]
_nk_bad_low_kf = cnk[_kf_min1]
_nk_good_uniq = cnk[_kf_min3] - cnk[_kf_min2]
_nk_bad_high_kf = _nk_total - cnk[_kf_hi]
_ndk_good_snp = cndk[_kf_min2] - cndk[_kf_min1]
_ndk_good_uniq = cndk[_kf_min3] - cndk[_kf_min2]
# kmer coverage C_k
_kf_ave_uniq = _nk_good_uniq * 1.0 / _ndk_good_uniq
_genome_size = (_nk_total - _nk_bad_low_kf - _nk_bad_high_kf) / _kf_ave_uniq
_genome_size_unique = _ndk_good_uniq + _ndk_good_snp / 2
_genome_size_repetitive = _genome_size - _genome_size_unique
_coverage = _nk_total / _genome_size if _genome_size else 0
# SNP rate estimation, assumes uniform distribution of SNPs over the
# genome and accounts for the reduction in SNP kmer counts when
# polymorphism is very high
if ploidy == 2:
_d_SNP = (
1.0 / (1.0 - (1.0 - 0.5 * _ndk_good_snp / _genome_size) ** (1.0 / K))
if _ndk_good_snp > 0
else 1000000
)
G = int(_genome_size)
G1 = int(_genome_size_unique)
GR = int(_genome_size_repetitive)
coverage = int(_coverage)
m = "Kmer (K={0}) Spectrum Analysis\n".format(K)
m += "Genome size estimate = {0}\n".format(thousands(G))
m += "Genome size estimate CN = 1 = {0} ({1})\n".format(
thousands(G1), percentage(G1, G)
)
m += "Genome size estimate CN > 1 = {0} ({1})\n".format(
thousands(GR), percentage(GR, G)
)
m += "Coverage estimate: {0} x\n".format(coverage)
self.repetitive = "Repeats: {0} percent".format(GR * 100 / G)
if ploidy == 2:
d_SNP = int(_d_SNP)
self.snprate = "SNP rate ~= 1/{0}".format(d_SNP)
else:
self.snprate = "SNP rate not computed (Ploidy = {0})".format(ploidy)
m += self.snprate + "\n"
self.genomesize = int(round(self.totalKmers * 1.0 / self.max2))
print(m, file=sys.stderr)
class KMCComplex(object):
def __init__(self, indices):
self.indices = indices
def write(self, outfile, filename="stdout", action="union"):
assert action in ("union", "intersect")
op = " + sum " if action == "union" else " * "
fw = must_open(filename, "w")
print("INPUT:", file=fw)
ss = []
pad = len(str(len(self.indices)))
for i, e in enumerate(self.indices):
s = "s{0:0{1}d}".format(i + 1, pad)
ss.append(s)
print("{} = {}".format(s, e.rsplit(".", 1)[0]), file=fw)
print("OUTPUT:", file=fw)
print("{} = {}".format(outfile, op.join(ss)), file=fw)
fw.close()
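# For reference, KMCComplex.write(outfile, filename, action) emits a small
# script of the form shown below (database names are hypothetical); it is
# intended to be fed to kmc_tools' "complex" operation:
#
#   INPUT:
#   s1 = sampleA.ms21
#   s2 = sampleB.ms21
#   OUTPUT:
#   results = s1 + sum s2      <- "union"; "intersect" joins with " * " instead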
def main():
actions = (
# K-mer counting
("jellyfish", "count kmers using `jellyfish`"),
("meryl", "count kmers using `meryl`"),
("kmc", "count kmers using `kmc`"),
("kmcop", "intersect or union kmc indices"),
("entropy", "calculate entropy for kmers from kmc dump"),
("bed", "map kmers on FASTA"),
# K-mer histogram
("histogram", "plot the histogram based on meryl K-mer distribution"),
("multihistogram", "plot histogram across a set of K-mer sizes"),
        # These form a pipeline to count K-mers for a given FASTA seq
("dump", "convert FASTA sequences to list of K-mers"),
("bin", "serialize counts to bitarrays"),
("bincount", "count K-mers in the bin"),
("count", "run dump - jellyfish - bin - bincount in serial"),
("logodds", "compute log likelihood between two db"),
("model", "model kmer distribution given error rate"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def entropy_score(kmer):
"""
Schmieder and Edwards. Quality control and preprocessing of metagenomic datasets. (2011) Bioinformatics
https://academic.oup.com/bioinformatics/article/27/6/863/236283/Quality-control-and-preprocessing-of-metagenomic
"""
l = len(kmer) - 2
k = l if l < 64 else 64
counts = defaultdict(int)
for i in range(l):
trinuc = kmer[i : i + 3]
counts[trinuc] += 1
logk = math.log(k)
res = 0
for k, v in counts.items():
f = v * 1.0 / l
res += f * math.log(f) / logk
return res * -100
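# Worked example (hand-checked against the code above): for the 8-mer
# "AAAAAAAA" there are l = 6 trinucleotide windows, all "AAA", so f = 1 and the
# score is -100 * (1 * log(1) / log(6)) = 0 (minimal complexity). A kmer whose
# six windows are all distinct scores -100 * 6 * (1/6) * log(1/6) / log(6) = 100.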
def entropy(args):
"""
%prog entropy kmc_dump.out
kmc_dump.out contains two columns:
AAAAAAAAAAAGAAGAAAGAAA 34
"""
p = OptionParser(entropy.__doc__)
p.add_option(
"--threshold", default=0, type="int", help="Complexity needs to be above"
)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(kmc_out,) = args
fp = open(kmc_out)
for row in fp:
kmer, count = row.split()
score = entropy_score(kmer)
if score >= opts.threshold:
print(" ".join((kmer, count, "{:.2f}".format(score))))
def bed(args):
"""
%prog bed fastafile kmer.dump.txt
Map kmers on FASTA.
"""
from jcvi.formats.fasta import rc, parse_fasta
p = OptionParser(bed.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, dumpfile = args
fp = open(dumpfile)
KMERS = set()
for row in fp:
kmer = row.split()[0]
kmer_rc = rc(kmer)
KMERS.add(kmer)
KMERS.add(kmer_rc)
K = len(kmer)
logging.debug("Imported {} {}-mers".format(len(KMERS), K))
for name, seq in parse_fasta(fastafile):
name = name.split()[0]
        for i in range(len(seq) - K + 1):  # + 1 so the final K-mer is included
if i % 5000000 == 0:
print("{}:{}".format(name, i), file=sys.stderr)
kmer = seq[i : i + K]
if kmer in KMERS:
print("\t".join(str(x) for x in (name, i, i + K, kmer)))
def kmcop(args):
"""
%prog kmcop *.kmc_suf
Intersect or union kmc indices.
"""
p = OptionParser(kmcop.__doc__)
p.add_option(
"--action", choices=("union", "intersect"), default="union", help="Action"
)
p.add_option("-o", default="results", help="Output name")
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
indices = args
ku = KMCComplex(indices)
ku.write(opts.o, action=opts.action)
def kmc(args):
"""
%prog kmc folder
Run kmc3 on Illumina reads.
"""
p = OptionParser(kmc.__doc__)
p.add_option("-k", default=21, type="int", help="Kmer size")
p.add_option(
"--ci", default=2, type="int", help="Exclude kmers with less than ci counts"
)
p.add_option("--cs", default=2, type="int", help="Maximal value of a counter")
p.add_option(
"--cx", default=None, type="int", help="Exclude kmers with more than cx counts"
)
p.add_option(
"--single",
default=False,
action="store_true",
help="Input is single-end data, only one FASTQ/FASTA",
)
p.add_option(
"--fasta",
default=False,
action="store_true",
help="Input is FASTA instead of FASTQ",
)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(folder,) = args
K = opts.k
n = 1 if opts.single else 2
pattern = (
"*.fa,*.fa.gz,*.fasta,*.fasta.gz"
if opts.fasta
else "*.fq,*.fq.gz,*.fastq,*.fastq.gz"
)
mm = MakeManager()
for p, pf in iter_project(folder, pattern=pattern, n=n, commonprefix=False):
pf = pf.split("_")[0] + ".ms{}".format(K)
infiles = pf + ".infiles"
fw = open(infiles, "w")
print("\n".join(p), file=fw)
fw.close()
cmd = "kmc -k{} -m64 -t{}".format(K, opts.cpus)
cmd += " -ci{} -cs{}".format(opts.ci, opts.cs)
if opts.cx:
cmd += " -cx{}".format(opts.cx)
if opts.fasta:
cmd += " -fm"
cmd += " @{} {} .".format(infiles, pf)
outfile = pf + ".kmc_suf"
mm.add(p, outfile, cmd)
mm.write()
def meryl(args):
"""
%prog meryl folder
Run meryl on Illumina reads.
"""
p = OptionParser(meryl.__doc__)
p.add_option("-k", default=19, type="int", help="Kmer size")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(folder,) = args
K = opts.k
cpus = opts.cpus
mm = MakeManager()
for p, pf in iter_project(folder):
cmds = []
mss = []
for i, ip in enumerate(p):
ms = "{}{}.ms{}".format(pf, i + 1, K)
mss.append(ms)
cmd = "meryl -B -C -m {} -threads {}".format(K, cpus)
cmd += " -s {} -o {}".format(ip, ms)
cmds.append(cmd)
ams, bms = mss
pms = "{}.ms{}".format(pf, K)
cmd = "meryl -M add -s {} -s {} -o {}".format(ams, bms, pms)
cmds.append(cmd)
cmd = "rm -f {}.mcdat {}.mcidx {}.mcdat {}.mcidx".format(ams, ams, bms, bms)
cmds.append(cmd)
mm.add(p, pms + ".mcdat", cmds)
mm.write()
def model(args):
"""
%prog model erate
Model kmer distribution given error rate. See derivation in FIONA paper:
<http://bioinformatics.oxfordjournals.org/content/30/17/i356.full>
"""
from scipy.stats import binom, poisson
p = OptionParser(model.__doc__)
p.add_option("-k", default=23, type="int", help="Kmer size")
p.add_option("--cov", default=50, type="int", help="Expected coverage")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(erate,) = args
erate = float(erate)
cov = opts.cov
k = opts.k
xy = []
# Range include c although it is unclear what it means to have c=0
for c in range(0, cov * 2 + 1):
Prob_Yk = 0
for i in range(k + 1):
# Probability of having exactly i errors
pi_i = binom.pmf(i, k, erate)
# Expected coverage of kmer with exactly i errors
mu_i = cov * (erate / 3) ** i * (1 - erate) ** (k - i)
# Probability of seeing coverage of c
Prob_Yk_i = poisson.pmf(c, mu_i)
# Sum i over 0, 1, ... up to k errors
Prob_Yk += pi_i * Prob_Yk_i
xy.append((c, Prob_Yk))
x, y = zip(*xy)
asciiplot(x, y, title="Model")
def logodds(args):
"""
%prog logodds cnt1 cnt2
Compute log likelihood between two db.
"""
from math import log
from jcvi.formats.base import DictFile
p = OptionParser(logodds.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
cnt1, cnt2 = args
d = DictFile(cnt2)
fp = open(cnt1)
for row in fp:
scf, c1 = row.split()
c2 = d[scf]
c1, c2 = float(c1), float(c2)
c1 += 1
c2 += 1
score = int(100 * (log(c1) - log(c2)))
print("{0}\t{1}".format(scf, score))
def get_K(jfdb):
"""
Infer K from jellyfish db.
"""
j = jfdb.rsplit("_", 1)[0].rsplit("-", 1)[-1]
assert j[0] == "K"
return int(j[1:])
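# Added usage note (hedged): get_K() only parses the database file name, so it
# assumes the "<prefix>-K<k>" naming convention used by jellyfish() below,
# optionally followed by a "_<n>" suffix. The demo function is illustrative only.
def _demo_get_K():
    assert get_K("jf-K23") == 23
    assert get_K("jf-K19_0") == 19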
def count(args):
"""
%prog count fastafile jf.db
    Run dump - jellyfish - bin - bincount in sequence.
"""
from bitarray import bitarray
p = OptionParser(count.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, jfdb = args
K = get_K(jfdb)
cmd = "jellyfish query {0} -C | cut -d' ' -f 2".format(jfdb)
t = must_open("tmp", "w")
proc = Popen(cmd, stdin=PIPE, stdout=t)
t.flush()
f = Fasta(fastafile, lazy=True)
for name, rec in f.iteritems_ordered():
kmers = list(make_kmers(rec.seq, K))
print("\n".join(kmers), file=proc.stdin)
proc.stdin.close()
logging.debug(cmd)
proc.wait()
a = bitarray()
binfile = ".".join((fastafile, jfdb, "bin"))
    fw = open(binfile, "wb")  # bitarray.tofile() requires a binary-mode file handle
t.seek(0)
for row in t:
c = row.strip()
a.append(int(c))
a.tofile(fw)
logging.debug("Serialize {0} bits to `{1}`.".format(len(a), binfile))
fw.close()
sh("rm {0}".format(t.name))
logging.debug(
"Shared K-mers (K={0}) between `{1}` and `{2}` written to `{3}`.".format(
K, fastafile, jfdb, binfile
)
)
cntfile = ".".join((fastafile, jfdb, "cnt"))
bincount([fastafile, binfile, "-o", cntfile, "-K {0}".format(K)])
logging.debug("Shared K-mer counts written to `{0}`.".format(cntfile))
def bincount(args):
"""
%prog bincount fastafile binfile
Count K-mers in the bin.
"""
from bitarray import bitarray
from jcvi.formats.sizes import Sizes
p = OptionParser(bincount.__doc__)
p.add_option("-K", default=23, type="int", help="K-mer size")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, binfile = args
K = opts.K
    fp = open(binfile, "rb")  # bitarray.fromfile() requires a binary-mode file handle
a = bitarray()
a.fromfile(fp)
f = Sizes(fastafile)
tsize = 0
fw = must_open(opts.outfile, "w")
for name, seqlen in f.iter_sizes():
ksize = seqlen - K + 1
b = a[tsize : tsize + ksize]
bcount = b.count()
print("\t".join(str(x) for x in (name, bcount)), file=fw)
tsize += ksize
def bin(args):
"""
%prog bin filename filename.bin
Serialize counts to bitarrays.
"""
from bitarray import bitarray
p = OptionParser(bin.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
inp, outp = args
fp = must_open(inp)
    fw = must_open(outp, "wb")  # bitarray.tofile() requires a binary-mode file handle
a = bitarray()
for row in fp:
c = row.split()[-1]
a.append(int(c))
a.tofile(fw)
fw.close()
def make_kmers(seq, K):
seq = str(seq).upper().replace("N", "A")
seqlen = len(seq)
for i in range(seqlen - K + 1):
yield seq[i : i + K]
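# Added usage note (hedged): make_kmers() upper-cases the sequence, replaces N
# with A, and yields every overlapping window of length K, as the demo shows.
def _demo_make_kmers():
    assert list(make_kmers("acgtn", 3)) == ["ACG", "CGT", "GTA"]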
def dump(args):
"""
%prog dump fastafile
Convert FASTA sequences to list of K-mers.
"""
p = OptionParser(dump.__doc__)
p.add_option("-K", default=23, type="int", help="K-mer size")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(fastafile,) = args
K = opts.K
fw = must_open(opts.outfile, "w")
f = Fasta(fastafile, lazy=True)
for name, rec in f.iteritems_ordered():
kmers = list(make_kmers(rec.seq, K))
print("\n".join(kmers), file=fw)
fw.close()
def jellyfish(args):
"""
%prog jellyfish [*.fastq|*.fasta]
Run jellyfish to dump histogram to be used in kmer.histogram().
"""
from jcvi.apps.base import getfilesize
from jcvi.utils.cbook import human_size
p = OptionParser(jellyfish.__doc__)
p.add_option("-K", default=23, type="int", help="K-mer size")
p.add_option(
"--coverage", default=40, type="int", help="Expected sequence coverage",
)
p.add_option("--prefix", default="jf", help="Database prefix")
p.add_option(
"--nohist", default=False, action="store_true", help="Do not print histogram",
)
p.set_home("jellyfish")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fastqfiles = args
K = opts.K
coverage = opts.coverage
totalfilesize = sum(getfilesize(x) for x in fastqfiles)
fq = fastqfiles[0]
pf = opts.prefix
gzip = fq.endswith(".gz")
hashsize = totalfilesize / coverage
logging.debug(
"Total file size: {0}, hashsize (-s): {1}".format(
human_size(totalfilesize, a_kilobyte_is_1024_bytes=True), hashsize
)
)
jfpf = "{0}-K{1}".format(pf, K)
jfdb = jfpf
fastqfiles = " ".join(fastqfiles)
jfcmd = op.join(opts.jellyfish_home, "jellyfish")
cmd = jfcmd
cmd += " count -t {0} -C -o {1}".format(opts.cpus, jfpf)
cmd += " -s {0} -m {1}".format(hashsize, K)
if gzip:
cmd = "gzip -dc {0} | ".format(fastqfiles) + cmd + " /dev/fd/0"
else:
cmd += " " + fastqfiles
if need_update(fastqfiles, jfdb):
sh(cmd)
if opts.nohist:
return
jfhisto = jfpf + ".histogram"
cmd = jfcmd + " histo -t 64 {0} -o {1}".format(jfdb, jfhisto)
if need_update(jfdb, jfhisto):
sh(cmd)
def merylhistogram(merylfile):
"""
    Run meryl to dump histogram to be used in kmer.histogram(). The merylfile
    is the file ending in .mcidx or .mcdat.
"""
pf, sf = op.splitext(merylfile)
outfile = pf + ".histogram"
if need_update(merylfile, outfile):
cmd = "meryl -Dh -s {0}".format(pf)
sh(cmd, outfile=outfile)
return outfile
def multihistogram(args):
"""
%prog multihistogram *.histogram species
    Plot the histogram based on a set of K-mer histograms. The method follows
    Star et al. (Atlantic cod genome paper).
"""
p = OptionParser(multihistogram.__doc__)
p.add_option("--kmin", default=15, type="int", help="Minimum K-mer size, inclusive")
p.add_option("--kmax", default=30, type="int", help="Maximum K-mer size, inclusive")
p.add_option("--vmin", default=2, type="int", help="Minimum value, inclusive")
p.add_option("--vmax", default=100, type="int", help="Maximum value, inclusive")
opts, args, iopts = p.set_image_options(args, figsize="10x5", dpi=300)
if len(args) < 1:
sys.exit(not p.print_help())
histfiles = args[:-1]
species = args[-1]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
A = fig.add_axes([0.08, 0.12, 0.38, 0.76])
B = fig.add_axes([0.58, 0.12, 0.38, 0.76])
lines = []
legends = []
genomesizes = []
for histfile in histfiles:
ks = KmerSpectrum(histfile)
x, y = ks.get_xy(opts.vmin, opts.vmax)
K = get_number(op.basename(histfile).split(".")[0].split("-")[-1])
if not opts.kmin <= K <= opts.kmax:
continue
(line,) = A.plot(x, y, "-", lw=1)
lines.append(line)
legends.append("K = {0}".format(K))
ks.analyze(K=K)
genomesizes.append((K, ks.genomesize / 1e6))
leg = A.legend(lines, legends, shadow=True, fancybox=True)
leg.get_frame().set_alpha(0.5)
title = "{0} genome K-mer histogram".format(species)
A.set_title(markup(title))
xlabel, ylabel = "Coverage (X)", "Counts"
A.set_xlabel(xlabel)
A.set_ylabel(ylabel)
set_human_axis(A)
title = "{0} genome size estimate".format(species)
B.set_title(markup(title))
x, y = zip(*genomesizes)
B.plot(x, y, "ko", mfc="w")
t = np.linspace(opts.kmin - 0.5, opts.kmax + 0.5, 100)
p = np.poly1d(np.polyfit(x, y, 2))
B.plot(t, p(t), "r:")
xlabel, ylabel = "K-mer size", "Estimated genome size (Mb)"
B.set_xlabel(xlabel)
B.set_ylabel(ylabel)
set_ticklabels_arial(B)
labels = ((0.04, 0.96, "A"), (0.54, 0.96, "B"))
panel_labels(root, labels)
normalize_axes(root)
imagename = species + ".multiK.pdf"
savefig(imagename, dpi=iopts.dpi, iopts=iopts)
def histogram(args):
"""
%prog histogram meryl.histogram species K
    Plot the histogram based on the meryl K-mer distribution; species and K are
    only used to annotate the graphic.
"""
p = OptionParser(histogram.__doc__)
p.add_option(
"--vmin", dest="vmin", default=1, type="int", help="minimum value, inclusive",
)
p.add_option(
"--vmax", dest="vmax", default=100, type="int", help="maximum value, inclusive",
)
p.add_option(
"--pdf",
default=False,
action="store_true",
help="Print PDF instead of ASCII plot",
)
p.add_option(
"--coverage", default=0, type="int", help="Kmer coverage [default: auto]"
)
p.add_option(
"--nopeaks",
default=False,
action="store_true",
help="Do not annotate K-mer peaks",
)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
histfile, species, N = args
ascii = not opts.pdf
peaks = not opts.nopeaks
N = int(N)
if histfile.rsplit(".", 1)[-1] in ("mcdat", "mcidx"):
logging.debug("CA kmer index found")
histfile = merylhistogram(histfile)
ks = KmerSpectrum(histfile)
ks.analyze(K=N)
Total_Kmers = int(ks.totalKmers)
coverage = opts.coverage
Kmer_coverage = ks.max2 if not coverage else coverage
Genome_size = int(round(Total_Kmers * 1.0 / Kmer_coverage))
Total_Kmers_msg = "Total {0}-mers: {1}".format(N, thousands(Total_Kmers))
Kmer_coverage_msg = "{0}-mer coverage: {1}".format(N, Kmer_coverage)
Genome_size_msg = "Estimated genome size: {0:.1f}Mb".format(Genome_size / 1e6)
Repetitive_msg = ks.repetitive
SNPrate_msg = ks.snprate
for msg in (Total_Kmers_msg, Kmer_coverage_msg, Genome_size_msg):
print(msg, file=sys.stderr)
x, y = ks.get_xy(opts.vmin, opts.vmax)
title = "{0} {1}-mer histogram".format(species, N)
if ascii:
asciiplot(x, y, title=title)
return Genome_size
plt.figure(1, (6, 6))
plt.plot(x, y, "g-", lw=2, alpha=0.5)
ax = plt.gca()
if peaks:
t = (ks.min1, ks.max1, ks.min2, ks.max2, ks.min3)
tcounts = [(x, y) for x, y in ks.counts if x in t]
if tcounts:
x, y = zip(*tcounts)
tcounts = dict(tcounts)
plt.plot(x, y, "ko", lw=2, mec="k", mfc="w")
ax.text(ks.max1, tcounts[ks.max1], "SNP peak", va="top")
ax.text(ks.max2, tcounts[ks.max2], "Main peak")
messages = [
Total_Kmers_msg,
Kmer_coverage_msg,
Genome_size_msg,
Repetitive_msg,
SNPrate_msg,
]
write_messages(ax, messages)
ymin, ymax = ax.get_ylim()
ymax = ymax * 7 / 6
ax.set_title(markup(title))
ax.set_ylim((ymin, ymax))
xlabel, ylabel = "Coverage (X)", "Counts"
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
set_human_axis(ax)
imagename = histfile.split(".")[0] + ".pdf"
savefig(imagename, dpi=100)
return Genome_size
if __name__ == "__main__":
main()
| 29.156021 | 116 | 0.565364 | ["BSD-2-Clause"] | lufuhao/jcvi | jcvi/assembly/kmer.py | 27,844 | Python |
from contextlib import contextmanager
import sys
import importlib
from .base_evaluator import BaseEvaluator
class ModuleEvaluator(BaseEvaluator):
    # Evaluator that delegates the whole evaluation flow to a user-provided module
    # class, resolved dynamically from the 'module'/'python_path' config entries.
def __init__(self, internal_module):
super().__init__()
self._internal_module = internal_module
@classmethod
def from_configs(cls, config):
module = config['module']
module_config = config.get('module_config')
python_path = config.get('python_path')
return cls(load_module(module, python_path).from_configs(module_config))
def process_dataset(self, stored_predictions, progress_reporter, *args, **kwargs):
self._internal_module.process_dataset(stored_predictions, progress_reporter, *args, **kwargs)
def compute_metrics(self, print_results=True, ignore_results_formatting=False):
return self._internal_module.compute_metrics(print_results, ignore_results_formatting)
def print_metrics_results(self, ignore_results_formatting=False):
self._internal_module.print_metrics(ignore_results_formatting)
def extract_metrics_results(self, print_results=True, ignore_results_formatting=False):
return self._internal_module.extract_metrics_results(print_results, ignore_results_formatting)
def release(self):
self._internal_module.release()
del self._internal_module
def reset(self):
self._internal_module.reset()
@staticmethod
def get_processing_info(config):
module = config['module']
python_path = config.get('python_path')
return load_module(module, python_path).get_processing_info(config)
def load_module(model_cls, python_path=None):
module_parts = model_cls.split(".")
model_cls = module_parts[-1]
model_path = ".".join(module_parts[:-1])
with append_to_path(python_path):
module_cls = importlib.import_module(model_path).__getattribute__(model_cls)
return module_cls
@contextmanager
def append_to_path(path):
if path:
sys.path.append(path)
yield
if path:
sys.path.remove(path)
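# Added usage sketch (hedged): the dotted path and python_path below are
# placeholders, not part of any real configuration. load_module() resolves
# "package.module.ClassName" to the class object, temporarily extending
# sys.path via append_to_path() while the import runs.
def _example_load_custom_evaluator():
    evaluator_cls = load_module(
        "my_package.my_evaluator.MyEvaluator", python_path="/opt/custom_modules"
    )
    return evaluator_cls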
| 32.34375 | 102 | 0.735266 | ["Apache-2.0"] | AnthonyQuantum/open_model_zoo | tools/accuracy_checker/accuracy_checker/evaluators/module_evaluator.py | 2,070 | Python |
import serial
import time
import datetime
import logging
from recordtype import recordtype
from enum import IntEnum, unique
from datetime import timedelta
import _thread
import readchar
import os
import sys
import threading
from collections import deque
import shutil
import binascii
START_CHAR = '02'
#START_BUF = '2a2a2a' # this is not really a buffer
BAUD_RATE = 1000000 #921600 #1000000
SERIAL_PORT = "/dev/ttyS0" #"/dev/ttyACM0" #"/dev/ttyS0"
if len(sys.argv)>1:
SERIAL_PORT=sys.argv[1]
if len(sys.argv)>2:
BAUD_RATE=int(sys.argv[2])
logfolderpath = os.path.dirname(os.path.realpath(__file__))+'/log/'
if not os.path.exists(logfolderpath):
    try:
        os.mkdir(logfolderpath)
        # net (the Network instance) is not constructed yet at this point,
        # so plain print() is used instead of net.addPrint()
        print("Log directory not found.")
        print("%s created" % logfolderpath)
    except Exception as e:
        print("Can't get access to the log folder %s." % logfolderpath)
        print("Exception: %s" % str(e))
endchar = 0x0a
SER_END_CHAR = endchar.to_bytes(1, byteorder='big', signed=False)
SINGLE_NODE_REPORT_SIZE = 9 #must be the same as in common/inc/contraints.h
MAX_REPORTS_PER_PACKET = 22 #must be the same as in common/inc/contraints.h
MAX_PACKET_PAYLOAD = SINGLE_NODE_REPORT_SIZE*MAX_REPORTS_PER_PACKET
MAX_BEACONS = 100
MAX_ONGOING_SEQUENCES = 10
MAX_TRICKLE_C_VALUE = 256
ser = serial.Serial()
ser.port = SERIAL_PORT
ser.baudrate = BAUD_RATE
ser.timeout = 100
MessageSequence = recordtype("MessageSequence", "timestamp,nodeid,lastPktnum,sequenceSize,datacounter,"
"datalist,latestTime")
messageSequenceList = []
ContactData = recordtype("ContactData", "nodeid lastRSSI maxRSSI pktCounter")
firstMessageInSeq = False
NodeDropInfo = recordtype("NodeDropInfo", "nodeid, lastpkt")
nodeDropInfoList = []
ActiveNodes = []
deliverCounter = 0
dropCounter = 0
headerDropCounter = 0
defragmentationCounter = 0
# Timeout variables
previousTimeTimeout = 0
currentTime = 0
timeoutInterval = 300
timeoutTime = 60
previousTimePing = 0
pingInterval = 115
btPreviousTime = 0
btToggleInterval = 1800
btToggleBool = True
NODE_TIMEOUT_S = 60*10
NETWORK_PERIODIC_CHECK_INTERVAL = 2
CLEAR_CONSOLE = True
# Loggers
printVerbosity = 10
LOG_LEVEL = logging.DEBUG
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', datefmt='%Y/%m/%d %H:%M:%S')
timestr = time.strftime("%Y%m%d-%H%M%S")
nameConsoleLog = "consoleLogger"
filenameConsoleLog = timestr + "-console.log"
fullpathConsoleLog = logfolderpath+filenameConsoleLog
consolloghandler = logging.FileHandler(fullpathConsoleLog)
consolloghandler.setFormatter(formatter)
consoleLogger = logging.getLogger(nameConsoleLog)
consoleLogger.setLevel(LOG_LEVEL)
consoleLogger.addHandler(consolloghandler)
nameUARTLog = "UARTLogger"
filenameUARTLog = timestr + "-UART.log"
fullpathUARTLog = logfolderpath+filenameUARTLog
handler = logging.FileHandler(fullpathUARTLog)
handler.setFormatter(formatter)
UARTLogger = logging.getLogger(nameUARTLog)
UARTLogger.setLevel(LOG_LEVEL)
UARTLogger.addHandler(handler)
nameContactLog = "contactLogger"
filenameContactLog = timestr + "-contact.log"
fullpathContactLog = logfolderpath+filenameContactLog
contactlog_handler = logging.FileHandler(fullpathContactLog)
contact_formatter = logging.Formatter('%(message)s')
contactlog_handler.setFormatter(contact_formatter)
contactLogger = logging.getLogger(nameContactLog)
contactLogger.setLevel(LOG_LEVEL)
contactLogger.addHandler(contactlog_handler)
nameErrorLog = "errorLogger"
filenameErrorLog = timestr + "-errorLogger.log"
fullpathErrorLog = logfolderpath+filenameErrorLog
errorlog_handler = logging.FileHandler(fullpathErrorLog)
errorlog_formatter= logging.Formatter('%(message)s')
errorlog_handler.setFormatter(errorlog_formatter)
errorLogger = logging.getLogger(nameErrorLog)
errorLogger.setLevel(LOG_LEVEL)
errorLogger.addHandler(errorlog_handler)
class Network(object):
def __init__(self):
self.__nodes = []
threading.Timer(NETWORK_PERIODIC_CHECK_INTERVAL,self.__periodicNetworkCheck).start()
self.__consoleBuffer = deque()
self.console_queue_lock = threading.Lock()
self.__lastNetworkPrint=float(time.time())
self.__netMaxTrickle=0
self.__netMinTrickle=MAX_TRICKLE_C_VALUE
self.__expTrickle=0
self.__uartErrors=0
self.__trickleQueue = deque()
self.showHelp=False
def getNode(self, label):
for n in self.__nodes:
if n.name == label:
return n
return None
def addNode(self,n):
self.__nodes.append(n)
if len(self.__nodes) == 1:
self.__expTrickle=n.lastTrickleCount
def removeNode(self,label):
n = self.getNode(label)
if n != None:
self.__nodes.remove(n)
def removeNode(self,n):
self.__nodes.remove(n)
def __trickleCheck(self):
for n in self.__nodes:
#with n.lock:
#self.addPrint("Node "+ n.name + "lastTrickleCount: " + str(n.lastTrickleCount))
if n==self.__nodes[0]:
self.__netMaxTrickle = n.lastTrickleCount
self.__netMinTrickle = n.lastTrickleCount
if n.lastTrickleCount > self.__netMaxTrickle or ( n.lastTrickleCount == 0 and self.__netMaxTrickle>=(MAX_TRICKLE_C_VALUE-1) ):
self.__netMaxTrickle = n.lastTrickleCount
if n.lastTrickleCount < self.__netMinTrickle or ( n.lastTrickleCount >= (MAX_TRICKLE_C_VALUE-1) and self.__netMinTrickle==0 ):
self.__netMinTrickle = n.lastTrickleCount #todo: it seems that this does't work. The __netMinTrickle goes up before all the nodes have sent their new values, sometimes it is __netMaxTrickle that doesn't update as soon the first new value arrives..
if self.__netMinTrickle == self.__netMaxTrickle and self.__netMaxTrickle == self.__expTrickle:
return True
else:
return False
def sendNewTrickle(self, message,forced=False):
if forced:
self.__trickleQueue.clear()
send_serial_msg(message)
net.addPrint("[APPLICATION] Trickle message: 0x {0} force send".format((message).hex()))
else:
if self.__trickleCheck():
self.__expTrickle = (self.__netMaxTrickle + 1)%MAX_TRICKLE_C_VALUE
send_serial_msg(message)
net.addPrint("[APPLICATION] Trickle message: 0x {0} sent".format((message).hex()))
else:
self.__trickleQueue.append(message)
self.addPrint("[APPLICATION] Trickle message: 0x {0} queued".format((message).hex()))
def __periodicNetworkCheck(self):
threading.Timer(NETWORK_PERIODIC_CHECK_INTERVAL,self.__periodicNetworkCheck).start()
nodes_removed = False;
for n in self.__nodes:
if n.getLastMessageElapsedTime() > NODE_TIMEOUT_S and n.online:
if printVerbosity > 2:
self.addPrint("[APPLICATION] Node "+ n.name +" timed out. Elasped time: %.2f" %n.getLastMessageElapsedTime() +" Removing it from the network.")
#self.removeNode(n)
n.online=False
nodes_removed = True
#self.__trickleCheck()
if nodes_removed:
self.__trickleCheck()
self.printNetworkStatus()
def printNetworkStatus(self):
if(float(time.time()) - self.__lastNetworkPrint < 0.2): #this is to avoid too fast call to this function
return
__lastNetworkPrint = float(time.time())
netSize = len(self.__nodes)
if CLEAR_CONSOLE:
cls()
print("|------------------------------------------------------------------------------|")
print("|--------------------------| Network size %3s errors: %4s |------------------|" %(str(netSize), self.__uartErrors))
print("|------------| Trickle: min %3d; max %3d; exp %3d; queue size %2d |-------------|" %(self.__netMinTrickle, self.__netMaxTrickle, self.__expTrickle, len(self.__trickleQueue)))
print("|------------------------------------------------------------------------------|")
print("| NodeID | Battery | Last | Trick | #BT |")
print("| | Volt SoC Capacty Cons Temp | seen[s] | Count | Rep |")
#print("| | | | | |")
for n in self.__nodes:
n.printNodeInfo()
#print("| | | | | |")
print("|------------------------------------------------------------------------------|")
if self.showHelp:
print("| AVAILABLE COMMANDS:")
print("| key command\n|")
print("| 1 request ping\n"
"| 2 enable bluetooth\n"
"| 3 disable bluetooth\n"
"| 4 bt_def\n"
"| 5 bt_with_params\n"
"| 6 enable battery info\n"
"| 7 disable battery info\n"
"| 8 reset nordic\n"
"| 9 set time between sends\n"
"| >9 set keep alive interval in seconds")
else:
print("| h+enter : Show available commands |")
print("|------------------------------------------------------------------------------|")
print("|------------------| CONSOLE |------------------|")
print("|------------------------------------------------------------------------------|")
terminalSize = shutil.get_terminal_size(([80,20]))
if net.showHelp:
availableLines = terminalSize[1] - (24 + len(self.__nodes))
else:
availableLines = terminalSize[1] - (12 + len(self.__nodes))
while( (len(self.__consoleBuffer) > availableLines) and self.__consoleBuffer):
with self.console_queue_lock:
self.__consoleBuffer.popleft()
with self.console_queue_lock:
for l in self.__consoleBuffer:
print(l)
def processKeepAliveMessage(self, label, trickleCount, batteryVoltage, capacity):
n = self.getNode(label)
if n != None:
n.updateTrickleCount(trickleCount)
n.updateBatteryVoltage(batteryVoltage)
n.updateBatteryCapacity(capacity)
n.online=True
else:
n=Node(label, trickleCount)
n.updateBatteryVoltage(batteryVoltage)
n.updateBatteryCapacity(capacity)
self.addNode(n)
if len(self.__trickleQueue) != 0:
if self.__trickleCheck():
self.__expTrickle = (self.__netMaxTrickle + 1)%MAX_TRICKLE_C_VALUE
message=self.__trickleQueue.popleft()
send_serial_msg(message)
self.addPrint("[APPLICATION] Trickle message: 0x {0} automatically sent".format((message).hex()))
else:
self.__trickleCheck()
self.printNetworkStatus()
def processBatteryDataMessage(self, label, voltage, capacity, soc=None, consumption=None, temperature=None):
n = self.getNode(label)
if n != None:
n.updateBatteryVoltage(voltage)
n.updateBatteryCapacity(capacity)
n.updateBatterySOC(soc)
n.updateBatteryConsumption(consumption)
n.updateBatteryTemperature(temperature)
#else:
# n=Node(label, 0)
# self.addNode(n)
# n.updateBatteryVoltage(batteryVoltage)
def processBTReportMessage(self, label):
n = self.getNode(label)
if n != None:
n.BTReportHandler()
#else:
# n=Node(label, 0)
# self.addNode(n)
# n.BTReportHandler()
def processUARTError(self):
self.__uartErrors=self.__uartErrors+1
def addPrint(self, text):
consoleLogger.info(text)
terminalSize = shutil.get_terminal_size(([80,20]))
if(len(text) > terminalSize[0]):
if(len(text) > 2*terminalSize[0]): #clip the text if it is longer than 2 lines...
text=text[:2*terminalSize[0]]
with self.console_queue_lock:
self.__consoleBuffer.append(text[:terminalSize[0]])
self.__consoleBuffer.append(text[terminalSize[0]:])
else:
with self.console_queue_lock:
self.__consoleBuffer.append(text)
self.printNetworkStatus()
#for l in self.__consoleBuffer:
# print(l)
def resetCounters(self):
for n in self.__nodes:
n.amountOfBTReports=0
self.__trickleCheck()
def resetTrickle(self):
self.__trickleCheck()
self.__expTrickle = self.__netMaxTrickle
class Node(object):
# The class "constructor" - It's actually an initializer
def __init__(self, label):
self.lock = threading.Lock()
self.name = label
self.lastTrickleCount = 0
self.lastMessageTime = float(time.time())
self.online=True
def __init__(self, label, trickleCount):
self.lock = threading.Lock()
self.name = label
self.lastTrickleCount = trickleCount
self.lastMessageTime = float(time.time())
self.batteryVoltage = None
self.batteryCapacity = None
self.batterySoc = None
self.batteryConsumption = None
self.batteryTemperature = None
self.amountOfBTReports = 0
self.online=True
def updateTrickleCount(self,trickleCount):
with self.lock:
self.lastTrickleCount = trickleCount
self.lastMessageTime = float(time.time())
def updateBatteryVoltage(self, batteryVoltage):
with self.lock:
self.batteryVoltage = batteryVoltage
self.lastMessageTime = float(time.time())
def updateBatteryCapacity(self, capacity):
with self.lock:
self.batteryCapacity = capacity
self.lastMessageTime = float(time.time())
def updateBatterySOC(self, soc):
with self.lock:
self.batterySoc = soc
self.lastMessageTime = float(time.time())
def updateBatteryConsumption(self, consumption):
with self.lock:
self.batteryConsumption = consumption
self.lastMessageTime = float(time.time())
def updateBatteryTemperature(self, temperature):
with self.lock:
self.batteryTemperature = temperature
self.lastMessageTime = float(time.time())
def BTReportHandler(self):
with self.lock:
self.amountOfBTReports = self.amountOfBTReports + 1
self.lastMessageTime = float(time.time())
def getLastMessageElapsedTime(self):
now = float(time.time())
return now-self.lastMessageTime
def printNodeInfo(self):
if self.online:
if self.batteryConsumption != None:
print("| %3s | %3.2fV %3.0f%% %4.0fmAh %6.1fmA %5.1f° | %3.0f | %3d |%5d |" % (str(self.name), self.batteryVoltage, self.batterySoc, self.batteryCapacity, self.batteryConsumption, self.batteryTemperature, self.getLastMessageElapsedTime(), self.lastTrickleCount, self.amountOfBTReports))
else:
print("| %3s | %3.2fV | %3.0f | %3d |%5d |" % (str(self.name), self.batteryVoltage, self.getLastMessageElapsedTime(), self.lastTrickleCount, self.amountOfBTReports))
else:
if self.batteryConsumption != None:
print("| %3s * | %3.2fV %3.0f%% %4.0fmAh %6.1fmA %5.1f° | %3.0f | %3d |%5d |" % (str(self.name), self.batteryVoltage, self.batterySoc, self.batteryCapacity, self.batteryConsumption, self.batteryTemperature, self.getLastMessageElapsedTime(), self.lastTrickleCount, self.amountOfBTReports))
else:
print("| %3s * | %3.2fV | %3.0f | %3d |%5d |" % (str(self.name), self.batteryVoltage, self.getLastMessageElapsedTime(), self.lastTrickleCount, self.amountOfBTReports))
@unique
class PacketType(IntEnum):
network_new_sequence = 0x0100
network_active_sequence = 0x0101
network_last_sequence = 0x0102
network_bat_data = 0x0200
network_set_time_between_send = 0x0601
network_request_ping = 0xF000
network_respond_ping = 0xF001
network_keep_alive = 0xF010
nordic_turn_bt_off = 0xF020
nordic_turn_bt_on = 0xF021
nordic_turn_bt_on_w_params = 0xF022
nordic_turn_bt_on_low = 0xF023 #deprecated
nordic_turn_bt_on_def = 0xF024
nordic_turn_bt_on_high = 0xF025 #deprecated
ti_set_batt_info_int = 0xF026
nordic_reset = 0xF027
nordic_ble_tof_enable = 0xF030
ti_set_keep_alive = 0xF801
def cls():
os.system('cls' if os.name=='nt' else 'clear')
def handle_user_input():
ble_tof_enabled=False
while 1:
try:
input_str = input()
if len(input_str)>=2 and input_str[0] == 'f':
forced=True
user_input = int(input_str[1:])
else:
if input_str=='h':
if net.showHelp:
net.showHelp=False
else:
net.showHelp=True
net.printNetworkStatus()
continue
elif input_str=='r':
net.resetCounters()
net.printNetworkStatus()
continue
elif input_str=='t':
net.resetTrickle()
net.printNetworkStatus()
continue
else:
forced=False
user_input = int(input_str)
if user_input == 1:
payload = 233
net.addPrint("[USER_INPUT] Ping request")
net.sendNewTrickle(build_outgoing_serial_message(PacketType.network_request_ping, payload.to_bytes(1, byteorder="big", signed=False)),forced)
elif user_input == 2:
net.addPrint("[USER_INPUT] Turn bluetooth on")
net.sendNewTrickle(build_outgoing_serial_message(PacketType.nordic_turn_bt_on, None),forced)
elif user_input == 3:
net.addPrint("[USER_INPUT] Turn bluetooth off")
net.sendNewTrickle(build_outgoing_serial_message(PacketType.nordic_turn_bt_off, None),forced)
#elif user_input == 4:
# net.addPrint("Turning bt on low")
# appLogger.debug("[SENDING] Enable Bluetooth LOW")
# net.sendNewTrickle(build_outgoing_serial_message(PacketType.nordic_turn_bt_on_low, None),forced)
#elif user_input == 4:
# net.addPrint("[USER_INPUT] Turn bluetooth on with default settings stored on the nodes")
# net.sendNewTrickle(build_outgoing_serial_message(PacketType.nordic_turn_bt_on_def, None),forced)
#elif user_input == 6:
# net.addPrint("Turning bt on high")
# appLogger.debug("[SENDING] Enable Bluetooth HIGH")
# net.sendNewTrickle(build_outgoing_serial_message(PacketType.nordic_turn_bt_on_high, None),forced)
elif user_input == 4:
if ble_tof_enabled:
net.addPrint("[USER_INPUT] disabling ble tof")
ble_tof_enabled=False
else:
net.addPrint("[USER_INPUT] enabling ble tof")
ble_tof_enabled=True
net.sendNewTrickle(build_outgoing_serial_message(PacketType.nordic_ble_tof_enable, ble_tof_enabled.to_bytes(1, byteorder="big", signed=False)),forced)
elif user_input == 5:
SCAN_INTERVAL_MS = 1500
SCAN_WINDOW_MS = 1500
SCAN_TIMEOUT_S = 0
REPORT_TIMEOUT_S = 15
active_scan = 1
scan_interval = int(SCAN_INTERVAL_MS*1000/625)
scan_window = int(SCAN_WINDOW_MS*1000/625)
timeout = int(SCAN_TIMEOUT_S)
report_timeout_ms = int(REPORT_TIMEOUT_S*1000)
bactive_scan = active_scan.to_bytes(1, byteorder="big", signed=False)
bscan_interval = scan_interval.to_bytes(2, byteorder="big", signed=False)
bscan_window = scan_window.to_bytes(2, byteorder="big", signed=False)
btimeout = timeout.to_bytes(2, byteorder="big", signed=False)
breport_timeout_ms = report_timeout_ms.to_bytes(4, byteorder="big", signed=False)
payload = bactive_scan + bscan_interval + bscan_window + btimeout + breport_timeout_ms
net.addPrint("[USER_INPUT] Turn bluetooth on with parameters: scan_int="+str(SCAN_INTERVAL_MS)+"ms, scan_win="+str(SCAN_WINDOW_MS)+"ms, timeout="+str(SCAN_TIMEOUT_S)+"s, report_int="+str(REPORT_TIMEOUT_S)+"s")
net.sendNewTrickle(build_outgoing_serial_message(PacketType.nordic_turn_bt_on_w_params, payload),forced)
elif user_input == 6:
bat_info_interval_s = 90
net.addPrint("[USER_INPUT] Enable battery info with interval: "+str(bat_info_interval_s))
net.sendNewTrickle(build_outgoing_serial_message(PacketType.ti_set_batt_info_int, bat_info_interval_s.to_bytes(1, byteorder="big", signed=False)),forced)
elif user_input == 7:
bat_info_interval_s = 0
net.addPrint("[USER_INPUT] Disable battery info")
net.sendNewTrickle(build_outgoing_serial_message(PacketType.ti_set_batt_info_int, bat_info_interval_s.to_bytes(1, byteorder="big", signed=False)),forced)
elif user_input == 8:
net.addPrint("[USER_INPUT] Reset nordic")
net.sendNewTrickle(build_outgoing_serial_message(PacketType.nordic_reset, None),forced)
elif user_input == 9:
time_between_send_ms = 0
time_between_send = time_between_send_ms.to_bytes(2, byteorder="big", signed=False)
net.addPrint("[USER_INPUT] Set time between sends to "+ str(time_between_send_ms) + "ms")
net.sendNewTrickle(build_outgoing_serial_message(PacketType.network_set_time_between_send, time_between_send),forced)
elif user_input > 9:
interval = user_input
net.addPrint("[USER_INPUT] Set keep alive interval to "+ str(interval) + "s")
net.sendNewTrickle(build_outgoing_serial_message(PacketType.ti_set_keep_alive, interval.to_bytes(1, byteorder="big", signed=False)),forced)
except ValueError:
net.addPrint("[USER_INPUT] Read failed. Read data: "+ input_str)
def build_outgoing_serial_message(pkttype, ser_payload):
payload_size = 0
if ser_payload is not None:
payload_size = len(ser_payload)
packet = payload_size.to_bytes(length=1, byteorder='big', signed=False) + pkttype.to_bytes(length=2, byteorder='big', signed=False)
if ser_payload is not None:
packet=packet+ser_payload
ascii_packet=''.join('%02X'%i for i in packet)
return ascii_packet.encode('utf-8')
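# Added framing example (hedged, not in the original file): a frame is
# <1-byte payload length><2-byte packet type><payload>, re-encoded as upper-case
# hex ASCII; send_serial_msg() appends SER_END_CHAR afterwards. For the ping
# request used in handle_user_input() (type 0xF000, payload 0xE9 = 233) this
# produces b"01F000E9".
def _example_ping_frame():
    payload = (233).to_bytes(1, byteorder="big", signed=False)
    frame = build_outgoing_serial_message(PacketType.network_request_ping, payload)
    assert frame == b"01F000E9"
    return frame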
def send_serial_msg(message):
line = message + SER_END_CHAR
ser.write(line)
UARTLogger.info('[GATEWAY] ' + str(line))
def decode_payload(payload, seqid, size, packettype, pktnum): #TODO: packettype can be removed
#raw_data = "Node {0} ".format(messageSequenceList[seqid].nodeid) + "0x {:04X} ".format(packettype) + "0x {:02X}".format(pktnum) + " 0x "
cur=0
try:
for x in range(round(size / SINGLE_NODE_REPORT_SIZE)):
nid = int(payload[cur:cur+12], 16) #int(ser.read(6).hex(), 16)
cur=cur+12
lastrssi = int(payload[cur:cur+2], 16) #int(ser.read(1).hex(), 16)
cur=cur+2
maxrssi = int(payload[cur:cur+2], 16) #int(ser.read(1).hex(), 16)
cur=cur+2
pktcounter = int(payload[cur:cur+2], 16) #int(ser.read(1).hex(), 16)
cur=cur+2
#net.addPrint("id = {:012X}".format(nid, 8))
contact = ContactData(nid, lastrssi, maxrssi, pktcounter)
messageSequenceList[seqid].datalist.append(contact)
messageSequenceList[seqid].datacounter += SINGLE_NODE_REPORT_SIZE
#raw_data += "{:012X}".format(nid, 8) + '{:02X}'.format(lastrssi) + '{:02X}'.format(maxrssi) + '{:02X}'.format(pktcounter)
except ValueError:
net.addPrint("[Node {0}] Requested to decode more bytes than available. Requested: {1}"
.format(messageSequenceList[seqid].nodeid, size))
#appLogger.warning("[Node {0}] Requested to decode more bytes than available. Requested: {1}"
# .format(messageSequenceList[seqid].nodeid, size))
finally:
#dataLogger.info(raw_data)
return
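# Added layout sketch (hedged): each contact report decoded above occupies
# SINGLE_NODE_REPORT_SIZE (9) bytes, i.e. 18 hex characters: 12 for the node id,
# 2 for the last RSSI, 2 for the max RSSI and 2 for the packet counter. The
# helper below is hypothetical and mirrors the slicing done in decode_payload().
def _parse_single_report(report_hex):
    nid = int(report_hex[0:12], 16)
    lastrssi = int(report_hex[12:14], 16)
    maxrssi = int(report_hex[14:16], 16)
    pktcounter = int(report_hex[16:18], 16)
    return ContactData(nid, lastrssi, maxrssi, pktcounter)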
def log_contact_data(seqid):
seq = messageSequenceList[seqid]
timestamp = seq.timestamp
source = seq.nodeid
logstring = "{0} Node {1} ".format(timestamp, source)
for x in range(len(seq.datalist)):
logstring += "{:012X}".format(seq.datalist[x].nodeid) + '{:02X}'.format(seq.datalist[x].lastRSSI) + '{:02X}'.format(seq.datalist[x].maxRSSI) + '{:02X}'.format(seq.datalist[x].pktCounter)
contactLogger.info(logstring)
def get_sequence_index(nodeid):
for x in range(len(messageSequenceList)):
if messageSequenceList[x].nodeid == nodeid:
return x
return -1
def check_for_packetdrop(nodeid, pktnum):
if pktnum == 0:
return
for x in range(len(nodeDropInfoList)):
if nodeDropInfoList[x].nodeid == nodeid:
global dropCounter
dropCounter += pktnum - nodeDropInfoList[x].lastpkt - 1
#appLogger.debug("[Node {0}] Dropped {1} packet(s). Latest packet: {2}, new packet: {3}"
# .format(nodeid, pktnum - nodeDropInfoList[x].lastpkt - 1, nodeDropInfoList[x].lastpkt,
# pktnum))
            nodeDropInfoList[x].lastpkt = pktnum  # remember the newest packet so the next gap is measured correctly
            return
nodeDropInfoList.append(NodeDropInfo(nodeid, pktnum))
def to_byte(hex_text):
return binascii.unhexlify(hex_text)
_thread.start_new_thread(handle_user_input, ())
net = Network()
net.addPrint("[APPLICATION] Starting...")
net.addPrint("[APPLICATION] Logs are in: %s" %logfolderpath)
if ser.is_open:
net.addPrint("[UART] Serial Port already open! "+ ser.port + " open before initialization... closing first")
ser.close()
time.sleep(1)
startCharErr=False
#startBuffErr=False
otherkindofErr=False
try:
while 1:
if ser.is_open:
try:
bytesWaiting = ser.in_waiting
except Exception as e:
net.addPrint("[UART] Serial Port input exception:"+ str(e))
#appLogger.error("Serial Port input exception: {0}".format(e))
bytesWaiting = 0
ser.close()
time.sleep(1)
continue
try:
if bytesWaiting > 0:
rawline = ser.readline() #can block if no timeout is provided when the port is open
UARTLogger.info('[SINK] ' + str(rawline))
start = rawline[0:1].hex()
if start == START_CHAR:
if startCharErr or otherkindofErr:
net.processUARTError()
startCharErr=False
otherkindofErr=False
line = to_byte(rawline[1:-1])
cursor=0
deliverCounter += 1
# TODO change the way pktnum's are being tracked
nodeid = int.from_bytes(line[cursor:cursor+1], "little", signed=False) #int.from_bytes(ser.read(1), "little", signed=False)
cursor+=1
pkttype = int(line[cursor:cursor+2].hex(),16) #int(ser.read(2).hex(), 16)
cursor+=2
pktnum = int.from_bytes(line[cursor:cursor+1], "little", signed=False) #int.from_bytes(ser.read(1), "little", signed=False)
cursor+=1
if printVerbosity > 0:
net.addPrint("[NODEID " + str(nodeid) + "] pkttype " + hex(pkttype) + ", pktnum " + str(pktnum))
#appLogger.info("[New message] nodeid {0}, pkttype {1} pktnum {2}".format(nodeid, hex(pkttype), pktnum))
check_for_packetdrop(nodeid, pktnum)
#TODO: add here the check for duplicates. Any duplicate should be discarded HERE!!!
if PacketType.network_new_sequence == pkttype:
datalen = int(line[cursor:cursor+2].hex(), 16) #int(ser.read(2).hex(), 16)
cursor+=2
payload = line[cursor:].hex()
cursor = len(line)
if datalen % SINGLE_NODE_REPORT_SIZE != 0:
if printVerbosity > 1:
net.addPrint(" [PACKET DECODE] Invalid datalength: "+ str(datalen))
#appLogger.warning("[Node {0}] invalid sequencelength: {1}".format(nodeid, datalen))
seqid = get_sequence_index(nodeid)
#at this point, since we just received 'network_new_sequence', there should not be any sequence associated with this nodeid
if seqid != -1:
if messageSequenceList[seqid].lastPktnum == pktnum: #in this case we just received the 'network_new_sequence' packet twice, shall I discard duplicates before this?
if printVerbosity > 1:
net.addPrint(" [PACKET DECODE] Duplicate packet from node "+ str(nodeid)+ " with pktnum "+ str(pktnum))
#appLogger.info("[Node {0}] duplicate packet, pktnum: {1}".format(nodeid, pktnum))
# remove duplicate packet data from uart buffer
#remainingDataSize = messageSequenceList[seqid].sequenceSize - messageSequenceList[seqid].datacounter
#if remainingDataSize >= MAX_PACKET_PAYLOAD:
# payload=line[10:10+MAX_PACKET_PAYLOAD].hex()
#else:
# payload=line[10:10+remainingDataSize].hex()
#dataLogger.info("Node {0} 0x {1} 0x {2} 0x {3}".format(nodeid, '{:04x}'.format(pkttype), '{:02x}'.format(pktnum),
# '{:x}'.format(int(payload, 16))))
continue
else: #in this case the 'network_new_sequence' arrived before 'network_last_sequence'.
if printVerbosity > 1:
net.addPrint(" [PACKET DECODE] Previous sequence has not been completed yet")
# TODO what to do now? For now, assume last packet was dropped
# TODO send received data instead of deleting it all
#appLogger.info("[Node {0}] Received new sequence packet "
# "while old sequence has not been completed".format(nodeid))
log_contact_data(seqid)
del messageSequenceList[seqid]
messageSequenceList.append(MessageSequence(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
nodeid, pktnum, 0, 0, [], time.time()))
seqid = len(messageSequenceList) - 1
messageSequenceList[seqid].sequenceSize += datalen
if messageSequenceList[seqid].sequenceSize > MAX_PACKET_PAYLOAD: #this is the normal behaviour
decode_payload(payload, seqid, MAX_PACKET_PAYLOAD, PacketType.network_new_sequence, pktnum)
else: #this is when the sequence is made by only one packet.
decode_payload(payload, seqid, datalen, PacketType.network_new_sequence, pktnum)
# TODO Only 1 packet in this sequence, so upload this packet already
if printVerbosity > 1:
net.addPrint(" [PACKET DECODE] Bluetooth sequence decoded. Contact elements: "+ str(len(messageSequenceList[seqid].datalist)))
#appLogger.debug("[Node {0}] Single packet sequence complete".format(nodeid))
log_contact_data(seqid)
net.processBTReportMessage(str(nodeid))
del messageSequenceList[seqid]
elif PacketType.network_active_sequence == pkttype:
seqid = get_sequence_index(nodeid)
payload = line[cursor:].hex()
cursor = len(line)
if seqid == -1: #this is when we received 'network_active_sequence' before receiving a valid 'network_new_sequence'
if printVerbosity > 1:
net.addPrint(" [PACKET DECODE] First part of sequence dropped, creating incomplete sequence at index "+ str(len(messageSequenceList)) +" from pktnum "+ str(pktnum))
messageSequenceList.append(
MessageSequence(datetime.datetime.fromtimestamp(time.time()).
strftime('%Y-%m-%d %H:%M:%S'), nodeid,
pktnum, -1, 0, [], time.time()))
seqid = len(messageSequenceList) - 1
headerDropCounter += 1
elif messageSequenceList[seqid].lastPktnum == pktnum: #in this case we just received the same 'network_active_sequence' packet twice,
if printVerbosity > 1:
net.addPrint(" [PACKET DECODE] Duplicate packet from node "+ str(nodeid) +" with pktnum "+ str(pktnum))
# remove duplicate packet data from uart buffer
#remainingDataSize = messageSequenceList[seqid].sequenceSize - messageSequenceList[seqid].datacounter
#if remainingDataSize >= MAX_PACKET_PAYLOAD:
#ser.read(MAX_PACKET_PAYLOAD) #TODO:if it is a duplicate no need to store it, but it MUST be a duplicate
#else:
#ser.read(remainingDataSize)
continue
messageSequenceList[seqid].lastPktnum = pktnum
messageSequenceList[seqid].latestTime = time.time()
decode_payload(payload,seqid, MAX_PACKET_PAYLOAD, PacketType.network_active_sequence, pktnum)
elif PacketType.network_last_sequence == pkttype:
# TODO upload data before deleting element from list
#if printVerbosity > 1:
# net.addPrint("[INFO] Full message received")
seqid = get_sequence_index(nodeid)
payload = line[cursor:].hex()
cursor = len(line)
if seqid == -1: #this is when we receive 'network_last_sequence' before receiving a valid 'network_new_sequence'
messageSequenceList.append(MessageSequence(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), nodeid,
pktnum, 0, 0, [], time.time()))
seqid = len(messageSequenceList) - 1
decode_payload(payload,seqid, MAX_PACKET_PAYLOAD, PacketType.network_last_sequence, pktnum)
if printVerbosity > 1:
net.addPrint(" [PACKET DECODE] Bluetooth sequence decoded but header files were never received. datacounter: "+ str(messageSequenceList[seqid].datacounter) +" ContactData elements: "+ str(len(messageSequenceList[seqid].datalist)))
# net.addPrint("[INFO] Nodeid: "+ str(messageSequenceList[seqid].nodeid) +" datacounter: "+
# str(messageSequenceList[seqid].datacounter)+ " ContactData elements: "+
# str(len(messageSequenceList[seqid].datalist)))
headerDropCounter += 1
#appLogger.info("[Node {0}] Message defragmented but header files were never received"
# " - datacounter: {1} ContactData elements: {2}"
# .format(nodeid, messageSequenceList[seqid].datacounter,
# len(messageSequenceList[seqid].datalist)))
log_contact_data(seqid)
del messageSequenceList[seqid]
                            elif messageSequenceList[seqid].sequenceSize == -1:  # header of this sequence was never received (marked above with sequenceSize = -1)
decode_payload(payload,seqid, MAX_PACKET_PAYLOAD, PacketType.network_last_sequence, pktnum)
if printVerbosity > 1:
net.addPrint(" [PACKET DECODE] Bluetooth sequence decoded but header files were never received. datacounter: "+ str(messageSequenceList[seqid].datacounter) +" ContactData elements: "+ str(len(messageSequenceList[seqid].datalist)))
#
# net.addPrint("[WARNING] Message defragmented but header files were never received")
# net.addPrint("[INFO] Nodeid: "+ str(messageSequenceList[seqid].nodeid) +" datacounter: "+ str(messageSequenceList[seqid].datacounter)+ " ContactData elements: "+ str(len(messageSequenceList[seqid].datalist)))
#appLogger.info("[Node {0}] Message defragmented but header files were never received"
# " - datacounter: {1} ContactData elements: "
# .format(nodeid, messageSequenceList[seqid].datacounter,
# len(messageSequenceList[seqid].datalist)))
log_contact_data(seqid)
del messageSequenceList[seqid]
else:
remainingDataSize = messageSequenceList[seqid].sequenceSize - messageSequenceList[seqid].datacounter
                                if remainingDataSize > MAX_PACKET_PAYLOAD:  # more data still outstanding than one packet carries, i.e. intermediate packets were lost
decode_payload(payload,seqid, MAX_PACKET_PAYLOAD, PacketType.network_last_sequence, pktnum)
else:
decode_payload(payload,seqid, remainingDataSize, PacketType.network_last_sequence, pktnum)
if messageSequenceList[seqid].sequenceSize != messageSequenceList[seqid].datacounter:
if printVerbosity > 1:
#net.addPrint("ERROR: Messagesequence ended, but datacounter is not equal to sequencesize")
net.addPrint(" [PACKET DECODE] ERROR: Messagesequence ended, but datacounter is not equal to sequencesize")
if printVerbosity > 1:
net.addPrint(" [PACKET DECODE] Bluetooth sequence decoded. "+" sequencesize: "+ str(messageSequenceList[seqid].sequenceSize)+ " ContactData elements: "+ str(len(messageSequenceList[seqid].datalist)))
#appLogger.warning("[Node {0}] Messagesequence ended, but datacounter is not equal to sequencesize - datacounter: {1}" #WHY IS THIS WARNING HERE?????
# " ContactData elements: {2}".format(nodeid, messageSequenceList[seqid].datacounter,
# len(messageSequenceList[seqid].datalist)))
log_contact_data(seqid)
net.processBTReportMessage(str(messageSequenceList[seqid].nodeid))
del messageSequenceList[seqid]
elif PacketType.network_bat_data == pkttype:
batCapacity = float(int.from_bytes(line[cursor:cursor+2], byteorder="big", signed=False)) #int.from_bytes(ser.read(2), byteorder="big", signed=False)
cursor+=2
batSoC = float(int.from_bytes(line[cursor:cursor+2], byteorder="big", signed=False)) / 10 #int.from_bytes(ser.read(2), byteorder="big", signed=False) / 10
cursor+=2
bytesELT = line[cursor:cursor+2] #ser.read(2)
cursor+=2
batELT = str(timedelta(minutes=int.from_bytes(bytesELT, byteorder="big", signed=False)))[:-3] # Convert minutes to hours and minutes
batAvgConsumption = float(int.from_bytes(line[cursor:cursor+2], byteorder="big", signed=True)) / 10 #int.from_bytes(ser.read(2), byteorder="big", signed=True) / 10
cursor+=2
batAvgVoltage = float(int.from_bytes(line[cursor:cursor+2], byteorder="big", signed=False))/1000 #int.from_bytes(ser.read(2), byteorder="big", signed=False)
cursor+=2
batAvgTemp = float(int.from_bytes(line[cursor:cursor+2], byteorder="big", signed=True)) / 100 #int.from_bytes(ser.read(2), byteorder="big", signed=True) / 100
cursor+=2
#processBatteryDataMessage(self, label, voltage, capacity, soc=None, consumption=None, temperature=None)
net.processBatteryDataMessage(str(nodeid), batAvgVoltage, batCapacity, batSoC, batAvgConsumption, batAvgTemp)
if printVerbosity > 1:
net.addPrint(" [PACKET DECODE] Battery data, Cap: %.0f mAh SoC: %.1f ETA: %s (hh:mm) Consumption: %.1f mA Voltage: %.3f Temperature %.2f"% (batCapacity, batSoC, batELT, batAvgConsumption, batAvgVoltage, batAvgTemp))
#appLogger.info("[Node {0}] Received bat data, Capacity: {1} mAh | State Of Charge: {2}% | Estimated Lifetime: {3} (hh:mm) | "
# "Average Consumption: {4} mA | Average Battery Voltage: {5} mV | Temperature: {6} *C"
# .format(nodeid, batCapacity, batSoC, batELT, batAvgConsumption,
# batAvgVoltage, batAvgTemp))
#dataLogger.info("Node {0} 0x {1} 0x {2} 0x {3}{4}{5}{6}{7}{8}".format(nodeid, '{:04x}'.format(pkttype), '{:02x}'.format(pktnum), '{:02x}'.format(batCapacity), '{:02x}'.format(int(batSoC * 10)),
# '{:02x}'.format(int.from_bytes(bytesELT, byteorder="big", signed=False)), '{:02x}'.format(int(batAvgConsumption * 10)),
# '{:02x}'.format(int(batAvgVoltage)), '{:02x}'.format(100 * int(batAvgTemp))))
elif PacketType.network_respond_ping == pkttype:
payload = int(line[cursor:cursor+2].hex(), 16) #int(ser.read(1).hex(), 16)
cursor+=2
#dataLogger.info("Node {0} 0x {1} 0x {2} 0x {3}".format(nodeid, '{:04x}'.format(pkttype), '{:02x}'.format(pktnum), '{:x}'.format(payload)))
if payload == 233:
net.addPrint(" [PACKET DECODE] Node id "+ str(nodeid) +" pinged succesfully!")
#appLogger.debug("[Node {0}] pinged succesfully!".format(nodeid))
if nodeid not in ActiveNodes:
ActiveNodes.append(nodeid)
else:
net.addPrint(" [PACKET DECODE] Node id "+ str(nodeid)+" wrong ping payload: %d" % payload )
#appLogger.info("[Node {0}] pinged wrong payload: ".format(nodeid, payload))
elif PacketType.network_keep_alive == pkttype:
cap = float(int.from_bytes(line[cursor:cursor+2], byteorder="big", signed=False)) #int.from_bytes(ser.read(2), byteorder="big", signed=False)
cursor+=2
batAvgVoltage = float(int.from_bytes(line[cursor:cursor+2], byteorder="big", signed=False))/1000 #int.from_bytes(ser.read(2), byteorder="big", signed=False)
cursor+=2
trickle_count = int.from_bytes(line[cursor:cursor+1], byteorder="big", signed=False) #int.from_bytes(ser.read(1), byteorder="big", signed=False)
cursor+=1
net.processKeepAliveMessage(str(nodeid), trickle_count, batAvgVoltage, cap)
if printVerbosity > 1:
net.addPrint(" [PACKET DECODE] Keep alive packet. Cap: "+ str(cap) +" Voltage: "+ str(batAvgVoltage*1000) +" Trickle count: "+ str(trickle_count))
#appLogger.info("[Node {0}] Received keep alive message with capacity: {1} and voltage: {2} trickle_count {3}".format(nodeid, cap, batAvgVoltage,trickle_count))
#dataLogger.info("Node {0} 0x {1} 0x {2} 0x {3}{4} {5}".format(nodeid, '{:02x}'.format(pkttype), '{:x}'.format(pktnum), '{:02x}'.format(cap), '{:02x}'.format(batAvgVoltage),trickle_count))
else:
net.addPrint(" [PACKET DECODE] Unknown packet (unrecognized packet type): "+str(rawline))
cursor = len(line)
#appLogger.warning("[Node {0}] Received unknown packettype: {1}".format(nodeid, hex(pkttype)))
#dataLogger.info("Node {0} 0x {1} 0x {2}".format(nodeid, '{:02x}'.format(pkttype), '{:x}'.format(pktnum)))
else: #start == START_CHAR
startCharErr=True
net.addPrint("[UART] Unknown START_CHAR: "+ start + ". The line was: "+str(rawline))
errorLogger.info("%s" %(str(rawline)))
except Exception as e:
otherkindofErr=True
net.addPrint("[ERROR] Unknown error during line decoding. Exception: %s. Line was: %s" %( str(e), str(rawline)))
errorLogger.info("%s" %(str(rawline)))
currentTime = time.time()
if currentTime - previousTimeTimeout > timeoutInterval:
previousTimeTimeout = time.time()
deletedCounter = 0
for x in range(len(messageSequenceList)):
if currentTime - messageSequenceList[x - deletedCounter].latestTime > timeoutTime:
deleted_nodeid=messageSequenceList[x - deletedCounter].nodeid
del messageSequenceList[x - deletedCounter]
deletedCounter += 1
if printVerbosity > 1:
xd = x + deletedCounter
net.addPrint("[APPLICATION] Deleted seqid %d of node %d because of timeout" %(xd, deleted_nodeid))
if currentTime - btPreviousTime > btToggleInterval:
ptype = 0
if btToggleBool:
# net.addPrint("Turning bt off")
# appLogger.debug("[SENDING] Disable Bluetooth")
ptype = PacketType.nordic_turn_bt_off
# btToggleBool = False
else:
# net.addPrint("Turning bt on")
# appLogger.debug("[SENDING] Enable Bluetooth")
ptype = PacketType.nordic_turn_bt_on
btToggleBool = True
# send_serial_msg(ptype, None)
btPreviousTime = currentTime
else: # !ser.is_open (serial port is not open)
net.addPrint('[UART] Serial Port closed! Trying to open port: %s'% ser.port)
try:
ser.open()
except Exception as e:
net.addPrint("[UART] Serial Port open exception:"+ str(e))
#appLogger.debug("Serial Port exception: %s", e)
time.sleep(5)
continue
net.addPrint("[UART] Serial Port open!")
#appLogger.debug("Serial Port open")
except UnicodeDecodeError as e:
pass
except KeyboardInterrupt:
print("[APPLICATION] -----Packet delivery stats summary-----")
print("[APPLICATION] Total packets delivered: ", deliverCounter)
print("[APPLICATION] Total packets dropped: ", dropCounter)
print("[APPLICATION] Total header packets dropped: ", headerDropCounter)
print("[APPLICATION] Packet delivery rate: ", 100 * (deliverCounter / (deliverCounter + dropCounter)))
print("[APPLICATION] Messages defragmented: ", defragmentationCounter)
print("[APPLICATION] Logs are in: "+logfolderpath)
#appLogger.info("-----Packet delivery stats summary-----")
#appLogger.info("Total packets delivered: {0}".format(deliverCounter))
#appLogger.info("Total packets dropped: {0}".format(dropCounter))
#appLogger.info("Total header packets dropped: {0}".format(headerDropCounter))
#appLogger.info("Packet delivery rate: {0}".format(100 * (deliverCounter / (deliverCounter + dropCounter))))
#appLogger.info("Messages defragmented: {0}".format(defragmentationCounter))
raise
finally:
print("done")
| 53.631256 | 320 | 0.552652 | ["Apache-2.0"] | smartcommunitylab/vela-lab | gateway/dev/sink_integration/main.py | 52,507 | Python |
# MIT License
#
# Copyright (C) IBM Corporation 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import six
from art import NUMPY_DTYPE
from art.attacks.attack import Attack
from art.utils import compute_success, get_labels_np_array
logger = logging.getLogger(__name__)
class ElasticNet(Attack):
"""
The elastic net attack of Pin-Yu Chen et al. (2018). Paper link: https://arxiv.org/abs/1709.04114.
"""
attack_params = Attack.attack_params + ['confidence', 'targeted', 'learning_rate', 'max_iter', 'beta',
'binary_search_steps', 'initial_const', 'batch_size', 'decision_rule']
def __init__(self, classifier, confidence=0.0, targeted=True, learning_rate=1e-2, binary_search_steps=9,
max_iter=10000, beta=1e-3, initial_const=1e-3, batch_size=128, decision_rule='EN'):
"""
Create an ElasticNet attack instance.
:param classifier: A trained model.
:type classifier: :class:`.Classifier`
:param confidence: Confidence of adversarial examples: a higher value produces examples that are farther
               away from the original input, but classified with higher confidence as the target class.
:type confidence: `float`
:param targeted: Should the attack target one specific class.
:type targeted: `bool`
:param learning_rate: The initial learning rate for the attack algorithm. Smaller values produce better
results but are slower to converge.
:type learning_rate: `float`
:param binary_search_steps: Number of times to adjust constant with binary search (positive value).
:type binary_search_steps: `int`
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
:param beta: Hyperparameter trading off L2 minimization for L1 minimization.
:type beta: `float`
:param initial_const: The initial trade-off constant `c` to use to tune the relative importance of distance
and confidence. If `binary_search_steps` is large, the initial constant is not important, as discussed in
Carlini and Wagner (2016).
:type initial_const: `float`
:param batch_size: Internal size of batches on which adversarial samples are generated.
:type batch_size: `int`
:param decision_rule: Decision rule. 'EN' means Elastic Net rule, 'L1' means L1 rule, 'L2' means L2 rule.
:type decision_rule: `string`
"""
super(ElasticNet, self).__init__(classifier)
kwargs = {'confidence': confidence,
'targeted': targeted,
'learning_rate': learning_rate,
'binary_search_steps': binary_search_steps,
'max_iter': max_iter,
'beta': beta,
'initial_const': initial_const,
'batch_size': batch_size,
'decision_rule': decision_rule
}
assert self.set_params(**kwargs)
def _loss(self, x, x_adv):
"""
Compute the loss function values.
:param x: An array with the original input.
:type x: `np.ndarray`
:param x_adv: An array with the adversarial input.
:type x_adv: `np.ndarray`
        :return: A tuple holding the current predicted classes, l1 distance, l2 distance and elastic net loss.
:rtype: `(np.ndarray, float, float, float)`
"""
l1dist = np.sum(np.abs(x - x_adv).reshape(x.shape[0], -1), axis=1)
l2dist = np.sum(np.square(x - x_adv).reshape(x.shape[0], -1), axis=1)
endist = self.beta * l1dist + l2dist
z = self.classifier.predict(np.array(x_adv, dtype=NUMPY_DTYPE), logits=True)
return np.argmax(z, axis=1), l1dist, l2dist, endist
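    # Added numeric note (hedged, not in the original source): with beta = 1e-3,
    # a perturbation whose per-example distances are l1 = 4.0 and l2 = 0.25 has
    # an elastic-net distance of 1e-3 * 4.0 + 0.25 = 0.254; this is the quantity
    # compared when decision_rule == 'EN'.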
def _gradient_of_loss(self, target, x, x_adv, c):
"""
Compute the gradient of the loss function.
:param target: An array with the target class (one-hot encoded).
:type target: `np.ndarray`
:param x: An array with the original input.
:type x: `np.ndarray`
:param x_adv: An array with the adversarial input.
:type x_adv: `np.ndarray`
:param c: Weight of the loss term aiming for classification as target.
:type c: `float`
:return: An array with the gradient of the loss function.
        :rtype: `np.ndarray`
"""
# Compute the current logits
z = self.classifier.predict(np.array(x_adv, dtype=NUMPY_DTYPE), logits=True)
if self.targeted:
i_sub = np.argmax(target, axis=1)
i_add = np.argmax(z * (1 - target) + (np.min(z, axis=1) - 1)[:, np.newaxis] * target, axis=1)
else:
i_add = np.argmax(target, axis=1)
i_sub = np.argmax(z * (1 - target) + (np.min(z, axis=1) - 1)[:, np.newaxis] * target, axis=1)
loss_gradient = self.classifier.class_gradient(x_adv, label=i_add, logits=True)
loss_gradient -= self.classifier.class_gradient(x_adv, label=i_sub, logits=True)
loss_gradient = loss_gradient.reshape(x.shape)
c_mult = c
for _ in range(len(x.shape)-1):
c_mult = c_mult[:, np.newaxis]
loss_gradient *= c_mult
loss_gradient += 2 * (x_adv - x)
return loss_gradient
def _decay_learning_rate(self, global_step, end_learning_rate, decay_steps):
"""
        Applies a quadratic polynomial decay to the learning rate.
:param global_step: Global step to use for the decay computation.
:type global_step: `int`
:param end_learning_rate: The minimal end learning rate.
:type end_learning_rate: `float`
:param decay_steps: Number of decayed steps.
:type decay_steps: `int`
:return: The decayed learning rate
:rtype: `float`
"""
decayed_learning_rate = (self.learning_rate - end_learning_rate) * (1 - global_step / decay_steps)**2 + \
end_learning_rate
return decayed_learning_rate
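    # Added numeric note (hedged): with learning_rate = 1e-2, end_learning_rate = 0
    # and decay_steps = 1000, the rate returned at global_step = 500 is
    # (1e-2 - 0) * (1 - 500 / 1000) ** 2 + 0 = 2.5e-3.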
def generate(self, x, **kwargs):
"""
Generate adversarial samples and return them in an array.
:param x: An array with the original inputs to be attacked.
:type x: `np.ndarray`
:param y: If `self.targeted` is true, then `y` represents the target labels. Otherwise, the targets are the
original class labels.
:type y: `np.ndarray`
:return: An array holding the adversarial examples.
:rtype: `np.ndarray`
"""
x_adv = x.astype(NUMPY_DTYPE)
(clip_min, clip_max) = self.classifier.clip_values
# Parse and save attack-specific parameters
params_cpy = dict(kwargs)
y = params_cpy.pop(str('y'), None)
self.set_params(**params_cpy)
# Assert that, if attack is targeted, y is provided:
if self.targeted and y is None:
raise ValueError('Target labels `y` need to be provided for a targeted attack.')
# No labels provided, use model prediction as correct class
if y is None:
y = get_labels_np_array(self.classifier.predict(x, logits=False))
# Compute adversarial examples with implicit batching
nb_batches = int(np.ceil(x_adv.shape[0] / float(self.batch_size)))
for batch_id in range(nb_batches):
logger.debug('Processing batch %i out of %i', batch_id, nb_batches)
batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size
x_batch = x_adv[batch_index_1:batch_index_2]
y_batch = y[batch_index_1:batch_index_2]
x_adv[batch_index_1:batch_index_2] = self._generate_batch(x_batch, y_batch)
# Apply clip
x_adv = np.clip(x_adv, clip_min, clip_max)
# Compute success rate of the EAD attack
logger.info('Success rate of EAD attack: %.2f%%',
100 * compute_success(self.classifier, x, y, x_adv, self.targeted))
return x_adv
def _generate_batch(self, x_batch, y_batch):
"""
Run the attack on a batch of images and labels.
:param x_batch: A batch of original examples.
:type x_batch: `np.ndarray`
        :param y_batch: A batch of targets (one-hot encoded).
:type y_batch: `np.ndarray`
:return: A batch of adversarial examples.
:rtype: `np.ndarray`
"""
# Initialize binary search:
c = self.initial_const * np.ones(x_batch.shape[0])
c_lower_bound = np.zeros(x_batch.shape[0])
c_upper_bound = 10e10 * np.ones(x_batch.shape[0])
# Initialize best distortions and best attacks globally
o_best_dist = np.inf * np.ones(x_batch.shape[0])
o_best_attack = x_batch.copy()
# Start with a binary search
for bss in range(self.binary_search_steps):
logger.debug('Binary search step %i out of %i (c_mean==%f)', bss, self.binary_search_steps, np.mean(c))
# Run with 1 specific binary search step
best_dist, best_label, best_attack = self._generate_bss(x_batch, y_batch, c)
# Update best results so far
o_best_attack[best_dist < o_best_dist] = best_attack[best_dist < o_best_dist]
o_best_dist[best_dist < o_best_dist] = best_dist[best_dist < o_best_dist]
# Adjust the constant as needed
c, c_lower_bound, c_upper_bound = self._update_const(y_batch, best_label, c, c_lower_bound, c_upper_bound)
return o_best_attack
def _update_const(self, y_batch, best_label, c, c_lower_bound, c_upper_bound):
"""
Update constants.
        :param y_batch: A batch of targets (one-hot encoded).
:type y_batch: `np.ndarray`
:param best_label: A batch of best labels.
:type best_label: `np.ndarray`
:param c: A batch of constants.
:type c: `np.ndarray`
:param c_lower_bound: A batch of lower bound constants.
:type c_lower_bound: `np.ndarray`
:param c_upper_bound: A batch of upper bound constants.
:type c_upper_bound: `np.ndarray`
:return: A tuple of three batches of updated constants and lower/upper bounds.
:rtype: `tuple`
"""
def compare(o1, o2):
if self.targeted:
return o1 == o2
else:
return o1 != o2
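        # Bisection over the trade-off constant c: a successful attack tightens the upper bound and bisects,
        # while a failed attack raises the lower bound and either bisects or, if no upper bound has been
        # found yet, multiplies c by 10.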
for i in range(len(c)):
if compare(best_label[i], np.argmax(y_batch[i])) and best_label[i] != -np.inf:
# Successful attack
c_upper_bound[i] = min(c_upper_bound[i], c[i])
if c_upper_bound[i] < 1e9:
c[i] = (c_lower_bound[i] + c_upper_bound[i]) / 2.0
else:
                # Failed attack
c_lower_bound[i] = max(c_lower_bound[i], c[i])
if c_upper_bound[i] < 1e9:
c[i] = (c_lower_bound[i] + c_upper_bound[i]) / 2.0
else:
c[i] *= 10
return c, c_lower_bound, c_upper_bound
def _generate_bss(self, x_batch, y_batch, c):
"""
Generate adversarial examples for a batch of inputs with a specific batch of constants.
:param x_batch: A batch of original examples.
:type x_batch: `np.ndarray`
        :param y_batch: A batch of targets (one-hot encoded).
:type y_batch: `np.ndarray`
:param c: A batch of constants.
:type c: `np.ndarray`
        :return: A tuple of best elastic distances, best labels and best attacks.
:rtype: `tuple`
"""
def compare(o1, o2):
if self.targeted:
return o1 == o2
else:
return o1 != o2
# Initialize best distortions and best changed labels and best attacks
best_dist = np.inf * np.ones(x_batch.shape[0])
best_label = [-np.inf] * x_batch.shape[0]
best_attack = x_batch.copy()
        # Implement Algorithm 1 from the EAD paper
x_adv = x_batch.copy()
y_adv = x_batch.copy()
for it in range(self.max_iter):
logger.debug('Iteration step %i out of %i', it, self.max_iter)
# Update learning rate
lr = self._decay_learning_rate(global_step=it, end_learning_rate=0, decay_steps=self.max_iter)
# Compute adversarial examples
grad = self._gradient_of_loss(target=y_batch, x=x_batch, x_adv=y_adv, c=c)
x_adv_next = self._shrinkage_threshold(y_adv - lr * grad, x_batch, self.beta)
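            # FISTA-style momentum step (EAD paper, Algorithm 1): extrapolate the slack iterate y_adv from
            # the current and previous projected iterates with factor it / (it + 3).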
y_adv = x_adv_next + (1.0 * it / (it + 3)) * (x_adv_next - x_adv)
x_adv = x_adv_next
# Adjust the best result
(z, l1dist, l2dist, endist) = self._loss(x=x_batch, x_adv=x_adv)
if self.decision_rule == 'EN':
zip_set = zip(endist, z)
elif self.decision_rule == 'L1':
zip_set = zip(l1dist, z)
elif self.decision_rule == 'L2':
zip_set = zip(l2dist, z)
else:
raise ValueError("The decision rule only supports `EN`, `L1`, `L2`.")
for j, (d, s) in enumerate(zip_set):
if d < best_dist[j] and compare(s, np.argmax(y_batch[j])):
best_dist[j] = d
best_attack[j] = x_adv[j]
best_label[j] = s
return best_dist, best_label, best_attack
@staticmethod
def _shrinkage_threshold(z, x, beta):
"""
Implement the element-wise projected shrinkage-threshold function.
:param z: a batch of examples.
:type z: `np.ndarray`
:param x: a batch of original examples.
:type x: `np.ndarray`
:param beta: the shrink parameter.
:type beta: `float`
        :return: a shrunken version of z.
:rtype: `np.ndarray`
"""
cond1 = (z - x) > beta
cond2 = np.abs(z - x) <= beta
cond3 = (z - x) < -beta
upper = np.minimum(z - beta, 1.0)
lower = np.maximum(z + beta, 0.0)
result = cond1 * upper + cond2 * x + cond3 * lower
return result
def set_params(self, **kwargs):
"""
        Take in a dictionary of parameters and apply attack-specific checks before saving them as attributes.
:param confidence: Confidence of adversarial examples: a higher value produces examples that are farther
               away from the original input, but classified with higher confidence as the target class.
:type confidence: `float`
:param targeted: Should the attack target one specific class.
:type targeted: `bool`
:param learning_rate: The initial learning rate for the attack algorithm. Smaller values produce better
results but are slower to converge.
:type learning_rate: `float`
:param binary_search_steps: Number of times to adjust constant with binary search (positive value).
:type binary_search_steps: `int`
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
:param beta: Hyperparameter trading off L2 minimization for L1 minimization.
:type beta: `float`
:param initial_const: The initial trade-off constant `c` to use to tune the relative importance of distance
and confidence. If `binary_search_steps` is large, the initial constant is not important, as discussed in
Carlini and Wagner (2016).
:type initial_const: `float`
:param batch_size: Internal size of batches on which adversarial samples are generated.
:type batch_size: `int`
:param decision_rule: Decision rule. 'EN' means Elastic Net rule, 'L1' means L1 rule, 'L2' means L2 rule.
:type decision_rule: `string`
"""
# Save attack-specific parameters
super(ElasticNet, self).set_params(**kwargs)
if type(self.binary_search_steps) is not int or self.binary_search_steps < 0:
raise ValueError("The number of binary search steps must be a non-negative integer.")
if type(self.max_iter) is not int or self.max_iter < 0:
raise ValueError("The number of iterations must be a non-negative integer.")
if type(self.batch_size) is not int or self.batch_size < 1:
raise ValueError("The batch size must be an integer greater than zero.")
if not isinstance(self.decision_rule, six.string_types) or self.decision_rule not in ['EN', 'L1', 'L2']:
raise ValueError("The decision rule only supports `EN`, `L1`, `L2`.")
        return True
| 43.621622 | 120 | 0.625099 | ["MIT"] | Viktour19/adversarial-robustness-toolbox | art/attacks/elastic_net.py | 17,754 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceEndpointPolicyDefinitionsOperations(object):
"""ServiceEndpointPolicyDefinitionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
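    # Usage sketch (assumes an authenticated NetworkManagementClient `client` that exposes this operation
    # group as `client.service_endpoint_policy_definitions`; names are illustrative):
    #     poller = client.service_endpoint_policy_definitions.begin_delete(
    #         "my-resource-group", "my-policy", "my-definition")
    #     poller.result()  # block until the long-running delete finishes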
def _delete_initial(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
service_endpoint_policy_definition_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
service_endpoint_policy_definition_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified ServiceEndpoint policy definitions.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the Service Endpoint Policy.
:type service_endpoint_policy_name: str
:param service_endpoint_policy_definition_name: The name of the service endpoint policy
definition.
:type service_endpoint_policy_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
service_endpoint_policy_definition_name=service_endpoint_policy_definition_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
service_endpoint_policy_definition_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ServiceEndpointPolicyDefinition"
"""Get the specified service endpoint policy definitions from service endpoint policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param service_endpoint_policy_definition_name: The name of the service endpoint policy
         definition.
:type service_endpoint_policy_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceEndpointPolicyDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_02_01.models.ServiceEndpointPolicyDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
service_endpoint_policy_definition_name, # type: str
service_endpoint_policy_definitions, # type: "_models.ServiceEndpointPolicyDefinition"
**kwargs # type: Any
):
# type: (...) -> "_models.ServiceEndpointPolicyDefinition"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(service_endpoint_policy_definitions, 'ServiceEndpointPolicyDefinition')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
service_endpoint_policy_definition_name, # type: str
service_endpoint_policy_definitions, # type: "_models.ServiceEndpointPolicyDefinition"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ServiceEndpointPolicyDefinition"]
"""Creates or updates a service endpoint policy definition in the specified service endpoint
policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param service_endpoint_policy_definition_name: The name of the service endpoint policy
         definition.
:type service_endpoint_policy_definition_name: str
:param service_endpoint_policy_definitions: Parameters supplied to the create or update service
         endpoint policy definition operation.
:type service_endpoint_policy_definitions: ~azure.mgmt.network.v2019_02_01.models.ServiceEndpointPolicyDefinition
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ServiceEndpointPolicyDefinition or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_02_01.models.ServiceEndpointPolicyDefinition]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinition"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
service_endpoint_policy_definition_name=service_endpoint_policy_definition_name,
service_endpoint_policy_definitions=service_endpoint_policy_definitions,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ServiceEndpointPolicyDefinitionListResult"]
"""Gets all service endpoint policy definitions in a service end point policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceEndpointPolicyDefinitionListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_02_01.models.ServiceEndpointPolicyDefinitionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinitionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyDefinitionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
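        # ItemPaged lazily drives the closures above: get_next fetches each page (following next_link)
        # and extract_data yields the deserialized definitions from it.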
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions'} # type: ignore
| 54.466216 | 285 | 0.695613 | ["MIT"] | AriZavala2/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/operations/_service_endpoint_policy_definitions_operations.py | 24,183 | Python |
from typing import List, Optional
from .result import Result, Ok, Err
res1 = Ok('hello') # type: Result[str, int]
if isinstance(res1, Ok):
ok = res1 # type: Ok[str]
okValue = res1.ok() # type: str
mapped_to_float = res1.map_or(1.0, lambda s: len(s) * 1.5) # type: float
else:
err = res1 # type: Err[int]
errValue = err.err() # type: int
mapped_to_list = res1.map_err(lambda e: [e]).err() # type: Optional[List[int]]
# Test constructor functions
res1 = Ok()
res2 = Ok(42)
res3 = Err(1)
| 26 | 83 | 0.628846 | ["MIT"] | PhilipTrauner/result | result/typetests.py | 520 | Python |
"""PyTorch optimizer builders."""
import argparse
import torch
from espnet.optimizer.factory import OptimizerFactoryInterface
from espnet.optimizer.parser import adadelta
from espnet.optimizer.parser import adam
from espnet.optimizer.parser import sgd
class AdamFactory(OptimizerFactoryInterface):
"""Adam factory."""
@staticmethod
def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
"""Register args."""
return adam(parser)
@staticmethod
def from_args(target, args: argparse.Namespace):
"""Initialize optimizer from argparse Namespace.
Args:
target: for pytorch `model.parameters()`,
for chainer `model`
args (argparse.Namespace): parsed command-line args
"""
return torch.optim.Adam(
target,
lr=args.lr,
weight_decay=args.weight_decay,
betas=(args.beta1, args.beta2),
)
class SGDFactory(OptimizerFactoryInterface):
"""SGD factory."""
@staticmethod
def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
"""Register args."""
return sgd(parser)
@staticmethod
def from_args(target, args: argparse.Namespace):
"""Initialize optimizer from argparse Namespace.
Args:
target: for pytorch `model.parameters()`,
for chainer `model`
args (argparse.Namespace): parsed command-line args
"""
return torch.optim.SGD(
target,
lr=args.lr,
weight_decay=args.weight_decay,
)
class AdadeltaFactory(OptimizerFactoryInterface):
"""Adadelta factory."""
@staticmethod
def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
"""Register args."""
return adadelta(parser)
@staticmethod
def from_args(target, args: argparse.Namespace):
"""Initialize optimizer from argparse Namespace.
Args:
target: for pytorch `model.parameters()`,
for chainer `model`
args (argparse.Namespace): parsed command-line args
"""
return torch.optim.Adadelta(
target,
rho=args.rho,
eps=args.eps,
weight_decay=args.weight_decay,
)
OPTIMIZER_FACTORY_DICT = {
"adam": AdamFactory,
"sgd": SGDFactory,
"adadelta": AdadeltaFactory,
}
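# Usage sketch (assumes `parser` is an argparse.ArgumentParser and `model` a torch.nn.Module):
#     factory = OPTIMIZER_FACTORY_DICT["adam"]
#     factory.add_arguments(parser)
#     args = parser.parse_args()
#     optimizer = factory.from_args(model.parameters(), args)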
| 26.265957 | 82 | 0.624949 | ["Apache-2.0"] | 18445864529/espnet | espnet/optimizer/pytorch.py | 2,469 | Python |
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from salmonella.admin import SalmonellaMixin
from edw.models.postal_zone import PostZoneModel
# ===========================================================================================
# PostalZoneAdmin
# ===========================================================================================
class PostalZoneAdmin(SalmonellaMixin, admin.ModelAdmin):
model = PostZoneModel
list_display = ['name', 'active']
fields = ['term', 'postal_codes', 'active']
search_fields = ('term__name', 'postal_codes')
salmonella_fields = ('term',)
admin.site.register(PostZoneModel, PostalZoneAdmin)
| 28.48 | 93 | 0.549157 | ["BSD-3-Clause"] | EagerBeager/django-edw | backend/edw/admin/postal_zone.py | 712 | Python |
"""The Admin module for the Bower Cache registry."""
import logging
from django import forms
from django.core.exceptions import ValidationError
from django.conf import settings
from django.conf.urls import patterns, url
from django.contrib import admin, messages
from django.contrib.admin.views.main import ChangeList
from django.shortcuts import redirect, get_object_or_404
from django.views.decorators.http import require_POST
from . import bowerlib
from .models import ClonedRepo, Package
from .query import ClonedRepoQuerySet
LOG = logging.getLogger(__name__)
class ClonedRepoChangeList(ChangeList):
def get_query_set(self, request):
return self.root_queryset
def get_results(self, request):
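        # All repos from the custom ClonedRepoQuerySet are counted with len() and rendered on a single
        # page, so Django's default pagination is effectively disabled for this changelist.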
all_repo_count = len(self.root_queryset)
self.result_count = all_repo_count
self.full_result_count = all_repo_count
self.result_list = list(self.root_queryset)
self.can_show_all = True
self.multi_page = False
self.paginator = self.model_admin.get_paginator(request,
self.result_list,
self.list_per_page)
class ClonedRepoAdmin(admin.ModelAdmin):
actions = None
def get_urls(self):
urls = super(ClonedRepoAdmin, self).get_urls()
more_urls = patterns('',
url(r'^(.+)/pull/$',
self.admin_site.admin_view(self.git_pull_view),
name='pull'),
url(r'^update-all$',
self.admin_site.admin_view(require_POST(self.update_all_view)),
name='update-all'),
)
return more_urls + urls
def queryset(self, request):
return ClonedRepoQuerySet(model=ClonedRepo)
def get_changelist(self, request, **kwargs):
return ClonedRepoChangeList
def get_form(self, request, obj=None, **kwargs):
if obj is not None:
return super(ClonedRepoAdmin, self).get_form(request, obj, **kwargs)
else:
# Here we override the form for creation.
return NewRepoForm
def save_form(self, request, form, change):
"""Here we pluck out the data to create a new cloned repo.
Form is an instance of NewRepoForm.
"""
name = form.cleaned_data['name']
origin_url = form.cleaned_data['origin_url']
res = ClonedRepo(name=name, origin=origin_url)
LOG.info("New repo form produced %s" % str(res))
form.save(commit=False)
return res
def get_readonly_fields(self, request, obj=None):
"""Hide the origin field from editing, but not creation."""
return ('origin',) if obj else ()
def add_view(self, request, **kwargs):
"""A custom add_view, to catch exceptions from 'save_model'.
Just to be clear, this is very filthy.
"""
try:
return super(ClonedRepoAdmin, self).add_view(request, **kwargs)
except ValidationError:
# Rerender the form, having messaged the user.
return redirect(request.path)
def save_model(self, request, obj, form, change):
try:
obj.save()
except Exception as exc:
self.message_user(request, "Save failed: %s" % str(exc),
level=messages.ERROR)
raise ValidationError(str(exc))
# A success message will be flashed by default
def git_pull_view(self, request, repo_name):
"""Perform a git pull and redirect back to the repo."""
LOG.info("Pull requested for %s." % repo_name)
repo = get_object_or_404(self.model, name=repo_name)
repo.pull()
self.message_user(request, "Repo %s successfully updated." % repo_name,
level=messages.SUCCESS)
return redirect('admin:registry_clonedrepo_change', repo_name)
def update_all_view(self, request):
"""Update all repositories and redirect back to the repo list."""
LOG.info("Total update requested.")
total_count = errors = 0
for repo in self.model.objects.all():
total_count += 1
try:
repo.pull()
except:
LOG.exception('While updating %s.' % repo)
errors += 1
msg = "{0} repos successfully updated, {1} failed.".format(total_count,
errors)
self.message_user(request, msg, level=messages.SUCCESS)
return redirect('admin:registry_clonedrepo_changelist')
class NewRepoForm(forms.ModelForm):
"""A special form for creating cloned repositories."""
origin_url = forms.CharField(required=False)
choices = [('upstream', 'from upstream Bower'),
('origin_url', 'from git repo:')]
origin_widget = forms.RadioSelect
origin_source = forms.ChoiceField(choices=choices, widget=origin_widget,
initial=choices[0][0])
def clean(self):
"""Validate the new repo form.
Might perform a request to upstream Bower."""
cleaned_data = super(NewRepoForm, self).clean()
origin_url = cleaned_data['origin_url']
origin_source = cleaned_data['origin_source']
if origin_source == 'origin_url' and not origin_url:
msg = 'Please provide an origin URL.'
self._errors['origin_url'] = self.error_class([msg])
del cleaned_data['origin_url']
del cleaned_data['origin_source']
elif origin_source == 'upstream':
upstream = settings.UPSTREAM_BOWER_REGISTRY
name = cleaned_data['name']
try:
upstream_pkg = bowerlib.get_package(upstream, name)
except IOError as exc:
msg = str(exc)
self._errors['origin_source'] = self.error_class([msg])
else:
if not upstream_pkg:
msg = 'Upstream registry has no knowledge of %s.' % name
self._errors['name'] = self.error_class([msg])
del cleaned_data['name']
else:
upstream_origin_url = upstream_pkg['url']
cleaned_data['origin_url'] = upstream_origin_url
return cleaned_data
class Meta:
model = ClonedRepo
exclude = ['origin']
admin.site.register(Package)
admin.site.register(ClonedRepo, ClonedRepoAdmin)
| 36.830508 | 80 | 0.606228 | ["MIT"] | Tinche/django-bower-cache | registry/admin.py | 6,519 | Python |
import re
from typing import Pattern
from unittest import mock
import pytest
from faker import Faker, providers
from faker.providers.address.cs_CZ import Provider as CsCzAddressProvider
from faker.providers.address.da_DK import Provider as DaDkAddressProvider
from faker.providers.address.de_AT import Provider as DeAtAddressProvider
from faker.providers.address.de_CH import Provider as DeChAddressProvider
from faker.providers.address.de_DE import Provider as DeDeAddressProvider
from faker.providers.address.el_GR import Provider as ElGrAddressProvider
from faker.providers.address.en_AU import Provider as EnAuAddressProvider
from faker.providers.address.en_CA import Provider as EnCaAddressProvider
from faker.providers.address.en_GB import Provider as EnGbAddressProvider
from faker.providers.address.en_IE import Provider as EnIeAddressProvider
from faker.providers.address.en_IN import Provider as EnInAddressProvider
from faker.providers.address.en_PH import Provider as EnPhAddressProvider
from faker.providers.address.en_US import Provider as EnUsAddressProvider
from faker.providers.address.es_ES import Provider as EsEsAddressProvider
from faker.providers.address.es_MX import Provider as EsMxAddressProvider
from faker.providers.address.fa_IR import Provider as FaIrAddressProvider
from faker.providers.address.fi_FI import Provider as FiFiAddressProvider
from faker.providers.address.fr_FR import Provider as FrFrAddressProvider
from faker.providers.address.he_IL import Provider as HeIlAddressProvider
from faker.providers.address.hi_IN import Provider as HiInAddressProvider
from faker.providers.address.hr_HR import Provider as HrHrAddressProvider
from faker.providers.address.hy_AM import Provider as HyAmAddressProvider
from faker.providers.address.ja_JP import Provider as JaJpAddressProvider
from faker.providers.address.ne_NP import Provider as NeNpAddressProvider
from faker.providers.address.no_NO import Provider as NoNoAddressProvider
from faker.providers.address.pt_BR import Provider as PtBrAddressProvider
from faker.providers.address.pt_PT import Provider as PtPtAddressProvider
from faker.providers.address.ro_RO import Provider as RoRoAddressProvider
from faker.providers.address.ru_RU import Provider as RuRuAddressProvider
from faker.providers.address.sk_SK import Provider as SkSkAddressProvider
from faker.providers.address.ta_IN import Provider as TaInAddressProvider
from faker.providers.address.th_TH import Provider as ThThAddressProvider
from faker.providers.address.zh_CN import Provider as ZhCnAddressProvider
from faker.providers.address.zh_TW import Provider as ZhTwAddressProvider
class TestBaseProvider:
"""Test address provider methods"""
def test_alpha_2_country_codes(self, faker, num_samples):
for _ in range(num_samples):
country_code = faker.country_code(representation="alpha-2")
assert len(country_code) == 2
assert country_code.isalpha()
def test_alpha_2_country_codes_as_default(self, faker, num_samples):
for _ in range(num_samples):
country_code = faker.country_code()
assert len(country_code) == 2
assert country_code.isalpha()
def test_alpha_3_country_codes(self, faker, num_samples):
for _ in range(num_samples):
country_code = faker.country_code(representation="alpha-3")
assert len(country_code) == 3
assert country_code.isalpha()
def test_bad_country_code_representation(self, faker, num_samples):
for _ in range(num_samples):
with pytest.raises(ValueError):
faker.country_code(representation="hello")
def _collect_fakers_for_locales(self):
cached_locales = []
language_locale_codes = providers.BaseProvider.language_locale_codes
for code, countries in language_locale_codes.items():
for country in countries:
name = f"{code}_{country}"
try:
faker = Faker(name)
cached_locales.append(faker)
except AttributeError as e:
print(f"Cannot generate faker for {name}: {e}. Skipped")
return cached_locales
def _fakers_for_locales(self):
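        # Building a Faker instance per locale is expensive, so the instances are created once and
        # memoized on the class for reuse across tests.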
if not hasattr(self.__class__, "cached_locales"):
self.__class__.cached_locales = self._collect_fakers_for_locales()
return self.cached_locales
def test_administrative_unit_all_locales(self):
for faker in self._fakers_for_locales():
if faker.current_country_code() not in ["IL", "GE", "TW", "UA", "NZ"]:
try:
assert isinstance(faker.administrative_unit(), str)
except Exception as e:
raise e.__class__(faker.current_country_code(), *e.args)
def test_country_code_all_locales(self):
for faker in self._fakers_for_locales():
assert isinstance(faker.current_country(), str)
def test_current_country_errors(self):
dt = providers.date_time
countries_duplicated = [*dt.Provider.countries, *dt.Provider.countries]
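        # Duplicating the country list makes current_country() match more than one entry, which should
        # raise the "Ambiguous" ValueError asserted below.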
with mock.patch.object(dt.Provider, "countries", countries_duplicated), pytest.raises(ValueError) as e:
Faker("en_US").current_country()
assert "Ambiguous" in str(e)
country_code = "faker.providers.address.Provider.current_country_code"
with pytest.raises(ValueError), mock.patch(country_code, lambda self: "en_ZZ"):
Faker("en_US").current_country()
class TestCsCz:
"""Test cs_CZ address provider methods"""
def test_street_suffix_short(self, faker, num_samples):
for _ in range(num_samples):
street_suffix_short = faker.street_suffix_short()
assert isinstance(street_suffix_short, str)
assert street_suffix_short in CsCzAddressProvider.street_suffixes_short
def test_street_suffix_long(self, faker, num_samples):
for _ in range(num_samples):
street_suffix_long = faker.street_suffix_long()
assert isinstance(street_suffix_long, str)
assert street_suffix_long in CsCzAddressProvider.street_suffixes_long
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city_name()
assert isinstance(city, str)
assert city in CsCzAddressProvider.cities
def test_street_name(self, faker, num_samples):
for _ in range(num_samples):
street_name = faker.street_name()
assert isinstance(street_name, str)
assert street_name in CsCzAddressProvider.streets
def test_state(self, faker, num_samples):
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in CsCzAddressProvider.states
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"\d{3} \d{2}", postcode)
def test_city_with_postcode(self, faker, num_samples):
for _ in range(num_samples):
city_with_postcode = faker.city_with_postcode()
assert isinstance(city_with_postcode, str)
match = re.fullmatch(r"\d{3} \d{2} (?P<city>.*)", city_with_postcode)
assert match.group("city") in CsCzAddressProvider.cities
class TestDaDk:
"""Test dk_DK address provider methods"""
def test_street_prefix(self, faker, num_samples):
for _ in range(num_samples):
street_prefix = faker.street_prefix()
assert isinstance(street_prefix, str)
assert street_prefix in DaDkAddressProvider.street_prefixes
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city_name()
assert isinstance(city, str)
assert city in DaDkAddressProvider.cities
def test_state(self, faker, num_samples):
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in DaDkAddressProvider.states
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"\d{4}", postcode)
class TestDeAt:
"""Test de_AT address provider methods"""
def test_city(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city()
assert isinstance(city, str)
assert city in DeAtAddressProvider.cities
def test_state(self, faker, num_samples):
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in DeAtAddressProvider.states
def test_street_suffix_short(self, faker, num_samples):
for _ in range(num_samples):
street_suffix_short = faker.street_suffix_short()
assert isinstance(street_suffix_short, str)
assert street_suffix_short in DeAtAddressProvider.street_suffixes_short
def test_street_suffix_long(self, faker, num_samples):
for _ in range(num_samples):
street_suffix_long = faker.street_suffix_long()
assert isinstance(street_suffix_long, str)
assert street_suffix_long in DeAtAddressProvider.street_suffixes_long
def test_country(self, faker, num_samples):
for _ in range(num_samples):
country = faker.country()
assert isinstance(country, str)
assert country in DeAtAddressProvider.countries
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"\d{4}", postcode)
def test_city_with_postcode(self, faker, num_samples):
for _ in range(num_samples):
city_with_postcode = faker.city_with_postcode()
assert isinstance(city_with_postcode, str)
match = re.fullmatch(r"\d{4} (?P<city>.*)", city_with_postcode)
assert match.groupdict()["city"] in DeAtAddressProvider.cities
class TestDeDe:
"""Test de_DE address provider methods"""
def test_city(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city()
assert isinstance(city, str)
assert city in DeDeAddressProvider.cities
def test_state(self, faker, num_samples):
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in DeDeAddressProvider.states
def test_street_suffix_short(self, faker, num_samples):
for _ in range(num_samples):
street_suffix_short = faker.street_suffix_short()
assert isinstance(street_suffix_short, str)
assert street_suffix_short in DeDeAddressProvider.street_suffixes_short
def test_street_suffix_long(self, faker, num_samples):
for _ in range(num_samples):
street_suffix_long = faker.street_suffix_long()
assert isinstance(street_suffix_long, str)
assert street_suffix_long in DeDeAddressProvider.street_suffixes_long
def test_country(self, faker, num_samples):
for _ in range(num_samples):
country = faker.country()
assert isinstance(country, str)
assert country in DeDeAddressProvider.countries
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"\d{5}", postcode)
def test_city_with_postcode(self, faker, num_samples):
for _ in range(num_samples):
city_with_postcode = faker.city_with_postcode()
assert isinstance(city_with_postcode, str)
match = re.fullmatch(r"\d{5} (?P<city>.*)", city_with_postcode)
assert match.groupdict()["city"] in DeDeAddressProvider.cities
class TestElGr:
"""Test el_GR address provider methods"""
def test_line_address(self, faker, num_samples):
for _ in range(num_samples):
address = faker.line_address()
assert isinstance(address, str)
def test_street_prefix_short(self, faker, num_samples):
for _ in range(num_samples):
street_prefix_short = faker.street_prefix_short()
assert isinstance(street_prefix_short, str)
assert street_prefix_short in ElGrAddressProvider.street_prefixes_short
def test_street_prefix_long(self, faker, num_samples):
for _ in range(num_samples):
street_prefix_long = faker.street_prefix_long()
assert isinstance(street_prefix_long, str)
assert street_prefix_long in ElGrAddressProvider.street_prefixes_long
def test_street(self, faker, num_samples):
for _ in range(num_samples):
street = faker.street()
assert isinstance(street, str)
assert street in ElGrAddressProvider.localities
def test_city(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city()
assert isinstance(city, str)
assert city in ElGrAddressProvider.cities
def test_region(self, faker, num_samples):
for _ in range(num_samples):
region = faker.region()
assert isinstance(region, str)
assert region in ElGrAddressProvider.regions
class TestEnAu:
"""Test en_AU address provider methods"""
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"\d{4}", postcode)
def test_state(self, faker, num_samples):
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in EnAuAddressProvider.states
def test_city_prefix(self, faker, num_samples):
for _ in range(num_samples):
city_prefix = faker.city_prefix()
assert isinstance(city_prefix, str)
assert city_prefix in EnAuAddressProvider.city_prefixes
def test_state_abbr(self, faker, num_samples):
for _ in range(num_samples):
state_abbr = faker.state_abbr()
assert isinstance(state_abbr, str)
assert state_abbr in EnAuAddressProvider.states_abbr
assert state_abbr.isupper()
class TestEnNz:
"""Test en_NZ address provider methods"""
def test_state(self, faker, num_samples):
for _ in range(num_samples):
# No states in New Zealand
with pytest.raises(AttributeError):
faker.state()
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"\d{4}", postcode)
class TestEnCa:
"""Test en_CA address provider methods"""
valid_postcode_letter_re = r"[{}]".format("".join(EnCaAddressProvider.postal_code_letters))
valid_postcode_re = r"{0}[0-9]{0} ?[0-9]{0}[0-9]".format(valid_postcode_letter_re)
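    # Together these match Canadian postcodes such as "A1A 1A1": letters drawn from the provider's
    # allowed set, alternating with digits, with an optional space in the middle.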
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(self.valid_postcode_re, postcode)
def test_postcode_in_province(self, faker, num_samples):
for _ in range(num_samples):
for province_abbr in EnCaAddressProvider.provinces_abbr:
code = faker.postcode_in_province(province_abbr)
assert code[0] in EnCaAddressProvider.provinces_postcode_prefixes[province_abbr]
with pytest.raises(Exception):
faker.postcode_in_province("XX")
def test_postalcode(self, faker, num_samples):
for _ in range(num_samples):
postalcode = faker.postalcode()
assert isinstance(postalcode, str)
assert re.fullmatch(self.valid_postcode_re, postalcode)
def test_postal_code_letter(self, faker, num_samples):
for _ in range(num_samples):
postal_code_letter = faker.postal_code_letter()
assert isinstance(postal_code_letter, str)
assert re.fullmatch(self.valid_postcode_letter_re, postal_code_letter)
def test_province(self, faker, num_samples):
for _ in range(num_samples):
province = faker.province()
assert isinstance(province, str)
assert province in EnCaAddressProvider.provinces
def test_province_abbr(self, faker, num_samples):
for _ in range(num_samples):
province_abbr = faker.province_abbr()
assert isinstance(province_abbr, str)
assert province_abbr in EnCaAddressProvider.provinces_abbr
def test_city_prefix(self, faker, num_samples):
for _ in range(num_samples):
city_prefix = faker.city_prefix()
assert isinstance(city_prefix, str)
assert city_prefix in EnCaAddressProvider.city_prefixes
def test_secondary_address(self, faker, num_samples):
for _ in range(num_samples):
secondary_address = faker.secondary_address()
assert isinstance(secondary_address, str)
assert re.fullmatch(r"(?:Apt\.|Suite) \d{3}", secondary_address)
class TestEnGb:
"""Test en_GB address provider methods"""
def test_postcode(self, faker, num_samples):
ukpcp = pytest.importorskip("ukpostcodeparser.parser")
for _ in range(num_samples):
assert isinstance(ukpcp.parse_uk_postcode(faker.postcode()), tuple)
def test_county(self, faker, num_samples):
for _ in range(num_samples):
county = faker.county()
assert isinstance(county, str)
assert county in EnGbAddressProvider.counties
class TestEnIe:
"""Test en_IE address provider methods"""
def test_postcode(self, faker, num_samples):
"""https://stackoverflow.com/questions/33391412/validation-for-irish-eircode"""
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"(?:^[AC-FHKNPRTV-Y][0-9]{2}|D6W)[ -]?[0-9AC-FHKNPRTV-Y]{4}$", postcode)
def test_county(self, faker, num_samples):
for _ in range(num_samples):
county = faker.county()
assert isinstance(county, str)
assert county in EnIeAddressProvider.counties
class TestEnUS:
"""Test en_US address provider methods"""
def test_city_prefix(self, faker, num_samples):
for _ in range(num_samples):
city_prefix = faker.city_prefix()
assert isinstance(city_prefix, str)
assert city_prefix in EnUsAddressProvider.city_prefixes
def test_state(self, faker, num_samples):
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in EnUsAddressProvider.states
def test_state_abbr(self, faker, num_samples):
for _ in range(num_samples):
state_abbr = faker.state_abbr()
assert isinstance(state_abbr, str)
states_and_territories = EnUsAddressProvider.states_and_territories_abbr
assert state_abbr in states_and_territories
def test_state_abbr_no_territories(self, faker, num_samples):
for _ in range(num_samples):
state_abbr = faker.state_abbr(include_territories=False)
assert isinstance(state_abbr, str)
assert state_abbr in EnUsAddressProvider.states_abbr
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
code = faker.postcode()
assert isinstance(code, str) and len(code) == 5
assert 501 <= int(code) <= 99950
def test_postcode_in_state(self, faker, num_samples):
for _ in range(num_samples):
for state_abbr in EnUsAddressProvider.states_abbr:
code = faker.postcode_in_state(state_abbr)
assert re.fullmatch(r"\d{5}", code)
assert int(code) >= EnUsAddressProvider.states_postcode[state_abbr][0]
assert int(code) <= EnUsAddressProvider.states_postcode[state_abbr][1]
with pytest.raises(Exception):
faker.postcode_in_state("XX")
def test_zipcode(self, faker, num_samples):
for _ in range(num_samples):
zipcode = faker.zipcode()
assert isinstance(zipcode, str) and len(zipcode) == 5
assert 501 <= int(zipcode) <= 99950
def test_zipcode_in_state(self, faker, num_samples):
for _ in range(num_samples):
for state_abbr in EnUsAddressProvider.states_abbr:
code = faker.zipcode_in_state(state_abbr)
assert re.fullmatch(r"\d{5}", code)
assert int(code) >= EnUsAddressProvider.states_postcode[state_abbr][0]
assert int(code) <= EnUsAddressProvider.states_postcode[state_abbr][1]
with pytest.raises(Exception):
faker.zipcode_in_state("XX")
def test_zipcode_plus4(self, faker, num_samples):
for _ in range(num_samples):
zipcode_plus4 = faker.zipcode_plus4()
assert isinstance(zipcode_plus4, str)
zipcode, plus4 = zipcode_plus4.split("-")
assert 501 <= int(zipcode) <= 99950
assert 1 <= int(plus4) <= 9999
def test_military_ship(self, faker, num_samples):
for _ in range(num_samples):
military_ship = faker.military_ship()
assert isinstance(military_ship, str)
assert military_ship in EnUsAddressProvider.military_ship_prefix
def test_military_state(self, faker, num_samples):
for _ in range(num_samples):
military_state = faker.military_state()
assert isinstance(military_state, str)
assert military_state in EnUsAddressProvider.military_state_abbr
def test_military_apo(self, faker, num_samples):
for _ in range(num_samples):
military_apo = faker.military_apo()
assert isinstance(military_apo, str)
assert re.fullmatch(r"PSC \d{4}, Box \d{4}", military_apo)
def test_military_dpo(self, faker, num_samples):
for _ in range(num_samples):
military_dpo = faker.military_dpo()
assert isinstance(military_dpo, str)
assert re.fullmatch(r"Unit \d{4} Box \d{4}", military_dpo)
def test_postalcode(self, faker, num_samples):
for _ in range(num_samples):
postalcode = faker.postalcode()
assert isinstance(postalcode, str) and len(postalcode) == 5
assert 501 <= int(postalcode) <= 99950
def test_postalcode_in_state(self, faker, num_samples):
for _ in range(num_samples):
for state_abbr in EnUsAddressProvider.states_abbr:
code = faker.postalcode_in_state(state_abbr)
assert re.fullmatch(r"\d{5}", code)
assert int(code) >= EnUsAddressProvider.states_postcode[state_abbr][0]
assert int(code) <= EnUsAddressProvider.states_postcode[state_abbr][1]
with pytest.raises(Exception):
faker.postalcode_in_state("XX")
class TestEsEs:
"""Test es_ES address provider methods"""
def test_state_name(self, faker, num_samples):
for _ in range(num_samples):
state_name = faker.state_name()
assert isinstance(state_name, str)
assert state_name in EsEsAddressProvider.states
def test_street_prefix(self, faker, num_samples):
for _ in range(num_samples):
street_prefix = faker.street_prefix()
assert isinstance(street_prefix, str)
assert street_prefix in EsEsAddressProvider.street_prefixes
def test_secondary_address(self, faker, num_samples):
for _ in range(num_samples):
secondary_address = faker.secondary_address()
assert isinstance(secondary_address, str)
assert re.fullmatch(r"Apt\. \d{2}|Piso \d|Puerta \d", secondary_address)
def test_regions(self, faker, num_samples):
for _ in range(num_samples):
region = faker.region()
assert isinstance(region, str)
assert region in EsEsAddressProvider.regions
def test_autonomous_community(self, faker, num_samples):
for _ in range(num_samples):
# Spanish regions, also known as "autonomous communities"
autonomous_community = faker.autonomous_community()
assert isinstance(autonomous_community, str)
assert autonomous_community in EsEsAddressProvider.regions
class TestEsMx:
"""Test es_MX address provider methods"""
def test_city_prefix(self, faker, num_samples):
for _ in range(num_samples):
city_prefix = faker.city_prefix()
assert isinstance(city_prefix, str)
assert city_prefix in EsMxAddressProvider.city_prefixes
def test_city_suffix(self, faker, num_samples):
for _ in range(num_samples):
city_suffix = faker.city_suffix()
assert isinstance(city_suffix, str)
assert city_suffix in EsMxAddressProvider.city_suffixes
def test_city_adjective(self, faker, num_samples):
for _ in range(num_samples):
city_adjective = faker.city_adjective()
assert isinstance(city_adjective, str)
assert city_adjective in EsMxAddressProvider.city_adjectives
def test_street_prefix(self, faker, num_samples):
for _ in range(num_samples):
street_prefix = faker.street_prefix()
assert isinstance(street_prefix, str)
assert street_prefix in EsMxAddressProvider.street_prefixes
def test_secondary_address(self, faker, num_samples):
for _ in range(num_samples):
secondary_address = faker.secondary_address()
assert isinstance(secondary_address, str)
assert re.fullmatch(
r"\d{3} \d{3}|\d{3} Interior \d{3}|\d{3} Edif\. \d{3} , Depto\. \d{3}",
secondary_address,
)
def test_state(self, faker, num_samples):
states = [state_name for state_abbr, state_name in EsMxAddressProvider.states]
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in states
def test_state_abbr(self, faker, num_samples):
state_abbrs = [state_abbr for state_abbr, state_name in EsMxAddressProvider.states]
for _ in range(num_samples):
state_abbr = faker.state_abbr()
assert isinstance(state_abbr, str)
assert state_abbr in state_abbrs
class TestFaIr:
"""Test fa_IR address provider methods"""
def test_city_prefix(self, faker, num_samples):
for _ in range(num_samples):
city_prefix = faker.city_prefix()
assert isinstance(city_prefix, str)
assert city_prefix in FaIrAddressProvider.city_prefixes
def test_secondary_address(self, faker, num_samples):
for _ in range(num_samples):
secondary_address = faker.secondary_address()
assert isinstance(secondary_address, str)
assert re.fullmatch(r"(?:سوئیت|واحد) \d{3}", secondary_address)
def test_state(self, faker, num_samples):
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in FaIrAddressProvider.states
class TestFrFr:
"""Test fr_FR address provider methods"""
def test_street_prefix(self, faker, num_samples):
for _ in range(num_samples):
street_prefix = faker.street_prefix()
assert isinstance(street_prefix, str)
assert street_prefix in FrFrAddressProvider.street_prefixes
def test_city_prefix(self, faker, num_samples):
for _ in range(num_samples):
city_prefix = faker.city_prefix()
assert isinstance(city_prefix, str)
assert city_prefix in FrFrAddressProvider.city_prefixes
def test_region(self, faker, num_samples):
for _ in range(num_samples):
region = faker.region()
assert isinstance(region, str)
assert region in FrFrAddressProvider.regions
def test_department(self, faker, num_samples):
for _ in range(num_samples):
department = faker.department()
assert isinstance(department, tuple)
assert department in FrFrAddressProvider.departments
def test_department_name(self, faker, num_samples):
department_names = [dept_name for dept_num, dept_name in FrFrAddressProvider.departments]
for _ in range(num_samples):
department_name = faker.department_name()
assert isinstance(department_name, str)
assert department_name in department_names
def test_department_number(self, faker, num_samples):
department_numbers = [dept_num for dept_num, dept_name in FrFrAddressProvider.departments]
for _ in range(num_samples):
department_number = faker.department_number()
assert isinstance(department_number, str)
assert department_number in department_numbers
class TestHeIl:
"""Test he_IL address provider methods"""
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city_name = faker.city_name()
assert isinstance(city_name, str)
assert city_name in HeIlAddressProvider.city_names
def test_street_title(self, faker, num_samples):
for _ in range(num_samples):
street_title = faker.street_title()
assert isinstance(street_title, str)
assert street_title in HeIlAddressProvider.street_titles
class TestHiIn:
"""Test hi_IN address provider methods"""
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city_name = faker.city_name()
assert isinstance(city_name, str)
assert city_name in HiInAddressProvider.cities
def test_state(self, faker, num_samples):
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in HiInAddressProvider.states
class TestTaIn:
"""Test ta_IN address provider methods"""
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city_name = faker.city_name()
assert isinstance(city_name, str)
assert city_name in TaInAddressProvider.cities
def test_state(self, faker, num_samples):
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in TaInAddressProvider.states
class TestFiFi:
"""Test fi_FI address provider methods"""
def test_city(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city()
assert isinstance(city, str)
assert city in FiFiAddressProvider.cities
def test_street_suffix(self, faker, num_samples):
for _ in range(num_samples):
suffix = faker.street_suffix()
assert isinstance(suffix, str)
assert suffix in FiFiAddressProvider.street_suffixes
def test_state(self, faker, num_samples):
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in FiFiAddressProvider.states
class TestHrHr:
"""Test hr_HR address provider methods"""
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city_name = faker.city_name()
assert isinstance(city_name, str)
assert city_name in HrHrAddressProvider.cities
def test_street_name(self, faker, num_samples):
for _ in range(num_samples):
street_name = faker.street_name()
assert isinstance(street_name, str)
assert street_name in HrHrAddressProvider.streets
def test_state(self, faker, num_samples):
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in HrHrAddressProvider.states
class TestHuHu:
"""Test hu_HU address provider methods"""
def test_postcode(self, faker, num_samples):
        # Hungarian postcodes begin with 'H-' followed by 4 digits,
        # and the first digit may not be zero.
for _ in range(num_samples):
pcd = faker.postcode()
assert re.fullmatch(r"H-[1-9]\d{3}", pcd)
def test_street_address(self, faker, num_samples):
"""
Tests street address.
        A street address must consist of a street name, a place type, and a number, and must end in a period.
"""
for _ in range(num_samples):
address = faker.street_address()
assert address[-1] == "."
# Check for correct capitalisation of place type
assert address.split(" ")[-2][0].islower()
# Check for street number format
assert re.fullmatch(r"\d{1,4}\.", address.split(" ")[-1])
def test_street_address_with_county(self, faker, num_samples):
"""Tests street address with country. A street address must be:
- in three rows,
- starting with a valid street address,
- contain a valid post code,
- contain the place name validly capitalized.
"""
for _ in range(num_samples):
address = faker.street_address_with_county()
# Number of rows
assert len(address.split("\n")) == 3
first, second, last = address.split("\n")
# Test street address
assert first[0].isupper()
assert first.split(" ")[-2][0].islower()
assert re.fullmatch(r"\d{1,4}\.", first.split(" ")[-1])
# Test county line
assert second.split(" ")[-1][0].islower()
assert second.split(" ")[0][0].isupper()
# Test postcode
assert re.fullmatch(r"H-[1-9]\d{3}", last.split(" ")[0])
# Test place name capitalization
assert last.split(" ")[-1][0].isupper()
def test_address(self, faker, num_samples):
for _ in range(num_samples):
address = faker.address()
assert isinstance(address, str)
address_with_county = faker.street_address_with_county()
assert isinstance(address_with_county, str)
class TestHyAm:
"""Test hy_AM address provider methods"""
def test_address(self, faker, num_samples):
for _ in range(num_samples):
address = faker.address()
assert isinstance(address, str)
def test_building_number(self, faker, num_samples):
for _ in range(num_samples):
building_number = faker.building_number()
assert isinstance(building_number, str)
assert 0 <= int(building_number) <= 999
def test_city(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city()
assert isinstance(city, str)
assert city in HyAmAddressProvider.cities
def test_city_prefix(self, faker, num_samples):
for _ in range(num_samples):
city_prefix = faker.city_prefix()
assert isinstance(city_prefix, str)
assert city_prefix in HyAmAddressProvider.city_prefixes
def test_country(self, faker, num_samples):
for _ in range(num_samples):
country = faker.country()
assert isinstance(country, str)
assert country in HyAmAddressProvider.countries
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert 200 <= int(postcode) <= 4299
def test_postcode_in_state(self, faker, num_samples):
for _ in range(num_samples):
for state_abbr in HyAmAddressProvider.states_abbr:
code = faker.postcode_in_state(state_abbr)
assert re.fullmatch(r"\d{4}", code)
assert int(code) >= HyAmAddressProvider.states_postcode[state_abbr][0]
assert int(code) <= HyAmAddressProvider.states_postcode[state_abbr][1]
with pytest.raises(Exception):
faker.postcode_in_state("XX")
def test_secondary_address(self, faker, num_samples):
for _ in range(num_samples):
secondary_address = faker.secondary_address()
assert isinstance(secondary_address, str)
assert re.fullmatch(r"բն\. \d{1,2}", secondary_address)
def test_state(self, faker, num_samples):
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in HyAmAddressProvider.states
def test_state_abbr(self, faker, num_samples):
for _ in range(num_samples):
state_abbr = faker.state_abbr()
assert isinstance(state_abbr, str)
assert state_abbr in HyAmAddressProvider.states_abbr
assert state_abbr.isupper()
def test_street(self, faker, num_samples):
for _ in range(num_samples):
street = faker.street()
assert isinstance(street, str)
assert street in HyAmAddressProvider.streets
def test_street_address(self, faker, num_samples):
for _ in range(num_samples):
street_address = faker.street_address()
assert isinstance(street_address, str)
def test_street_name(self, faker, num_samples):
for _ in range(num_samples):
street_name = faker.street_name()
assert isinstance(street_name, str)
def test_street_prefix(self, faker, num_samples):
for _ in range(num_samples):
street_prefix = faker.street_prefix()
assert isinstance(street_prefix, str)
assert street_prefix in HyAmAddressProvider.street_prefixes
def test_street_suffix(self, faker, num_samples):
for _ in range(num_samples):
suffix = faker.street_suffix()
assert isinstance(suffix, str)
assert suffix in HyAmAddressProvider.street_suffixes
def test_village(self, faker, num_samples):
for _ in range(num_samples):
village = faker.village()
assert isinstance(village, str)
assert village in HyAmAddressProvider.villages
def test_village_prefix(self, faker, num_samples):
for _ in range(num_samples):
village_prefix = faker.village_prefix()
assert isinstance(village_prefix, str)
assert village_prefix in HyAmAddressProvider.village_prefixes
class TestJaJp:
"""Test ja_JP address provider methods"""
def test_chome(self, faker, num_samples):
for _ in range(num_samples):
chome = faker.chome()
assert isinstance(chome, str)
match = re.fullmatch(r"(?P<chome_number>\d{1,2})丁目", chome)
assert match
assert 1 <= int(match.group("chome_number")) <= 42
def test_ban(self, faker, num_samples):
for _ in range(num_samples):
ban = faker.ban()
assert isinstance(ban, str)
match = re.fullmatch(r"(?P<ban_number>\d{1,2})番", ban)
assert match
assert 1 <= int(match.group("ban_number")) <= 27
def test_gou(self, faker, num_samples):
for _ in range(num_samples):
gou = faker.gou()
assert isinstance(gou, str)
match = re.fullmatch(r"(?P<gou_number>\d{1,2})号", gou)
assert match
assert 1 <= int(match.group("gou_number")) <= 20
def test_town(self, faker, num_samples):
for _ in range(num_samples):
town = faker.town()
assert isinstance(town, str)
assert town in JaJpAddressProvider.towns
def test_prefecture(self, faker, num_samples):
for _ in range(num_samples):
prefecture = faker.prefecture()
assert isinstance(prefecture, str)
assert prefecture in JaJpAddressProvider.prefectures
def test_city(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city()
assert isinstance(city, str)
assert city in JaJpAddressProvider.cities
def test_country(self, faker, num_samples):
for _ in range(num_samples):
country = faker.country()
assert isinstance(country, str)
assert country in JaJpAddressProvider.countries
def test_building_name(self, faker, num_samples):
for _ in range(num_samples):
building_name = faker.building_name()
assert isinstance(building_name, str)
assert building_name in JaJpAddressProvider.building_names
def test_address(self, faker, num_samples):
for _ in range(num_samples):
address = faker.address()
assert isinstance(address, str)
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"\d{3}-\d{4}", postcode)
def test_zipcode(self, faker, num_samples):
for _ in range(num_samples):
zipcode = faker.zipcode()
assert isinstance(zipcode, str)
assert re.fullmatch(r"\d{3}-\d{4}", zipcode)
class TestKoKr:
"""Test ko_KR address provider methods"""
def test_old_postal_code(self, faker, num_samples):
for _ in range(num_samples):
old_postal_code = faker.old_postal_code()
assert isinstance(old_postal_code, str)
assert re.fullmatch(r"\d{3}-\d{3}", old_postal_code)
def test_postal_code(self, faker, num_samples):
for _ in range(num_samples):
postal_code = faker.postal_code()
assert isinstance(postal_code, str)
assert re.fullmatch(r"\d{5}", postal_code)
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"\d{5}", postcode)
class TestNeNp:
"""Test ne_NP address provider methods"""
def test_province(self, faker, num_samples):
for _ in range(num_samples):
province = faker.province()
assert isinstance(province, str)
assert province in NeNpAddressProvider.provinces
def test_district(self, faker, num_samples):
for _ in range(num_samples):
district = faker.district()
assert isinstance(district, str)
assert district in NeNpAddressProvider.districts
def test_city(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city()
assert isinstance(city, str)
assert city in NeNpAddressProvider.cities
def test_country(self, faker, num_samples):
for _ in range(num_samples):
country = faker.country()
assert isinstance(country, str)
assert country in NeNpAddressProvider.countries
class TestNoNo:
"""Test no_NO address provider methods"""
def test_postcode(self, faker):
for _ in range(100):
assert re.fullmatch(r"^[0-9]{4}$", faker.postcode())
def test_city_suffix(self, faker, num_samples):
for _ in range(num_samples):
city_suffix = faker.city_suffix()
assert isinstance(city_suffix, str)
assert city_suffix in NoNoAddressProvider.city_suffixes
def test_street_suffix(self, faker, num_samples):
for _ in range(num_samples):
street_suffix = faker.street_suffix()
assert isinstance(street_suffix, str)
assert street_suffix in NoNoAddressProvider.street_suffixes
def test_address(self, faker, num_samples):
for _ in range(num_samples):
address = faker.address()
assert isinstance(address, str)
class TestZhTw:
"""Test zh_TW address provider methods"""
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"[1-9]\d{2}(?:\d{2})?", postcode)
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city_name = faker.city_name()
assert isinstance(city_name, str)
assert city_name in ZhTwAddressProvider.cities
def test_city_suffix(self, faker, num_samples):
for _ in range(num_samples):
city_suffix = faker.city_suffix()
assert isinstance(city_suffix, str)
assert city_suffix in ZhTwAddressProvider.city_suffixes
def test_city(self, faker, num_samples):
city_pattern: Pattern = re.compile(r"(?P<city_name>.*?)[市縣]?")
for _ in range(num_samples):
city = faker.city()
assert isinstance(city, str)
match = city_pattern.fullmatch(city)
assert match
assert match.group("city_name") in ZhTwAddressProvider.cities
def test_country(self, faker, num_samples):
for _ in range(num_samples):
country = faker.country()
assert isinstance(country, str)
assert country in ZhTwAddressProvider.countries
def test_street_name(self, faker, num_samples):
for _ in range(num_samples):
street_name = faker.street_name()
assert isinstance(street_name, str)
assert street_name in ZhTwAddressProvider.street_names
def test_address(self, faker, num_samples):
for _ in range(num_samples):
address = faker.address()
assert isinstance(address, str)
class TestZhCn:
"""Test zh_CN address provider methods"""
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"[1-9]\d{5}", postcode)
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city_name = faker.city_name()
assert isinstance(city_name, str)
assert city_name in ZhCnAddressProvider.cities
def test_city_suffix(self, faker, num_samples):
for _ in range(num_samples):
city_suffix = faker.city_suffix()
assert isinstance(city_suffix, str)
assert city_suffix in ZhCnAddressProvider.city_suffixes
def test_city(self, faker, num_samples):
city_pattern: Pattern = re.compile(r".*?[市县]")
for _ in range(num_samples):
city = faker.city()
assert isinstance(city, str)
assert city_pattern.fullmatch(city)
def test_province(self, faker, num_samples):
for _ in range(num_samples):
province = faker.province()
assert isinstance(province, str)
assert province in ZhCnAddressProvider.provinces
def test_district(self, faker, num_samples):
for _ in range(num_samples):
district = faker.district()
assert isinstance(district, str)
assert district in ZhCnAddressProvider.districts
def test_country(self, faker, num_samples):
for _ in range(num_samples):
country = faker.country()
assert isinstance(country, str)
assert country in ZhCnAddressProvider.countries
def test_street_name(self, faker, num_samples):
for _ in range(num_samples):
street_name = faker.street_name()
assert isinstance(street_name, str)
def test_address(self, faker, num_samples):
for _ in range(num_samples):
address = faker.address()
assert isinstance(address, str)
class TestPtBr:
"""Test pt_BR address provider methods"""
def test_country(self, faker, num_samples):
for _ in range(num_samples):
country = faker.country()
assert isinstance(country, str)
assert country in PtBrAddressProvider.countries
def test_bairro(self, faker, num_samples):
for _ in range(num_samples):
bairro = faker.bairro()
assert isinstance(bairro, str)
assert bairro in PtBrAddressProvider.bairros
def test_neighborhood(self, faker, num_samples):
for _ in range(num_samples):
neighborhood = faker.neighborhood()
assert isinstance(neighborhood, str)
assert neighborhood in PtBrAddressProvider.bairros
def test_estado(self, faker, num_samples):
for _ in range(num_samples):
estado = faker.estado()
assert isinstance(estado, tuple)
assert estado in PtBrAddressProvider.estados
def test_estado_nome(self, faker, num_samples):
state_names = [state_name for state_abbr, state_name in PtBrAddressProvider.estados]
for _ in range(num_samples):
estado_nome = faker.estado_nome()
assert isinstance(estado_nome, str)
assert estado_nome in state_names
def test_estado_sigla(self, faker, num_samples):
state_abbrs = [state_abbr for state_abbr, state_name in PtBrAddressProvider.estados]
for _ in range(num_samples):
estado_sigla = faker.estado_sigla()
assert isinstance(estado_sigla, str)
assert estado_sigla in state_abbrs
def test_address(self, faker, num_samples):
for _ in range(num_samples):
street = faker.street_name()
assert isinstance(street, str)
            street_address = faker.street_address()
            assert isinstance(street_address, str)
address = faker.address()
assert isinstance(address, str)
def test_raw_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode(formatted=False)
assert isinstance(postcode, str)
assert re.fullmatch(r"\d{8}", postcode)
def test_formatted_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"\d{5}-?\d{3}", postcode)
class TestPtPt:
"""Test pt_PT address provider methods"""
def test_distrito(self, faker, num_samples):
for _ in range(num_samples):
distrito = faker.distrito()
assert isinstance(distrito, str)
assert distrito in PtPtAddressProvider.distritos
def test_concelho(self, faker, num_samples):
for _ in range(num_samples):
concelho = faker.concelho()
assert isinstance(concelho, str)
assert concelho in PtPtAddressProvider.concelhos
def test_freguesia(self, faker, num_samples):
for _ in range(num_samples):
freguesia = faker.freguesia()
assert isinstance(freguesia, str)
assert freguesia in PtPtAddressProvider.freguesias
def test_place_name(self, faker, num_samples):
for _ in range(num_samples):
place_name = faker.place_name()
assert isinstance(place_name, str)
assert place_name in PtPtAddressProvider.places
class TestEnPh:
"""Test en_PH address provider methods"""
@classmethod
def setup_class(cls):
cls.building_number_pattern: Pattern = re.compile(
r"(?:[1-9]|[1-9]\d{1,3})(?:[A-J]|\s[A-J]|-[A-J]|\sUnit\s[A-J])?",
)
cls.address_pattern: Pattern = re.compile(
r"(?P<street_address>.*), (?P<lgu>.*?), (?P<postcode>\d{4}) (?P<province>.*?)",
)
cls.metro_manila_postcodes = EnPhAddressProvider.metro_manila_postcodes
cls.luzon_province_postcodes = EnPhAddressProvider.luzon_province_postcodes
cls.visayas_province_postcodes = EnPhAddressProvider.visayas_province_postcodes
cls.mindanao_province_postcodes = EnPhAddressProvider.mindanao_province_postcodes
cls.postcodes = EnPhAddressProvider.postcodes
cls.provinces = EnPhAddressProvider.provinces
cls.province_lgus = EnPhAddressProvider.province_lgus
cls.metro_manila_lgus = EnPhAddressProvider.metro_manila_lgus
def test_metro_manila_postcode(self, faker, num_samples):
for _ in range(num_samples):
assert int(faker.metro_manila_postcode()) in self.metro_manila_postcodes
def test_luzon_province_postcode(self, faker, num_samples):
for _ in range(num_samples):
assert int(faker.luzon_province_postcode()) in self.luzon_province_postcodes
def test_visayas_province_postcode(self, faker, num_samples):
for _ in range(num_samples):
assert int(faker.visayas_province_postcode()) in self.visayas_province_postcodes
def test_mindanao_province_postcode(self, faker, num_samples):
for _ in range(num_samples):
assert int(faker.mindanao_province_postcode()) in self.mindanao_province_postcodes
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
assert int(faker.postcode()) in self.postcodes
def test_building_number(self, faker, num_samples):
for _ in range(num_samples):
assert self.building_number_pattern.fullmatch(faker.building_number())
def test_floor_unit_number(self, faker, num_samples):
for _ in range(num_samples):
number = faker.floor_unit_number()
assert 2 <= int(number[:-2]) <= 99
assert 1 <= int(number[-2:]) <= 40
def test_ordinal_floor_number(self, faker, num_samples):
for _ in range(num_samples):
floor_number = faker.ordinal_floor_number()
assert floor_number[-2:] in ["th", "st", "nd", "rd"]
def test_address(self, faker, num_samples):
for _ in range(num_samples):
address = faker.address()
            match = self.address_pattern.fullmatch(address)
            # Assert the match before dereferencing groups so a failed match
            # reports as an assertion error rather than an AttributeError.
            assert match
            street_address = match.group("street_address")
            lgu = match.group("lgu")
            postcode = match.group("postcode")
            province = match.group("province")
assert street_address
assert lgu in self.province_lgus or lgu in self.metro_manila_lgus
assert int(postcode) in self.postcodes
assert province in self.provinces or province == "Metro Manila"
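# The fil_PH and tl_PH test classes below subclass TestEnPh with no overrides,
# so they rerun exactly the same checks against their respective locales.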
class TestFilPh(TestEnPh):
"""Test fil_PH address provider methods"""
pass
class TestTlPh(TestEnPh):
"""Test tl_PH address provider methods"""
pass
class TestRuRu:
"""Test ru_RU address provider methods"""
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city_name()
assert isinstance(city, str)
assert city in RuRuAddressProvider.city_names
def test_country(self, faker, num_samples):
for _ in range(num_samples):
country = faker.country()
assert isinstance(country, str)
assert country in RuRuAddressProvider.countries
def test_region(self, faker, num_samples):
region_pattern: Pattern = re.compile(
r"(?:респ\. (?P<region_republic>.*))|"
r"(?:(?P<region_krai>.*?) край)|"
r"(?:(?P<region_oblast>.*?) обл.)|"
r"(?:(?P<region_ao>.*?) АО)",
)
for _ in range(num_samples):
region = faker.region()
assert isinstance(region, str)
match = region_pattern.fullmatch(region)
assert match
groupdict = match.groupdict()
assert any(
[
groupdict.get("region_republic") in RuRuAddressProvider.region_republics,
groupdict.get("region_krai") in RuRuAddressProvider.region_krai,
groupdict.get("region_oblast") in RuRuAddressProvider.region_oblast,
groupdict.get("region_ao") in RuRuAddressProvider.region_ao,
]
)
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"\d{6}", postcode)
def test_city_prefix(self, faker, num_samples):
for _ in range(num_samples):
city_prefix = faker.city_prefix()
assert isinstance(city_prefix, str)
assert city_prefix in RuRuAddressProvider.city_prefixes
def test_street_suffix(self, faker, num_samples):
for _ in range(num_samples):
street_suffix = faker.street_suffix()
assert isinstance(street_suffix, str)
assert street_suffix in RuRuAddressProvider.street_suffixes
def test_street_title(self, faker, num_samples):
for _ in range(num_samples):
street_title = faker.street_title()
assert isinstance(street_title, str)
def test_street_name(self, faker, num_samples):
for _ in range(num_samples):
street_name = faker.street_name()
assert isinstance(street_name, str)
@pytest.mark.parametrize(
"street_title,street_suffix,expected",
[
("Фрунзе", "ул.", "ул. Фрунзе"),
("Ставропольская", "ул.", "ул. Ставропольская"),
("Фрунзе", "пр.", "пр. Фрунзе"),
("Осенняя", "пр.", "пр. Осенний"),
("Гвардейская", "пр.", "пр. Гвардейский"),
("Рыбацкая", "пр.", "пр. Рыбацкий"),
("Безымянная", "пр.", "пр. Безымянный"),
("Проезжая", "ш.", "ш. Проезжее"),
("Магистральная", "ш.", "ш. Магистральное"),
],
ids=[
"feminine_suffix_and_noflex_title",
"feminine_suffix_and_flex_title",
"non_feminine_suffix_and_noflex_title",
"masc_suffix_and_irregular_masc_title",
"masc_suffix_and_ck_street_stem",
"masc_suffix_and_uk_street_stem",
"masc_suffix_and_other_stem",
"neu_suffx_and_iregular_neu_street_title",
"neu_suffix_and_regular_street_title",
],
)
def test_street_name_lexical(self, faker, street_title, street_suffix, expected):
"""Test that random street names are formed correctly, given
the case of suffixes and streets that have been randomly selected.
"""
title_patch = mock.patch(
"faker.providers.address.ru_RU.Provider.street_title",
autospec=True,
return_value=street_title,
)
suffix_patch = mock.patch(
"faker.providers.address.ru_RU.Provider.street_suffix",
autospec=True,
return_value=street_suffix,
)
with title_patch, suffix_patch:
result = faker.street_name()
assert result == expected
class TestThTh:
"""Test th_TH address provider methods"""
def test_country(self, faker, num_samples):
for _ in range(num_samples):
country = faker.country()
assert isinstance(country, str)
assert country in ThThAddressProvider.countries
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city_name()
assert isinstance(city, str)
assert city in ThThAddressProvider.cities
def test_province(self, faker, num_samples):
for _ in range(num_samples):
province = faker.province()
assert isinstance(province, str)
assert province in ThThAddressProvider.provinces
def test_amphoe(self, faker, num_samples):
for _ in range(num_samples):
amphoe = faker.amphoe()
assert isinstance(amphoe, str)
assert amphoe in ThThAddressProvider.amphoes
def test_tambon(self, faker, num_samples):
for _ in range(num_samples):
tambon = faker.tambon()
assert isinstance(tambon, str)
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"[1-9]\d{4}", postcode)
class TestEnIn:
"""Test en_IN address provider methods"""
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city_name = faker.city_name()
assert isinstance(city_name, str)
assert city_name in EnInAddressProvider.cities
def test_state(self, faker, num_samples):
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in EnInAddressProvider.states
class TestSkSk:
"""Test sk_SK address provider methods"""
def test_street_suffix_short(self, faker, num_samples):
for _ in range(num_samples):
street_suffix_short = faker.street_suffix_short()
assert isinstance(street_suffix_short, str)
assert street_suffix_short in SkSkAddressProvider.street_suffixes_short
def test_street_suffix_long(self, faker, num_samples):
for _ in range(num_samples):
street_suffix_long = faker.street_suffix_long()
assert isinstance(street_suffix_long, str)
assert street_suffix_long in SkSkAddressProvider.street_suffixes_long
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city_name()
assert isinstance(city, str)
assert city in SkSkAddressProvider.cities
def test_street_name(self, faker, num_samples):
for _ in range(num_samples):
street_name = faker.street_name()
assert isinstance(street_name, str)
assert street_name in SkSkAddressProvider.streets
def test_state(self, faker, num_samples):
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in SkSkAddressProvider.states
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"\d{3} \d{2}", postcode)
def test_city_with_postcode(self, faker, num_samples):
for _ in range(num_samples):
city_with_postcode = faker.city_with_postcode()
assert isinstance(city_with_postcode, str)
match = re.fullmatch(r"\d{3} \d{2} (?P<city>.*)", city_with_postcode)
assert match.group("city") in SkSkAddressProvider.cities
class TestDeCh:
"""Test de_CH address provider methods"""
def test_canton_name(self, faker, num_samples):
for _ in range(num_samples):
canton_name = faker.canton_name()
assert isinstance(canton_name, str)
assert any(canton_name == cantons[1] for cantons in DeChAddressProvider.cantons)
def test_canton_code(self, faker, num_samples):
for _ in range(num_samples):
canton_code = faker.canton_code()
assert isinstance(canton_code, str)
assert any(canton_code == cantons[0] for cantons in DeChAddressProvider.cantons)
def test_canton(self, faker, num_samples):
for _ in range(num_samples):
canton = faker.canton()
assert isinstance(canton, tuple)
assert canton in DeChAddressProvider.cantons
class TestRoRo:
"""Test ro_RO address provider methods"""
def test_address(self, faker, num_samples):
for _ in range(num_samples):
address = faker.address()
assert isinstance(address, str)
def test_street_address(self, faker, num_samples):
for _ in range(num_samples):
street_address = faker.street_address()
assert isinstance(street_address, str)
def test_street_name(self, faker, num_samples):
for _ in range(num_samples):
street_name = faker.street_name()
assert isinstance(street_name, str)
def test_street_prefix(self, faker, num_samples):
for _ in range(num_samples):
street_prefix = faker.street_prefix()
assert isinstance(street_prefix, str)
assert street_prefix in RoRoAddressProvider.street_prefixes
def test_building_number(self, faker, num_samples):
for _ in range(num_samples):
building_number = faker.building_number()
assert isinstance(building_number, str)
assert building_number[:3] == "Nr."
def test_secondary_address(self, faker, num_samples):
for _ in range(num_samples):
secondary_address = faker.secondary_address()
assert isinstance(secondary_address, str)
assert re.fullmatch(
r"Bl. \d{2} Sc. \d{2} Ap. \d{3}",
secondary_address,
)
def test_city(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city()
assert isinstance(city, str)
assert city in RoRoAddressProvider.cities
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city_name()
assert isinstance(city, str)
assert city in RoRoAddressProvider.cities
def test_state(self, faker, num_samples):
states = [state_name for state_abbr, state_name in RoRoAddressProvider.states]
for _ in range(num_samples):
state = faker.state()
assert isinstance(state, str)
assert state in states
def test_state_abbr(self, faker, num_samples):
state_abbrs = [state_abbr for state_abbr, state_name in RoRoAddressProvider.states]
for _ in range(num_samples):
state_abbr = faker.state_abbr()
assert isinstance(state_abbr, str)
assert state_abbr in state_abbrs
assert state_abbr.isupper()
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"\d{6}", postcode)
def test_city_with_postcode(self, faker, num_samples):
for _ in range(num_samples):
city_with_postcode = faker.city_with_postcode()
assert isinstance(city_with_postcode, str)
match = re.fullmatch(r"\d{6} (?P<city>.*)", city_with_postcode)
assert match.group("city") in RoRoAddressProvider.cities
| 39.442957 | 111 | 0.649366 | ["MIT"] | Pipoline/faker | tests/providers/test_address.py | 68,001 | Python |
from map.models import Geoinfo
import requests
# initialize geo_info table, used to show choropleth map
def run():
try:
response = requests.get('http://www.ourd3js.com/map/china_provinces/beijing.json')
json_result = response.json()
for area in json_result.get('features'):
properties = area.get('properties')
id = properties.get('id')
geometry = area.get('geometry')
district = properties.get('name')
Geoinfo.create(id, district, properties, geometry).save()
    except Exception as exc:
        # Report the underlying error instead of swallowing it silently.
        print("Load failed: {}".format(exc))
| 32.555556 | 90 | 0.622867 | ["MIT", "Unlicense"] | YogiAi/FinalProject_hmap | scripts/geoinfo.py | 586 | Python |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# type: ignore
import copy
import os
import platform
from dataclasses import dataclass
from pathlib import Path
from typing import List
import nox
from nox.logger import logger
BASE = os.path.abspath(os.path.dirname(__file__))
DEFAULT_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
DEFAULT_OS_NAMES = ["Linux", "MacOS", "Windows"]
PYTHON_VERSIONS = os.environ.get(
"NOX_PYTHON_VERSIONS", ",".join(DEFAULT_PYTHON_VERSIONS)
).split(",")
INSTALL_EDITABLE_MODE = os.environ.get("INSTALL_EDITABLE_MODE", 0)
INSTALL_COMMAND = (
["pip", "install", "-e"] if INSTALL_EDITABLE_MODE else ["pip", "install"]
)
# Allow limiting testing to specific plugins
# The list ['ALL'] indicates all plugins
PLUGINS = os.environ.get("PLUGINS", "ALL").split(",")
SKIP_CORE_TESTS = "0"
SKIP_CORE_TESTS = os.environ.get("SKIP_CORE_TESTS", SKIP_CORE_TESTS) != "0"
FIX = os.environ.get("FIX", "0") == "1"
VERBOSE = os.environ.get("VERBOSE", "0")
SILENT = VERBOSE == "0"
@dataclass
class Plugin:
name: str
path: str
module: str
def get_current_os() -> str:
current_os = platform.system()
if current_os == "Darwin":
current_os = "MacOS"
return current_os
print(f"Operating system\t:\t{get_current_os()}")
print(f"NOX_PYTHON_VERSIONS\t:\t{PYTHON_VERSIONS}")
print(f"PLUGINS\t\t\t:\t{PLUGINS}")
print(f"SKIP_CORE_TESTS\t\t:\t{SKIP_CORE_TESTS}")
print(f"FIX\t\t\t:\t{FIX}")
print(f"VERBOSE\t\t\t:\t{VERBOSE}")
print(f"INSTALL_EDITABLE_MODE\t:\t{INSTALL_EDITABLE_MODE}")
def _upgrade_basic(session):
session.run(
"python",
"-m",
"pip",
"install",
"--upgrade",
"setuptools",
"pip",
silent=SILENT,
)
def find_dirs(path: str):
for file in os.listdir(path):
fullname = os.path.join(path, file)
if os.path.isdir(fullname):
yield fullname
def install_hydra(session, cmd):
# clean install hydra
session.chdir(BASE)
session.run(*cmd, ".", silent=SILENT)
if not SILENT:
session.install("pipdeptree", silent=SILENT)
session.run("pipdeptree", "-p", "hydra-core")
def pytest_args(*args):
ret = ["pytest", "-Werror"]
ret.extend(args)
return ret
def run_pytest(session, directory=".", *args):
pytest_cmd = pytest_args(directory, *args)
# silent=False to enable some output on CI
# (otherwise we risk no-output timeout)
session.run(*pytest_cmd, silent=False)
def get_setup_python_versions(classifiers):
pythons = filter(lambda line: "Programming Language :: Python" in line, classifiers)
return [p[len("Programming Language :: Python :: ") :] for p in pythons]
def get_plugin_os_names(classifiers: List[str]) -> List[str]:
oses = list(filter(lambda line: "Operating System" in line, classifiers))
if len(oses) == 0:
# No Os is specified so all oses are supported
return DEFAULT_OS_NAMES
if len(oses) == 1 and oses[0] == "Operating System :: OS Independent":
# All oses are supported
return DEFAULT_OS_NAMES
else:
return [p.split("::")[-1].strip() for p in oses]
def select_plugins(session, directory: str) -> List[Plugin]:
"""
Select all plugins that should be tested in this session.
Considers the current Python version and operating systems against the supported ones,
as well as the user plugins selection (via the PLUGINS environment variable).
"""
assert session.python is not None, "Session python version is not specified"
blacklist = [".isort.cfg", "examples"]
plugins = [
{"dir_name": x, "path": x}
for x in sorted(os.listdir(os.path.join(BASE, directory)))
if x not in blacklist
]
ret = []
skipped = []
for plugin in plugins:
if not (plugin["dir_name"] in PLUGINS or PLUGINS == ["ALL"]):
skipped.append(f"Deselecting {plugin['dir_name']}: User request")
continue
setup_py = os.path.join(BASE, directory, plugin["path"], "setup.py")
classifiers = session.run(
"python", setup_py, "--name", "--classifiers", silent=True
).splitlines()
plugin_name = classifiers.pop(0)
plugin_python_versions = get_setup_python_versions(classifiers)
python_supported = session.python in plugin_python_versions
plugin_os_names = get_plugin_os_names(classifiers)
os_supported = get_current_os() in plugin_os_names
if not python_supported:
py_str = ", ".join(plugin_python_versions)
skipped.append(
f"Deselecting {plugin['dir_name']} : Incompatible Python {session.python}. Supports [{py_str}]"
)
continue
# Verify this plugin supports the OS we are testing on, skip otherwise
if not os_supported:
os_str = ", ".join(plugin_os_names)
skipped.append(
f"Deselecting {plugin['dir_name']}: Incompatible OS {get_current_os()}. Supports [{os_str}]"
)
continue
ret.append(
Plugin(
name=plugin_name,
path=plugin["path"],
module="hydra_plugins." + plugin["dir_name"],
)
)
for msg in skipped:
logger.warn(msg)
if len(ret) == 0:
logger.warn("No plugins selected")
return ret
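# Example shell usage (plugin directory names are illustrative): restrict a run
# to specific plugins and a single Python version via the environment variables
# read at the top of this file:
#   PLUGINS=hydra_colorlog,hydra_joblib_launcher NOX_PYTHON_VERSIONS=3.8 nox -s test_plugins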
def install_dev_deps(session):
_upgrade_basic(session)
session.run("pip", "install", "-r", "requirements/dev.txt", silent=SILENT)
def _black_cmd():
black = ["black", "."]
if not FIX:
black += ["--check"]
return black
def _isort_cmd():
isort = ["isort", "."]
if not FIX:
isort += ["--check", "--diff"]
return isort
@nox.session(python=PYTHON_VERSIONS)
def lint(session):
install_dev_deps(session)
install_hydra(session, ["pip", "install", "-e"])
apps = _get_standalone_apps_dirs()
session.log("Installing standalone apps")
for subdir in apps:
session.chdir(str(subdir))
session.run(*_black_cmd(), silent=SILENT)
session.run(*_isort_cmd(), silent=SILENT)
session.chdir(BASE)
session.run(*_black_cmd(), silent=SILENT)
skiplist = apps + [
".git",
"website",
"plugins",
"tools",
".nox",
"hydra/grammar/gen",
"tools/configen/example/gen",
"tools/configen/tests/test_modules/expected",
"temp",
]
isort = _isort_cmd() + [f"--skip={skip}" for skip in skiplist]
session.run(*isort, silent=SILENT)
session.run("mypy", ".", "--strict", silent=SILENT)
session.run("flake8", "--config", ".flake8")
session.run("yamllint", ".")
example_dirs = [
"examples/advanced/",
"examples/configure_hydra",
"examples/patterns",
"examples/instantiate",
"examples/tutorials/basic/your_first_hydra_app",
"examples/tutorials/basic/running_your_hydra_app",
"examples/tutorials/structured_configs/",
]
for edir in example_dirs:
dirs = find_dirs(path=edir)
for d in dirs:
session.run("mypy", d, "--strict", silent=SILENT)
# lint example plugins
lint_plugins_in_dir(session=session, directory="examples/plugins")
# bandit static security analysis
session.run("bandit", "--exclude", "./.nox/**", "-ll", "-r", ".", silent=SILENT)
@nox.session(python=PYTHON_VERSIONS)
def lint_plugins(session):
lint_plugins_in_dir(session, "plugins")
def lint_plugins_in_dir(session, directory: str) -> None:
install_cmd = ["pip", "install", "-e"]
install_hydra(session, install_cmd)
plugins = select_plugins(session=session, directory=directory)
# plugin linting requires the plugins and their dependencies to be installed
for plugin in plugins:
cmd = install_cmd + [os.path.join(directory, plugin.path)]
session.run(*cmd, silent=SILENT)
install_dev_deps(session)
session.run("flake8", "--config", ".flake8", directory)
# Mypy for plugins
for plugin in plugins:
path = os.path.join(directory, plugin.path)
session.chdir(path)
session.run(*_black_cmd(), silent=SILENT)
session.run(*_isort_cmd(), silent=SILENT)
session.chdir(BASE)
files = []
for file in ["tests", "example"]:
abs = os.path.join(path, file)
if os.path.exists(abs):
files.append(abs)
session.run(
"mypy",
"--strict",
f"{path}/hydra_plugins",
"--config-file",
f"{BASE}/.mypy.ini",
silent=SILENT,
)
session.run(
"mypy",
"--strict",
"--namespace-packages",
"--config-file",
f"{BASE}/.mypy.ini",
*files,
silent=SILENT,
)
@nox.session(python=PYTHON_VERSIONS)
def test_tools(session):
install_cmd = ["pip", "install"]
_upgrade_basic(session)
session.install("pytest")
install_hydra(session, install_cmd)
tools = [
x
for x in sorted(os.listdir(os.path.join(BASE, "tools")))
if not os.path.isfile(x)
]
for tool in tools:
tool_path = os.path.join("tools", tool)
session.chdir(BASE)
if (Path(tool_path) / "setup.py").exists():
cmd = list(install_cmd) + ["-e", tool_path]
session.run(*cmd, silent=SILENT)
session.run("pytest", tool_path)
session.chdir(BASE)
def _get_standalone_apps_dirs():
standalone_apps_dir = Path(f"{BASE}/tests/standalone_apps")
apps = [standalone_apps_dir / subdir for subdir in os.listdir(standalone_apps_dir)]
apps.append(f"{BASE}/examples/advanced/hydra_app_example")
return apps
@nox.session(python=PYTHON_VERSIONS)
def test_core(session):
_upgrade_basic(session)
install_hydra(session, INSTALL_COMMAND)
session.install("pytest")
if not SKIP_CORE_TESTS:
run_pytest(session, "build_helpers", "tests", *session.posargs)
else:
session.log("Skipping Hydra core tests")
apps = _get_standalone_apps_dirs()
session.log("Testing standalone apps")
for subdir in apps:
session.chdir(subdir)
session.run(*INSTALL_COMMAND, ".", silent=SILENT)
run_pytest(session, ".")
session.chdir(BASE)
test_plugins_in_directory(
session,
install_cmd=INSTALL_COMMAND,
directory="examples/plugins",
test_hydra_core=False,
)
@nox.session(python=PYTHON_VERSIONS)
def test_plugins(session):
test_plugins_in_directory(
session=session,
install_cmd=INSTALL_COMMAND,
directory="plugins",
test_hydra_core=True,
)
def test_plugins_in_directory(
session, install_cmd, directory: str, test_hydra_core: bool
):
_upgrade_basic(session)
session.install("pytest")
install_hydra(session, install_cmd)
selected_plugin = select_plugins(session=session, directory=directory)
for plugin in selected_plugin:
cmd = list(install_cmd) + [os.path.join(directory, plugin.path)]
session.run(*cmd, silent=SILENT)
if not SILENT:
session.run("pipdeptree", "-p", plugin.name)
# Test that we can import Hydra
session.run("python", "-c", "from hydra import main", silent=SILENT)
# Test that we can import all installed plugins
for plugin in selected_plugin:
session.run("python", "-c", f"import {plugin.module}")
# Run Hydra tests to verify installed plugins did not break anything
if test_hydra_core:
if not SKIP_CORE_TESTS:
            # Exclude test_completion from plugin tests:
            # 1. It's already covered by the normal core tests.
            # 2. It's somewhat fragile and tends to time out on macOS.
            # 3. It's expensive, and not worth the cost to run for plugins as well.
run_pytest(session, "tests", "--ignore=tests/test_completion.py")
else:
session.log("Skipping Hydra core tests")
# Run tests for all installed plugins
for plugin in selected_plugin:
        # Run each plugin's test suite from within its own directory.
session.chdir(os.path.join(BASE, directory, plugin.path))
run_pytest(session)
@nox.session(python="3.8")
def coverage(session):
coverage_env = {
"COVERAGE_HOME": BASE,
"COVERAGE_FILE": f"{BASE}/.coverage",
"COVERAGE_RCFILE": f"{BASE}/.coveragerc",
}
_upgrade_basic(session)
session.install("coverage", "pytest")
install_hydra(session, ["pip", "install", "-e"])
session.run("coverage", "erase", env=coverage_env)
for directory in ["plugins", "examples/plugins"]:
selected_plugins = select_plugins(session=session, directory=directory)
for plugin in selected_plugins:
session.run(
"pip",
"install",
"-e",
os.path.join(directory, plugin.path),
silent=SILENT,
)
# run plugin coverage
for plugin in selected_plugins:
session.chdir(os.path.join(directory, plugin.path))
cov_args = ["coverage", "run", "--append", "-m"]
cov_args.extend(pytest_args())
session.run(*cov_args, silent=SILENT, env=coverage_env)
session.chdir(BASE)
# run hydra-core coverage
session.run(
"coverage",
"run",
"--append",
"-m",
silent=SILENT,
env=coverage_env,
*pytest_args(),
)
# Increase the fail_under as coverage improves
session.run("coverage", "report", "--fail-under=80", env=coverage_env)
session.run("coverage", "erase", env=coverage_env)
@nox.session(python=PYTHON_VERSIONS)
def test_jupyter_notebooks(session):
versions = copy.copy(DEFAULT_PYTHON_VERSIONS)
if session.python not in versions:
session.skip(
f"Not testing Jupyter notebook on Python {session.python}, supports [{','.join(versions)}]"
)
session.install("jupyter", "nbval", "pyzmq")
install_hydra(session, ["pip", "install", "-e"])
args = pytest_args(
"--nbval", "examples/jupyter_notebooks/compose_configs_in_notebook.ipynb"
)
    # Jupyter notebook tests on Windows yield warnings, so drop the -Werror flag.
args = [x for x in args if x != "-Werror"]
session.run(*args, silent=SILENT)
notebooks_dir = Path("tests/jupyter")
for notebook in [
file for file in notebooks_dir.iterdir() if str(file).endswith(".ipynb")
]:
args = pytest_args("--nbval", str(notebook))
args = [x for x in args if x != "-Werror"]
session.run(*args, silent=SILENT)
@nox.session(python=PYTHON_VERSIONS)
def benchmark(session):
_upgrade_basic(session)
install_dev_deps(session)
install_hydra(session, INSTALL_COMMAND)
session.install("pytest")
run_pytest(session, "build_helpers", "tests/benchmark.py", *session.posargs)
| 30.297189 | 111 | 0.626591 | ["MIT"] | strx2322/hydra | noxfile.py | 15,088 | Python |
#vim: set encoding=utf-8
from django.core.urlresolvers import reverse
from django.http import Http404
from django.views.generic.base import TemplateView
from regulations.generator import generator
from regulations.generator.html_builder import HTMLBuilder
from regulations.generator.node_types import EMPTYPART, REGTEXT, label_to_text
from regulations.views import navigation, utils
def generate_html(regulation_tree, layer_appliers):
builder = HTMLBuilder(*layer_appliers)
builder.tree = regulation_tree
builder.generate_html()
return builder
class PartialView(TemplateView):
"""Base class of various partial markup views. sectional_links indicates
whether this view should use section links (url to a path) or just hash
links (to an anchor on the page)"""
sectional_links = True
def determine_appliers(self, label_id, version):
"""Figure out which layers to apply by checking the GET args"""
if 'layers' in self.request.GET.keys():
return utils.handle_specified_layers(
self.request.GET['layers'], label_id, version,
self.__class__.sectional_links)
else:
layer_creator = generator.LayerCreator()
layer_creator.add_layers(
generator.LayerCreator.LAYERS.keys(),
label_id, version, self.__class__.sectional_links)
return layer_creator.get_appliers()
def get_context_data(self, **kwargs):
context = super(PartialView, self).get_context_data(**kwargs)
label_id = context['label_id']
version = context['version']
tree = generator.get_tree_paragraph(label_id, version)
if tree is None:
raise Http404
inline_applier, p_applier, s_applier = self.determine_appliers(
label_id, version)
builder = generate_html(tree, (inline_applier, p_applier, s_applier))
return self.transform_context(context, builder)
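# Each subclass below supplies transform_context() to shape the generated tree
# for its own template: a single section, a single paragraph, an inline
# definition, or the full regulation.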
class PartialSectionView(PartialView):
""" Single section of reg text """
template_name = 'regulations/regulation-content.html'
def section_navigation(self, label, version):
nav_sections = navigation.nav_sections(label, version)
if nav_sections:
p_sect, n_sect = nav_sections
nav = {'previous': p_sect, 'next': n_sect}
return nav
def transform_context(self, context, builder):
child_of_root = builder.tree
# Add a layer to account for subpart if this is regtext
if builder.tree['node_type'] == REGTEXT:
child_of_root = {
'node_type': EMPTYPART,
'children': [builder.tree]}
context['markup_page_type'] = 'reg-section'
context['tree'] = {'children': [child_of_root]}
context['navigation'] = self.section_navigation(
context['label_id'], context['version'])
return context
class PartialParagraphView(PartialSectionView):
""" Single paragraph of a regtext """
def transform_context(self, context, builder):
node = builder.tree
# Wrap with layers until we reach a section
while len(node['label']) > 2:
node = {'node_type': node['node_type'],
'children': [node],
'label': node['label'][:-1]}
# added to give the proper parent container ID
# when interp headers are rendered
node['markup_id'] = context['label_id']
# One more layer for regtext
if node['node_type'] == REGTEXT:
node = {'node_type': EMPTYPART,
'children': [node],
'label': node['label'][:1] + ['Subpart']}
context['markup_page_type'] = 'reg-section'
context['tree'] = {'children': [node], 'label': node['label'][:1],
'node_type': REGTEXT}
context['navigation'] = self.section_navigation(
context['label_id'], context['version'])
return context
class PartialDefinitionView(PartialView):
""" Single paragraph of a regtext formatted for display
as an inline interpretation """
template_name = "regulations/partial-definition.html"
def transform_context(self, context, builder):
context['node'] = builder.tree
context['formatted_label'] = label_to_text(
builder.tree['label'], True, True)
context['node']['section_id'] = '%s-%s' % (
builder.tree['label'][0], builder.tree['label'][1])
return context
class PartialRegulationView(PartialView):
""" Entire regulation without chrome """
template_name = 'regulations/regulation-content.html'
sectional_links = False
def transform_context(self, context, builder):
context['tree'] = builder.tree
return context
| 35.822222 | 78 | 0.642473 | ["CC0-1.0"] | DalavanCloud/regulations-site | regulations/views/partial.py | 4,836 | Python |
# -*- coding: utf-8 -*-
import unittest
from pathlib import Path
from knipse.db import KnipseDB
from knipse.scan import scan_images
from knipse.lists import image_id_from_string
from .test_walk import EXPECTED_IMAGES
class TestKnipseDatabase(unittest.TestCase):
def setUp(self) -> None:
self.src = Path(__file__).resolve().parent / 'images' / 'various'
self.db = KnipseDB(':memory:')
def test_getting_image_id(self) -> None:
cnt = 0
for file_path, progress in scan_images(self.db, self.src,
skip_thumbnail_folders=True):
cnt += 1
self.assertEqual(len(EXPECTED_IMAGES), cnt)
recgn = self.db.get_recognizer()
image_id = image_id_from_string(str(self.src / 'img_0002.jpg'),
self.src, recgn)
self.assertEqual(1, image_id)
image_id = image_id_from_string('I002', self.src, recgn)
self.assertEqual(2, image_id)
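        # Both lookup forms resolve against the same scanned images: a full
        # filesystem path maps to its stored image id, while an 'I002'-style
        # string is parsed directly as image id 2.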
| 33.266667 | 76 | 0.624248 | ["MIT"] | luphord/knipse_old | tests/test_lists.py | 998 | Python |
from django.db import models
class Produto(models.Model):
    descricao = models.CharField(max_length=30, null=False, blank=False)
preco = models.DecimalField(max_digits=5, decimal_places=2)
estoque = models.IntegerField()
def __str__(self):
return self.descricao + ' ' + str(self.preco)
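# Illustrative usage (made-up values, not part of the original module):
#   >>> from decimal import Decimal
#   >>> p = Produto(descricao="Caneta", preco=Decimal("2.50"), estoque=100)
#   >>> str(p)
#   'Caneta 2.50'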
| 28.454545 | 74 | 0.70607 | ["MIT"] | evaristofm/gestao-produtos | produtos/models.py | 313 | Python |
from .api.accounts import Accounts
from .api.classifications import Classifications
from .api.departments import Departments
from .api.currencies import Currencies
from .api.locations import Locations
from .api.vendor_bills import VendorBills
from .api.vendors import Vendors
from .internal.client import NetSuiteClient
class NetSuiteConnection:
def __init__(self, account, consumer_key, consumer_secret, token_key, token_secret, host, db):
ns_client = NetSuiteClient(host=host, db=db, account=account)
ns_client.connect_tba(consumer_key=consumer_key, consumer_secret=consumer_secret, token_key=token_key, token_secret=token_secret)
self.accounts = Accounts(ns_client)
self.classifications = Classifications(ns_client)
self.departments = Departments(ns_client)
self.currencies = Currencies(ns_client)
self.locations = Locations(ns_client)
self.vendor_bills = VendorBills(ns_client)
self.vendors = Vendors(ns_client)
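# Minimal usage sketch (all values below are placeholders, not working
# credentials; the wrapped API objects share a single authenticated client):
#
#   connection = NetSuiteConnection(
#       account='1234567',
#       consumer_key='<consumer key>',
#       consumer_secret='<consumer secret>',
#       token_key='<token id>',
#       token_secret='<token secret>',
#       host='<suitetalk host url>',
#       db='<db>',
#   )
#   vendors_api = connection.vendors
#   vendor_bills_api = connection.vendor_bills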
| 47.285714 | 137 | 0.775428 | ["MIT"] | Gzing/netsuite-sdk-py | netsuitesdk/connection.py | 993 | Python |
import user
import db
if __name__ == "__main__":
# Initializes the database if it doesn't already exist
engine = db.open_db('maintenance.db')
db.create_tables(engine)
# TODO: Make this selectable with arrow keys
while True:
print('\nSelect an option:\n1. View Service History\n2. Add a Car\n3. Add a Service\n4. Exit')
        userInput = input().strip()
        # Guard against empty input before indexing the first character.
        if not userInput:
            continue
        if userInput[0] == '1': user.view_services(engine)
        elif userInput[0] == '2': user.insert_car(engine)
        elif userInput[0] == '3': user.insert_service(engine)
        elif userInput[0] == '4': break
| 33.294118 | 98 | 0.680212 | ["MIT"] | tshea113/maintenance-minder | src/main.py | 566 | Python |
#!/usr/bin/env python
import logging
from typing import (
    Optional,
    Dict,
    List,
    Any,
)
from hummingbot.core.data_type.order_book import OrderBook
from sqlalchemy.engine import RowProxy
import hummingbot.connector.exchange.binarz.binarz_constants as constants
from hummingbot.connector.exchange.binarz.binarz_order_book_message import BinarzOrderBookMessage
from hummingbot.connector.exchange.binarz.binarz_websocket import BinarzTrade
from hummingbot.core.data_type.order_book_message import (
OrderBookMessage, OrderBookMessageType
)
from hummingbot.logger import HummingbotLogger
_logger = None
class BinarzOrderMatched:
    """Placeholder for matched-order events; not referenced elsewhere in this module."""
    def __init__(self):
        pass
class BinarzOrderBook(OrderBook):
@classmethod
def logger(cls) -> HummingbotLogger:
global _logger
if _logger is None:
_logger = logging.getLogger(__name__)
return _logger
@classmethod
def snapshot_message_from_exchange(cls,
msg: Dict[str, Any],
timestamp: float,
*args, **kwargs):
"""
Convert json snapshot data into standard OrderBookMessage format
:param msg: json snapshot data from live web socket stream
:param timestamp: timestamp attached to incoming data
:return: BinarzOrderBookMessage
"""
return BinarzOrderBookMessage(
message_type=OrderBookMessageType.SNAPSHOT,
content=msg,
timestamp=timestamp,
*args, **kwargs)
@classmethod
def snapshot_message_from_db(cls, record: RowProxy):
"""
*used for backtesting
Convert a row of snapshot data into standard OrderBookMessage format
:param record: a row of snapshot data from the database
        :return: BinarzOrderBookMessage
"""
return BinarzOrderBookMessage(
message_type=OrderBookMessageType.SNAPSHOT,
content=record.json,
timestamp=record.timestamp
)
@classmethod
def diff_message_from_exchange(cls,
msg: Dict[str, any],
timestamp: Optional[float] = None):
"""
Convert json diff data into standard OrderBookMessage format
:param msg: json diff data from live web socket stream
:param timestamp: timestamp attached to incoming data
:return: BinarzOrderBookMessage
"""
return BinarzOrderBookMessage(
message_type=OrderBookMessageType.DIFF,
content=msg,
timestamp=timestamp
)
@classmethod
def diff_message_from_db(cls, record: RowProxy):
"""
*used for backtesting
Convert a row of diff data into standard OrderBookMessage format
:param record: a row of diff data from the database
        :return: BinarzOrderBookMessage
"""
return BinarzOrderBookMessage(
message_type=OrderBookMessageType.DIFF,
content=record.json,
timestamp=record.timestamp
)
@classmethod
def trade_message_from_exchange(cls,
msg: BinarzTrade,
timestamp: Optional[float] = None,
):
"""
Convert a trade data into standard OrderBookMessage format
"""
msg = {
"exchange_order_id": msg.order_id,
"trade_type": msg.type,
"price": msg.price,
"amount": msg.amount,
}
return BinarzOrderBookMessage(
message_type=OrderBookMessageType.TRADE,
content=msg,
timestamp=timestamp
)
@classmethod
def trade_message_from_db(cls, record: RowProxy, metadata: Optional[Dict] = None):
"""
*used for backtesting
Convert a row of trade data into standard OrderBookMessage format
:param record: a row of trade data from the database
:return: BinarzOrderBookMessage
"""
return BinarzOrderBookMessage(
message_type=OrderBookMessageType.TRADE,
content=record.json,
timestamp=record.timestamp
)
@classmethod
def from_snapshot(cls, snapshot: OrderBookMessage):
raise NotImplementedError(constants.EXCHANGE_NAME + " order book needs to retain individual order data.")
@classmethod
    def restore_from_snapshot_and_diffs(cls, snapshot: OrderBookMessage, diffs: List[OrderBookMessage]):
raise NotImplementedError(constants.EXCHANGE_NAME + " order book needs to retain individual order data.")
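# Usage sketch (illustrative only -- the payload layout passed as `msg` is an
# assumption, not the documented Binarz wire format; the factory simply wraps
# whatever dict it is given in a BinarzOrderBookMessage):
#
#     snapshot = BinarzOrderBook.snapshot_message_from_exchange(
#         msg={"bids": [["100.0", "1.5"]], "asks": [["100.5", "2.0"]]},
#         timestamp=1626000000.0,
#     )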
| 33.446809 | 113 | 0.628075 | ["Apache-2.0"] | amirhosein-fasihi/hummingbot | hummingbot/connector/exchange/binarz/binarz_order_book.py | 4,716 | Python |
import pygame
class TextSprite(pygame.sprite.Sprite):
"""Subclass of sprite to draw text to the screen"""
def __init__(self, position, text_lines, font, fg=(0, 0, 0), bg=None,
border_width=0, border_color=(0, 0, 0),
bold=False, italic=False, underline=False,
line_spacing=3, padding=5):
pygame.sprite.Sprite.__init__(self)
self.position = position
self.font = font
self.fg = fg
self.bg = bg
self.border_width = border_width
self.border_color = border_color
self.line_spacing = line_spacing
self.padding = padding
self.font.set_bold(bold)
self.font.set_italic(italic)
self.font.set_underline(underline)
self.rect = None
self.image = None
self.text_lines = text_lines
self.update()
def update(self):
""""""
# Render all lines of text
text_images = [self.font.render(t, False, self.fg, self.bg) for t in self.text_lines]
# Find the largest width line of text
max_width = max(text_images, key=lambda x: x.get_width()).get_width()
# Produce an image to hold all of the text strings
self.image = pygame.Surface(
(max_width + 2 * (self.border_width + self.padding),
text_images[0].get_height() * len(text_images) + self.line_spacing * (len(text_images) - 1) + 2 * (
self.border_width + self.padding)
)
)
        if self.bg is not None:
            self.image.fill(self.bg)
if self.border_width > 0:
pygame.draw.rect(self.image, self.border_color,
(0, 0, self.image.get_width(), self.image.get_height()), self.border_width)
for n, t in enumerate(text_images):
self.image.blit(t, (self.border_width + self.padding,
self.border_width + self.padding + (self.line_spacing + t.get_height()) * n))
# Store the last rect so if the new one is smaller we can update those bits of the screen too
last_rect = self.rect
self.rect = pygame.Rect(self.position[0], self.position[1], self.image.get_width(), self.image.get_height())
if last_rect is None:
return self.rect
else:
return last_rect.union(self.rect)
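# Minimal usage sketch (not part of the original module): opens a small window,
# renders a two-line TextSprite and shows it briefly. Window size, font and
# colours are arbitrary choices for illustration.
if __name__ == "__main__":
    pygame.init()
    screen = pygame.display.set_mode((320, 240))
    demo_font = pygame.font.SysFont(None, 24)  # default system font, size 24
    sprite = TextSprite((20, 20), ["Hello", "pytile"], demo_font,
                        fg=(255, 255, 255), bg=(40, 40, 40),
                        border_width=1, border_color=(200, 200, 200))
    screen.blit(sprite.image, sprite.rect)
    pygame.display.flip()
    pygame.time.wait(1500)
    pygame.quit()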
| 36.625 | 116 | 0.590444 | ["MIT"] | bashkirtsevich/pytile | text_sprite.py | 2,344 | Python |
#!/usr/bin/env python
u"""
combine_kernels.py
by Yara Mohajerani
Combine the sensitivity kernels of the sum of the 'fixed points'
and produce netcdf and png outputs
Last Update 12/2020
"""
#-- load required modules
import os
import sys
import numpy as np
import geopandas as gpd
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
#-- also import gravity toolkit modules
from gravity_toolkit.ncdf_write import ncdf_write
from gravity_toolkit.ncdf_read import ncdf_read
#------------------------------------------------------------------------------
#-- create sensitivity kernels for given voronoi harmonics
#------------------------------------------------------------------------------
def combine_kernels(parameters):
DDEG_RASTER = float(parameters['DDEG_RASTER'])
#-- read harmonic parameters
LMAX = int(parameters['LMAX'])
#-- get output directory
ddir = os.path.expanduser(parameters['DIRECTORY'])
#-- smoothing radius
RAD = int(parameters['RAD'])
#-- ocn redistribution label
OCN = '_OCN' if parameters['MASCON_OCEAN'] in ['Y','y'] else ''
#-- load mascon configuration of interest
mascon_nums = np.array(parameters['MSCN_NUMS'].split(','),dtype=int)
mascon_name = parameters['MSCN_NAME']
out_lbl = '{0}_{1}'.format(mascon_name,parameters['MSCN_NUMS'].replace(',','+'))
#----------------------------------------------------------------------
#-- Read and sum up kernels corresponding to fixed points
#----------------------------------------------------------------------
kerns = {}
for i in mascon_nums:
#- read the netcdf files
kern_file = os.path.join(ddir,'MASCON_{0:d}_YLMS_{1:.2f}DEG_SKERNEL{2}_L{3:02d}_r{4:d}km.nc'.format(i,DDEG_RASTER,OCN,LMAX,RAD))
kerns[i] = ncdf_read(kern_file,DATE=False)
#-- sum up the kernels
kern_sum = kerns[mascon_nums[0]]['data']
for i in mascon_nums[1:]:
kern_sum += kerns[i]['data']
#-- get grid for saving combined sensitivity kernel
glat = kerns[mascon_nums[0]]['lat']
glon = kerns[mascon_nums[0]]['lon']
#----------------------------------------------------------------------
#-- write kernel sum to file
#----------------------------------------------------------------------
outfile = os.path.join(ddir,'MASCON_{0}_YLMS_{1:.2f}DEG_SKERNEL_OCN_L{2:02d}_r{3:d}km.nc'.format(out_lbl,DDEG_RASTER,LMAX,RAD))
ncdf_write(kern_sum,glon,glat,0,FILENAME=outfile,DATE=False,UNITS='unitless',LONGNAME='Sensitivity_Kernel')
#----------------------------------------------------------------------
#-- plot summed kernel
#----------------------------------------------------------------------
#-- load in world map for plotting in background
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
#-- plot summed kernel
fig, ax = plt.subplots(1,figsize = (10,6),dpi=100)
klim = np.max(np.abs(kern_sum))*0.95
c = ax.contourf(glon,glat,kern_sum,cmap='bwr',levels=np.linspace(-klim,klim,16))
#-- use an axis divider for the colorbar
drx = make_axes_locatable(ax)
cax = drx.append_axes("right", size="5%", pad=0.1)
cbar = fig.colorbar(c,cax=cax)
cbar.set_label('Sensitivity Kernel (min:{0:.1f}, max:{1:.1f})'.format(np.min(kern_sum),np.max(kern_sum)))
world.plot(ax=ax,alpha=0.3,fc='none',ec='k',linewidth=1.2,rasterized=True)
plt.tight_layout()
plt.savefig(outfile.replace('.nc','.png'),format='PNG')
plt.close(fig=fig)
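#-- Example parameter file accepted by main() below (one "KEY value" pair per
#-- line; the values shown are illustrative placeholders, not a tested setup):
#--
#--   DDEG_RASTER   0.25
#--   LMAX          60
#--   DIRECTORY     ~/data/mascons
#--   RAD           200
#--   MASCON_OCEAN  Y
#--   MSCN_NUMS     3,4,7
#--   MSCN_NAME     greenland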
#------------------------------------------------------------------------------
#-- main function
#------------------------------------------------------------------------------
def main():
if len(sys.argv) == 1:
sys.exit('No paramter file given')
else:
#-- read input files
input_files = sys.argv[1:]
parameters = {}
for infile in input_files:
#-- for each paramter file, extract parameters
fid = open(infile, 'r')
for fileline in fid:
part = fileline.split()
parameters[part[0]] = part[1]
fid.close()
#-- feed parameters to function to combine and plot kernels
combine_kernels(parameters)
#------------------------------------------------------------------------------
#-- run main program
#------------------------------------------------------------------------------
if __name__ == '__main__':
    main()
| 39.707547 | 130 | 0.562842 | ["MIT"] | yaramohajerani/dynamic_mascons | combine_kernels.py | 4,209 | Python |
# coding: utf8
"""
Delphi Decision Maker - Controllers
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# =============================================================================
def index():
"""
Module Home Page
- provide the list of currently-Active Problems
"""
# Simply redirect to the Problems REST controller
redirect(URL(f="problem"))
# Alternative dashboard
module_name = settings.modules[module].name_nice
table = s3db.delphi_group
groups = db(table.active == True).select()
result = []
for group in groups:
actions = []
duser = s3db.delphi_DelphiUser(group)
if duser.authorised:
actions.append(("group/%d/update" % group.id, T("Edit")))
actions.append(("new_problem/create/?group=%s&next=%s" % \
(group.id,
URL(f="group_summary", args=group.id)),
"Add New Problem"))
actions.append(("group_summary/%s/#request" % group.id, T("Review Requests")))
else:
actions.append(("group_summary/%s/#request" % group.id,
"Role: %s%s" % (duser.status,
(duser.membership and duser.membership.req) and "*" or "")))
table = s3db.delphi_problem
query = (table.group_id == group.id) & \
(table.active == True)
latest_problems = db(query).select(orderby =~ table.modified_on)
result.append((group, latest_problems, actions))
response.title = module_name
return dict(groups_problems = result,
name = T("Active Problems"),
module_name = module_name,
)
# =============================================================================
# Groups
# =============================================================================
def group_rheader(r, tabs = []):
""" Group rheader """
if r.representation == "html":
if r.record is None:
# List or Create form: rheader makes no sense here
return None
tabs = [(T("Basic Details"), None),
(T("Problems"), "problem"),
]
group = r.record
# Get this User's permissions for this Group
duser = s3db.delphi_DelphiUser(group.id)
if duser.authorised:
tabs.append((T("Membership"), "membership"))
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(
TABLE(
TR(TH("%s: " % T("Group")),
group.name,
),
TR(TH("%s: " % T("Description")),
group.description,
),
TR(TH("%s: " % T("Active")),
group.active,
),
),
rheader_tabs
)
return rheader
# -----------------------------------------------------------------------------
def group():
""" Problem Group REST Controller """
if not s3_has_role("DelphiAdmin"):
s3db.configure("delphi_group",
deletable=False,
# Remove ability to create new Groups
#insertable=False
)
def prep(r):
if r.interactive:
if r.component:
tablename = r.component.tablename
list_fields = s3db.get_config(tablename,
"list_fields")
try:
list_fields.remove("group_id")
except:
pass
s3db.configure(tablename,
deletable = s3_has_role("DelphiAdmin"),
list_fields = list_fields)
return True
s3.prep = prep
rheader = group_rheader
return s3_rest_controller(rheader = rheader,
# Allow components with components (such as problem) to breakout from tabs
native = True,
)
# =============================================================================
# Problems
# =============================================================================
def problem_rheader(r, tabs = []):
""" Problem rheader """
if r.representation == "html":
if r.record is None:
# List or Create form: rheader makes no sense here
return None
problem = r.record
tabs = [# Components & Custom Methods
(T("Problems"), "problems"),
(T("Solutions"), "solution"),
(T("Discuss"), "discuss"),
(T("Vote"), "vote"),
(T("Scale of Results"), "results"),
]
# Get this User's permissions for this Group
duser = s3db.delphi_DelphiUser(problem.group_id)
if duser.authorised:
tabs.append((T("Edit"), None))
rheader_tabs = s3_rheader_tabs(r, tabs)
rtable = TABLE(TR(TH("%s: " % T("Problem")),
problem.name,
TH("%s: " % T("Active")),
problem.active,
),
TR(TH("%s: " % T("Description")),
problem.description,
),
TR(TH("%s: " % T("Criteria")),
problem.criteria,
),
)
if r.component and \
r.component_name == "solution" and \
r.component_id:
stable = s3db.delphi_solution
query = (stable.id == r.component_id)
solution = db(query).select(stable.name,
stable.description,
limitby=(0, 1)).first()
rtable.append(DIV(TR(TH("%s: " % T("Solution")),
solution.name,
),
TR(TH("%s: " % T("Description")),
solution.description,
),
))
rheader = DIV(rtable,
rheader_tabs)
return rheader
# -----------------------------------------------------------------------------
def problem():
""" Problem REST Controller """
tablename = "%s_%s" % (module, resourcename)
table = s3db[tablename]
# Custom Methods
set_method = s3db.set_method
set_method(module, resourcename,
method="problems",
action=problems)
set_method(module, resourcename,
method="discuss",
action=discuss)
# Discussion can also be done at the Solution component level
set_method(module, resourcename,
component_name="solution",
method="discuss",
action=discuss)
set_method(module, resourcename,
method="vote",
action=vote)
set_method(module, resourcename,
method="results",
action=results)
# Filter to just Active Problems
s3.filter = (table.active == True)
if not s3_has_role("DelphiAdmin"):
s3db.configure(tablename,
deletable = False,
# Remove ability to create new Problems
#insertable = False
)
def prep(r):
if r.interactive:
if r.record:
duser = s3db.delphi_DelphiUser(r.record.group_id)
if duser.authorised:
s3db.configure(tablename,
deletable = True,
)
if r.component_name == "solution":
r.component.table.modified_on.label = T("Last Updated")
s3db.configure(r.component.tablename,
deletable = duser.authorised,
)
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
if not r.component:
s3.actions = [
dict(label=str(T("Solutions")),
_class="action-btn",
url=URL(args=["[id]", "solution"])),
dict(label=str(T("Vote")),
_class="action-btn",
url=URL(args=["[id]", "vote"])),
]
elif r.component_name == "solution":
s3.actions = [
dict(label=str(T("Discuss")),
_class="action-btn",
url=URL(args=[r.id, "solution", "[id]", "discuss"])),
]
return output
s3.postp = postp
rheader = problem_rheader
return s3_rest_controller(rheader=rheader)
# -----------------------------------------------------------------------------
def problems(r, **attr):
"""
Redirect to the list of Problems for the Group
- used for a Tab
"""
try:
group_id = r.record.group_id
except:
raise HTTP(400)
else:
redirect(URL(f="group", args=[group_id, "problem"]))
# -----------------------------------------------------------------------------
def solution():
"""
Used for Imports
"""
return s3_rest_controller()
# =============================================================================
# Voting
# =============================================================================
def vote(r, **attr):
"""
Custom Method to allow Voting on Solutions to a Problem
"""
problem = r.record
# Get this User's permissions for this Group
duser = s3db.delphi_DelphiUser(problem.group_id)
# Add the RHeader to maintain consistency with the other pages
rheader = problem_rheader(r)
# Lookup Solution Options
stable = s3db.delphi_solution
query = (stable.problem_id == problem.id)
rows = db(query).select(stable.id,
stable.name)
options = Storage()
for row in rows:
options[row.id] = row.name
if duser.user_id:
vtable = s3db.delphi_vote
query = (vtable.problem_id == problem.id) & \
(vtable.created_by == auth.user.id)
votes = db(query).select(vtable.solution_id,
orderby = vtable.rank)
else:
votes = []
rankings = OrderedDict()
for v in votes:
# Add to the list of ranked options
rankings[v.solution_id] = options[v.solution_id]
# Remove from the unranked options
options.pop(v.solution_id)
# Add Custom CSS from Static (cacheable)
s3.stylesheets.append("S3/delphi.css")
# Add Custom Javascript
# Settings to be picked up by Static code
js = "".join((
'''var problem_id=''', str(problem.id), '''
i18n.delphi_failed="''', str(T("Failed!")), '''"
i18n.delphi_saving="''', str(T("Saving...")), '''"
i18n.delphi_saved="''', str(T("Saved.")), '''"
i18n.delphi_vote="''', str(T("Save Vote")), '''"'''))
s3.js_global.append(js)
# Static code which can be cached
s3.scripts.append(URL(c="static", f="scripts",
args=["S3", "s3.delphi.js"]))
response.view = "delphi/vote.html"
return dict(rheader = rheader,
duser = duser,
votes = votes,
options = options,
rankings = rankings,
)
# -----------------------------------------------------------------------------
def save_vote():
"""
Function accessed by AJAX from vote() to save the results of a Vote
"""
try:
problem_id = request.args[0]
except:
raise HTTP(400)
ptable = s3db.delphi_problem
query = (ptable.id == problem_id)
problem = db(query).select(ptable.group_id,
limitby=(0, 1)).first()
if not problem:
raise HTTP(404)
# Get this User's permissions for this Group
duser = s3db.delphi_DelphiUser(problem.group_id)
if not duser.can_vote:
auth.permission.fail()
# Decode the data
try:
rankings = request.post_vars.keys()[0].split(",")
except IndexError:
status = current.xml.json_message(False, 400, "No Options Ranked")
raise HTTP(400, body=status)
# Check the votes are valid
stable = s3db.delphi_solution
query = (stable.problem_id == problem_id)
solutions = db(query).select(stable.id)
options = []
for row in solutions:
options.append(row.id)
for ranked in rankings:
if int(ranked) not in options:
status = current.xml.json_message(False, 400, "Option isn't valid!")
raise HTTP(400, body=status)
# Convert to a format suitable for comparisons
votes = []
count = 1
for ranked in rankings:
votes.append(Storage(solution_id=int(ranked), rank=count))
count += 1
# Read the old votes
vtable = s3db.delphi_vote
query = (vtable.problem_id == problem_id) & \
(vtable.created_by == auth.user.id)
old_votes = db(query).select(vtable.solution_id,
vtable.rank)
if old_votes:
# Calculate changes
ranks = {}
old_ranks = {}
used = []
for solution in solutions:
s1 = solution.id
ranks[s1] = 0
old_ranks[s1] = 0
for vote in votes:
if vote.solution_id == s1:
ranks[s1] = vote.rank
continue
for vote in old_votes:
if vote.solution_id == s1:
old_ranks[s1] = vote.rank
continue
for sol_2 in solutions:
changed = False
s2 = sol_2.id
if s2 == s1:
continue
if (s2, s1) in used:
# We've already evaluated this pair
continue
ranks[s2] = 0
old_ranks[s2] = 0
for vote in votes:
if vote.solution_id == s2:
ranks[s2] = vote.rank
continue
for vote in old_votes:
if vote.solution_id == s2:
old_ranks[s2] = vote.rank
continue
if (ranks[s1] > ranks[s2]) and \
(old_ranks[s1] < old_ranks[s2]):
changed = True
elif (ranks[s1] < ranks[s2]) and \
(old_ranks[s1] > old_ranks[s2]):
changed = True
elif (ranks[s1] == ranks[s2]) and \
(old_ranks[s1] != old_ranks[s2]):
changed = True
elif (ranks[s1] != ranks[s2]) and \
(old_ranks[s1] == old_ranks[s2]):
changed = True
if changed:
# This pair has changed places, so update Solution
db(stable.id.belongs((s1, s2))).update(changes=stable.changes + 1)
used.append((s1, s2))
# Clear the old votes
db(query).delete()
# Save the new votes
count = 1
for ranked in rankings:
vtable.insert(problem_id=problem_id, solution_id=ranked, rank=count)
count += 1
status = current.xml.json_message(True, 200, "Vote saved")
return status
# -----------------------------------------------------------------------------
def _getUnitNormalDeviation(zscore):
"""
Utility function used by Scale of Results
Looks up the Unit Normal Deviation based on the Z-Score (Proportion/Probability)
http://en.wikipedia.org/wiki/Standard_normal_table
@ToDo: Move to S3Statistics module
"""
UNIT_NORMAL = (
( 0.0, .0, .01, .02, .03, .04, .05, .06, .07, .08, .09 ),
( .0, .5000, .5040, .5080, .5120, .5160, .5199, .5239, .5279, .5319, .5359 ),
( .1, .5398, .5438, .5478, .5517, .5557, .5596, .5636, .5675, .5714, .5753 ),
( .2, .5793, .5832, .5871, .5910, .5948, .5987, .6026, .6064, .6103, .6141 ),
( .3, .6179, .6217, .6255, .6293, .6331, .6368, .6406, .6443, .6480, .6517 ),
( .4, .6554, .6591, .6628, .6664, .6700, .6736, .6772, .6808, .6844, .6879 ),
( .5, .6915, .6950, .6985, .7019, .7054, .7088, .7123, .7157, .7190, .7224 ),
( .6, .7257, .7291, .7324, .7357, .7389, .7422, .7454, .7486, .7517, .7549 ),
( .7, .7580, .7611, .7642, .7673, .7703, .7734, .7764, .7794, .7823, .7852 ),
( .8, .7881, .7910, .7939, .7967, .7995, .8023, .8051, .8078, .8106, .8133 ),
( .9, .8159, .8186, .8212, .8238, .8264, .8289, .8315, .8340, .8365, .8389 ),
        ( 1.0, .8415, .8438, .8461, .8485, .8508, .8531, .8554, .8577, .8599, .8621 ),
( 1.1, .8643, .8665, .8686, .8708, .8729, .8749, .8770, .8790, .8810, .8830 ),
( 1.2, .8849, .8869, .8888, .8907, .8925, .8944, .8962, .8980, .8997, .90147 ),
( 1.3, .90320, .90490, .90658, .90824, .90988, .91149, .91309, .91466, .91621, .91774 ),
( 1.4, .91924, .92073, .92220, .92364, .92507, .92647, .92785, .92922, .93056, .93189 ),
( 1.5, .93319, .93448, .93574, .93699, .93822, .93943, .94062, .94179, .94295, .94408 ),
( 1.6, .94520, .94630, .94738, .94845, .94950, .95053, .95154, .95254, .95352, .95449 ),
( 1.7, .95543, .95637, .95728, .95818, .95907, .95994, .96080, .96164, .96246, .96327 ),
        ( 1.8, .96407, .96485, .96562, .96638, .96712, .96784, .96856, .96926, .96995, .97062 ),
( 1.9, .97128, .97193, .97257, .97320, .97381, .97441, .97500, .97558, .97615, .97670 ),
( 2.0, .97725, .97778, .97831, .97882, .97932, .97982, .98030, .98077, .98124, .98169 ),
( 2.1, .98214, .98257, .98300, .98341, .98382, .98422, .98461, .98500, .98537, .98574 ),
( 2.2, .98610, .98645, .98679, .98713, .98745, .98778, .98809, .98840, .98870, .98899 ),
( 2.3, .98928, .98956, .98983, .990097, .990358, .990613, .990863, .991106, .991344, .991576 ),
( 2.4, .991802, .992024, .992240, .992451, .992656, .992857, .993053, .993244, .993431, .993613 ),
( 2.5, .993790, .993963, .994132, .994297, .994457, .994614, .994766, .994915, .995060, .995201 ),
( 2.6, .995339, .995473, .995604, .995731, .995855, .995975, .996093, .996207, .996319, .996427 ),
( 2.7, .996533, .996636, .996736, .996833, .996928, .997020, .997110, .997197, .997282, .997365 ),
( 2.8, .997445, .997523, .997599, .997673, .997744, .997814, .997882, .997948, .998012, .998074 ),
( 2.9, .998134, .998193, .998250, .998305, .998359, .998411, .998460, .998511, .998559, .998605 ),
( 3.0, .998650, .998694, .998736, .998777, .998817, .998856, .998893, .998930, .998965, .998999 ),
( 3.1, .9990324, .9990646, .9990957, .9991260, .9991553, .9991836, .9992112, .9992378, .9992636, .9992886 ),
( 3.2, .9993129, .9993363, .9993590, .9993810, .9994024, .9994230, .9994429, .9994623, .9994810, .9994991 ),
( 3.3, .9995166, .9995335, .9995499, .9995658, .9995811, .9995959, .9996103, .9996242, .9996376, .9996505 ),
( 3.4, .9996631, .9996752, .9996869, .9996982, .9997091, .9997197, .9997299, .9997398, .9997493, .9997585 ),
( 3.5, .9997674, .9997759, .9997842, .9997922, .9997999, .9998074, .9998146, .9998215, .9998282, .9998347 ),
( 3.6, .9998409, .9998469, .9998527, .9998583, .9998637, .9998689, .9998739, .9998787, .9998834, .9998879 ),
( 3.7, .9998922, .9998964, .99990039, .99990426, .99990799, .99991158, .99991504, .99991838, .99992159, .99992468 ),
( 3.8, .99992765, .99993052, .99993327, .99993593, .99993848, .99994094, .99994331, .99994558, .99994777, .99994988 ),
( 3.9, .99995190, .99995385, .99995573, .99995753, .99995926, .99996092, .99996253, .99996406, .99996554, .99996696 ),
( 4.0, .99996833, .99996964, .99997090, .99997211, .99997327, .99997439, .99997546, .99997649, .99997748, .99997843 ),
( 4.1, .99997934, .99998022, .99998106, .99998186, .99998263, .99998338, .99998409, .99998477, .99998542, .99998605 ),
( 4.2, .99998665, .99998723, .99998778, .99998832, .99998882, .99998931, .99998978, .999990226, .999990655, .999991066 ),
( 4.3, .999991460, .999991837, .999992199, .999992545, .999992876, .999993193, .999993497, .999993788, .999994066, .999994332 ),
( 4.4, .999994587, .999994831, .999995065, .999995288, .999995502, .999995706, .999995902, .999996089, .999996268, .999996439 ),
( 4.5, .999996602, .999996759, .999996908, .999997051, .999997187, .999997318, .999997442, .999997561, .999997675, .999997784 ),
( 4.6, .999997888, .999997987, .999998081, .999998172, .999998258, .999998340, .999998419, .999998494, .999998566, .999998634 ),
( 4.7, .999998699, .999998761, .999998821, .999998877, .999998931, .999998983, .9999990320, .9999990789, .9999991235, .9999991661 ),
( 4.8, .9999992067, .9999992453, .9999992822, .9999993173, .9999993508, .9999993827, .9999994131, .9999994420, .9999994696, .9999994958 ),
( 4.9, .9999995208, .9999995446, .9999995673, .9999995889, .9999996094, .9999996289, .9999996475, .9999996652, .9999996821, .9999996981 )
)
# Assume indifference
unitDeviation = 0.0
for j in range(1, 50):
if zscore == UNIT_NORMAL[j][1]:
unitDeviation = UNIT_NORMAL[j][0]
elif (UNIT_NORMAL[j][1] < zscore) and (zscore < UNIT_NORMAL[j + 1][1]):
for i in range(2, 10):
if (UNIT_NORMAL[j][i - 1] < zscore) and (zscore <= UNIT_NORMAL[j][i]):
unitDeviation = UNIT_NORMAL[j][0] + UNIT_NORMAL[0][i]
if zscore > UNIT_NORMAL[j][10]:
unitDeviation = UNIT_NORMAL[j + 1][0]
if zscore > UNIT_NORMAL[50][10]:
# maximum value
unitDeviation = 5.0
return unitDeviation
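# Worked example: a proportion of 0.5398 matches the first column of the 0.1
# row above, so _getUnitNormalDeviation(0.5398) returns 0.1. A proportion that
# falls between tabulated values picks up the 0.01-step column whose entry
# first reaches it: 0.57 lies between .5675 and .5714 in that row, giving 0.18.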
# -----------------------------------------------------------------------------
def online_variance(data):
"""
A numerically stable algorithm for calculating variance
http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#On-line_algorithm
"""
n = 0
mean = 0
M2 = 0
for x in data:
n = n + 1
delta = x - mean
mean = mean + delta/n
M2 = M2 + delta*(x - mean)
variance_n = M2/n
variance = M2/(n - 1)
return (variance, variance_n)
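# Hand-checked example: online_variance([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])
# returns (32/7 ~= 4.571, 4.0), i.e. the sample variance (n-1 denominator)
# followed by the population variance (n denominator). Pass floats -- with
# plain ints the running mean is floor-divided under Python 2 -- and at least
# two values, otherwise the n-1 division fails.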
# -----------------------------------------------------------------------------
def results(r, **attr):
"""
Custom Method to show the Scale of Results
"""
def NBSP():
return XML(" ")
# Add the RHeader to maintain consistency with the other pages
rheader = problem_rheader(r)
response.view = "delphi/results.html"
empty = dict(rheader=rheader,
num_voted=0,
chart="",
table_color="",
grids="",
summary=""
)
problem = r.record
# Lookup Votes
if problem:
vtable = s3db.delphi_vote
query = (vtable.problem_id == problem.id)
votes = db(query).select(vtable.solution_id,
vtable.rank,
vtable.created_by)
else:
votes = None
if not votes:
return empty
# Lookup Solutions
stable = s3db.delphi_solution
query = (stable.problem_id == problem.id)
solutions = db(query).select(stable.id,
stable.name,
stable.problem_id, # Needed for Votes virtual field
stable.changes)
if not solutions:
return empty
# Initialise arrays of pairwise comparisons
arrayF = {}
arrayP = {}
arrayX = {}
arrayP2 = {}
arrayU = {}
for solution in solutions:
s1 = solution.id
for sol_2 in solutions:
s2 = sol_2.id
if s1 == s2:
arrayF[(s1, s2)] = None
arrayP[(s1, s2)] = None
arrayX[(s1, s2)] = None
arrayP2[(s1, s2)] = None
arrayU[(s1, s2)] = None
continue
arrayF[(s1, s2)] = 0
# Allow new solutions to start at an indifferent probability
arrayP[(s1, s2)] = 0.5
arrayX[(s1, s2)] = 0
arrayP2[(s1, s2)] = 0.5
arrayU[(s1, s2)] = 0.5
# List of Voters
voters = []
for vote in votes:
voter = vote.created_by
if voter not in voters:
voters.append(voter)
num_voted = len(voters)
# Update array of pairwise comparisons based on votes
# Create array F which is the number of time a solution has been preferred compared to it'a partner
for voter in voters:
ranks = {}
for vote in votes:
if vote.created_by != voter:
continue
ranks[vote.rank] = vote.solution_id
for rank_1 in range(1, len(ranks)):
for rank_2 in range(rank_1 + 1, len(ranks) + 1):
arrayF[(ranks[rank_1], ranks[rank_2])] += 1
grids = DIV()
header = TR(TD())
rows = TBODY()
for solution in solutions:
header.append(TH(solution.name))
s1 = solution.id
row = TR(TH(solution.name))
for sol_2 in solutions:
s2 = sol_2.id
# Preferred should be the columns
value = arrayF[(s2, s1)]
if value is None:
row.append(TD("-"))
else:
row.append(TD(value))
rows.append(row)
output = TABLE(THEAD(header), rows,
_class="delphi_wide")
output = DIV(H4(T("Array F: # times that solution in column is preferred over it's partner in row")),
output)
grids.append(output)
grids.append(NBSP())
# Use the pairwise comparisons to build a Dynamic Thurstone scale of results
# http://en.wikipedia.org/wiki/Thurstone_scale
# http://www.brocku.ca/MeadProject/Thurstone/Thurstone_1927a.html
# http://www.brocku.ca/MeadProject/Thurstone/Thurstone_1927f.html
# @ToDo: For incomplete data, the calculation is more complex: Gulliksen
# Convert array F to array P by converting totals to proportions
# Convert array P to array X, which is the unit normal deviate
for solution in solutions:
s1 = solution.id
for sol_2 in solutions:
s2 = sol_2.id
if s1 == s2:
continue
total = float(arrayF[(s1, s2)] + arrayF[(s2, s1)])
# Preferred should be the columns
if total:
proportion = arrayF[(s2, s1)] / total
else:
# No votes yet, so assume indifference
proportion = 0.5
arrayP[(s2, s1)] = proportion
# Cannot do unit normal deviation for 0/1 so approximate in order to not have to do the incomplete data maths
if proportion == 0.0:
arrayX[(s2, s1)] = _getUnitNormalDeviation(0.01)
elif proportion == 1.0:
arrayX[(s2, s1)] = _getUnitNormalDeviation(0.99)
else:
arrayX[(s2, s1)] = _getUnitNormalDeviation(proportion)
# Now calculate the uncertainty scale
# i.e. assume that every user who didn't vote on a particular pair drags that back towards indifference
novotes = num_voted - total
if proportion == 0.5:
pass
elif proportion > 0.5:
# Assume the novotes vote against
proportion = (arrayF[s2, s1] - novotes) / num_voted
else:
# Assume the novotes vote for
proportion = (arrayF[s2, s1] + novotes) / num_voted
arrayP2[(s2, s1)] = proportion
# Cannot do unit normal deviation for 0/1 so approximate in order to not have to do the incomplete data maths
if proportion == 0.0:
arrayU[(s2, s1)] = _getUnitNormalDeviation(0.01)
elif proportion == 1.0:
arrayU[(s2, s1)] = _getUnitNormalDeviation(0.99)
else:
arrayU[(s2, s1)] = _getUnitNormalDeviation(proportion)
header = TR(TD())
rows = TBODY()
for solution in solutions:
header.append(TH(solution.name))
s1 = solution.id
row = TR(TH(solution.name))
for sol_2 in solutions:
s2 = sol_2.id
# Preferred should be the columns
value = arrayP[(s2, s1)]
if value is None:
row.append(TD("-"))
else:
row.append(TD(value))
rows.append(row)
output = TABLE(THEAD(header), rows,
_class="delphi_wide")
output = DIV(H4(T("Array P: proportion of times that solution in column is preferred over it's partner in row, assuming that pairs not ranked start at the level of indifference (0.5)")),
output)
grids.append(output)
grids.append(NBSP())
header = TR(TD())
rows = TBODY()
footer = TR(TH("Total"))
footer2 = TR(TH("Scale"))
totals = {}
counts = {}
for solution in solutions:
s1 = solution.id
totals[s1] = 0
counts[s1] = 0
for solution in solutions:
header.append(TH(solution.name))
s1 = solution.id
row = TR(TH(solution.name))
for sol_2 in solutions:
s2 = sol_2.id
# Preferred should be the columns
value = arrayX[(s2, s1)]
if value is None:
row.append(TD("-"))
else:
row.append(TD(value))
if value is not None:
totals[s2] += value
counts[s2] += 1
rows.append(row)
# Least-squares estimate of the scale values
# Average of the columns
for solution in solutions:
s1 = solution.id
footer.append(TH(totals[s1]))
if counts[s1]:
solution.scale = totals[s1]/counts[s1]
footer2.append(TH(solution.scale))
else:
solution.scale = 0
footer2.append(TH())
output = TABLE(THEAD(header), rows, footer, footer2,
_class="delphi_wide")
output = DIV(H4(T("Array X: unit normal deviate")),
output)
grids.append(output)
grids.append(NBSP())
header = TR(TD())
rows = TBODY()
for solution in solutions:
header.append(TH(solution.name))
s1 = solution.id
row = TR(TH(solution.name))
for sol_2 in solutions:
s2 = sol_2.id
# Preferred should be the columns
value = arrayP2[(s2, s1)]
if value is None:
row.append(TD("-"))
else:
row.append(TD(value))
rows.append(row)
output = TABLE(THEAD(header), rows,
_class="delphi_wide")
output = DIV(H4(T("Array P2: proportion of times that solution in column is preferred over it's partner in row, assuming that non-votes move towards indifference")),
output)
grids.append(output)
grids.append(NBSP())
header = TR(TD())
rows = TBODY()
footer = TR(TH("Total"))
footer2 = TR(TH("Scale"))
totals = {}
counts = {}
for solution in solutions:
s1 = solution.id
totals[s1] = 0
counts[s1] = 0
for solution in solutions:
header.append(TH(solution.name))
s1 = solution.id
row = TR(TH(solution.name))
for sol_2 in solutions:
s2 = sol_2.id
# Preferred should be the columns
value = arrayU[(s2, s1)]
if value is None:
row.append(TD("-"))
else:
row.append(TD(value))
if value is not None:
totals[s2] += value
counts[s2] += 1
rows.append(row)
# Least-squares estimate of the uncertainty values
# Average of the columns
for solution in solutions:
s1 = solution.id
footer.append(TH(totals[s1]))
if counts[s1]:
solution.uncertainty = totals[s1]/counts[s1]
footer2.append(TH(solution.uncertainty))
else:
solution.uncertainty = 0
footer2.append(TH())
output = TABLE(THEAD(header), rows, footer, footer2,
_class="delphi_wide")
output = DIV(H4(T("Array U: unit normal deviate of the uncertainty value (assuming that all unvoted items return the probability towards indifference)")),
output)
grids.append(output)
# Sort the Solutions by Scale
def scale(solution):
return float(solution.scale)
solutions = solutions.sort(scale, reverse=True)
n = len(solutions)
# @ToDo: deployment_setting
image = ""
if image:
# Canvas of 900x600
from s3chart import S3Chart
chart = S3Chart(9, 6)
fig = chart.fig
# Add Axes with padding of 10px for the labels (fractional left, bottom, width, height)
ax = fig.add_axes([0.35, 0.1, 0.6, 0.8])
problem = r.record
ax.set_title(problem.name)
labels = []
scales = []
uncertainties = []
for solution in solutions:
labels.append(solution.name)
scales.append(solution.scale)
uncertainties.append(solution.uncertainty)
from numpy import arange
ind = arange(n)
width = .35
ax.set_yticks(ind + width)
ax.set_yticklabels(labels)
labels = ax.get_yticklabels()
for label in labels:
label.set_size(8)
ax.set_xlabel("Scale") # rotation="vertical" or rotation = 45
ax.xaxis.grid(True)
rects1 = ax.barh(ind, scales, width, linewidth=0) # color="blue"
rects2 = ax.barh(ind + width, uncertainties, width, linewidth=0, color="red")
ax.legend( (rects1[0], rects2[0]), ("Scale", "Uncertainty") )
image = chart.draw()
# Colour the rows
# Calculate Breaks
classes = 5
q = []
qappend = q.append
for i in range(classes - 1):
qappend(1.0 / classes * (i + 1))
values = [float(solution.scale) for solution in solutions]
breaks = s3db.stats_quantile(values, q)
# Make mutable
breaks = list(breaks)
values_min = min(values)
values_max = max(values)
breaks.insert(0, values_min)
breaks.append(values_max)
# Apply colours
# 5-class BuGn from ColorBrewer.org
colours = ["edf8fb",
"b2e2e2",
"66c2a4",
"2ca25f",
"006d2c",
]
for solution in solutions:
for i in range(classes):
value = solution.scale
if value >= breaks[i] and \
value <= breaks[i + 1]:
solution.color = colours[i]
break
# A table showing overall rankings
thead = THEAD(
TR(
TH(T("Solution Item"), _rowspan="2"),
TH(T("Scale"), _rowspan="2"),
TH(T("Uncertainty"), _rowspan="2"),
TH(T("Activity Level"), _colspan="3"),
),
TR(
TH(T("Voted on")),
TH(T("Times Changed")),
TH(T("Comments")),
),
)
tbody = TBODY()
for solution in solutions:
rows = True
tbody.append(
TR(
TD(solution.name),
TD(solution.scale,
_class="taright"),
TD(solution.uncertainty,
_class="taright"),
TD(solution.votes(),
_class="tacenter"),
TD(solution.changes,
_class="tacenter"),
TD(solution.comments(),
_class="tacenter"),
_style="background:#%s" % solution.color
)
)
summary = TABLE(thead,
tbody,
_class="delphi_wide")
# Add Custom CSS from Static (cacheable)
s3.stylesheets.append("S3/delphi.css")
return dict(rheader=rheader,
num_voted=num_voted,
chart=image,
summary=summary,
grids=grids
)
# =============================================================================
# Discussions
# =============================================================================
def discuss(r, **attr):
""" Custom Method to manage the discussion of a Problem or Solution """
if r.component:
resourcename = "solution"
id = r.component_id
else:
resourcename = "problem"
id = r.id
# Add the RHeader to maintain consistency with the other pages
rheader = problem_rheader(r)
ckeditor = URL(c="static", f="ckeditor", args="ckeditor.js")
s3.scripts.append(ckeditor)
adapter = URL(c="static", f="ckeditor", args=["adapters",
"jquery.js"])
s3.scripts.append(adapter)
# Toolbar options: http://docs.cksource.com/CKEditor_3.x/Developers_Guide/Toolbar
js = "".join((
'''i18n.reply="''', str(T("Reply")), '''"
var img_path=S3.Ap.concat('/static/img/jCollapsible/')
var ck_config={toolbar:[['Bold','Italic','-','NumberedList','BulletedList','-','Link','Unlink','-','Smiley','-','Source','Maximize']],toolbarCanCollapse:false,removePlugins:'elementspath'}
function comment_reply(id){
$('#delphi_comment_solution_id__row').hide()
$('#delphi_comment_solution_id__row1').hide()
$('#comment-title').html(i18n.reply)
var ed = $('#delphi_comment_body').ckeditorGet()
ed.destroy()
$('#delphi_comment_body').ckeditor(ck_config)
$('#comment-form').insertAfter($('#comment-'+id))
$('#delphi_comment_parent').val(id)
var solution_id=$('#comment-'+id).attr('solution_id')
if(undefined!=solution_id){
$('#delphi_comment_solution_id').val(solution_id)
}
}'''))
s3.js_global.append(js)
response.view = "delphi/discuss.html"
return dict(rheader=rheader,
resourcename=resourcename,
id=id)
# -----------------------------------------------------------------------------
def comment_parse(comment, comments, solution_id=None):
"""
Parse a Comment
@param: comment - a gluon.sql.Row: the current comment
@param: comments - a gluon.sql.Rows: full list of comments
@param: solution_id - a reference ID: optional solution commented on
"""
author = B(T("Anonymous"))
if comment.created_by:
utable = s3db.auth_user
ptable = s3db.pr_person
ltable = s3db.pr_person_user
query = (utable.id == comment.created_by)
left = [ltable.on(ltable.user_id == utable.id),
ptable.on(ptable.pe_id == ltable.pe_id)]
row = db(query).select(utable.email,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
left=left, limitby=(0, 1)).first()
if row:
person = row.pr_person
user = row[utable._tablename]
username = s3_fullname(person)
email = user.email.strip().lower()
import hashlib
hash = hashlib.md5(email).hexdigest()
url = "http://www.gravatar.com/%s" % hash
author = B(A(username, _href=url, _target="top"))
if not solution_id and comment.solution_id:
solution = "re: %s" % s3db.delphi_solution_represent(comment.solution_id)
header = DIV(author, " ", solution)
solution_id = comment.solution_id
else:
header = author
thread = LI(DIV(s3base.s3_avatar_represent(comment.created_by),
DIV(DIV(header,
_class="comment-header"),
DIV(XML(comment.body)),
_class="comment-text"),
DIV(DIV(comment.created_on,
_class="comment-date"),
DIV(A(T("Reply"),
_class="action-btn"),
_onclick="comment_reply(%i);" % comment.id,
_class="comment-reply"),
_class="fright"),
_id="comment-%i" % comment.id,
_solution_id=solution_id,
_class="comment-box"))
# Add the children of this thread
children = UL(_class="children")
id = comment.id
count = 0
for comment in comments:
if comment.parent == id:
count = 1
child = comment_parse(comment, comments, solution_id=solution_id)
children.append(child)
if count == 1:
thread.append(children)
return thread
# -----------------------------------------------------------------------------
def comments():
""" Function accessed by AJAX from discuss() to handle Comments """
try:
resourcename = request.args[0]
except:
raise HTTP(400)
try:
id = request.args[1]
except:
raise HTTP(400)
if resourcename == "problem":
problem_id = id
solution_id = None
elif resourcename == "solution":
stable = s3db.delphi_solution
query = (stable.id == id)
solution = db(query).select(stable.problem_id,
limitby=(0, 1)).first()
if solution:
problem_id = solution.problem_id
solution_id = id
else:
raise HTTP(400)
else:
raise HTTP(400)
table = s3db.delphi_comment
field = table.problem_id
field.default = problem_id
field.writable = field.readable = False
sfield = table.solution_id
if solution_id:
sfield.default = solution_id
sfield.writable = sfield.readable = False
else:
sfield.label = T("Related to Solution (optional)")
sfield.requires = IS_EMPTY_OR(
IS_ONE_OF(db, "delphi_solution.id",
s3.delphi_solution_represent,
filterby="problem_id",
filter_opts=(problem_id,)
))
# Form to add a new Comment
from gluon.tools import Crud
form = Crud(db).create(table, formname="delphi_%s/%s" % (resourcename, id))
# List of existing Comments
if solution_id:
comments = db(sfield == solution_id).select(table.id,
table.parent,
table.body,
table.created_by,
table.created_on)
else:
comments = db(field == problem_id).select(table.id,
table.parent,
table.solution_id,
table.body,
table.created_by,
table.created_on)
output = UL(_id="comments")
for comment in comments:
if not comment.parent:
# Show top-level threads at top-level
thread = comment_parse(comment, comments, solution_id=solution_id)
output.append(thread)
# Also see the outer discuss()
script = \
'''$('#comments').collapsible({xoffset:'-5',yoffset:'50',imagehide:img_path+'arrow-down.png',imageshow:img_path+'arrow-right.png',defaulthide:false})
$('#delphi_comment_parent__row1').hide()
$('#delphi_comment_parent__row').hide()
$('#delphi_comment_body').ckeditor(ck_config)
$('#submit_record__row input').click(function(){$('#comment-form').hide();$('#delphi_comment_body').ckeditorGet().destroy();return true;})'''
# No layout in this output!
#s3.jquery_ready.append(script)
output = DIV(output,
DIV(H4(T("New Post"),
_id="comment-title"),
form,
_id="comment-form",
_class="clear"),
SCRIPT(script))
return XML(output)
# END =========================================================================
| 36.84703 | 190 | 0.50308 | ["MIT"] | Code4SierraLeone/shdms | controllers/delphi.py | 45,285 | Python |
#Copyright (c) 2013 Marion Zepf
#Copyright (c) 2014 Walter Bender
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
""" type system for Primitives and their arguments """
import ast
from tablock import Media
from taconstants import (Color, ColorObj, CONSTANTS, Vector)
class Type(object):
""" A type in the type hierarchy. """
def __init__(self, constant_name, value):
""" constant_name -- the name of the constant that points to this Type
object
value -- an arbitrary integer that is different from the values of
all other Types. The order of the integers doesn't matter. """
self.constant_name = constant_name
self.value = value
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, Type):
return False
return self.value == other.value
def __str__(self):
return str(self.constant_name)
__repr__ = __str__
class TypeDisjunction(tuple, Type):
""" Disjunction of two or more Types (from the type hierarchy) """
def __init__(self, iterable):
        # Construction is handled by tuple.__new__ for tuple subclasses,
        # so there is nothing left to do here.
        pass
def __str__(self):
s = ["("]
for disj in self:
s.append(str(disj))
s.append(" or ")
s.pop()
s.append(")")
return "".join(s)
# individual types
TYPE_OBJECT = Type('TYPE_OBJECT', 0)
TYPE_CHAR = Type('TYPE_CHAR', 1)
TYPE_COLOR = Type('TYPE_COLOR', 2)
TYPE_FLOAT = Type('TYPE_FLOAT', 3)
TYPE_INT = Type('TYPE_INT', 4)
TYPE_BOOL = Type('TYPE_BOOL', 5)
# shortcut to avoid a TypeDisjunction between TYPE_FLOAT and TYPE_INT
TYPE_NUMBER = Type('TYPE_NUMBER', 6)
TYPE_NUMERIC_STRING = Type('TYPE_NUMERIC_STRING', 7)
TYPE_BOX = Type('TYPE_BOX', 8) # special type for the unknown content of a box
TYPE_STRING = Type('TYPE_STRING', 9)
TYPE_MEDIA = Type('TYPE_MEDIA', 10)
# An array of numbers used by the food plugin et al.
TYPE_VECTOR = Type('TYPE_VECTOR', 11)
# groups/ classes of types
TYPES_NUMERIC = (TYPE_FLOAT, TYPE_INT, TYPE_NUMBER)
BOX_AST = ast.Name(id='BOX', ctx=ast.Load)
ACTION_AST = ast.Name(id='ACTION', ctx=ast.Load)
def get_type(x):
""" Return the most specific type in the type hierarchy that applies to x
and a boolean indicating whether x is an AST. If the type cannot be
determined, return TYPE_OBJECT as the type. """
# non-AST types
if isinstance(x, (int, long)):
return (TYPE_INT, False)
elif isinstance(x, float):
return (TYPE_FLOAT, False)
elif isinstance(x, basestring):
if len(x) == 1:
return (TYPE_CHAR, False)
try:
float(x)
except ValueError:
return (TYPE_STRING, False)
else:
return (TYPE_NUMERIC_STRING, False)
elif isinstance(x, Color):
return (TYPE_COLOR, False)
elif isinstance(x, Media):
return (TYPE_MEDIA, False)
elif isinstance(x, Vector):
return (TYPE_VECTOR, False)
elif hasattr(x, "return_type"):
return (x.return_type, False)
# AST types
elif isinstance(x, ast.Num):
return (get_type(x.n)[0], True)
elif isinstance(x, ast.Str):
return (get_type(x.s)[0], True)
elif isinstance(x, ast.Name):
try:
# we need to have imported CONSTANTS for this to work
value = eval(x.id)
except NameError:
return (TYPE_OBJECT, True)
else:
return (get_type(value)[0], True)
elif isinstance(x, ast.Subscript):
if x.value == BOX_AST:
return (TYPE_BOX, True)
elif isinstance(x, ast.Call):
if isinstance(x.func, ast.Name):
if x.func.id == 'float':
return (TYPE_FLOAT, True)
elif x.func.id in ('int', 'ord'):
return (TYPE_INT, True)
elif x.func.id == 'chr':
return (TYPE_CHAR, True)
elif x.func.id in ('repr', 'str', 'unicode'):
return (TYPE_STRING, True)
elif x.func.id == 'Color':
return (TYPE_COLOR, True)
elif x.func.id == 'Media':
return (TYPE_MEDIA, True)
# unary operands never change the type of their argument
elif isinstance(x, ast.UnaryOp):
if issubclass(x.op, ast.Not):
# 'not' always returns a boolean
return (TYPE_BOOL, True)
else:
return get_type(x.operand)
# boolean and comparison operators always return a boolean
if isinstance(x, (ast.BoolOp, ast.Compare)):
return (TYPE_BOOL, True)
# other binary operators
elif isinstance(x, ast.BinOp):
type_left = get_type(x.left)[0]
type_right = get_type(x.right)[0]
if type_left == TYPE_STRING or type_right == TYPE_STRING:
return (TYPE_STRING, True)
if type_left == type_right == TYPE_INT:
return (TYPE_INT, True)
else:
return (TYPE_FLOAT, True)
return (TYPE_OBJECT, isinstance(x, ast.AST))
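# Examples of the mapping above: get_type(7) == (TYPE_INT, False),
# get_type("a") == (TYPE_CHAR, False) since single characters get their own
# type, and get_type(ast.Num(n=2.5)) == (TYPE_FLOAT, True) because AST
# literals report the type of the value they wrap.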
def is_instancemethod(method):
# TODO how to access the type `instancemethod` directly?
return type(method).__name__ == "instancemethod"
def is_bound_method(method):
return ((is_instancemethod(method) and method.im_self is not None) or
(hasattr(method, '__self__') and method.__self__ is not None))
def is_staticmethod(method):
# TODO how to access the type `staticmethod` directly?
return type(method).__name__ == "staticmethod"
def identity(x):
return x
TYPE_CONVERTERS = {
# Type hierarchy: If there is a converter A -> B, then A is a subtype of B.
# The converter from A to B is stored under TYPE_CONVERTERS[A][B].
# The relation describing the type hierarchy must be transitive, i.e.
# converting A -> C must yield the same result as converting A -> B -> C.
# TYPE_OBJECT is the supertype of everything.
TYPE_BOX: {
TYPE_COLOR: ColorObj, # FIXME: should be Color.name
TYPE_VECTOR: Vector,
TYPE_FLOAT: float,
TYPE_INT: int,
TYPE_NUMBER: float,
TYPE_STRING: str},
TYPE_CHAR: {
TYPE_INT: ord,
TYPE_STRING: identity},
TYPE_COLOR: {
TYPE_FLOAT: float,
TYPE_INT: int,
TYPE_NUMBER: int,
TYPE_STRING: Color.get_number_string},
TYPE_FLOAT: {
TYPE_INT: int,
TYPE_NUMBER: identity,
TYPE_STRING: str},
TYPE_INT: {
TYPE_FLOAT: float,
TYPE_NUMBER: identity,
TYPE_STRING: str},
TYPE_NUMBER: {
TYPE_FLOAT: float,
TYPE_INT: int,
TYPE_STRING: str},
TYPE_NUMERIC_STRING: {
TYPE_FLOAT: float,
TYPE_STRING: identity}
}
class TATypeError(BaseException):
""" TypeError with the types from the hierarchy, not with Python types """
def __init__(self, bad_value, bad_type=None, req_type=None, message=''):
""" bad_value -- the mis-typed value that caused the error
bad_type -- the type of the bad_value
req_type -- the type that the value was expected to have
message -- short statement about the cause of the error. It is
not shown to the user, but may appear in debugging output. """
self.bad_value = bad_value
self.bad_type = bad_type
self.req_type = req_type
self.message = message
def __str__(self):
msg = []
if self.message:
msg.append(self.message)
msg.append(" (")
msg.append("bad value: ")
msg.append(repr(self.bad_value))
if self.bad_type is not None:
msg.append(", bad type: ")
msg.append(repr(self.bad_type))
if self.req_type is not None:
msg.append(", req type: ")
msg.append(repr(self.req_type))
if self.message:
msg.append(")")
return "".join(msg)
__repr__ = __str__
def get_converter(old_type, new_type):
""" If there is a converter old_type -> new_type, return it. Else return
None. If a chain of converters is necessary, return it as a tuple or
list (starting with the innermost, first-to-apply converter). """
# every type can be converted to TYPE_OBJECT
if new_type == TYPE_OBJECT:
return identity
# every type can be converted to itself
if old_type == new_type:
return identity
# is there a converter for this pair of types?
converters_from_old = TYPE_CONVERTERS.get(old_type)
if converters_from_old is None:
return None
converter = converters_from_old.get(new_type)
if converter is not None:
return converter
else:
# form the transitive closure of all types that old_type can be
# converted to, and look for new_type there
backtrace = converters_from_old.copy()
new_backtrace = backtrace.copy()
break_all = False
while True:
newest_backtrace = {}
for t in new_backtrace:
for new_t in TYPE_CONVERTERS.get(t, {}):
if new_t not in backtrace:
newest_backtrace[new_t] = t
backtrace[new_t] = t
if new_t == new_type:
break_all = True
break
if break_all:
break
if break_all or not newest_backtrace:
break
new_backtrace = newest_backtrace
# use the backtrace to find the path from old_type to new_type
if new_type in backtrace:
converter_chain = []
t = new_type
while t in backtrace and isinstance(backtrace[t], Type):
converter_chain.insert(0, TYPE_CONVERTERS[backtrace[t]][t])
t = backtrace[t]
converter_chain.insert(0, TYPE_CONVERTERS[old_type][t])
return converter_chain
return None
def convert(x, new_type, old_type=None, converter=None):
""" Convert x to the new type if possible.
old_type -- the type of x. If not given, it is computed. """
if not isinstance(new_type, Type):
raise ValueError('%s is not a type in the type hierarchy'
% (repr(new_type)))
# every type can be converted to TYPE_OBJECT
if new_type == TYPE_OBJECT:
return x
if not isinstance(old_type, Type):
(old_type, is_an_ast) = get_type(x)
else:
is_an_ast = isinstance(x, ast.AST)
# every type can be converted to itself
if old_type == new_type:
return x
# special case: 'box' block (or 'pop' block) as an AST
if is_an_ast and old_type == TYPE_BOX:
new_type_ast = ast.Name(id=new_type.constant_name)
return get_call_ast('convert', [x, new_type_ast], return_type=new_type)
# if the converter is not given, try to find one
if converter is None:
converter = get_converter(old_type, new_type)
if converter is None:
# no converter available
raise TATypeError(bad_value=x, bad_type=old_type,
req_type=new_type, message=("found no converter"
" for this type combination"))
def _apply_converter(converter, y):
try:
if is_an_ast:
if converter == identity:
return y
elif is_instancemethod(converter):
func = ast.Attribute(value=y,
attr=converter.im_func.__name__,
ctx=ast.Load)
return get_call_ast(func)
else:
func_name = converter.__name__
return get_call_ast(func_name, [y])
else:
return converter(y)
except BaseException:
raise TATypeError(bad_value=x, bad_type=old_type,
req_type=new_type, message=("error during "
"conversion"))
if isinstance(converter, (list, tuple)):
# apply the converter chain recursively
result = x
for conv in converter:
result = _apply_converter(conv, result)
return result
elif converter is not None:
return _apply_converter(converter, x)
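# Worked examples, following the converter table above:
#   get_converter(TYPE_CHAR, TYPE_NUMBER) yields the chain [ord, identity]:
#   the character is first mapped to its code point, and an int already
#   counts as a number.
#   convert('3.5', TYPE_FLOAT) infers TYPE_NUMERIC_STRING for '3.5' and
#   applies float, returning 3.5.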
class TypedAST(ast.AST):
@property
def return_type(self):
if self._return_type is None:
return get_type(self.func)[0]
else:
return self._return_type
class TypedCall(ast.Call,TypedAST):
""" Like a Call AST, but with a return type """
def __init__(self, func, args=None, keywords=None, starargs=None,
kwargs=None, return_type=None):
if args is None:
args = []
if keywords is None:
keywords = []
ast.Call.__init__(self, func=func, args=args, keywords=keywords,
starargs=starargs, kwargs=kwargs)
self._return_type = return_type
class TypedSubscript(ast.Subscript,TypedAST):
""" Like a Subscript AST, but with a type """
def __init__(self, value, slice_, ctx=ast.Load, return_type=None):
ast.Subscript.__init__(self, value=value, slice=slice_, ctx=ctx)
self._return_type = return_type
class TypedName(ast.Name,TypedAST):
""" Like a Name AST, but with a type """
def __init__(self, id_, ctx=ast.Load, return_type=None):
ast.Name.__init__(self, id=id_, ctx=ctx)
self._return_type = return_type
def get_call_ast(func_name, args=None, kwargs=None, return_type=None):
""" Return an AST representing the call to a function with the name
func_name, passing it the arguments args (given as a list) and the
keyword arguments kwargs (given as a dictionary).
func_name -- either the name of a callable as a string, or an AST
representing a callable expression
return_type -- if this is not None, return a TypedCall object with this
return type instead """
if args is None:
args = []
# convert keyword argument dict to a list of (key, value) pairs
keywords = []
if kwargs is not None:
for (key, value) in kwargs.iteritems():
keywords.append(ast.keyword(arg=key, value=value))
# get or generate the AST representing the callable
if isinstance(func_name, ast.AST):
func_ast = func_name
else:
func_ast = ast.Name(id=func_name, ctx=ast.Load)
# if no return type is given, return a simple Call AST
if return_type is None:
return ast.Call(func=func_ast, args=args, keywords=keywords,
starargs=None, kwargs=None)
# if a return type is given, return a TypedCall AST
else:
return TypedCall(func=func_ast, args=args, keywords=keywords,
return_type=return_type)
| 35.363839 | 79 | 0.618633 | ["MIT"] | sugar-activities/4742-activity | TurtleArt/tatype.py | 15,843 | Python |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from O3 import prepare_data
from utils import plot_decision_regions
X_train, X_test, y_train, y_test = prepare_data(standardize=True,
split=True)
svm = SVC(kernel='rbf', random_state=1, gamma=0.2, C=1.0)
svm.fit(X_train, y_train)
X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X_combined, y_combined, classifier=svm,
test_idx=range(105, 150))
plt.xlabel('petal length [standardize]')
plt.ylabel('petal width [standardize]')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
print(f'Accuracy: {svm.score(X_test, y_test) * 100}%')
| 24.419355 | 65 | 0.688243 | ["MIT"] | ShAlireza/ML-Tries | O3/_14_kernel_methods_linearly_inseparable_data/rbf_kernel_iris_dataset.py | 757 | Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Cisco Systems
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: anc_endpoint_apply
short_description: Resource module for Anc Endpoint Apply
description:
- Manage operation update of the resource Anc Endpoint Apply.
version_added: '1.0.0'
extends_documentation_fragment:
- cisco.ise.module
author: Rafael Campos (@racampos)
options:
additionalData:
description: Anc Endpoint Apply's additionalData.
suboptions:
name:
description: Anc Endpoint Apply's name.
type: str
value:
description: Anc Endpoint Apply's value.
type: str
type: list
requirements:
- ciscoisesdk >= 1.1.0
- python >= 3.5
seealso:
# Reference by Internet resource
- name: Anc Endpoint Apply reference
description: Complete reference of the Anc Endpoint Apply object model.
link: https://ciscoisesdk.readthedocs.io/en/latest/api/api.html#v3-0-0-summary
"""
EXAMPLES = r"""
- name: Update all
cisco.ise.anc_endpoint_apply:
ise_hostname: "{{ise_hostname}}"
ise_username: "{{ise_username}}"
ise_password: "{{ise_password}}"
ise_verify: "{{ise_verify}}"
additionalData:
- name: macAddress
value: MAC address
- name: ipAddress
value: IP address
- name: policyName
value: Policy Name
"""
RETURN = r"""
ise_response:
description: A dictionary or list with the response returned by the Cisco ISE Python SDK
returned: always
type: dict
sample: >
{}
"""
| 25.126984 | 92 | 0.69362 | [
"Apache-2.0"
] | saeedya/docker-ansible | venv/lib/python3.8/site-packages/ansible_collections/cisco/ise/plugins/modules/anc_endpoint_apply.py | 1,583 | Python |
# -*- coding:utf-8 -*-
# =========================================================================== #
# Project : MLStudio #
# File : \test_preprocessing.py #
# Python : 3.8.3 #
# --------------------------------------------------------------------------- #
# Author : John James #
# Company : nov8.ai #
# Email : [email protected] #
# URL : https://github.com/nov8ai/MLStudio #
# --------------------------------------------------------------------------- #
# Created : Saturday, July 25th 2020, 9:54:15 pm #
# Last Modified : Saturday, July 25th 2020, 9:54:15 pm #
# Modified By : John James ([email protected]) #
# --------------------------------------------------------------------------- #
# License : BSD #
# Copyright (c) 2020 nov8.ai #
# =========================================================================== #
"""Tests data preprocessing pipeline."""
#%%
import numpy as np
import pytest
from pytest import mark
from scipy.sparse import csr_matrix
from sklearn.datasets import make_classification, make_regression
from mlstudio.factories.data import DataProcessors
# -------------------------------------------------------------------------- #
def check_add_bias(X, X_train, test):
assert X_train.shape[1] == X.shape[1] + 1, test + ": bias term wasn't added."
def check_split(X, y, X_train, y_train, X_val, y_val, test):
assert X_train.shape[1] == X.shape[1] + 1, test + ": bias term wasn't added."
assert X.shape[0] > X_train.shape[0], test + ": split didn't happen."
assert X_train.shape[0] == y_train.shape[0], test + ": X, y shape mismatch."
assert X_val.shape[0] == y_val.shape[0], test + ": X, y shape mismatch."
assert X_train.shape[0] > X_val.shape[0], test + ": Train size not greater than test."
def check_label_encoder(y, test):
assert all(y) in range(len(np.unique(y))), test + ": label encoding didn't work"
def check_one_hot_label_encoder(y, test):
assert np.sum(y) == y.shape[0], test + ": one-hot-label encoding didn't binarize"
assert y.shape[1] > 2, test + ": one-hot-label encoding didn't create vector."
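# Illustrative sketch (assumption, not part of this test module): the
# get_regression_data / get_multiclass_data fixtures used by the test classes
# below are provided elsewhere (e.g. conftest.py); a minimal stand-in built on
# the sklearn generators imported above could look roughly like this. The
# fixture name, sample counts and random_state are placeholders.
@pytest.fixture
def example_regression_data():
    X, y = make_regression(n_samples=100, n_features=5, n_informative=3, random_state=42)
    return X, y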
@mark.data_processing
@mark.regression_data
class RegressionDataTests:
_test = "Regression data"
def test_regression_train_data(self, get_regression_data):
X, y = get_regression_data
data_processor = DataProcessors.regression
data = data_processor().process_train_data(X, y)
check_add_bias(X, data['X_train']['data'],test = self._test)
def test_regression_train_val_data(self, get_regression_data):
X, y = get_regression_data
data_processor = DataProcessors.regression
data = data_processor().process_train_val_data(X, y, val_size=0.3)
check_add_bias(X, data['X_train']['data'], test = self._test)
check_add_bias(X, data['X_val']['data'], test = self._test)
check_split(X, y, data['X_train']['data'], data['y_train']['data'], data['X_val']['data'], data['y_val']['data'], test=self._test)
def test_regression_X_test_data(self, get_regression_data):
X, y = get_regression_data
data_processor = DataProcessors.regression
data = data_processor().process_X_test_data(X)
check_add_bias(X, data['X_test']['data'], test = self._test)
@mark.data_processing
@mark.binaryclass_data
class BinaryClassDataTests:
_test = "Binary classification data"
def test_binaryclass_train_data(self, get_logistic_regression_data):
X, y = get_logistic_regression_data
y = np.random.choice(["hat", "bowl"], size=y.shape[0])
data_processor = DataProcessors.binaryclass
data = data_processor().process_train_data(X, y)
check_add_bias(X, data['X_train']['data'],test = self._test)
def test_binaryclass_train_val_data(self, get_logistic_regression_data):
X, y = get_logistic_regression_data
y = np.random.choice(["hat", "bowl"], size=y.shape[0])
data_processor = DataProcessors.binaryclass
data = data_processor().process_train_val_data(X, y, val_size=0.3)
check_add_bias(X, data['X_train']['data'], test = self._test)
check_add_bias(X, data['X_val']['data'], test = self._test)
check_split(X, y, data['X_train']['data'], data['y_train']['data'], data['X_val']['data'], data['y_val']['data'], test=self._test)
check_label_encoder(data['y_train']['data'], test=self._test)
check_label_encoder(data['y_val']['data'], test=self._test)
def test_binaryclass_X_test_data(self, get_logistic_regression_data):
X, y = get_logistic_regression_data
y = np.random.choice(["hat", "bowl"], size=y.shape[0])
data_processor = DataProcessors.binaryclass
data = data_processor().process_X_test_data(X)
check_add_bias(X, data['X_test']['data'],test = self._test)
def test_binaryclass_y_test_data(self, get_logistic_regression_data):
X, y = get_logistic_regression_data
y = np.random.choice(["hat", "bowl"], size=y.shape[0])
data_processor = DataProcessors.binaryclass
data = data_processor().process_y_test_data(y)
check_label_encoder(data['y_test']['data'], test=self._test)
@mark.data_processing
@mark.multiclass_data
class MultiClassDataTests:
_test = "Multi classification data"
def test_multiclass_train_data(self, get_multiclass_data):
X, y = get_multiclass_data
y = np.random.choice(["hat", "bowl", "junky", "riding", "happy"], size=y.shape[0])
data_processor = DataProcessors.multiclass
data = data_processor().process_train_data(X, y)
check_add_bias(X, data['X_train']['data'],test = self._test)
def test_multiclass_train_val_data(self, get_multiclass_data):
X, y = get_multiclass_data
y = np.random.choice(["hat", "bowl", "junky", "riding", "happy"], size=y.shape[0])
data_processor = DataProcessors.multiclass
data = data_processor().process_train_val_data(X, y, val_size=0.3)
check_add_bias(X, data['X_train']['data'], test = self._test)
check_add_bias(X, data['X_val']['data'], test = self._test)
check_split(X, y, data['X_train']['data'], data['y_train']['data'], data['X_val']['data'], data['y_val']['data'], test=self._test)
check_one_hot_label_encoder(data['y_train']['data'], test=self._test)
check_one_hot_label_encoder(data['y_val']['data'], test=self._test)
def test_multiclass_X_test_data(self, get_multiclass_data):
X, y = get_multiclass_data
y = np.random.choice(["hat", "bowl", "junky", "riding", "happy"], size=y.shape[0])
data_processor = DataProcessors.multiclass
data = data_processor().process_X_test_data(X)
check_add_bias(X, data['X_test']['data'],test = self._test)
def test_multiclass_y_test_data(self, get_multiclass_data):
X, y = get_multiclass_data
y = np.random.choice(["hat", "bowl", "junky", "riding", "happy"], size=y.shape[0])
data_processor = DataProcessors.multiclass
data = data_processor().process_y_test_data(y)
check_one_hot_label_encoder(data['y_test']['data'], test=self._test)
| 52.42 | 146 | 0.572937 | [
"BSD-3-Clause"
] | DecisionScients/MLStudio | tests/test_data_services/test_preprocessing.py | 7,863 | Python |
# -*- coding: utf-8 -*-
# TheQube profile dialog
# A part of TheQube accessible social networking client
# Copyright © Andre Polykanine A.K.A. Menelion Elensúlë, 2014 — 2015
from logger import logger
logging = logger.getChild("sessions.twitter.gui.profile")
import config
import sessions
import wx
from core.gui import SquareDialog
import calendar
import time
import rfc822
class TwitterProfileDialog (SquareDialog):
def __init__ (self, user, *args, **kwargs):
super(TwitterProfileDialog, self).__init__(title=_("Profile for %s" % user['screen_name']), *args, **kwargs)
self.user = user
full_url = unicode(self.user['entities']['url']['urls'][0]['expanded_url']) if 'url' in self.user['entities'] else ''
self.screen_name = self.labeled_control(_("Screen name:"), wx.TextCtrl, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.WANTS_CHARS, value=unicode(user['screen_name']))
self.screen_name.Bind(wx.EVT_CHAR, self.charPressed)
self.name = self.labeled_control(_("Real name:"), wx.TextCtrl, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.WANTS_CHARS, value=unicode(user['name']))
self.name.Bind(wx.EVT_CHAR, self.charPressed)
if unicode(user['location']) != '' and unicode(user['location']).lower() != 'none':
self.location = self.labeled_control(_("Location:"), wx.TextCtrl, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.WANTS_CHARS, value=unicode(user['location']))
self.location.Bind(wx.EVT_CHAR, self.charPressed)
self.account_id = self.labeled_control(_("Account ID:"), wx.TextCtrl, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.WANTS_CHARS, value=unicode(user['id']))
self.account_id.Bind(wx.EVT_CHAR, self.charPressed)
if full_url != '' and full_url.lower() != 'none' and full_url.lower() != 'http://none':
self.url = self.labeled_control(_("URL:"), wx.TextCtrl, style=wx.TE_RICH2 | wx.TE_MULTILINE | wx.TE_AUTO_URL | wx.TE_READONLY | wx.WANTS_CHARS, value=full_url)
self.url.Bind(wx.EVT_CHAR, self.charPressed)
if unicode(user['description']) != '' and unicode(user['description']).lower() != 'none':
size = self.Size
size[0] = size[0] / 2
size[1] = -1
self.description = self.labeled_control(_("Bio:"), wx.TextCtrl, style=wx.TE_RICH2 | wx.TE_MULTILINE | wx.TE_READONLY, size=size, value=unicode(user['description']))
self.description.Bind(wx.EVT_CHAR, self.charPressed)
self.protected = self.labeled_control(_("Tweets are protected:"), wx.TextCtrl, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.WANTS_CHARS)
if user['protected']:
self.protected.SetValue(_("Yes"))
else:
self.protected.SetValue(_("No"))
self.protected.Bind(wx.EVT_CHAR, self.charPressed)
self.followers_count = self.labeled_control(_("Number of followers:"), wx.TextCtrl, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.WANTS_CHARS, value=unicode(user['followers_count']))
self.followers_count.Bind(wx.EVT_CHAR, self.charPressed)
self.friends_count = self.labeled_control(_("Number of friends:"), wx.TextCtrl, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.WANTS_CHARS, value=unicode(user['friends_count']))
self.friends_count.Bind(wx.EVT_CHAR, self.charPressed)
self.listed_count = self.labeled_control(_("Number of having this user in their lists:"), wx.TextCtrl, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.WANTS_CHARS, value=unicode(user['listed_count']))
self.listed_count.Bind(wx.EVT_CHAR, self.charPressed)
self.statuses_count = self.labeled_control(_("Number of tweets:"), wx.TextCtrl, parent=self.pane, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.WANTS_CHARS, value=unicode(user['statuses_count']))
self.statuses_count.Bind(wx.EVT_CHAR, self.charPressed)
self.average_tweets = self.labeled_control(_("Average tweets per day since joining Twitter:"), wx.TextCtrl, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.WANTS_CHARS)
self.average_tweets.SetValue(unicode(int(round(int(unicode(user['statuses_count'])) * 86400 / (time.time() - time.mktime(rfc822.parsedate(user['created_at'])))))))
self.average_tweets.Bind(wx.EVT_CHAR, self.charPressed)
self.status_created_at = self.labeled_control(_("Date of last tweet:"), wx.TextCtrl, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.WANTS_CHARS)
if 'status' in user:
self.status_created_at.SetValue(time.strftime('%c', time.localtime(calendar.timegm(rfc822.parsedate(user['status']['created_at'])))))
else:
self.status_created_at.SetValue(_("Not available"))
self.status_created_at.Bind(wx.EVT_CHAR, self.charPressed)
self.created_at = self.labeled_control(_("Date joined Twitter:"), wx.TextCtrl, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.WANTS_CHARS)
self.created_at.SetValue(time.strftime('%c', time.localtime(calendar.timegm(rfc822.parsedate(user['created_at'])))))
self.created_at.Bind(wx.EVT_CHAR, self.charPressed)
self.setup_follow_button(user)
self.btn_close = wx.Button(parent=self.pane, id=wx.ID_CLOSE)
self.btn_close.SetSizerProps(expand = True)
self.SetEscapeId(wx.ID_CLOSE)
self.finish_setup(create_buttons=False)
def charPressed(self, evt):
object = evt.GetEventObject()
key = evt.GetKeyCode()
modifiers = evt.GetModifiers()
if config.main['UI']['stdKeyHandling'] and key in (wx.WXK_END, wx.WXK_HOME):
evt.Skip()
elif key == wx.WXK_HOME and not modifiers:
object.SetInsertionPoint(0)
elif key == wx.WXK_END and not modifiers:
object.SetInsertionPointEnd()
elif key == wx.WXK_HOME and modifiers == wx.MOD_SHIFT:
object.SetSelection(object.GetInsertionPoint(), 0)
elif key == wx.WXK_END and modifiers == wx.MOD_SHIFT:
object.SetSelection(object.GetInsertionPoint(), len(object.GetValue()))
elif key == 1 and modifiers == wx.MOD_CONTROL:
object.SetInsertionPoint(0)
object.SetSelection(0, len(object.GetValue()))
else:
evt.Skip()
def setup_follow_button (self, user):
if sessions.current_session.is_current_user(user['screen_name']):
return
if not user['following']:
self.follow_button = wx.Button(parent=self.pane, label=_("Follow %s") % user['name'])
self.follow_button.Bind(wx.EVT_BUTTON, self.follow)
else:
self.follow_button = wx.Button(parent=self.pane, label=_("Unfollow %s") % user['name'])
self.follow_button.Bind(wx.EVT_BUTTON, self.unfollow)
self.follow_button.SetSizerProps(expand=True)
def follow (self, evt):
evt.Skip()
sessions.current_session.follow(screen_name=self.user['screen_name'])
def unfollow (self, evt):
evt.Skip()
sessions.current_session.do_unfollow(screen_name=self.user['screen_name'], action=0)
| 58.053097 | 199 | 0.729726 | [
"MIT"
] | Oire/TheQube | src/session/twitter/gui/profile.py | 6,565 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import hashlib
import os
from datetime import timedelta
from time import sleep
from typing import Any, Dict, Iterable
from airflow.exceptions import (
AirflowException, AirflowRescheduleException, AirflowSensorTimeout, AirflowSkipException,
)
from airflow.models import BaseOperator, SkipMixin, TaskReschedule
from airflow.ti_deps.deps.ready_to_reschedule import ReadyToRescheduleDep
from airflow.utils import timezone
from airflow.utils.decorators import apply_defaults
class BaseSensorOperator(BaseOperator, SkipMixin):
"""
Sensor operators are derived from this class and inherit these attributes.
Sensor operators keep executing at a time interval and succeed when
    a criterion is met and fail if and when they time out.
:param soft_fail: Set to true to mark the task as SKIPPED on failure
:type soft_fail: bool
:param poke_interval: Time in seconds that the job should wait in
        between each try
:type poke_interval: float
:param timeout: Time, in seconds before the task times out and fails.
:type timeout: float
:param mode: How the sensor operates.
Options are: ``{ poke | reschedule }``, default is ``poke``.
When set to ``poke`` the sensor is taking up a worker slot for its
whole execution time and sleeps between pokes. Use this mode if the
expected runtime of the sensor is short or if a short poke interval
is required. Note that the sensor will hold onto a worker slot and
a pool slot for the duration of the sensor's runtime in this mode.
When set to ``reschedule`` the sensor task frees the worker slot when
        the criterion is not yet met and it's rescheduled at a later time. Use
        this mode if the time before the criterion is met is expected to be
quite long. The poke interval should be more than one minute to
prevent too much load on the scheduler.
:type mode: str
    :param exponential_backoff: allow progressively longer waits between
pokes by using exponential backoff algorithm
:type exponential_backoff: bool
"""
ui_color = '#e6f1f2' # type: str
valid_modes = ['poke', 'reschedule'] # type: Iterable[str]
@apply_defaults
def __init__(self,
poke_interval: float = 60,
timeout: float = 60 * 60 * 24 * 7,
soft_fail: bool = False,
mode: str = 'poke',
exponential_backoff: bool = False,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.poke_interval = poke_interval
self.soft_fail = soft_fail
self.timeout = timeout
self.mode = mode
self.exponential_backoff = exponential_backoff
self._validate_input_values()
def _validate_input_values(self) -> None:
if not isinstance(self.poke_interval, (int, float)) or self.poke_interval < 0:
raise AirflowException(
"The poke_interval must be a non-negative number")
if not isinstance(self.timeout, (int, float)) or self.timeout < 0:
raise AirflowException(
"The timeout must be a non-negative number")
if self.mode not in self.valid_modes:
raise AirflowException(
"The mode must be one of {valid_modes},"
"'{d}.{t}'; received '{m}'."
.format(valid_modes=self.valid_modes,
d=self.dag.dag_id if self.dag else "",
t=self.task_id, m=self.mode))
def poke(self, context: Dict) -> bool:
"""
Function that the sensors defined while deriving this class should
override.
"""
raise AirflowException('Override me.')
def execute(self, context: Dict) -> Any:
started_at = timezone.utcnow()
try_number = 1
log_dag_id = self.dag.dag_id if self.has_dag() else ""
if self.reschedule:
# If reschedule, use first start date of current try
task_reschedules = TaskReschedule.find_for_task_instance(context['ti'])
if task_reschedules:
started_at = task_reschedules[0].start_date
try_number = len(task_reschedules) + 1
while not self.poke(context):
if (timezone.utcnow() - started_at).total_seconds() > self.timeout:
# If sensor is in soft fail mode but will be retried then
# give it a chance and fail with timeout.
# This gives the ability to set up non-blocking AND soft-fail sensors.
if self.soft_fail and not context['ti'].is_eligible_to_retry():
self._do_skip_downstream_tasks(context)
raise AirflowSkipException(
f"Snap. Time is OUT. DAG id: {log_dag_id}")
else:
raise AirflowSensorTimeout(
f"Snap. Time is OUT. DAG id: {log_dag_id}")
if self.reschedule:
reschedule_date = timezone.utcnow() + timedelta(
seconds=self._get_next_poke_interval(started_at, try_number))
raise AirflowRescheduleException(reschedule_date)
else:
sleep(self._get_next_poke_interval(started_at, try_number))
try_number += 1
self.log.info("Success criteria met. Exiting.")
def _do_skip_downstream_tasks(self, context: Dict) -> None:
downstream_tasks = context['task'].get_flat_relatives(upstream=False)
self.log.debug("Downstream task_ids %s", downstream_tasks)
if downstream_tasks:
self.skip(context['dag_run'], context['ti'].execution_date, downstream_tasks)
def _get_next_poke_interval(self, started_at, try_number):
"""
        Uses logic similar to the exponential backoff retry delay used for operators.
"""
if self.exponential_backoff:
min_backoff = int(self.poke_interval * (2 ** (try_number - 2)))
current_time = timezone.utcnow()
run_hash = int(hashlib.sha1("{}#{}#{}#{}".format(
self.dag_id, self.task_id, started_at, try_number
).encode("utf-8")).hexdigest(), 16)
modded_hash = min_backoff + run_hash % min_backoff
delay_backoff_in_seconds = min(
modded_hash,
timedelta.max.total_seconds() - 1
)
new_interval = min(self.timeout - int((current_time - started_at).total_seconds()),
delay_backoff_in_seconds)
self.log.info("new %s interval is %s", self.mode, new_interval)
return new_interval
else:
return self.poke_interval
@property
def reschedule(self):
"""Define mode rescheduled sensors."""
return self.mode == 'reschedule'
# pylint: disable=no-member
@property
def deps(self):
"""
Adds one additional dependency for all sensor operators that
checks if a sensor task instance can be rescheduled.
"""
if self.reschedule:
return BaseOperator.deps.fget(self) | {ReadyToRescheduleDep()}
return BaseOperator.deps.fget(self)
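# Illustrative sketch (not part of upstream Airflow): the smallest useful
# subclass only overrides poke(); BaseSensorOperator supplies the
# poke/reschedule loop, timeout, soft_fail and exponential backoff handling
# described above. The "filepath" argument is an assumption made for this
# example only.
class _ExampleFileSensor(BaseSensorOperator):
    @apply_defaults
    def __init__(self, filepath: str, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.filepath = filepath
    def poke(self, context: Dict) -> bool:
        # Returning True ends the sensor run; returning False triggers another
        # poke or a reschedule, depending on the configured mode.
        self.log.info("Poking for file %s", self.filepath)
        return os.path.exists(self.filepath)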
def poke_mode_only(cls):
"""
Class Decorator for child classes of BaseSensorOperator to indicate
    that instances of this class are only safe to use in poke mode.
Will decorate all methods in the class to assert they did not change
the mode from 'poke'.
:param cls: BaseSensor class to enforce methods only use 'poke' mode.
:type cls: type
"""
def decorate(cls_type):
def mode_getter(_):
return 'poke'
def mode_setter(_, value):
if value != 'poke':
raise ValueError(
f"cannot set mode to 'poke'.")
if not issubclass(cls_type, BaseSensorOperator):
raise ValueError(f"poke_mode_only decorator should only be "
f"applied to subclasses of BaseSensorOperator,"
f" got:{cls_type}.")
cls_type.mode = property(mode_getter, mode_setter)
return cls_type
return decorate(cls)
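# Illustrative sketch (assumption, not upstream code): applying the decorator to
# a sensor whose implementation is only correct in poke mode. After decoration,
# setting mode='reschedule' on an instance raises a ValueError.
@poke_mode_only
class _ExamplePokeOnlySensor(BaseSensorOperator):
    def poke(self, context: Dict) -> bool:
        return True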
if 'BUILDING_AIRFLOW_DOCS' in os.environ:
# flake8: noqa: F811
# Monkey patch hook to get good function headers while building docs
apply_defaults = lambda x: x
| 41.769231 | 96 | 0.638609 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | CoverGenius/airflow | airflow/sensors/base_sensor_operator.py | 9,231 | Python |
import numpy as np
import matplotlib.pyplot as plt
import bisect
from lmfit import Parameters
import astropy.constants as cst
from edibles.models import ContinuumModel, VoigtModel
from edibles.utils.edibles_spectrum import EdiblesSpectrum
class Sightline:
'''A model of the sightline between the telescope and the target star.
Args:
Spectrum (EdiblesSpectrum): The input spectrum object
n_anchors (int): Optional, The number of anchors in the ContinuumSpline
'''
def __init__(self, Spectrum, init_cont=True, n_anchors=4):
self.__dict__.update(Spectrum.__dict__)
self.wave = Spectrum.wave
self.flux = Spectrum.flux
self.Spectrum = Spectrum
if init_cont:
cont_model = ContinuumModel(n_anchors=n_anchors)
cont_pars = cont_model.guess(self.flux, x=self.wave)
for yname in cont_model.ynames:
flux_range = np.max(self.flux) - np.min(self.flux)
ymin = cont_pars[yname].value - (flux_range / 2)
ymax = cont_pars[yname].value + (flux_range / 2)
cont_pars[yname].set(min=ymin, max=ymax)
self.cont_model = cont_model
self.cont_model_pars = cont_pars
self.complete_model = cont_model
self.all_pars = cont_pars
self.peaks = []
self.n_anchors = n_anchors
self.n_lines = 0
self.num_prior_lines = 0
self.source_names = []
self.add_source("Telluric", similar={'b': 2})
self.add_source("Nontelluric", similar=None)
def add_source(self, name, similar=None):
'''Adds a new source of absorption to the sightline.
The purpose of a source is to hold multiple line models
        together, sometimes with similar parameters
Args:
name (str): The name of the absorption source
similar (dict): A dict of parameters that change with the source,
not the specific line, default: None, example: similar={'b': 3}
'''
self.source_names.append(name)
if name == "Telluric" and similar is not None:
par = Parameters()
for key in similar:
par.add(name + '_' + key, value=similar[key], min=0, max=30)
self.telluric_pars = par
self.all_pars = self.all_pars + par
def add_line(self, name, source=None, pars=None, guess_data=None):
'''Adds a new line to a given absorption source.
If no source is given, a new one will be created.
Args:
name (str): The name of the line
source (str): the name of the source this line will belong to
pars (dict): user input parameters
guess_data (1darray): flux data to guess with
'''
assert source is not None, "Source must not be None"
if source not in self.source_names:
print()
print('Could not find source \'{}\' in source_names.'.format(source))
print('Creating source \'{}\''.format(source))
self.add_source(source)
new_line = VoigtModel(prefix=source + '_' + name + '_')
if guess_data is not None:
new_pars = new_line.guess(guess_data, x=self.wave)
else:
new_pars = new_line.guess(self.flux, x=self.wave)
if pars is not None:
for par in pars: # lam_0...
par_name = source + '_' + name + '_' + par # telluric_line1_lam_0...
new_pars[par_name].set(value=pars[par])
if source == "Telluric":
b_name = source + '_b'
new_pars[source + '_' + name + '_b'].set(expr=b_name)
new_pars[source + '_' + name + '_lam_0'].set(
min=self.Spectrum.xmin, max=self.Spectrum.xmax
)
self.old_complete_model = self.complete_model
self.complete_model = self.complete_model * new_line
self.old_all_pars = self.all_pars
self.all_pars = self.all_pars + new_pars
self.old_cont_model = self.cont_model
self.old_cont_pars = self.cont_model_pars
if source == "Telluric":
try:
self.old_telluric_model = self.telluric_model
self.telluric_model = self.telluric_model * new_line
except AttributeError:
self.old_telluric_model = new_line
self.telluric_model = new_line
try:
self.old_telluric_pars = self.telluric_pars
self.telluric_pars = self.telluric_pars + new_pars
except AttributeError:
print('Something bad is probably happening')
self.old_telluric_pars = new_pars
self.telluric_pars = new_pars
else:
try:
self.old_nontelluric_model = self.nontelluric_model
self.nontelluric_model = self.nontelluric_model * new_line
except AttributeError:
self.old_nontelluric_model = new_line
self.nontelluric_model = new_line
try:
self.old_nontelluric_pars = self.nontelluric_pars
self.nontelluric_pars = self.nontelluric_pars + new_pars
except AttributeError:
self.old_nontelluric_pars = new_pars
self.nontelluric_pars = new_pars
lambda_name = source + '_' + name + '_lam_0'
index = bisect.bisect(self.peaks, new_pars[lambda_name])
self.peaks.insert(index, new_pars[lambda_name])
self.most_recent = source + '_' + name
self.n_lines += 1
def fit(self, data=None, old=False, x=None, report=False,
plot=False, weights=None, method='leastsq', **kwargs):
'''Fits a model to the sightline data given by the EdiblesSpectrum object.
Args:
data (1darray): Flux data to fit
params (lmfit.parameter.Parameters): Initial parameters to fit
model (lmfit.model.CompositeModel): The model to fit, default: self.complete_model
x (1darray): Wavelength data to fit
report (bool): default False: If true, prints the report from the fit.
plot (bool): default False: If true, plots the data and the fit model.
method (str): The method of fitting. default: leastsq
'''
if data is None:
data = self.flux
if x is None:
x = self.wave
if old is True:
model = self.old_complete_model
params = self.old_all_pars
else:
model = self.complete_model
params = self.all_pars
self.result = model.fit(data=data,
params=params,
x=x,
weights=weights,
method=method,
**kwargs)
if report:
print(self.result.fit_report())
self.result.params.pretty_print()
if plot:
self.result.plot_fit()
plt.show()
# Update parameter values after fit - for use in model separation
self.all_pars = self.result.params
# create new parameters object and add to it from the results parameters
if old is False:
try:
tell_pars = Parameters()
for par_name in self.telluric_pars:
tell_pars.add(self.all_pars[par_name])
# update attribute
assert len(self.telluric_pars) == len(tell_pars)
self.telluric_pars = tell_pars
except AttributeError:
pass
try:
non_tell_pars = Parameters()
for par_name in self.nontelluric_pars:
non_tell_pars.add(self.all_pars[par_name])
assert len(self.nontelluric_pars) == len(non_tell_pars)
self.nontelluric_pars = non_tell_pars
except AttributeError:
pass
try:
cont_pars = Parameters()
for par_name in self.cont_model_pars:
cont_pars.add(self.all_pars[par_name])
assert len(self.cont_model_pars) == len(cont_pars)
self.cont_model_pars = cont_pars
except AttributeError:
pass
def freeze(self, pars=None, prefix=None, freeze_cont=True, unfreeze=False):
'''Freezes the current params, so you can still add to the
model but the 'old' parameters will not change
Args:
prefix (str): Prefix of parameters to freeze, default: None, example: 'Telluric'
freeze_cont (bool): Freeze the continuum or not, default: True
unfreeze (bool): unfreezes all parameters except x values of
spline anchors, default=False
'''
if pars is None:
pars = self.all_pars
if unfreeze is False:
if prefix:
for par in pars:
if prefix in par:
pars[par].set(vary=False)
else:
for par in pars:
pars[par].set(vary=False)
if not freeze_cont:
for par in pars:
if 'y_' in par:
pars[par].set(vary=True)
if unfreeze is True:
for par in pars:
if ('y_' in par):
pars[par].set(vary=True)
if ('Telluric' in par) and (par[-2:] != '_b'):
pars[par].set(vary=True)
pars['Telluric_b'].set(vary=True)
if ('Nontelluric' in par) and (par[-2:] != '_d'):
pars[par].set(vary=True)
def separate(self, data, x, old=False, plot=True):
'''Separate the sources that were added to Sightline.
Args:
data (1darray): FLux data to use for separation
x (1darray): Wavelength array to use
old (bool): If true, uses the older, second-most recent model and parameters
            plot (bool): If true, plots the separated spectrum
'''
assert len(self.telluric_pars) > 0
assert len(self.nontelluric_pars) > 0
if old is True:
model = self.old_complete_model
params = self.old_all_pars
telluric_model = self.old_telluric_model
telluric_params = self.old_telluric_pars
nontelluric_model = self.old_nontelluric_model
nontelluric_params = self.old_nontelluric_pars
cont_model = self.old_cont_model
cont_params = self.old_cont_pars
else:
model = self.complete_model
params = self.all_pars
telluric_model = self.telluric_model
telluric_params = self.telluric_pars
nontelluric_model = self.nontelluric_model
nontelluric_params = self.nontelluric_pars
cont_model = self.cont_model
cont_params = self.cont_model_pars
if len(self.source_names) == 2:
complete_out = model.eval(
data=data,
params=params,
x=x
)
telluric_out = telluric_model.eval(
data=data,
params=telluric_params,
x=x
)
nontelluric_out = nontelluric_model.eval(
data=data,
params=nontelluric_params,
x=x
)
cont_out = cont_model.eval(
data=data,
params=cont_params,
x=x
)
if plot:
plt.plot(x, data, label='Data', color='k')
plt.plot(x, complete_out, label='Final model', color='r')
plt.plot(x, data - complete_out, label='Residual', color='g')
plt.plot(x, telluric_out * cont_out, label='Telluric model')
plt.plot(x, nontelluric_out * cont_out, label='Non-telluric model')
plt.xlabel(r'Wavelength ($\AA$)', fontsize=14)
plt.ylabel('Flux', fontsize=14)
plt.legend()
plt.show()
return complete_out, telluric_out, nontelluric_out, cont_out
if __name__ == "__main__":
FILE1 = "/HD170740/RED_860/HD170740_w860_redl_20140915_O12.fits"
xmin = 7661.75
xmax = 7669
sp1 = EdiblesSpectrum(FILE1)
sp1.getSpectrum(xmin=xmin, xmax=xmax)
sightline = Sightline(sp1, n_anchors=5)
# Add line with auto-guessed params
sightline.add_line(name='line1', source='Telluric')
# Add line with user defined params
pars = {'d': 0.01, 'tau_0': 0.6, 'lam_0': 7664.8}
sightline.add_line(name='line2', pars=pars, source='Telluric')
# # ###############################################################
# # Fit and plot
sightline.fit(report=True, plot=False, method='leastsq')
out = sightline.complete_model.eval(data=sp1.flux, params=sightline.result.params, x=sp1.wave)
resid = sp1.flux - out
# Add line with different source
lam_0 = 7665.25
K_Gamma = 3.820e7
K_d = K_Gamma * lam_0**2 / (4 * np.pi * (cst.c.to("cm/s").value * 1e8))
pars = {'d': K_d, 'tau_0': 0.07, 'lam_0': lam_0}
sightline.add_line(name='line3', source='Nontelluric', pars=pars)
sightline.all_pars['Nontelluric_line3_d'].set(vary=False)
# sightline.fit(report=True, plot=False, method='leastsq')
# out = sightline.complete_model.eval(data=sp1.flux, params=sightline.result.params, x=sp1.wave)
# resid = sp1.flux - out
lam_0 = 7665.33
pars = {'d': K_d, 'tau_0': 0.01, 'b': 1, 'lam_0': lam_0}
sightline.add_line(name='line4', source='Nontelluric', pars=pars)
sightline.all_pars['Nontelluric_line4_d'].set(vary=False)
# sightline.fit(report=True, plot=False, method='leastsq')
lam_0 = 7665.15
pars = {'d': K_d, 'tau_0': 0.001, 'b': 1, 'lam_0': lam_0}
sightline.add_line(name='line5', source='Nontelluric', pars=pars)
sightline.all_pars['Nontelluric_line5_d'].set(vary=False)
sightline.fit(report=True, plot=False, method='leastsq')
pars = {'d': 0.01, 'tau_0': 0.01, 'b': 1, 'lam_0': 7662}
sightline.add_line(name='line6', source='Telluric', pars=pars)
sightline.fit(report=True, plot=False, method='leastsq')
pars = {'d': 0.01, 'tau_0': 0.01, 'b': 1, 'lam_0': 7663.7}
sightline.add_line(name='line7', source='Telluric', pars=pars)
sightline.fit(report=True, plot=False, method='leastsq')
pars = {'d': 0.01, 'tau_0': 0.01, 'b': 1, 'lam_0': 7666.5}
sightline.add_line(name='line8', source='Telluric', pars=pars)
sightline.fit(report=True, plot=False, method='leastsq')
pars = {'d': 0.01, 'tau_0': 0.01, 'b': 1, 'lam_0': 7667.5}
sightline.add_line(name='line9', source='Telluric', pars=pars)
sightline.fit(report=True, plot=False, method='leastsq')
out = sightline.complete_model.eval(data=sp1.interp_flux, params=sightline.result.params,
x=sp1.grid)
resid = sp1.interp_flux - out
plt.plot(sp1.grid, sp1.interp_flux)
plt.plot(sp1.grid, out)
plt.plot(sp1.grid, resid)
plt.show()
sightline.separate(data=sp1.interp_flux, x=sp1.grid)
| 33.404762 | 100 | 0.573447 | [
"MIT"
] | jancami/edibles | edibles/sightline.py | 15,433 | Python |
# _tmp_
import datetime
from copy import deepcopy
# _tmp_
from pymongo import MongoClient
import pymongo
import pandas as pd
import numpy as np
import sys
pd.options.mode.chained_assignment = None
class AnalyzerDatabaseManager(object):
def __init__(self, db_config, config):
self._db_config = db_config
self._config = config
def aggregate_data(self, model_type, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[],
metric=None, threshold=None):
if model_type == "failed_request_ratio":
return self._aggregate_data_for_failed_request_ratio_model(agg_minutes=agg_minutes, start_time=start_time,
end_time=end_time, ids_to_exclude=ids_to_exclude)
elif model_type == "duplicate_message_ids":
return self._aggregate_data_for_duplicate_message_id_model(agg_minutes=agg_minutes, start_time=start_time,
end_time=end_time, ids_to_exclude=ids_to_exclude)
elif model_type == "time_sync_errors":
return self._aggregate_data_for_time_sync_model(relevant_metric=metric, threshold=threshold,
agg_minutes=agg_minutes, start_time=start_time, end_time=end_time,
ids_to_exclude=ids_to_exclude)
else:
return None
def aggregate_data_for_historic_averages_model(self, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[], service_calls=None):
# create connection
clean_data = self._get_clean_data_collection()
# nested fields need to be projected (select field from client if, exists, else from producer)
project_dict = self._get_clean_data_projection_dict()
# conditions to filter the data before processing
filter_dict_elems = [{'succeeded': True, 'correctorStatus': 'done'}]
if len(ids_to_exclude) > 0:
id_exclude_query = {'_id': {'$nin': ids_to_exclude}}
filter_dict_elems.append(id_exclude_query)
if start_time is not None:
start_time_query = {self._config.timestamp_field: {"$gte": start_time}}
filter_dict_elems.append(start_time_query)
if end_time is not None:
end_time_query = {self._config.timestamp_field: {"$lt": end_time}}
filter_dict_elems.append(end_time_query)
if service_calls is not None and len(service_calls) > 0:
for col in self._config.service_call_fields:
service_calls.loc[service_calls[col] == "-", col] = None
service_call_query = {"$or": service_calls.to_dict(orient="records")}
filter_dict_elems.append(service_call_query)
if len(filter_dict_elems) == 1:
filter_dict = filter_dict_elems[0]
elif len(filter_dict_elems) > 1:
filter_dict = {"$and": filter_dict_elems}
# set up elements to group by (service call fields and temporal aggregation window)
group_dict = {col: "$%s" % col for col in self._config.service_call_fields}
group_dict[self._config.timestamp_field] = {
"$subtract": [
"$%s" % self._config.timestamp_field,
{"$mod": ["$%s" % self._config.timestamp_field, 1000 * 60 * agg_minutes]}
]}
res = clean_data.aggregate([
{'$project': project_dict},
{'$match': filter_dict},
{'$group': {
"_id": group_dict,
"request_count": {"$sum": 1},
"mean_request_size": {"$avg": "$requestSize"},
"mean_response_size": {"$avg": "$responseSize"},
"mean_client_duration": {"$avg": "$totalDuration"},
"mean_producer_duration": {"$avg": "$producerDurationProducerView"},
"request_ids": {"$push": "$_id"}}}],
allowDiskUse=True, maxTimeMS=14400000)
# _tmp_
print(datetime.datetime.now().strftime('%H:%M:%s') + " aggregate_data_for_historic_averages_model_start ")
results = []
for item_tmp in res:
# print(datetime.datetime.now().strftime('%H:%M:%s') + " aggregate_data_for_historic_averages_model " + str(item_tmp))
results.append(item_tmp)
print(datetime.datetime.now().strftime('%H:%M:%s') + " aggregate_data_for_historic_averages_model_end ")
# _tmp_
# return self._generate_dataframe(list(res))
return self._generate_dataframe(results)
def add_first_request_timestamps_from_clean_data(self, data=None):
# create connection
clean_data = self._get_clean_data_collection()
        # nested fields need to be projected (select field from client if it exists, else from producer)
project_dict = self._get_clean_data_projection_dict()
# conditions to filter the data before processing
filter_dict = {'correctorStatus': 'done'}
if data is not None:
for col in self._config.service_call_fields:
data.loc[data[col] == "-", col] = None
filter_dict["$or"] = data.to_dict(orient="records")
# set up elements to group by (service call fields and temporal aggregation window)
group_dict = {col: "$%s" % col for col in self._config.service_call_fields}
res = clean_data.aggregate([
{'$project': project_dict},
{'$match': filter_dict},
{'$group': {
"_id": group_dict,
self._config.timestamp_field: {"$min": "$%s" % self._config.timestamp_field}}}],
allowDiskUse=True, maxTimeMS=14400000)
# _tmp_
results = []
print(datetime.datetime.now().strftime('%H:%M:%s') + " add_first_request_timestamps_from_clean_data_start ")
for item_tmp in res:
# print(datetime.datetime.now().strftime('%H:%M:%s') + " add_first_request_timestamps_from_clean_data " + str(item_tmp))
results.append(item_tmp)
print(datetime.datetime.now().strftime('%H:%M:%s') + " add_first_request_timestamps_from_clean_data_end ")
# _tmp_
# res = list(res)
res = deepcopy(results)
if len(res) == 0:
return
# res = self._generate_dataframe(list(res))
res = self._generate_dataframe(res)
res = res.sort_values(self._config.timestamp_field, ascending=True).drop_duplicates(self._config.service_call_fields)
# exclude service calls that already exist in the first timestamps table
existing_first_timestamps = self.get_first_timestamps_for_service_calls()
if len(existing_first_timestamps) > 0:
res = res.merge(existing_first_timestamps[self._config.service_call_fields + ["first_request_timestamp"]],
on=self._config.service_call_fields, how="left")
res = res[pd.isnull(res.first_request_timestamp)].drop("first_request_timestamp", axis=1)
res = res.rename(columns={self._config.timestamp_field: "first_request_timestamp"})
res.first_request_timestamp = pd.to_datetime(res.first_request_timestamp, unit='ms')
res = res.assign(first_incident_timestamp=None)
res = res.assign(first_model_retrain_timestamp=None)
res = res.assign(first_model_train_timestamp=None)
# add new service calls
scft = self._get_service_call_first_timestamps_collection()
if len(res) > 0:
scft.insert_many(res.to_dict('records'))
def update_first_timestamps(self, field, value, service_calls=None):
scft = self._get_service_call_first_timestamps_collection()
scft.update({"$or": service_calls.to_dict(orient="records")}, {"$set": {field: value}}, upsert=False, multi=True)
def update_first_train_retrain_timestamps(self, sc_first_model, sc_second_model, current_time):
if len(sc_first_model) > 0:
self.update_first_timestamps(field="first_model_train_timestamp",
value=current_time,
service_calls=sc_first_model[self._config.service_call_fields])
if len(sc_second_model) > 0:
self.update_first_timestamps(field="first_model_retrain_timestamp",
value=current_time,
service_calls=sc_second_model[self._config.service_call_fields])
def _aggregate_data_for_failed_request_ratio_model(self, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[]):
# create connection
clean_data = self._get_clean_data_collection()
        # nested fields need to be projected (select field from client if it exists, else from producer)
project_dict = self._get_clean_data_projection_dict()
filter_dict_elems = [{'correctorStatus': 'done'}]
# conditions to filter the data before processing
if len(ids_to_exclude) > 0:
id_exclude_query = {'_id': {'$nin': ids_to_exclude}}
filter_dict_elems.append(id_exclude_query)
if start_time is not None:
start_time_query = {self._config.timestamp_field: {"$gte": start_time}}
filter_dict_elems.append(start_time_query)
if end_time is not None:
end_time_query = {self._config.timestamp_field: {"$lt": end_time}}
filter_dict_elems.append(end_time_query)
if len(filter_dict_elems) == 1:
filter_dict = filter_dict_elems[0]
elif len(filter_dict_elems) > 1:
filter_dict = {"$and": filter_dict_elems}
else:
filter_dict = {}
# set up elements to group by (service call fields and temporal aggregation window)
group_dict = {col: "$%s" % col for col in self._config.service_call_fields}
group_dict[self._config.timestamp_field] = {
"$subtract": [
"$%s" % self._config.timestamp_field,
{"$mod": ["$%s" % self._config.timestamp_field, 1000 * 60 * agg_minutes]}
]}
group_dict['succeeded'] = '$succeeded'
res = clean_data.aggregate([
{'$project': project_dict},
{'$match': filter_dict},
{'$group': {
"_id": group_dict,
'count': {'$sum': 1},
"request_ids": {"$push": "$_id"}}}],
allowDiskUse=True, maxTimeMS=14400000)
# _tmp_
print(datetime.datetime.now().strftime('%H:%M:%s') + " _aggregate_data_for_failed_request_ratio_model_start ")
results = []
for item_tmp in res:
# print(datetime.datetime.now().strftime('%H:%M:%s') + " _aggregate_data_for_failed_request_ratio_model " + str(item_tmp))
results.append(item_tmp)
print(datetime.datetime.now().strftime('%H:%M:%s') + " _aggregate_data_for_failed_request_ratio_model_end ")
# _tmp_
# return self._generate_dataframe(list(res))
return self._generate_dataframe(results)
def _aggregate_data_for_duplicate_message_id_model(self, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[]):
# create connection
clean_data = self._get_clean_data_collection()
        # nested fields need to be projected (select field from client if it exists, else from producer)
project_dict = self._get_clean_data_projection_dict()
# conditions to filter the data before processing
filter_dict_elems = [{'succeeded': True, 'correctorStatus': 'done'}]
if len(ids_to_exclude) > 0:
id_exclude_query = {'_id': {'$nin': ids_to_exclude}}
filter_dict_elems.append(id_exclude_query)
if start_time is not None:
start_time_query = {self._config.timestamp_field: {"$gte": start_time}}
filter_dict_elems.append(start_time_query)
if end_time is not None:
end_time_query = {self._config.timestamp_field: {"$lt": end_time}}
filter_dict_elems.append(end_time_query)
if len(filter_dict_elems) == 1:
filter_dict = filter_dict_elems[0]
elif len(filter_dict_elems) > 1:
filter_dict = {"$and": filter_dict_elems}
# set up elements to group by (service call fields and temporal aggregation window)
group_dict = {col: "$%s" % col for col in self._config.service_call_fields}
group_dict[self._config.timestamp_field] = {
"$subtract": [
"$%s" % self._config.timestamp_field,
{"$mod": ["$%s" % self._config.timestamp_field, 1000 * 60 * agg_minutes]}
]}
group_dict['messageId'] = '$messageId'
res = clean_data.aggregate([
{'$project': project_dict},
{'$match': filter_dict},
{'$group': {"_id": group_dict,
'message_id_count': {'$sum': 1},
"request_ids": {"$push": "$_id"}}},
{'$match': {'message_id_count': {"$gt": 1}}}],
allowDiskUse=True, maxTimeMS=14400000)
# _tmp_
print(datetime.datetime.now().strftime('%H:%M:%s') + " _aggregate_data_for_duplicate_message_id_model_start ")
results = []
for item_tmp in res:
# print(datetime.datetime.now().strftime('%H:%M:%s') + " _aggregate_data_for_duplicate_message_id_model " + str(item_tmp))
results.append(item_tmp)
print(datetime.datetime.now().strftime('%H:%M:%s') + " _aggregate_data_for_duplicate_message_id_model_end ")
# _tmp_
# return self._generate_dataframe(list(res))
return self._generate_dataframe(results)
def _aggregate_data_for_time_sync_model(self, relevant_metric, threshold, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[]):
# create connection
clean_data = self._get_clean_data_collection()
        # nested fields need to be projected (select field from client if it exists, else from producer)
project_dict = self._get_clean_data_projection_dict()
# conditions to filter the data before processing
filter_dict_elems = [{'succeeded': True, 'correctorStatus': 'done'}]
if len(ids_to_exclude) > 0:
id_exclude_query = {'_id': {'$nin': ids_to_exclude}}
filter_dict_elems.append(id_exclude_query)
if start_time is not None:
start_time_query = {self._config.timestamp_field: {"$gte": start_time}}
filter_dict_elems.append(start_time_query)
if end_time is not None:
end_time_query = {self._config.timestamp_field: {"$lt": end_time}}
filter_dict_elems.append(end_time_query)
if len(filter_dict_elems) == 1:
filter_dict = filter_dict_elems[0]
elif len(filter_dict_elems) > 1:
filter_dict = {"$and": filter_dict_elems}
# set up elements to group by (service call fields and temporal aggregation window)
group_dict = {col: "$%s" % col for col in self._config.service_call_fields}
group_dict[self._config.timestamp_field] = {
"$subtract": [
"$%s" % self._config.timestamp_field,
{"$mod": ["$%s" % self._config.timestamp_field, 1000 * 60 * agg_minutes]}
]}
res = clean_data.aggregate([
{'$project': project_dict},
{'$match': filter_dict},
{'$group': {"_id": group_dict,
'request_count': {'$sum': 1},
"docs": {"$push":
{relevant_metric: "$%s" % relevant_metric,
"id": "$_id"}}}},
{"$unwind": "$docs"},
{'$match': {'docs.%s' % relevant_metric: {"$lt": threshold}}},
{'$group': {"_id": "$_id",
'erroneous_count': {'$sum': 1},
'avg_erroneous_diff': {'$avg': '$docs.%s' % relevant_metric},
"request_count": {"$first": "$request_count"},
"request_ids": {"$push": "$docs.id"}}}
], allowDiskUse=True, maxTimeMS=14400000)
# _tmp_
print(datetime.datetime.now().strftime('%H:%M:%s') + " _aggregate_data_for_time_sync_model_start ")
results = []
for item_tmp in res:
# print(datetime.datetime.now().strftime('%H:%M:%s') + " _aggregate_data_for_time_sync_model " + str(item_tmp))
results.append(item_tmp)
print(datetime.datetime.now().strftime('%H:%M:%s') + " _aggregate_data_for_time_sync_model_end ")
# _tmp_
# return self._generate_dataframe(list(res))
return self._generate_dataframe(results)
def get_request_ids_from_incidents(self, incident_status=["new", "showed", "normal", "incident", "viewed"],
relevant_anomalous_metrics=None, max_incident_creation_timestamp=None):
filter_dict = {"incident_status": {"$in": incident_status}}
if relevant_anomalous_metrics is not None:
filter_dict["anomalous_metric"] = {"$in": relevant_anomalous_metrics}
if max_incident_creation_timestamp is not None:
filter_dict["incident_creation_timestamp"] = {"$lte": max_incident_creation_timestamp}
incident_collection = self._get_incident_collection()
# request_ids = incident_collection.distinct("request_ids", filter_dict)
request_ids = [doc['_id'] for doc in incident_collection.aggregate([{'$match': filter_dict}, {'$group': {'_id': '$request_ids'}}], allowDiskUse=True)]
return request_ids
def delete_incidents(self, field=None, value=None):
incident_collection = self._get_incident_collection()
if field is None or value is None:
incident_collection.delete_many({})
else:
incident_collection.delete_many({field: value})
def insert_incidents(self, dt_incidents):
incident_collection = self._get_incident_collection()
incident_collection.insert_many(dt_incidents.to_dict('records'))
def get_timestamp(self, ts_type, model_type):
ts_collection = self._get_incident_timestamp_collection()
ts = ts_collection.find_one({"type": ts_type, "model": model_type})
if ts:
return ts["timestamp"]
return ts
def load_model(self, model_name, version=None):
incident_model_collection = self._get_incident_model_collection()
filter_dict = {"model_name": model_name}
if version is not None:
filter_dict["version"] = version
result = incident_model_collection.find(filter_dict)
# _tmp_
print(datetime.datetime.now().strftime('%H:%M:%s') + " load_model_start ")
results = []
for item_tmp in result:
# print(datetime.datetime.now().strftime('%H:%M:%s') + " load_model " + str(item_tmp))
results.append(item_tmp)
print(datetime.datetime.now().strftime('%H:%M:%s') + " load_model_end ")
# _tmp_
# return pd.DataFrame(list(result)).drop("_id", axis=1)
return pd.DataFrame(results).drop("_id", axis=1)
def save_model(self, df, delete_old_version=True):
incident_model_collection = self._get_incident_model_collection()
df = df.to_dict('records')
if delete_old_version and len(df) > 0:
model_name = df[0]["model_name"]
incident_model_collection.delete_many({"model_name": model_name})
incident_model_collection.insert_many(df)
def set_timestamp(self, ts_type, model_type, value):
ts_collection = self._get_incident_timestamp_collection()
ts_collection.update({"type": ts_type, "model": model_type},
{"type": ts_type, "model": model_type, "timestamp": value},
upsert=True)
def get_first_timestamps_for_service_calls(self):
scft = self._get_service_call_first_timestamps_collection()
# results = list(scft.find())
# _tmp_
print(datetime.datetime.now().strftime('%H:%M:%s') + " get_first_timestamps_for_service_calls_start1 ")
results = []
results_tmp = scft.find()
print(datetime.datetime.now().strftime('%H:%M:%s') + " get_first_timestamps_for_service_calls_start2 ")
for item_tmp in results_tmp:
# print(datetime.datetime.now().strftime('%H:%M:%s') + " get_first_timestamps_for_service_calls " + str(item_tmp))
results.append(item_tmp)
print(datetime.datetime.now().strftime('%H:%M:%s') + " get_first_timestamps_for_service_calls_end ")
# _tmp_
if len(results) == 0:
return pd.DataFrame()
data = pd.DataFrame(results).drop("_id", axis=1)
for col in ["first_request_timestamp", "first_model_train_timestamp", "first_incident_timestamp",
"first_model_retrain_timestamp"]:
data.loc[:, col] = pd.to_datetime(data.loc[:, col])
return data
def get_service_calls_for_train_stages(self, time_first_model, time_second_model):
first_timestamps = self.get_first_timestamps_for_service_calls()
if len(first_timestamps) == 0:
return pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
first_model_to_be_trained = first_timestamps[(pd.isnull(first_timestamps.first_model_train_timestamp)) &
(first_timestamps.first_request_timestamp <= time_first_model)]
model_to_be_retrained = first_timestamps[(pd.isnull(first_timestamps.first_model_retrain_timestamp)) &
(first_timestamps.first_incident_timestamp <= time_second_model)]
first_timestamps = first_timestamps[~pd.isnull(first_timestamps.first_model_retrain_timestamp)]
return first_timestamps, first_model_to_be_trained, model_to_be_retrained
def get_service_calls_for_transform_stages(self):
first_timestamps = self.get_first_timestamps_for_service_calls()
first_incidents_to_be_reported = first_timestamps[(pd.isnull(first_timestamps.first_incident_timestamp)) &
(~pd.isnull(first_timestamps.first_model_train_timestamp))]
regular_service_calls = first_timestamps[~pd.isnull(first_timestamps.first_incident_timestamp)]
return regular_service_calls, first_incidents_to_be_reported
def get_data_for_train_stages(self, sc_regular, sc_first_model, sc_second_model, relevant_anomalous_metrics,
max_incident_creation_timestamp, last_fit_timestamp, agg_minutes, max_request_timestamp):
# exclude requests that are part of a "true" incident
ids_to_exclude = self.get_request_ids_from_incidents(
incident_status=["incident"],
relevant_anomalous_metrics=relevant_anomalous_metrics,
max_incident_creation_timestamp=max_incident_creation_timestamp)
# make the timestamps correspond to the millisecond format
if max_request_timestamp is not None:
max_request_timestamp = max_request_timestamp.timestamp() * 1000
if last_fit_timestamp is not None:
last_fit_timestamp = last_fit_timestamp.timestamp() * 1000
data_regular = pd.DataFrame()
data_first_train = pd.DataFrame()
data_first_retrain = pd.DataFrame()
# for the first-time training, don't exclude anything
if len(sc_first_model) > 0:
if len(sc_first_model) > 100:
data_first_train = self.aggregate_data_for_historic_averages_model(agg_minutes=agg_minutes,
end_time=max_request_timestamp)
if len(data_first_train) > 0:
data_first_train = data_first_train.merge(sc_first_model[self._config.service_call_fields])
else:
data_first_train = self.aggregate_data_for_historic_averages_model(
agg_minutes=agg_minutes,
end_time=max_request_timestamp,
service_calls=sc_first_model[self._config.service_call_fields])
# for the second model, exclude queries that were marked as "incident" after the first training,
# but don't limit the start time
if len(sc_second_model) > 0:
if len(sc_second_model) > 100:
data_first_retrain = self.aggregate_data_for_historic_averages_model(agg_minutes=agg_minutes,
end_time=max_request_timestamp,
ids_to_exclude=ids_to_exclude)
if len(data_first_retrain) > 0:
data_first_retrain = data_first_retrain.merge(sc_second_model[self._config.service_call_fields])
else:
data_first_retrain = self.aggregate_data_for_historic_averages_model(
agg_minutes=agg_minutes,
service_calls=sc_second_model[self._config.service_call_fields],
end_time=max_request_timestamp,
ids_to_exclude=ids_to_exclude)
# for regular training, exclude the incidents and limit the start time
if len(sc_regular) > 0:
data_regular = self.aggregate_data_for_historic_averages_model(
agg_minutes=agg_minutes,
start_time=last_fit_timestamp,
end_time=max_request_timestamp,
ids_to_exclude=ids_to_exclude)
if len(data_regular) > 0:
data_regular = data_regular.merge(sc_regular[self._config.service_call_fields])
return data_regular, data_first_train, data_first_retrain
def get_data_for_transform_stages(self, agg_minutes, last_transform_timestamp, current_transform_timestamp,
sc_regular, sc_first_incidents):
data_regular = pd.DataFrame()
data_first_incidents = pd.DataFrame()
# retrieve all data that have appeared after the last transform time
data = self.aggregate_data_for_historic_averages_model(agg_minutes=agg_minutes,
start_time=last_transform_timestamp,
end_time=current_transform_timestamp)
if len(data) > 0:
# exclude service calls that are not past the training period
data_regular = data.merge(sc_regular[self._config.service_call_fields])
if len(sc_first_incidents) > 100:
            # for first-time incident reporting, retrieve all data for these service calls
data_first_incidents = self.aggregate_data_for_historic_averages_model(agg_minutes=agg_minutes,
end_time=current_transform_timestamp)
if len(data_first_incidents) > 0:
data_first_incidents = data_first_incidents.merge(sc_first_incidents[self._config.service_call_fields])
elif len(sc_first_incidents) > 0:
data_first_incidents = self.aggregate_data_for_historic_averages_model(
agg_minutes=agg_minutes,
end_time=current_transform_timestamp,
service_calls=sc_first_incidents[self._config.service_call_fields])
return pd.concat([data_regular, data_first_incidents])
def _get_incident_collection(self):
db_client = MongoClient(self._db_config.MONGODB_URI)
db = db_client[self._db_config.MONGODB_AD]
return db.incident
def _get_incident_model_collection(self):
db_client = MongoClient(self._db_config.MONGODB_URI)
db = db_client[self._db_config.MONGODB_AD]
return db.incident_model
def _get_incident_timestamp_collection(self):
db_client = MongoClient(self._db_config.MONGODB_URI)
db = db_client[self._db_config.MONGODB_AD]
return db.incident_timestamps
def _get_service_call_first_timestamps_collection(self):
db_client = MongoClient(self._db_config.MONGODB_URI)
db = db_client[self._db_config.MONGODB_AD]
return db.service_call_first_timestamps
def _get_clean_data_collection(self):
db_client = MongoClient(self._db_config.MONGODB_URI)
db = db_client[self._db_config.MONGODB_QD]
return db.clean_data
def _get_clean_data_projection_dict(self):
project_dict = {col: {"$ifNull": ["$client.%s" % col, "$producer.%s" % col]}
for col in self._config.relevant_cols_nested}
for col, field1, field2 in self._config.relevant_cols_general_alternative:
project_dict[col] = {"$ifNull": ["$%s" % field1, "$%s" % field2]}
for col in self._config.relevant_cols_general:
project_dict[col] = "$%s" % col
return project_dict
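    # Illustrative example (added note, not from the original module): with hypothetical config
    # values relevant_cols_nested = ["clientMemberCode"],
    # relevant_cols_general_alternative = [("succeeded", "client.succeeded", "producer.succeeded")]
    # and relevant_cols_general = ["requestInTs"], the projection returned above would be roughly
    #   {"clientMemberCode": {"$ifNull": ["$client.clientMemberCode", "$producer.clientMemberCode"]},
    #    "succeeded": {"$ifNull": ["$client.succeeded", "$producer.succeeded"]},
    #    "requestInTs": "$requestInTs"}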
def _generate_dataframe(self, result):
data = pd.DataFrame(result)
if len(data) > 0:
data = pd.concat([data, pd.DataFrame(list(data["_id"]))], axis=1)
data = data.drop(["_id"], axis=1)
data.loc[:, self._config.timestamp_field] = pd.to_datetime(data.loc[:, self._config.timestamp_field], unit='ms')
for col in self._config.service_call_fields:
data.loc[:, col] = data.loc[:, col].fillna("-")
return data
| 50.876923 | 158 | 0.629977 | [
"MIT"
] | JanRaik/X-Road-opmonitor | analysis_module/analyzer/AnalyzerDatabaseManager_tmp.py | 29,763 | Python |
import asyncio
from typing import List
import pytest
from hddcoin.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from hddcoin.full_node.mempool_manager import MempoolManager
from hddcoin.simulator.simulator_protocol import FarmNewBlockProtocol
from hddcoin.types.blockchain_format.coin import Coin
from hddcoin.types.blockchain_format.sized_bytes import bytes32
from hddcoin.types.peer_info import PeerInfo
from hddcoin.util.ints import uint16, uint32, uint64
from hddcoin.wallet.cc_wallet.cc_utils import cc_puzzle_hash_for_inner_puzzle_hash
from hddcoin.wallet.cc_wallet.cc_wallet import CCWallet
from hddcoin.wallet.puzzles.cc_loader import CC_MOD
from hddcoin.wallet.transaction_record import TransactionRecord
from hddcoin.wallet.wallet_coin_record import WalletCoinRecord
from tests.setup_nodes import setup_simulators_and_wallets
from tests.time_out_assert import time_out_assert
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
async def tx_in_pool(mempool: MempoolManager, tx_id: bytes32):
tx = mempool.get_spendbundle(tx_id)
    return tx is not None
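# tx_in_pool is polled below via time_out_assert(15, tx_in_pool, True, mempool_manager, tx_id);
# the helper is assumed to keep re-evaluating the coroutine until it returns the expected value
# or the timeout expires.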
class TestCCWallet:
@pytest.fixture(scope="function")
async def wallet_node(self):
async for _ in setup_simulators_and_wallets(1, 1, {}):
yield _
@pytest.fixture(scope="function")
async def two_wallet_nodes(self):
async for _ in setup_simulators_and_wallets(1, 2, {}):
yield _
@pytest.fixture(scope="function")
async def three_wallet_nodes(self):
async for _ in setup_simulators_and_wallets(1, 3, {}):
yield _
@pytest.mark.asyncio
async def test_colour_creation(self, two_wallet_nodes):
num_blocks = 3
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
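        # Expected balance note (added): the sum below counts pool + farmer rewards only for
        # blocks 1 .. num_blocks - 2, on the assumption that the most recently farmed block's
        # rewards are not yet spendable at this point.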
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100))
tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.tx_store.get_not_sent()
tx_record = tx_queue[0]
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)
@pytest.mark.asyncio
async def test_cc_spend(self, two_wallet_nodes):
num_blocks = 3
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
await server_3.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100))
tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.tx_store.get_not_sent()
tx_record = tx_queue[0]
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)
assert cc_wallet.cc_info.my_genesis_checker is not None
colour = cc_wallet.get_colour()
cc_wallet_2: CCWallet = await CCWallet.create_wallet_for_cc(wallet_node_2.wallet_state_manager, wallet2, colour)
assert cc_wallet.cc_info.my_genesis_checker == cc_wallet_2.cc_info.my_genesis_checker
cc_2_hash = await cc_wallet_2.get_new_inner_hash()
tx_record = await cc_wallet.generate_signed_transaction([uint64(60)], [cc_2_hash])
await wallet.wallet_state_manager.add_pending_transaction(tx_record)
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, cc_wallet.get_confirmed_balance, 40)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 40)
await time_out_assert(30, cc_wallet_2.get_confirmed_balance, 60)
await time_out_assert(30, cc_wallet_2.get_unconfirmed_balance, 60)
cc_hash = await cc_wallet.get_new_inner_hash()
tx_record = await cc_wallet_2.generate_signed_transaction([uint64(15)], [cc_hash])
await wallet.wallet_state_manager.add_pending_transaction(tx_record)
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, cc_wallet.get_confirmed_balance, 55)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 55)
@pytest.mark.asyncio
async def test_get_wallet_for_colour(self, two_wallet_nodes):
num_blocks = 3
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100))
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
colour = cc_wallet.get_colour()
assert await wallet_node.wallet_state_manager.get_wallet_for_colour(colour) == cc_wallet
@pytest.mark.asyncio
async def test_generate_zero_val(self, two_wallet_nodes):
num_blocks = 4
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
await server_3.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100))
await asyncio.sleep(1)
ph = await wallet2.get_new_puzzlehash()
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)
assert cc_wallet.cc_info.my_genesis_checker is not None
colour = cc_wallet.get_colour()
cc_wallet_2: CCWallet = await CCWallet.create_wallet_for_cc(wallet_node_2.wallet_state_manager, wallet2, colour)
await asyncio.sleep(1)
assert cc_wallet.cc_info.my_genesis_checker == cc_wallet_2.cc_info.my_genesis_checker
spend_bundle = await cc_wallet_2.generate_zero_val_coin()
await asyncio.sleep(1)
await time_out_assert(15, tx_in_pool, True, full_node_api.full_node.mempool_manager, spend_bundle.name())
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
async def unspent_count():
unspent: List[WalletCoinRecord] = list(
await cc_wallet_2.wallet_state_manager.get_spendable_coins_for_wallet(cc_wallet_2.id())
)
return len(unspent)
await time_out_assert(15, unspent_count, 1)
unspent: List[WalletCoinRecord] = list(
await cc_wallet_2.wallet_state_manager.get_spendable_coins_for_wallet(cc_wallet_2.id())
)
assert unspent.pop().coin.amount == 0
@pytest.mark.asyncio
async def test_cc_spend_uncoloured(self, two_wallet_nodes):
num_blocks = 3
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
await server_3.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100))
tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.tx_store.get_not_sent()
tx_record = tx_queue[0]
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)
assert cc_wallet.cc_info.my_genesis_checker is not None
colour = cc_wallet.get_colour()
cc_wallet_2: CCWallet = await CCWallet.create_wallet_for_cc(wallet_node_2.wallet_state_manager, wallet2, colour)
assert cc_wallet.cc_info.my_genesis_checker == cc_wallet_2.cc_info.my_genesis_checker
cc_2_hash = await cc_wallet_2.get_new_inner_hash()
tx_record = await cc_wallet.generate_signed_transaction([uint64(60)], [cc_2_hash])
await wallet.wallet_state_manager.add_pending_transaction(tx_record)
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(15, cc_wallet.get_confirmed_balance, 40)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 40)
await time_out_assert(15, cc_wallet_2.get_confirmed_balance, 60)
await time_out_assert(15, cc_wallet_2.get_unconfirmed_balance, 60)
cc2_ph = await cc_wallet_2.get_new_cc_puzzle_hash()
tx_record = await wallet.wallet_state_manager.main_wallet.generate_signed_transaction(10, cc2_ph, 0)
await wallet.wallet_state_manager.add_pending_transaction(tx_record)
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(0, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
id = cc_wallet_2.id()
wsm = cc_wallet_2.wallet_state_manager
await time_out_assert(15, wsm.get_confirmed_balance_for_wallet, 70, id)
await time_out_assert(15, cc_wallet_2.get_confirmed_balance, 60)
await time_out_assert(15, cc_wallet_2.get_unconfirmed_balance, 60)
@pytest.mark.asyncio
async def test_cc_spend_multiple(self, three_wallet_nodes):
num_blocks = 3
full_nodes, wallets = three_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node_0, wallet_server_0 = wallets[0]
wallet_node_1, wallet_server_1 = wallets[1]
wallet_node_2, wallet_server_2 = wallets[2]
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
wallet_2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet_0.get_new_puzzlehash()
await wallet_server_0.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
await wallet_server_1.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
await wallet_server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet_0.get_confirmed_balance, funds)
cc_wallet_0: CCWallet = await CCWallet.create_new_cc(wallet_node_0.wallet_state_manager, wallet_0, uint64(100))
tx_queue: List[TransactionRecord] = await wallet_node_0.wallet_state_manager.tx_store.get_not_sent()
tx_record = tx_queue[0]
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(15, cc_wallet_0.get_confirmed_balance, 100)
await time_out_assert(15, cc_wallet_0.get_unconfirmed_balance, 100)
assert cc_wallet_0.cc_info.my_genesis_checker is not None
colour = cc_wallet_0.get_colour()
cc_wallet_1: CCWallet = await CCWallet.create_wallet_for_cc(
wallet_node_1.wallet_state_manager, wallet_1, colour
)
cc_wallet_2: CCWallet = await CCWallet.create_wallet_for_cc(
wallet_node_2.wallet_state_manager, wallet_2, colour
)
assert cc_wallet_0.cc_info.my_genesis_checker == cc_wallet_1.cc_info.my_genesis_checker
assert cc_wallet_0.cc_info.my_genesis_checker == cc_wallet_2.cc_info.my_genesis_checker
cc_1_hash = await cc_wallet_1.get_new_inner_hash()
cc_2_hash = await cc_wallet_2.get_new_inner_hash()
tx_record = await cc_wallet_0.generate_signed_transaction([uint64(60), uint64(20)], [cc_1_hash, cc_2_hash])
await wallet_0.wallet_state_manager.add_pending_transaction(tx_record)
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(15, cc_wallet_0.get_confirmed_balance, 20)
await time_out_assert(15, cc_wallet_0.get_unconfirmed_balance, 20)
await time_out_assert(30, cc_wallet_1.get_confirmed_balance, 60)
await time_out_assert(30, cc_wallet_1.get_unconfirmed_balance, 60)
await time_out_assert(30, cc_wallet_2.get_confirmed_balance, 20)
await time_out_assert(30, cc_wallet_2.get_unconfirmed_balance, 20)
cc_hash = await cc_wallet_0.get_new_inner_hash()
tx_record = await cc_wallet_1.generate_signed_transaction([uint64(15)], [cc_hash])
await wallet_1.wallet_state_manager.add_pending_transaction(tx_record)
tx_record_2 = await cc_wallet_2.generate_signed_transaction([uint64(20)], [cc_hash])
await wallet_2.wallet_state_manager.add_pending_transaction(tx_record_2)
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record_2.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(15, cc_wallet_0.get_confirmed_balance, 55)
await time_out_assert(15, cc_wallet_0.get_unconfirmed_balance, 55)
await time_out_assert(30, cc_wallet_1.get_confirmed_balance, 45)
await time_out_assert(30, cc_wallet_1.get_unconfirmed_balance, 45)
await time_out_assert(30, cc_wallet_2.get_confirmed_balance, 0)
await time_out_assert(30, cc_wallet_2.get_unconfirmed_balance, 0)
@pytest.mark.asyncio
async def test_cc_max_amount_send(self, two_wallet_nodes):
num_blocks = 3
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
await server_3.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100000))
tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.tx_store.get_not_sent()
tx_record = tx_queue[0]
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
await time_out_assert(15, cc_wallet.get_confirmed_balance, 100000)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100000)
assert cc_wallet.cc_info.my_genesis_checker is not None
cc_2_hash = await cc_wallet.get_new_inner_hash()
amounts = []
puzzle_hashes = []
for i in range(1, 50):
amounts.append(uint64(i))
puzzle_hashes.append(cc_2_hash)
        spent_coin = (await cc_wallet.get_cc_spendable_coins())[0].coin
        tx_record = await cc_wallet.generate_signed_transaction(amounts, puzzle_hashes, coins={spent_coin})
await wallet.wallet_state_manager.add_pending_transaction(tx_record)
await time_out_assert(
15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await asyncio.sleep(2)
async def check_all_there():
spendable = await cc_wallet.get_cc_spendable_coins()
spendable_name_set = set()
for record in spendable:
spendable_name_set.add(record.coin.name())
puzzle_hash = cc_puzzle_hash_for_inner_puzzle_hash(CC_MOD, cc_wallet.cc_info.my_genesis_checker, cc_2_hash)
for i in range(1, 50):
                coin = Coin(spent_coin.name(), puzzle_hash, i)
if coin.name() not in spendable_name_set:
return False
return True
await time_out_assert(15, check_all_there, True)
await asyncio.sleep(5)
max_sent_amount = await cc_wallet.get_max_send_amount()
# 1) Generate transaction that is under the limit
under_limit_tx = None
try:
under_limit_tx = await cc_wallet.generate_signed_transaction(
[max_sent_amount - 1],
[ph],
)
except ValueError:
            pass  # if a ValueError is raised here, under_limit_tx stays None and the assert below fails
assert under_limit_tx is not None
# 2) Generate transaction that is equal to limit
at_limit_tx = None
try:
at_limit_tx = await cc_wallet.generate_signed_transaction(
[max_sent_amount],
[ph],
)
except ValueError:
            pass  # if a ValueError is raised here, at_limit_tx stays None and the assert below fails
assert at_limit_tx is not None
# 3) Generate transaction that is greater than limit
above_limit_tx = None
try:
above_limit_tx = await cc_wallet.generate_signed_transaction(
[max_sent_amount + 1],
[ph],
)
except ValueError:
pass
assert above_limit_tx is None
| 43.312727 | 120 | 0.702166 | [
"Apache-2.0"
] | HDDcoin-Network/hddcoin-blockchain | tests/wallet/cc_wallet/test_cc_wallet.py | 23,822 | Python |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 6 11:37:06 2018
@author: dkorff
"""
import numpy as np
import cantera as ct
from assimulo.problem import Implicit_Problem
from li_ion_battery_p2d_init import anode as an
from li_ion_battery_p2d_init import cathode as cat
from li_ion_battery_p2d_init import separator as sep
from li_ion_battery_p2d_init import Inputs
from li_ion_battery_p2d_init import anode_obj as anode
from li_ion_battery_p2d_init import anode_surf_obj as anode_s
from li_ion_battery_p2d_init import elyte_obj as elyte
from li_ion_battery_p2d_init import cathode_surf_obj as cathode_s
from li_ion_battery_p2d_init import cathode_obj as cathode
from li_ion_battery_p2d_init import conductor_obj as conductor
class Extended_Problem(Implicit_Problem):
sw0 = True
def Battery_Func(t, SV, SV_dot, sw):
"""================================================================="""
"""==========================INITIALIZE============================="""
"""================================================================="""
print(t)
nSV = len(SV)
res = np.zeros([nSV])
offset_vec = sep.offsets
""" anode = an.obj['electrode']
anode_s = an.obj['surf']
elyte = an.obj['elyte']
cathode = cat.obj['electrode']
cathode_s = cat.obj['surf']"""
nsp_an = anode.n_species; nsp_cat = cathode.n_species
F = ct.faraday; R = ct.gas_constant; T = Inputs.T
#sigma_eff_an = an.params['sigma_eff_ed']; dyInv = an.geom['dyInv']
#u_Li_elyte = an.params['u_Li_elyte']; D_Li_an = an.params['D_Li_ed']
#dr = an.dr
# %%
"""================================================================="""
"""============================ANODE================================"""
"""================================================================="""
# --------------------------------
# ANODE CURRENT COLLECTOR BOUNDARY
# --------------------------------
# Looking at node 1, j=0, set THIS node conditions
offset = an.offsets
ptr = an.ptr
j = 0
N_io_m = 0
i_io_m = 0
i_el_m = an.i_ext
X_an_1 = SV[offset[j] + ptr['X_ed'][-1]]
rho_k_elyte_1 = SV[offset[j] + ptr['rho_k_elyte']]
phi_elec_an_1 = SV[offset[j] + ptr['Phi_ed']]
phi_elec_elyte_1 = phi_elec_an_1 - SV[offset[j] + ptr['Phi_dl']]
anode.X = [X_an_1, 1-X_an_1]
anode.electric_potential = phi_elec_an_1
conductor.electric_potential = phi_elec_an_1
#elyte.TDY = Inputs.T, np.sum(rho_k_elyte_1), rho_k_elyte_1
elyte.Y = rho_k_elyte_1/np.sum(rho_k_elyte_1)
elyte.electric_potential = phi_elec_elyte_1
sdot_1 = anode_s.net_production_rates
# Shift forward to node 2, j=1, set NEXT node conditions
j = 1; offset = int(offset_vec[j])
X_an_2 = SV[offset + an.ptr['X_ed'][-1]]
rho_k_elyte_2 = SV[offset + an.ptr['rho_k_elyte']]
phi_elec_an_2 = SV[offset + an.ptr['Phi_ed']]
phi_elec_elyte_2 = phi_elec_an_2 - SV[offset + an.ptr['Phi_dl']]
anode.X = [X_an_2, 1-X_an_2]
conductor.electric_potential = phi_elec_an_2
anode.electric_potential = phi_elec_an_2
#elyte.TDY = Inputs.T, np.sum(rho_k_elyte_2), rho_k_elyte_2
elyte.Y = rho_k_elyte_2/np.sum(rho_k_elyte_2)
elyte.electric_potential = phi_elec_elyte_2
sdot_2 = anode_s.net_production_rates
# Shift back to node 1, j=0, set THIS node outlet conditions
j = 0; offset = int(offset_vec[j])
i_el_p = an.sigma_eff_ed*(phi_elec_an_1-phi_elec_an_2)*an.dyInv
N_io_p = (-an.u_Li_elyte*elyte.density_mole*(R*T*(rho_k_elyte_2 - rho_k_elyte_1)
+ F*(phi_elec_elyte_2 - phi_elec_elyte_1))*an.dyInv)
i_io_p = np.dot(N_io_p,Inputs.z_k_elyte)*F
i_Far_1 = sdot_1[an.ptr['iFar']]*F*an.A_surf/an.dyInv
X_Li = 1 - SV[offset + an.ptr['X_ed']]
DiffFlux = np.zeros([an.nshells+1])
DiffFlux[1:-1] = an.D_Li_ed*(X_Li[0:-1] - X_Li[1:])/an.dr
DiffFlux[-1] = sdot_1[0]/anode.density_mole
k_m = np.arange(0, an.nshells)/an.nshells
k_p = np.arange(1, an.nshells+1)/an.nshells
# print(anode_s.forward_rate_constants, phi_elec_an_1, sdot_1[an.ptr['iFar']])
"""Calculate the change in X_C6 in the particle interior.
Note that the DiffFlux is the diffusion of lithium
toward the particle surface, and that diffusion of Li
into the shell decreases the amount of C6. The fluxes
must be scaled by the shell interface surface area
relative to the total particle surface area"""
res[offset + an.ptr['X_ed']] = (SV_dot[offset + an.ptr['X_ed']]
- ((DiffFlux[1:]*k_p**2 - DiffFlux[0:-1]*k_m**2)
* an.A_surf/an.eps_ed/an.V_shell))
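        # Added reading of the residual above: it enforces
        #   d(X_C6[k])/dt = (DiffFlux[k+1]*k_p[k]**2 - DiffFlux[k]*k_m[k]**2) * A_surf / (eps_ed * V_shell)
        # where DiffFlux is the Li flux toward the particle surface and k_p, k_m are the
        # normalized outer/inner shell radii, as described in the docstring.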
"""Change in electrolyte_composition"""
res[offset + an.ptr['rho_k_elyte']] = (SV_dot[offset + an.ptr['rho_k_elyte']]
- (((N_io_m - N_io_p)*an.dyInv + sdot_1[nsp_an]*an.A_surf)
/elyte.density_mole/an.eps_elyte))
"""Double-layer voltage"""
res[offset + an.ptr['Phi_dl']] = (SV_dot[offset + an.ptr['Phi_dl']]
- (i_Far_1 + i_el_m - i_el_p)*an.dyInv/an.C_dl/an.A_surf)
"""Algebraic equation for ANODE electric potential boundary condition"""
res[offset + an.ptr['Phi_ed']] = SV[offset + an.ptr['Phi_ed']]
# (i_el_m - i_el_p + i_io_m - i_io_p)
# SV_dot[offset + an.ptr['V_ed']]
# %%
"""============================ANODE================================"""
"""INTERIOR NODES"""
for j in np.arange(2, an.npoints):
# Save previous node outlet conditions as new inlet conditions
N_io_m = N_io_p
i_io_m = i_io_p
i_el_m = i_el_p
X_an_1 = X_an_2
rho_k_elyte_1 = rho_k_elyte_2
phi_elec_an_1 = phi_elec_an_2
phi_elec_elyte_1 = phi_elec_elyte_2
sdot_1 = sdot_2
# Shift forward to NEXT node
offset = int(an.offsets[j])
X_an_2 = SV[offset + an.ptr['X_ed'][-1]]
rho_k_elyte_2 = SV[offset + an.ptr['rho_k_elyte']]
phi_elec_an_2 = SV[offset + an.ptr['Phi_ed']]
phi_elec_elyte_2 = phi_elec_an_2 - SV[offset + an.ptr['Phi_dl']]
anode.X = [X_an_2, 1-X_an_2]
anode.electric_potential = phi_elec_an_2
conductor.electric_potential = phi_elec_an_2
elyte.Y = rho_k_elyte_2/np.sum(rho_k_elyte_2)
elyte.electric_potential = phi_elec_elyte_2
sdot_2 = anode_s.net_production_rates
# Shift back to THIS node, set THIS node outlet conditions
offset = int(an.offsets[j - 1])
i_el_p = an.sigma_eff_ed*(phi_elec_an_1-phi_elec_an_2)*an.dyInv
N_io_p = (-an.u_Li_elyte*elyte.density_mole*(R*T*(rho_k_elyte_2 - rho_k_elyte_1)
+ F*(phi_elec_elyte_2 - phi_elec_elyte_1))*an.dyInv)
i_io_p = np.dot(N_io_p,Inputs.z_k_elyte)*F
i_Far_1 = sdot_1[an.ptr['iFar']]*F*an.A_surf/an.dyInv
X_Li = 1 - SV[offset + an.ptr['X_ed']]
DiffFlux = np.zeros([an.nshells+1])
DiffFlux[1:-1] = an.D_Li_ed*(X_Li[0:-1] - X_Li[1:])/an.dr
DiffFlux[-1] = sdot_1[0]/anode.density_mole
"""Calculate the change in X_C6 in the particle interior."""
res[offset + an.ptr['X_ed']] = (SV_dot[offset + an.ptr['X_ed']]
- ((DiffFlux[1:]*k_p**2 - DiffFlux[0:-1]*k_m**2)
* an.A_surf/an.eps_ed/an.V_shell))
"""Change in electrolyte_composition"""
res[offset + an.ptr['rho_k_elyte']] = (SV_dot[offset + an.ptr['rho_k_elyte']]
- (((N_io_m - N_io_p)*an.dyInv + sdot_1[nsp_an]*an.A_surf)
/elyte.density_mole/an.eps_elyte))
"""Double-layer voltage"""
res[offset + an.ptr['Phi_dl']] = (SV_dot[offset + an.ptr['Phi_dl']]
- (i_Far_1 + i_el_m - i_el_p)*an.dyInv/an.C_dl/an.A_surf)
"""Algebraic equation for ANODE electric potential boundary condition"""
res[offset + an.ptr['Phi_ed']] = (i_el_m - i_el_p + i_io_m - i_io_p)
# %%
"""============================ANODE================================"""
"""Separator boundary"""
# Save previous node outlet conditions as new inlet conditions
N_io_m = N_io_p
i_io_m = i_io_p
i_el_m = i_el_p
X_an_1 = X_an_2
rho_k_elyte_1 = rho_k_elyte_2
phi_elec_an_1 = phi_elec_an_2
phi_elec_elyte_1 = phi_elec_elyte_2
sdot_1 = sdot_2
# Shift forward to NEXT node (first separator node)
# j = an.npoints; offset = int(offset_vec[j])
#
# X_elyte_2 = SV[offset + sep.ptr['X_elyte']]
#
# phi_elec_elyte_2 = SV[offset + sep.ptr['V_elyte']]
# Shift back to THIS node, set THIS node outlet conditions
i_el_p = 0
# N_io_p = (-u_Li_elyte*elyte.density_mole*(R*T*(X_elyte_2 - X_elyte_1)
# + F*(phi_elec_elyte_2 - phi_elec_elyte_1))*dyInv)
#
# i_io_p = N_io_p*F
# Set j to final ANODE node
j = an.npoints-1; offset = int(an.offsets[j])
i_Far_1 = sdot_1[an.ptr['iFar']]*F*an.A_surf/an.dyInv
i_io_p = an.i_ext
#THIS IS TEMPORARY, NON-GENERALIZED CODE:
N_io_p = np.zeros_like(N_io_p)
N_io_p[2] = i_io_p/F
X_Li = 1 - SV[offset + an.ptr['X_ed']]
DiffFlux = np.zeros([an.nshells+1])
DiffFlux[1:-1] = an.D_Li_ed*(X_Li[0:-1] - X_Li[1:])/an.dr
DiffFlux[-1] = sdot_1[0]/anode.density_mole
"""Calculate the change in X_C6 in the particle interior."""
res[offset + an.ptr['X_ed']] = (SV_dot[offset + an.ptr['X_ed']]
- ((DiffFlux[1:]*k_p**2 - DiffFlux[0:-1]*k_m**2)
* an.A_surf/an.eps_ed/an.V_shell))
"""Change in electrolyte_composition"""
res[offset + an.ptr['rho_k_elyte']] = (SV_dot[offset + an.ptr['rho_k_elyte']]
- (((N_io_m - N_io_p)*an.dyInv + sdot_1[nsp_an]*an.A_surf)
/elyte.density_mole/an.eps_elyte))
"""Double-layer voltage"""
res[offset + an.ptr['Phi_dl']] = (SV_dot[offset + an.ptr['Phi_dl']]
- (i_Far_1 + i_el_m - i_el_p)*an.dyInv/an.C_dl/an.A_surf)
"""Algebraic equation for ANODE electric potential boundary condition"""
res[offset + an.ptr['Phi_ed']] = (i_el_m - i_el_p + i_io_m - i_io_p)
# %%
"""================================================================="""
"""==========================SEPARATOR=============================="""
"""================================================================="""
# for j in np.arange(an.npoints+1, sep.sep_max):
#
# X_elyte_1 = X_elyte_2
# phi_elec_elyte_1 = phi_elec_elyte_2
# N_io_m = N_io_p
# i_io_m = i_io_p
# Shift forward to NEXT node
# offset = int(offset_vec[j])
#
# X_elyte_2 = SV[offset + sep.ptr['X_elyte']]
# phi_elec_elyte_2 = SV[offset + sep.ptr['V_elyte']]
# Step back to THIS node to calculate outlet flux
# offset = int(offset_vec[j-1])
# N_io_p = (-u_Li_elyte*elyte.density_mole*(R*T*(X_elyte_2 - X_elyte_1)
# + F*(phi_elec_elyte_2 - phi_elec_elyte_1))*sep.geom['dyInv'])
#
# i_io_p = N_io_p*F
# i_io_p = an.params['i_ext']
# N_io_p = i_io_p/F
#
# """Change in electrolyte_composition"""
# res[offset + sep.ptr['X_elyte']] = (SV_dot[offset + sep.ptr['X_elyte']]
# - (((N_io_m - N_io_p)*dyInv)/elyte.density_mole/sep.geom['phi_elyte']))
#
# """Charge neutrality enforced"""
# res[offset + sep.ptr['V_elyte']] = (i_io_m - i_io_p)
# %%
# Looking at LAST node in separator
# X_elyte_1 = X_elyte_2
# phi_elec_elyte_1 = phi_elec_elyte_2
# N_io_m = N_io_p
# i_io_m = i_io_p
# Shift forward to NEXT node, first cathode node
# j = sep.sep_max; offset = int(offset_vec[j])
#
# X_cat_2 = SV[offset + cat.ptr['X_ed'][-1]]
# X_elyte_2 = SV[offset + cat.ptr['X_elyte']]
#
# phi_elec_cat_2 = SV[offset + cat.ptr['V_ed']]
# phi_elec_elyte_2 = phi_elec_cat_2 - SV[offset + cat.ptr['V_dl']]
#
# cathode.X = [1-X_cat_2, X_cat_2]
# cathode.electric_potential = phi_elec_cat_2
#
# elyte.X = [X_elyte_2, 7.8730103237e-2, 2.8328131770e-1]
# elyte.electric_potential = phi_elec_elyte_2
#
# sdot_2 = cathode_s.net_production_rates
# Shift back to THIS node (last separator node)
# j = sep.sep_max-1; offset = int(offset_vec[j])
#
# i_el_p = 0
# N_io_p = (-u_Li_elyte*elyte.density_mole*(R*T*(X_elyte_2 - X_elyte_1)
# + F*(phi_elec_elyte_2 - phi_elec_elyte_1))*sep.geom['dyInv'])
#
# i_io_p = N_io_p*F
# i_io_p = an.params['i_ext']
# N_io_p = i_io_p/F
#
# """Change in electrolyte_composition"""
# res[offset + sep.ptr['X_elyte']] = (SV_dot[offset + sep.ptr['X_elyte']]
# - (((N_io_m - N_io_p)*dyInv)/elyte.density_mole/sep.geom['phi_elyte']))
#
# """Charge neutrality enforced"""
# res[offset + sep.ptr['V_elyte']] = (i_io_m - i_io_p)
# print(SV, res)
# SV[offset + sep.ptr['V_elyte']]
# (i_io_m - i_io_p)
# %%
"""================================================================="""
"""===========================CATHODE==============================="""
"""================================================================="""
# Alrighty, CATHODE time
# sigma_eff_cat = cat.params['sigma_eff_ed']; dyInv = cat.geom['dyInv']
# D_Li_cat = cat.params['D_Li_ed']
#
# i_io_m = i_io_p
# N_io_m = N_io_p
# i_el_m = i_el_p
# X_cat_1 = X_cat_2
# X_elyte_1 = X_elyte_2
# phi_elec_cat_1 = phi_elec_cat_2
# phi_elec_elyte_1 = phi_elec_elyte_2
# sdot_1 = sdot_2
# j = sep.cat_max-1; offset = int(offset_vec[j])
# i_el_p = -an.params['i_ext']
# i_io_p = 0
# N_io_p = i_io_p/F
#
# i_Far_1 = sdot_1[cat.ptr['iFar']]*F*cat.geom['A_surf']/dyInv
# print(cathode_s.forward_rate_constants, phi_elec_cat_1, sdot_1[cat.ptr['iFar']])
# X_Li = 1 - SV[offset + cat.ptr['X_ed']]
# DiffFlux = np.zeros([cat.nshells+1])
# DiffFlux[1:-1] = D_Li_cat*(X_Li[0:-1] - X_Li[1:])/dr
# DiffFlux[-1] = sdot_1[0]/cathode.density_mole
#
# """Calculate the change in CoO2 in the particle interior."""
# res[offset + cat.ptr['X_ed']] = (SV_dot[offset + cat.ptr['X_ed']]
# - ((DiffFlux[1:]*k_p**2 - DiffFlux[0:-1]*k_m**2)
# * cat.geom['A_surf']/cat.geom['phi_ed']/cat.params['V_shell']))
#
# """Change in electrolyte_composition"""
# res[offset + cat.ptr['X_elyte']] = (SV_dot[offset + cat.ptr['X_elyte']]
# - (((N_io_m - N_io_p)*dyInv + sdot_1[nsp_cat]*cat.geom['A_surf'])
# /elyte.density_mole/cat.geom['phi_elyte']))
#
# """Double-layer voltage"""
# res[offset + cat.ptr['V_dl']] = (SV_dot[offset + cat.ptr['V_dl']]
# - (i_Far_1 + i_el_m - i_el_p)*dyInv/cat.params['C_dl']/cat.geom['A_surf'])
#
# """Algebraic equation for CATHODE electric potential boundary condition"""
# res[offset + cat.ptr['V_ed']] = (i_el_m - i_el_p + i_io_m - i_io_p)
# print(SV, res)
# for j in np.arange(an.npoints + sep.npoints, sep.cat_max-1):
# N_io_m = N_io_p
# i_io_m = i_io_p
# i_el_m = i_el_p
# X_cat_1 = X_cat_2
# X_elyte_1 = X_elyte_2
# phi_elec_cat_1 = phi_elec_cat_2
# phi_elec_elyte_1 = phi_elec_elyte_2
# sdot_1 = sdot_2
#
# # Look at NEXT node
# offset = int(offset_vec[j])
#
# X_cat_2 = SV[offset + cat.ptr['X_ed'][-1]]
# X_elyte_2 = SV[offset + cat.ptr['X_elyte']]
#
# phi_elec_cat_2 = SV[offset + cat.ptr['V_ed']]
# phi_elec_elyte_2 = phi_elec_cat_2 - SV[offset + cat.ptr['V_dl']]
#
# cathode.X = [1-X_cat_2, X_cat_2]
# cathode.electric_potential = phi_elec_cat_2
#
# elyte.X = [X_elyte_2, 1-X_elyte_2]
# elyte.electric_potential = phi_elec_elyte_2
#
# sdot_2 = cathode_s.net_production_rates
#
# # Shift back to THIS node, set THIS node outlet conditions
# offset = int(offset_vec[j-1])
#
# i_el_p = sigma_eff_cat*(phi_elec_cat_1 - phi_elec_cat_2)*dyInv
#
# N_io_p = (-u_Li_elyte*elyte.density_mole*(R*T*(X_elyte_2 - X_elyte_1)
# + F*(phi_elec_elyte_2 - phi_elec_elyte_1))*dyInv)
#
# i_io_p = N_io_p*F
#
# i_Far_1 = sdot_1[cat.ptr['iFar']]*F*cat.geom['A_surf']/dyInv
#
# X_Li = 1 - SV[offset + cat.ptr['X_ed']]
# DiffFlux = np.zeros([cat.nshells+1])
# DiffFlux[1:-1] = D_Li_cat*(X_Li[0:-1] - X_Li[1:])/dr
# DiffFlux[-1] = sdot_1[1]/cathode.density_mole
#
# """Calculate the change in CoO2 in the particle interior."""
# res[offset + cat.ptr['X_ed']] = (SV_dot[offset + cat.ptr['X_ed']])
# """- ((DiffFlux[1:]*k_p**2 - DiffFlux[0:-1]*k_m**2)
# * cat.geom['A_surf']/cat.geom['phi_ed']/cat.params['V_shell']))"""
#
# """Change in electrolyte_composition"""
# res[offset + cat.ptr['X_elyte']] = (SV_dot[offset + cat.ptr['X_elyte']])
# """- (((N_io_m - N_io_p)*dyInv + sdot_1[nsp_cat]*cat.geom['A_surf'])
# /elyte.density_mole/cat.geom['phi_elyte']))"""
#
# """Double-layer voltage"""
# res[offset + cat.ptr['V_dl']] = (SV_dot[offset + cat.ptr['V_dl']]
# - (i_Far_1 + i_el_m - i_el_p)*dyInv/cat.params['C_dl']/cat.geom['A_surf'])
#
# """Algebraic equation for CATHODE electric potential boundary condition"""
# res[offset + cat.ptr['V_ed']] = (i_el_m - i_el_p + i_io_m - i_io_p)
# %%
"""=========================CATHODE============================="""
"""current collector boundary"""
# N_io_m = N_io_p
# i_io_m = i_io_p
# i_el_m = i_el_p
# X_cat_1 = X_cat_2
# X_elyte_1 = X_elyte_2
# phi_elec_cat_1 = phi_elec_cat_2
# phi_elec_elyte_1 = phi_elec_elyte_2
# sdot_1 = sdot_2
#
# # Set THIS node outlet conditions (last node BCs)
# j = sep.cat_max - 1; offset = int(offset_vec[j])
# i_io_p = 0
# N_io_p = 0
# i_el_p = cat.params['i_ext']
#
# i_Far_1 = sdot_1[cat.ptr['iFar']]*F*cat.geom['A_surf']/dyInv
#
# X_Li = 1 - SV[offset + cat.ptr['X_ed']]
# DiffFlux = np.zeros([cat.nshells+1])
# DiffFlux[1:-1] = D_Li_cat*(X_Li[0:-1] - X_Li[1:])/dr
# DiffFlux[-1] = sdot_1[1]/cathode.density_mole
#
# """Calculate the change in CoO2 in the particle interior."""
# res[offset + cat.ptr['X_ed']] = (SV_dot[offset + cat.ptr['X_ed']])
# """- ((DiffFlux[1:]*k_p**2 - DiffFlux[0:-1]*k_m**2)
# * cat.geom['A_surf']/cat.geom['phi_ed']/cat.params['V_shell']))"""
#
# """Change in electrolyte_composition"""
# res[offset + cat.ptr['X_elyte']] = (SV_dot[offset + cat.ptr['X_elyte']])
# """- (((N_io_m - N_io_p)*dyInv + sdot_1[nsp_cat]*cat.geom['A_surf'])
# /elyte.density_mole/cat.geom['phi_elyte']))"""
#
# """Double-layer voltage"""
# res[offset + cat.ptr['V_dl']] = (SV_dot[offset + cat.ptr['V_dl']]
# - (i_Far_1 + i_el_m - i_el_p)*dyInv/cat.params['C_dl']/cat.geom['A_surf'])
#
# """Algebraic equation for CATHODE electric potential boundary condition"""
# res[offset + cat.ptr['V_ed']] = (i_el_m - i_el_p + i_io_m - i_io_p)
return res
"""====================================================================="""
"""====================================================================="""
"""====================================================================="""
# %%
def state_events(self, t, y, yd, sw):
event1 = np.zeros([an.params['npoints']])
event2 = np.zeros([an.params['npoints']])
event3 = np.zeros([an.params['nshells']])
event4 = np.zeros([an.params['nshells']])
for j in np.arange(0, an.params['npoints']):
offset = j*an.params['nVars']
event1[j] = (y[offset + an.ptr['V_dl']])
event2[j] = (1 - y[offset + an.ptr['V_dl']])
for i in np.arange(0, an.params['nshells']):
event3[i] = y[offset + an.ptr['X_ed'][i]] - (1 - an.params['X_Li_max'])
event4[i] = (((1 - an.params['X_Li_min']) - y[offset + an.ptr['X_ed'][i]]))
event5 = np.zeros([cat.params['npoints']])
event6 = np.zeros([cat.params['npoints']])
event7 = np.zeros([cat.params['nshells']])
event8 = np.zeros([cat.params['nshells']])
for j in np.arange(0, cat.params['npoints']):
offset = j*cat.params['nVars'] + an.npoints*an.nVars + sep.npoints*sep.nVars
event5[j] = (y[offset + cat.ptr['V_dl']])
event6[j] = (y[offset + cat.ptr['V_dl']] - 5)
for i in np.arange(0, cat.params['nshells']):
event7[i] = y[offset + cat.ptr['X_ed'][i]] - (1 - cat.params['X_Li_max'])
event8[i] = (1 - cat.params['X_Li_min']) - y[offset + cat.ptr['X_ed'][i]]
event9 = np.zeros([sep.npoints])
event10 = np.zeros([sep.npoints])
for j in np.arange(0, sep.npoints):
offset = an.npoints*an.nVars
event9[j] = 1 - y[offset + sep.ptr['X_elyte']]
event10[j] = y[offset + sep.ptr['X_elyte']]
events = np.concatenate((event1, event2, event3, event4, event5, event6,
event7, event8, event9, event10))
return events
"""====================================================================="""
def handle_event(self, solver, event_info):
while True:
self.event_switch(solver, event_info)
self.init_mode(solver)
            if True not in event_info:
break
def event_switch(self, solver, event_info):
if not all(event_info):
solver.sw = [not solver.sw]
def init_mode(self, solver):
an.t_flag = solver.t
if an.params['i_ext'] != 0:
an.params['i_ext'] = 0
cat.params['i_ext'] = 0
| 39.375 | 92 | 0.541711 | [
"BSD-3-Clause"
] | coresresearch/p2d_li_ion_battery | li_ion_battery_p2d_functions.py | 22,680 | Python |
import asyncio
import discord
from discord.ext.commands import Bot
from discord.ext import commands
from discord import Color, Embed
import backend.commands as db
from backend import strikechannel
# This command allows players to change their name.
#
# !name [new_name]
#
# This replaces the default nickname changing that Discord provides so
# that their name will also be replaced in the spreadsheet.
class Name(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.strike_channel_id = strikechannel
@commands.command()
async def name(self, ctx):
old_name = ctx.author.display_name
        new_name = ctx.message.content[6:]  # strip the leading "!name " prefix (6 characters)
print(old_name)
print(new_name)
# This changes their name in the "#strikes" channel
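        # Each message in that channel is a fenced block of "name - strikes" lines, e.g.
        #   ```
        #   Alice - 2
        #   Bob - 0
        #   ```
        # (format inferred from how full_text is rebuilt below).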
channel = self.bot.get_channel(self.strike_channel_id)
async for msg in channel.history(limit=None):
text = msg.content.replace("```", "")
text_lst = text.split("\n")
d = {}
for line in text_lst:
try:
name, strikes = line.rsplit(" - ", 1)
                except ValueError:
                    # skip the ``` fences and any other line not shaped like "name - strikes"
                    continue
d[name] = int(strikes)
if old_name in d:
d[new_name] = d[old_name]
del d[old_name]
inner_text = ""
for k, v in d.items():
inner_text += f"{k} - {v}\n"
full_text = f"```\n{inner_text}```"
await msg.edit(content=full_text)
db.change_name(old_name, new_name)
await ctx.author.edit(nick=new_name)
await ctx.channel.send("Name Changed!")
def setup(bot):
bot.add_cog(Name(bot))
| 25.537313 | 70 | 0.585038 | [
"Apache-2.0"
] | DaleNaci/AUC | commands/name.py | 1,711 | Python |
import json
import re
from types import FunctionType, LambdaType
_pattern_type = re.compile('t').__class__  # type of compiled regex patterns (re.Pattern on modern Python)
class ConversionIndex(object):
def __init__(self, index_name=''):
self.o2t = {}
self.t2o = {}
self.name = index_name
def set_index(self, origin_index, target_index):
if isinstance(origin_index, str):
self.o2t[origin_index] = target_index
self.t2o[target_index] = origin_index
elif isinstance(origin_index, list):
self.o2t['-'.join(origin_index)] = target_index
self.t2o[target_index] = origin_index
def get_origin_by_target(self, target_index):
return self.t2o[target_index]
def is_origin_indexed(self, origin_index):
if isinstance(origin_index, str):
return origin_index in self.o2t
elif isinstance(origin_index, list):
return '-'.join(origin_index) in self.o2t
def get_target_by_origin(self, origin_index):
if isinstance(origin_index, str):
return self.o2t[origin_index]
elif isinstance(origin_index, list):
return self.o2t['-'.join(origin_index)]
else:
return None
def is_target_indexed(self, target_index):
return target_index in self.t2o
def reset(self):
self.o2t = None
self.t2o = None
self.name = None
def dump(self, file_name):
obj = {
'name': self.name,
'o2t': self.o2t,
't2o': self.t2o,
}
with open(file_name, 'w') as fp:
json.dump(obj, fp)
@staticmethod
def load(file_name):
with open(file_name, 'r') as fp:
idx = ConversionIndex()
obj = json.load(fp)
idx.name = obj['name']
idx.o2t = obj['o2t']
idx.t2o = obj['t2o']
return idx
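# Hypothetical usage sketch for ConversionIndex (added note, not part of the original module):
#   idx = ConversionIndex('posts')
#   idx.set_index('10', '3')              # single origin key
#   idx.set_index(['10', 'en'], '4')      # compound origin key, stored internally as "10-en"
#   idx.get_target_by_origin(['10', 'en'])  # -> '4'
#   idx.dump('posts.json'); restored = ConversionIndex.load('posts.json')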
class FieldIndex(object):
def __init__(self, table):
self.table = table
self.indices = {}
def create_index(self, name, **kwargs):
index = {}
keys = [key for key in kwargs.keys() if key in self.table.headers()]
is_callable = {}
is_regex = {}
for key in keys:
index[key] = []
value = kwargs[key]
is_callable[key] = isinstance(value, FunctionType) or isinstance(value, LambdaType)
is_regex[key] = isinstance(value, _pattern_type)
for idx, row in enumerate(self.table):
for key in keys:
value = kwargs[key]
if value:
if is_callable[key]:
index_available = value(idx, row)
elif is_regex[key]:
index_available = bool(value.match(row[key]))
else:
index_available = row[key] == value
if not index_available:
continue
index[key].append(idx)
for key in keys:
index[key] = set(index[key])
self.indices[name] = index
return self
def get_index(self, name, *args):
if len(args) == 0 or name not in self.indices:
return None
index = self.indices[name]
result = index[args[0]]
for key in args[1:]:
result = result.intersection(index[key])
return result
def filter_index(self, name, filters, dict_key=None):
intersect = self.get_index(name, *filters.keys())
is_callable = {}
is_regex = {}
for key in filters:
value = filters[key]
is_callable[key] = isinstance(value, FunctionType) or isinstance(value, LambdaType)
is_regex[key] = isinstance(value, _pattern_type)
if dict_key:
output = {}
else:
output = []
for idx in intersect:
row = self.table.row(idx)
available = True
for key, value in filters.items():
if value:
if is_callable[key]:
available = value(idx, row)
elif is_regex[key]:
available = bool(value.match(row[key]))
else:
available = row[key] == value
if not available:
break
else:
available = False
break
if available:
if dict_key:
if row[dict_key] not in output:
output[row[dict_key]] = []
output[row[dict_key]].append(idx)
else:
output.append(idx)
return output
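# Hypothetical usage sketch for FieldIndex (added note); `table` is assumed to be iterable over
# dict-like rows and to expose headers() and row(i):
#   fi = FieldIndex(table)
#   fi.create_index('drafts', post_status='draft', post_title=re.compile(r'^Hello'))
#   fi.get_index('drafts', 'post_status', 'post_title')   # set of row indices matching both
#   fi.filter_index('drafts', {'post_status': 'draft'}, dict_key='post_author')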
| 29.7 | 95 | 0.518519 | [
"MIT"
] | chwnam/pymigrator | pymigrator/core/indices.py | 4,752 | Python |
# (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Fake HP client for testing LeftHand without installing the client."""
import sys
import mock
from cinder.tests import fake_hp_client_exceptions as hpexceptions
hplefthand = mock.Mock()
hplefthand.version = "1.0.4"
hplefthand.exceptions = hpexceptions
sys.modules['hplefthandclient'] = hplefthand
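# After this module is imported, any later `import hplefthandclient` in the test suite resolves
# to the mock registered in sys.modules above, so the LeftHand driver tests can run without the
# real client package installed.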
| 33.413793 | 78 | 0.745098 | [
"Apache-2.0"
] | AO-AO/cmss-cinder | cinder/tests/fake_hp_lefthand_client.py | 969 | Python |
FROM = 'vie-qs'
TO = 'fee-qs'
from_layer = Font.glyphs[FROM].layers[0]
XHEIGHT = Font.masters[0].xHeight
WIDTH = Font.masters[0].widthValue
to_layer = Font.glyphs[TO].layers[0] = from_layer.copyDecomposedLayer()
for path in to_layer.paths:
for node in path.nodes:
        # rotate the outline 180 degrees about the centre of the x-height/advance-width box
        # (flip y about the x-height, flip x about the width), then shift 4 advance widths right
node.y = XHEIGHT - node.y
node.x = WIDTH - node.x
node.x += 4*WIDTH
# doesn't seem to work
del(path.nodes[-1]) # get rid of tail
| 23.8 | 71 | 0.632353 | [
"Apache-2.0"
] | adiabatic/abbots-morton-experiment | glyph-generation scripts/vie2fee.py | 476 | Python |
import torch
import numpy as np
class ScheduledOptim:
""" A simple wrapper class for learning rate scheduling """
def __init__(self, model, train_config, model_config, current_step):
self._optimizer = torch.optim.Adam(
model.parameters(),
betas=train_config["optimizer"]["betas"],
eps=train_config["optimizer"]["eps"],
weight_decay=train_config["optimizer"]["weight_decay"],
)
self.n_warmup_steps = train_config["optimizer"]["warm_up_step"]
self.anneal_steps = train_config["optimizer"]["anneal_steps"]
self.anneal_rate = train_config["optimizer"]["anneal_rate"]
self.current_step = current_step
self.init_lr = np.power(model_config["transformer"]["encoder_hidden"], -0.5)
def step_and_update_lr(self):
self._update_learning_rate()
self._optimizer.step()
def zero_grad(self):
# print(self.init_lr)
self._optimizer.zero_grad()
def load_state_dict(self, path):
self._optimizer.load_state_dict(path)
def _get_lr_scale(self):
lr = np.min(
[
np.power(self.current_step, -0.5),
np.power(self.n_warmup_steps, -1.5) * self.current_step,
]
)
for s in self.anneal_steps:
if self.current_step > s:
lr = lr * self.anneal_rate
return lr
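    # Added note: combined with init_lr = encoder_hidden**-0.5 (applied in _update_learning_rate),
    # this reproduces the Transformer warm-up schedule
    #   lr(step) = encoder_hidden**-0.5 * min(step**-0.5, step * warm_up_step**-1.5)
    # multiplied by anneal_rate once for every anneal_steps threshold the current step has passed.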
def _update_learning_rate(self):
""" Learning rate scheduling per step """
self.current_step += 1
lr = self.init_lr * self._get_lr_scale()
for param_group in self._optimizer.param_groups:
param_group["lr"] = lr
| 33.230769 | 85 | 0.595486 | [
"MIT"
] | ARBML/klaam | FastSpeech2/model/optimizer.py | 1,728 | Python |
#!/usr/bin/env python
"""
Analyze docstrings to detect errors.
If no argument is provided, it does a quick check of docstrings and returns
a csv with all API functions and results of basic checks.
If a function or method is provided in the form "pandas.function",
"pandas.module.class.method", etc. a list of all errors in the docstring for
the specified function or method.
Usage::
$ ./validate_docstrings.py
$ ./validate_docstrings.py pandas.DataFrame.head
"""
import os
import sys
import csv
import re
import functools
import collections
import argparse
import pydoc
import inspect
import importlib
import doctest
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
import numpy
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(BASE_PATH))
import pandas
from pandas.compat import signature
sys.path.insert(1, os.path.join(BASE_PATH, 'doc', 'sphinxext'))
from numpydoc.docscrape import NumpyDocString
from pandas.io.formats.printing import pprint_thing
PRIVATE_CLASSES = ['NDFrame', 'IndexOpsMixin']
DIRECTIVES = ['versionadded', 'versionchanged', 'deprecated']
def _load_obj(obj_name):
for maxsplit in range(1, obj_name.count('.') + 1):
# TODO when py3 only replace by: module, *func_parts = ...
func_name_split = obj_name.rsplit('.', maxsplit)
module = func_name_split[0]
func_parts = func_name_split[1:]
try:
obj = importlib.import_module(module)
except ImportError:
pass
else:
continue
if 'module' not in locals():
raise ImportError('No module can be imported '
'from "{}"'.format(obj_name))
for part in func_parts:
obj = getattr(obj, part)
return obj
def _to_original_callable(obj):
while True:
if inspect.isfunction(obj) or inspect.isclass(obj):
f = inspect.getfile(obj)
if f.startswith('<') and f.endswith('>'):
return None
return obj
if inspect.ismethod(obj):
obj = obj.__func__
elif isinstance(obj, functools.partial):
obj = obj.func
elif isinstance(obj, property):
obj = obj.fget
else:
return None
def _output_header(title, width=80, char='#'):
full_line = char * width
side_len = (width - len(title) - 2) // 2
adj = '' if len(title) % 2 == 0 else ' '
title_line = '{side} {title}{adj} {side}'.format(side=char * side_len,
title=title,
adj=adj)
return '\n{full_line}\n{title_line}\n{full_line}\n\n'.format(
full_line=full_line, title_line=title_line)
class Docstring(object):
def __init__(self, method_name, method_obj):
self.method_name = method_name
self.method_obj = method_obj
self.raw_doc = method_obj.__doc__ or ''
self.clean_doc = pydoc.getdoc(self.method_obj)
self.doc = NumpyDocString(self.clean_doc)
def __len__(self):
return len(self.raw_doc)
@property
def is_function_or_method(self):
# TODO(py27): remove ismethod
return (inspect.isfunction(self.method_obj)
or inspect.ismethod(self.method_obj))
@property
def source_file_name(self):
fname = inspect.getsourcefile(self.method_obj)
if fname:
fname = os.path.relpath(fname, BASE_PATH)
return fname
@property
def source_file_def_line(self):
try:
return inspect.getsourcelines(self.method_obj)[-1]
except OSError:
pass
@property
def github_url(self):
url = 'https://github.com/pandas-dev/pandas/blob/master/'
url += '{}#L{}'.format(self.source_file_name,
self.source_file_def_line)
return url
@property
def start_blank_lines(self):
i = None
if self.raw_doc:
for i, row in enumerate(self.raw_doc.split('\n')):
if row.strip():
break
return i
@property
def end_blank_lines(self):
i = None
if self.raw_doc:
for i, row in enumerate(reversed(self.raw_doc.split('\n'))):
if row.strip():
break
return i
@property
def double_blank_lines(self):
prev = True
for row in self.raw_doc.split('\n'):
if not prev and not row.strip():
return True
prev = row.strip()
return False
@property
def summary(self):
return ' '.join(self.doc['Summary'])
@property
def num_summary_lines(self):
return len(self.doc['Summary'])
@property
def extended_summary(self):
if not self.doc['Extended Summary'] and len(self.doc['Summary']) > 1:
return ' '.join(self.doc['Summary'])
return ' '.join(self.doc['Extended Summary'])
@property
def needs_summary(self):
return not (bool(self.summary) and bool(self.extended_summary))
@property
def doc_parameters(self):
return collections.OrderedDict((name, (type_, ''.join(desc)))
for name, type_, desc
in self.doc['Parameters'])
@property
def signature_parameters(self):
if inspect.isclass(self.method_obj):
if hasattr(self.method_obj, '_accessors') and (
self.method_name.split('.')[-1] in
self.method_obj._accessors):
# accessor classes have a signature but don't want to show this
return tuple()
try:
sig = signature(self.method_obj)
except (TypeError, ValueError):
# Some objects, mainly in C extensions do not support introspection
# of the signature
return tuple()
params = sig.args
if sig.varargs:
params.append("*" + sig.varargs)
if sig.keywords:
params.append("**" + sig.keywords)
params = tuple(params)
if params and params[0] in ('self', 'cls'):
return params[1:]
return params
@property
def parameter_mismatches(self):
errs = []
signature_params = self.signature_parameters
doc_params = tuple(self.doc_parameters)
missing = set(signature_params) - set(doc_params)
if missing:
errs.append(
'Parameters {} not documented'.format(pprint_thing(missing)))
extra = set(doc_params) - set(signature_params)
if extra:
errs.append('Unknown parameters {}'.format(pprint_thing(extra)))
if (not missing and not extra and signature_params != doc_params
and not (not signature_params and not doc_params)):
errs.append('Wrong parameters order. ' +
'Actual: {!r}. '.format(signature_params) +
'Documented: {!r}'.format(doc_params))
return errs
@property
def correct_parameters(self):
return not bool(self.parameter_mismatches)
def parameter_type(self, param):
return self.doc_parameters[param][0]
def parameter_desc(self, param):
desc = self.doc_parameters[param][1]
# Find and strip out any sphinx directives
for directive in DIRECTIVES:
full_directive = '.. {}'.format(directive)
if full_directive in desc:
# Only retain any description before the directive
desc = desc[:desc.index(full_directive)]
return desc
@property
def see_also(self):
return collections.OrderedDict((name, ''.join(desc))
for name, desc, _
in self.doc['See Also'])
@property
def examples(self):
return self.doc['Examples']
@property
def returns(self):
return self.doc['Returns']
@property
def yields(self):
return self.doc['Yields']
@property
def method_source(self):
return inspect.getsource(self.method_obj)
@property
def first_line_ends_in_dot(self):
if self.doc:
return self.doc.split('\n')[0][-1] == '.'
@property
def deprecated(self):
pattern = re.compile('.. deprecated:: ')
return (self.method_name.startswith('pandas.Panel') or
bool(pattern.search(self.summary)) or
bool(pattern.search(self.extended_summary)))
@property
def mentioned_private_classes(self):
return [klass for klass in PRIVATE_CLASSES if klass in self.raw_doc]
@property
def examples_errors(self):
flags = doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL
finder = doctest.DocTestFinder()
runner = doctest.DocTestRunner(optionflags=flags)
context = {'np': numpy, 'pd': pandas}
error_msgs = ''
for test in finder.find(self.raw_doc, self.method_name, globs=context):
f = StringIO()
runner.run(test, out=f.write)
error_msgs += f.getvalue()
return error_msgs
def get_api_items():
api_fname = os.path.join(BASE_PATH, 'doc', 'source', 'api.rst')
previous_line = current_section = current_subsection = ''
position = None
with open(api_fname) as f:
for line in f:
line = line.strip()
if len(line) == len(previous_line):
if set(line) == set('-'):
current_section = previous_line
continue
if set(line) == set('~'):
current_subsection = previous_line
continue
if line.startswith('.. currentmodule::'):
current_module = line.replace('.. currentmodule::', '').strip()
continue
if line == '.. autosummary::':
position = 'autosummary'
continue
if position == 'autosummary':
if line == '':
position = 'items'
continue
if position == 'items':
if line == '':
position = None
continue
item = line.strip()
func = importlib.import_module(current_module)
for part in item.split('.'):
func = getattr(func, part)
yield ('.'.join([current_module, item]), func,
current_section, current_subsection)
previous_line = line
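# get_api_items() above expects doc/source/api.rst to contain blocks shaped roughly like the
# following illustration (not an excerpt from the real file):
#   .. currentmodule:: pandas
#
#   .. autosummary::
#
#      DataFrame.head
#      DataFrame.tail
# and yields ("pandas.DataFrame.head", <object>, section, subsection) for every listed item.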
def _csv_row(func_name, func_obj, section, subsection, in_api, seen={}):
obj_type = type(func_obj).__name__
original_callable = _to_original_callable(func_obj)
if original_callable is None:
return [func_name, obj_type] + [''] * 12, ''
else:
doc = Docstring(func_name, original_callable)
key = doc.source_file_name, doc.source_file_def_line
shared_code = seen.get(key, '')
return [func_name,
obj_type,
in_api,
int(doc.deprecated),
section,
subsection,
doc.source_file_name,
doc.source_file_def_line,
doc.github_url,
int(bool(doc.summary)),
int(bool(doc.extended_summary)),
int(doc.correct_parameters),
int(bool(doc.examples)),
shared_code], key
def validate_all():
writer = csv.writer(sys.stdout)
cols = ('Function or method',
'Type',
'In API doc',
'Is deprecated',
'Section',
'Subsection',
'File',
'Code line',
'GitHub link',
'Has summary',
'Has extended summary',
'Parameters ok',
'Has examples',
'Shared code with')
writer.writerow(cols)
seen = {}
api_items = list(get_api_items())
for func_name, func, section, subsection in api_items:
row, key = _csv_row(func_name, func, section, subsection,
in_api=1, seen=seen)
seen[key] = func_name
writer.writerow(row)
api_item_names = set(list(zip(*api_items))[0])
for class_ in (pandas.Series, pandas.DataFrame, pandas.Panel):
for member in inspect.getmembers(class_):
func_name = 'pandas.{}.{}'.format(class_.__name__, member[0])
if (not member[0].startswith('_') and
func_name not in api_item_names):
func = _load_obj(func_name)
row, key = _csv_row(func_name, func, section='', subsection='',
in_api=0)
writer.writerow(row)
return 0
def validate_one(func_name):
"""
Validate the docstring for the given func_name
Parameters
----------
func_name : function
Function whose docstring will be evaluated
Returns
-------
int
The number of errors found in the `func_name` docstring
"""
func_obj = _load_obj(func_name)
doc = Docstring(func_name, func_obj)
sys.stderr.write(_output_header('Docstring ({})'.format(func_name)))
sys.stderr.write('{}\n'.format(doc.clean_doc))
errs = []
wrns = []
if doc.start_blank_lines != 1:
errs.append('Docstring text (summary) should start in the line '
'immediately after the opening quotes (not in the same '
'line, or leaving a blank line in between)')
if doc.end_blank_lines != 1:
errs.append('Closing quotes should be placed in the line after '
'the last text in the docstring (do not close the '
'quotes in the same line as the text, or leave a '
'blank line between the last text and the quotes)')
if doc.double_blank_lines:
errs.append('Use only one blank line to separate sections or '
'paragraphs')
if not doc.summary:
errs.append('No summary found (a short summary in a single line '
'should be present at the beginning of the docstring)')
else:
if not doc.summary[0].isupper():
errs.append('Summary does not start with a capital letter')
if doc.summary[-1] != '.':
errs.append('Summary does not end with a period')
if (doc.is_function_or_method and
doc.summary.split(' ')[0][-1] == 's'):
errs.append('Summary must start with infinitive verb, '
'not third person (e.g. use "Generate" instead of '
'"Generates")')
if doc.num_summary_lines > 1:
errs.append("Summary should fit in a single line.")
if not doc.extended_summary:
wrns.append('No extended summary found')
param_errs = doc.parameter_mismatches
for param in doc.doc_parameters:
if not param.startswith("*"): # Check can ignore var / kwargs
if not doc.parameter_type(param):
param_errs.append('Parameter "{}" has no type'.format(param))
else:
if doc.parameter_type(param)[-1] == '.':
param_errs.append('Parameter "{}" type should '
'not finish with "."'.format(param))
if not doc.parameter_desc(param):
param_errs.append('Parameter "{}" '
'has no description'.format(param))
else:
if not doc.parameter_desc(param)[0].isupper():
param_errs.append('Parameter "{}" description '
'should start with a '
'capital letter'.format(param))
if doc.parameter_desc(param)[-1] != '.':
param_errs.append('Parameter "{}" description '
'should finish with "."'.format(param))
if param_errs:
errs.append('Errors in parameters section')
for param_err in param_errs:
errs.append('\t{}'.format(param_err))
if doc.is_function_or_method:
if not doc.returns and "return" in doc.method_source:
errs.append('No Returns section found')
if not doc.yields and "yield" in doc.method_source:
errs.append('No Yields section found')
mentioned_errs = doc.mentioned_private_classes
if mentioned_errs:
errs.append('Private classes ({}) should not be mentioned in public '
'docstring.'.format(mentioned_errs))
if not doc.see_also:
wrns.append('See Also section not found')
else:
for rel_name, rel_desc in doc.see_also.items():
if not rel_desc:
errs.append('Missing description for '
'See Also "{}" reference'.format(rel_name))
for line in doc.raw_doc.splitlines():
if re.match("^ *\t", line):
errs.append('Tabs found at the start of line "{}", '
'please use whitespace only'.format(line.lstrip()))
examples_errs = ''
if not doc.examples:
wrns.append('No examples section found')
else:
examples_errs = doc.examples_errors
if examples_errs:
errs.append('Examples do not pass tests')
sys.stderr.write(_output_header('Validation'))
if errs:
sys.stderr.write('Errors found:\n')
for err in errs:
sys.stderr.write('\t{}\n'.format(err))
if wrns:
sys.stderr.write('Warnings found:\n')
for wrn in wrns:
sys.stderr.write('\t{}\n'.format(wrn))
if not errs:
sys.stderr.write('Docstring for "{}" correct. :)\n'.format(func_name))
if examples_errs:
sys.stderr.write(_output_header('Doctests'))
sys.stderr.write(examples_errs)
return len(errs)
def main(function):
if function is None:
return validate_all()
else:
return validate_one(function)
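# Example invocations (illustrative; assumes the pandas repository layout):
#   python scripts/validate_docstrings.py pandas.DataFrame.head   # validate a single docstring
#   python scripts/validate_docstrings.py                         # validate everything, CSV report on stdout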
if __name__ == '__main__':
argparser = argparse.ArgumentParser(
description='validate pandas docstrings')
argparser.add_argument('function',
nargs='?',
default=None,
help=('function or method to validate '
'(e.g. pandas.DataFrame.head) '
'if not provided, all docstrings '
'are validated'))
args = argparser.parse_args()
sys.exit(main(args.function))
| 33.713775 | 79 | 0.566699 | [
"BSD-3-Clause"
] | Anjali2019/pandas | scripts/validate_docstrings.py | 18,846 | Python |
import itertools
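# Reads a 6x6 grid whose six non-zero cells form the development (net) of a die,
# each cell holding the label written on that face. Every candidate orientation
# (permutation of 1..6) is folded over the net with a DFS; if one orientation folds
# consistently and reaches all six faces, the label stored opposite position 1 of
# that orientation (face[6]) is printed, otherwise 0 is printed.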
def turn(face, dir):
nface=face[:]
if dir==1:
nface=[0, face[2],face[6],face[3],face[1],face[5],face[4]]
if dir==2:
nface=[0, face[3],face[2],face[6],face[4],face[1],face[5]]
if dir==3:
nface=[0, face[4],face[1],face[3],face[6],face[5],face[2]]
if dir==4:
nface=[0, face[5],face[2],face[1],face[4],face[6],face[3]]
return nface
def dfs(si, nowface):
global link, visited
result=True
visited[si]=True
if nowface[1] != si:
return False
for dir in range(1,5):
if link[si][dir] and not visited[link[si][dir]]:
face = turn(nowface, dir)
result = result and dfs(link[si][dir], face)
return result
x=[[0]*8]
for i in range(6):
x.append([0] + list(map(int,input().split())) + [0])
x.append([0]*8)
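# link[v][d] = value of the cell adjacent to the cell containing v,
# with d giving the grid direction of that neighbour: 1=up, 2=right, 3=down, 4=left.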
link=[[None]*5 for i in range(10)]
for i in range(1, 7):
for j in range(1, 7):
if x[i][j]:
if x[i-1][j]: link[x[i][j]][1]=x[i-1][j]
if x[i+1][j]: link[x[i][j]][3]=x[i+1][j]
if x[i][j-1]: link[x[i][j]][4]=x[i][j-1]
if x[i][j+1]: link[x[i][j]][2]=x[i][j+1]
for i in itertools.permutations(map(int,'123456'), 6):
face=list((0,)+i)
visited=[0]*7
if dfs(face[1], face) and sum(visited)>=6:
print(face[6])
exit(0)
print(0)
| 26.490196 | 66 | 0.510733 | [
"MIT"
] | njw1204/BOJ-AC | problem/01000~09999/02642/2642.py3.py | 1,351 | Python |
import subprocess
import math
import os
from pipes import quote
import platform
class Sorolla:
"""
Main class which will launch ImageMagick commands to apply selected
transformations to the given images.
It needs ImageMagick & GhostScript installed in the system and in PATH
to work properly
"""
@staticmethod
def scale_resource(source_file, dest_file, scale):
"""
Scales a resource; detects if it's a nine-patch via filename in order
to scale it properly
Arguments:
source_file Source file to convert. Path can be relative or
absolute
dest_file Destination file where the converted file will be
saved. Path can be relative or absolute
scale Scale value as a float. If it's greater than zero, the
function upscales the image; if less than zero,
it downscales the image
Returns:
Whether the action could be run or not
"""
        if not Sorolla._check_needed_commands():
return False
# Default base density in dpi, set by Imagemagick
base_pdf_density_dpi = 72
try:
command = ""
if ".9." not in source_file:
# Not a resource identified as nine-patch
density = int(scale * base_pdf_density_dpi)
# Scales a vector resource to the desired density
command = 'convert -background transparent -density {0} {1} {2}'
command = command.format(
density,
Sorolla._shellquote(source_file),
Sorolla._shellquote(dest_file),
)
else:
# Resource defined as nine-patch
# Attributes used in Imagemagick command
imagemagick_scale = scale * 100
border_size = math.ceil(scale)
# The following ImageMagick command works as follows (each step
# generates a temporary image)
#
# 0. Tell convert the image that we're going to use, and that
# we want a transparent background
# 1. Create a copy of (0) with our base density (72 DPI)
# 2. Remove 9-patch border from (1) and replace it with
# color
# 3. Mix (1) & (2) so that 9-patch borders are extracted from
# the transparent original image
# 4. Resize (3) to 'imagemagick_scale'. We get scaled 9-patch
# borders, but there will be semi-transparent pixels
# 5. Apply a threshold in (4)'s alpha channel so we can make
# semi-transparent pixels fully opaque
# 6-7. Same process as in 2-3 to extract a bigger 9-patch
# border
# 8-12. Process to adjust the 9-patch border in (7) so we don't
# leave extra space between the border & the image
# 13. Create a raster of the original image (0), keeping
# original quality if PDF or SVG
# 14. Remove 9-patch border of (13) depending on the scale used
# 15. Merge (14) with (12) so we finally have the result
# 9-patch for the given dpi scale
# 16. Delete all generated files in each step
#
# There might be some pixel data loss in ldpi & hdpi
# resolutions as they use float scales to resize the source
# files
#
# In order to debug the process, copy the command to your
# console, remove the 'delete' parenthesis block and add
# '-append' before the destination file. This'll generate a
# .png with all the image steps described by the commands
command = 'convert {0} -background transparent '\
'\( +clone -density {1} \) '\
'\( +clone -shave 1x1 -bordercolor transparent -border 1x1 \) '\
'\( -clone 1 +clone -compose ChangeMask -composite -compose Over \) '\
                    '\( +clone -resize {2}% \) '\
                    '\( +clone -channel A -threshold 50% +channel \) '\
'\( +clone -shave 1x1 -bordercolor transparent -border 1x1 \) ' \
'\( -clone 5 +clone -compose ChangeMask -composite -compose Over \) '\
'\( -clone 7 -repage +{3}+0 -background none -flatten \) '\
'\( -clone 7 -repage +0+{3} -background none -flatten \) '\
'\( -clone 7 -repage -{3}+0 -background none -flatten \) '\
'\( -clone 7 -repage +0-{3} -background none -flatten \) '\
'\( -clone 8 -clone 9 -compose Over -composite -clone 10 -composite -clone 11 -composite -shave {3}x{3} \) '\
'\( -clone 0 -scale {2}% \) '\
'\( +clone -shave {4}x{4} -bordercolor transparent -border 1x1 \) '\
'\( +clone -clone 12 -composite \) '\
'\( -delete 0-14 \) '\
'{5}'.format(
Sorolla._shellquote(
os.path.abspath(source_file)),
base_pdf_density_dpi,
imagemagick_scale,
border_size - 1,
border_size,
Sorolla._shellquote(os.path.abspath(dest_file))
)
return Sorolla._run_command(command)
except Exception as e:
            print e  # a bare Exception has no errno/strerror attributes
return False
@staticmethod
def color_resource(source_file, dest_file, fill_color):
"""
Colors a raster resource; detects if it's a nine-patch via filename in
order to scale it properly
Arguments:
source_file Source file to color. Path can be relative or
absolute
dest_file Destination file where the colored file will be
saved. Path can be relative or absolute
fill_color Color to fill the resource. Must be a RRGGBB string.
Returns:
Whether the action could be run or not
"""
        if not Sorolla._check_needed_commands():
return False
try:
command = ""
if ".9." not in source_file:
# Not a resource identified as nine-patch
command = 'convert -background transparent {0} +level-colors "#{1}", '\
'{2}'.format(
Sorolla._shellquote(
os.path.abspath(source_file)),
fill_color,
Sorolla._shellquote(os.path.abspath(dest_file)),
)
else:
# nine-patch
command = 'convert -background transparent {0} '\
'\( +clone -shave 1x1 -bordercolor transparent -border 1x1 +level-colors "#{1}", \) '\
'\( -clone 0 +clone -composite \) '\
'\( -delete 0-1 \) '\
'{2}'.format(
Sorolla._shellquote(
os.path.abspath(source_file)),
fill_color,
Sorolla._shellquote(os.path.abspath(dest_file))
)
return Sorolla._run_command(command)
except Exception as e:
            print e  # a bare Exception has no .value attribute
return False
@staticmethod
def tint_resource(source_file, dest_file, tint_color):
"""
Tints a gray-scaled raster resource; detects if it's a nine-patch via
filename in order to tint it properly
Arguments:
source_file Source file to tint. Path can be relative or
absolute
dest_file Destination file where the tinted file will be
saved. Path can be relative or absolute
            tint_color  Color to tint the resource. Must be a RRGGBB string.
Returns:
Whether the action could be run or not
"""
        if not Sorolla._check_needed_commands():
return False
try:
command = ""
if ".9." not in source_file:
# Not a resource identified as nine-patch
# Check http://www.imagemagick.org/Usage/color_mods/#tint_overlay
command = 'convert -background transparent {0} '\
                    '\( +clone +matte -fill "#{1}" -colorize 100% +clone +swap -compose overlay -composite \) '\
'-compose SrcIn -composite {2}'.format(
Sorolla._shellquote(
os.path.abspath(source_file)),
tint_color,
Sorolla._shellquote(os.path.abspath(dest_file))
)
else:
# nine-patch
command = 'convert -background transparent {0} '\
'\( +clone -shave 1x1 -bordercolor transparent -border 1x1 \) '\
                    '\( +clone +matte -fill "#{1}" -colorize 100% \) '\
'\( -clone 0 +clone -compose overlay -composite \) '\
'\( -clone 0 +clone -compose SrcIn -composite \) '\
'\( -delete 0-3 \) {2}'.format(
Sorolla._shellquote(
os.path.abspath(source_file)),
tint_color,
Sorolla._shellquote(os.path.abspath(dest_file))
)
return Sorolla._run_command(command)
except Exception as e:
            print e  # a bare Exception has no .value attribute
return False
@staticmethod
def _run_command(command):
"""
Runs a given ImageMagick command
"""
# Windows check; remove escape sequences from parentheses so cmd can
# properly launch the command
if Sorolla._is_windows():
command = command.replace('\\(', '(').replace('\\)', ')')
return subprocess.call(command, shell=True) == 0
@staticmethod
def _shellquote(s):
"""
Util method to escape data in order to use it in shell commands
"""
# return "'" + s.replace("'", "'\\''") + "'"
# Windows check
if not Sorolla._is_windows():
return quote(s)
else:
return '"{0}"'.format(s)
@staticmethod
def _check_command(command, args=[]):
"""
Checks if a command can be executed in the file-system
"""
devnull = open(os.devnull, 'w')
try:
status = subprocess.call(
[command] + args, stdout=devnull, stderr=devnull)
return status == 0
except Exception as e:
print e
return False
@staticmethod
def _check_needed_commands():
"""
Check needed commands: ImageMagick's convert & GhostScript
"""
# Imagemagick check
if not Sorolla._check_command("convert"):
print "Imagemagick is not installed"
return False
# Ghostscript check
if not Sorolla._check_command("gs", ["-version"]):
print "GhostScript is not installed"
return False
return True
@staticmethod
def _is_windows():
"""
Check if the current platform is Windows
"""
return platform.uname()[0].find("Win") != -1
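# Minimal usage sketch (hypothetical file names; assumes ImageMagick and GhostScript on PATH):
#   Sorolla.scale_resource('icon.9.pdf', 'out/icon.9.png', 2.0)        # rasterize a 9-patch at 2x
#   Sorolla.color_resource('icon.png', 'out/icon_red.png', 'FF0000')
#   Sorolla.tint_resource('icon_gray.png', 'out/icon_tinted.png', '00FF00')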
| 41.522968 | 129 | 0.512722 | [
"Apache-2.0"
] | bq/sorolla | sorolla/sorolla.py | 11,751 | Python |
#%%
# Our numerical workhorses
import numpy as np
import pandas as pd
import itertools
# Import libraries to parallelize processes
from joblib import Parallel, delayed
# Import matplotlib stuff for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
# Seaborn, useful for graphics
import seaborn as sns
# Pickle is useful for saving outputs that are computationally expensive
# to obtain every time
import pickle
import os
import glob
import git
# Import the project utils
import ccutils
#%%
# Find home directory for repo
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# Read MaxEnt distributions
print('Reading MaxEnt distributions')
df_maxEnt_prot = pd.read_csv(
"../../data/csv_maxEnt_dist/MaxEnt_Lagrange_mult_protein_IPTG_range.csv"
)
# Define dictionaries to map operator to binding energy and rbs to rep copy
op_dict = dict(zip(["O1", "O2", "O3"], [-15.3, -13.9, -9.7]))
rbs_dict = dict(
zip(
["HG104", "RBS1147", "RBS446", "RBS1027", "RBS1", "RBS1L"],
[22, 60, 124, 260, 1220, 1740],
)
)
# Define sample space
mRNA_space = np.array([0])
protein_space = np.arange(0, 1.5E4)
# Group df_maxEnt by operator and repressor copy number
df_group = df_maxEnt_prot.groupby(["operator", "repressor"])
# Define column names for data frame
names = ["operator", "binding_energy", "repressor", "channcap", "pc"]
# Initialize data frame to save channel capacity computations
df_channcap = pd.DataFrame(columns=names)
# Define function to compute in parallel the channel capacity
def cc_parallel_protein(df_lagrange):
# Build mRNA transition matrix
Qpc = ccutils.channcap.trans_matrix_maxent(
df_lagrange,
mRNA_space,
protein_space,
False)
# Compute the channel capacity with the Blahut-Arimoto algorithm
cc_p, pc, _ = ccutils.channcap.channel_capacity(Qpc.T, epsilon=1e-4)
# Extract operator and repressor copy number
op = df_lagrange.operator.unique()[0]
eRA = df_lagrange.binding_energy.unique()[0]
rep = df_lagrange.repressor.unique()[0]
return [op, eRA, rep, cc_p, pc]
print('Running Blahut algorithm in multiple cores')
# Run the function in parallel
ccaps = Parallel(n_jobs=6)(
delayed(cc_parallel_protein)(df_lagrange)
for group, df_lagrange in df_group
)
# Convert to tidy data frame
ccaps = pd.DataFrame(ccaps, columns=names)
# Concatenate to data frame
df_channcap = pd.concat([df_channcap, ccaps], axis=0)
# Save results
print('Saving results into memory')
df_channcap.to_csv(
f"{homedir}/data/csv_maxEnt_dist/chann_cap_multi_prom_protein_IPTG_range.csv",
index=False,
)
print('Done!')
| 26.742574 | 82 | 0.733062 | [
"MIT"
] | RPGroup-PBoC/chann_cap | src/theory/scripts/channcap_protein_multi_prom_iptg_range.py | 2,701 | Python |
from settings import settings
from office365.runtime.auth.authentication_context import AuthenticationContext
from office365.sharepoint.caml_query import CamlQuery
from office365.sharepoint.client_context import ClientContext
list_title = "Survey"
view_title = "All Responses"
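# Expected shape of the imported `settings` dict (hypothetical values):
#   settings = {
#       'url': 'https://contoso.sharepoint.com/sites/team',
#       'user_credentials': {'username': 'user@contoso.onmicrosoft.com', 'password': '***'},
#   }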
def print_list_views(ctx):
"""Read list view by title example"""
list_object = ctx.web.lists.get_by_title(list_title)
views = list_object.views
ctx.load(views)
ctx.execute_query()
for view in views:
# print "View title: {0}".format(view.Properties["Title"])
cur_view_title = view.properties["Title"]
cur_view = views.get_by_title(cur_view_title)
ctx.load(cur_view)
ctx.execute_query()
print("View title: {0}".format(cur_view.properties["Title"]))
def print_view_items(ctx):
"""Example demonstrates how to retrieve View items"""
list_object = ctx.web.lists.get_by_title(list_title)
# 1.get View query
view = list_object.views.get_by_title(view_title)
ctx.load(view, ["ViewQuery"])
ctx.execute_query()
# 2.get View fields
view_fields = view.view_fields
ctx.load(view_fields)
ctx.execute_query()
# 3.get items for View query
qry = CamlQuery()
qry.ViewXml = "<View><Where>{0}</Where></View>".format(view.properties["ViewQuery"])
items = list_object.get_items(qry)
ctx.load(items)
ctx.execute_query()
for item in items:
print("Item title: {0}".format(item.properties["Title"]))
if __name__ == '__main__':
ctx_auth = AuthenticationContext(url=settings['url'])
if ctx_auth.acquire_token_for_user(username=settings['user_credentials']['username'],
password=settings['user_credentials']['password']):
ctx = ClientContext(settings['url'], ctx_auth)
# print_list_views(ctx)
print_view_items(ctx)
else:
print(ctx_auth.get_last_error())
| 31.387097 | 90 | 0.684995 | [
"MIT"
] | Aisbergg/Office365-REST-Python-Client | examples/sharepoint/view_operations.py | 1,946 | Python |
from transform import Transform
import tensorflow as tf
class StyleTransferTester:
def __init__(self, session, content_image, model_path):
# session
self.sess = session
# input images
self.x0 = content_image
# input model
self.model_path = model_path
# image transform network
self.transform = Transform()
# build graph for style transfer
self._build_graph()
def _build_graph(self):
# graph input
self.x = tf.placeholder(tf.float32, shape=self.x0.shape, name='input')
self.xi = tf.expand_dims(self.x, 0) # add one dim for batch
# result image from transform-net
self.y_hat = self.transform.net(self.xi/255.0)
self.y_hat = tf.squeeze(self.y_hat) # remove one dim for batch
self.y_hat = tf.clip_by_value(self.y_hat, 0., 255.)
def test(self):
# initialize parameters
self.sess.run(tf.global_variables_initializer())
# load pre-trained model
saver = tf.train.Saver()
saver.restore(self.sess, self.model_path)
# get transformed image
output = self.sess.run(self.y_hat, feed_dict={self.x: self.x0})
return output
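# Usage sketch (hypothetical checkpoint path; content_img is an HxWx3 numpy array):
#   with tf.Session() as sess:
#       tester = StyleTransferTester(sess, content_img, 'models/wave/final.ckpt')
#       stylized = tester.test()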
| 23.396226 | 78 | 0.625806 | [
"Apache-2.0"
] | altonelli/tensorflow-fast-style-transfer | style_transfer_tester.py | 1,240 | Python |
from django.db import models
from .validators import validate_resume_ext
class Resume(models.Model):
name = models.CharField(max_length = 20)
phone = models.IntegerField()
email = models.EmailField()
resume = models.FileField(upload_to='resume/%Y/%m/%d/', validators=[validate_resume_ext])
uploaded_at = models.DateTimeField(auto_now_add=True)
    #Add name, phone number and email fields
| 36.909091 | 93 | 0.753695 | [
"MIT"
] | asisbagga-dev/RapidezWriter | rapidez/resumeAnalysis/models.py | 406 | Python |
from asyncio import iscoroutinefunction
from types import MethodType
from typing import Any
__all__ = ("VirtualStep",)
class VirtualStep:
def __init__(self, orig_step: MethodType) -> None:
self._orig_step: MethodType = orig_step
@property
def name(self) -> str:
return self._orig_step.__name__
def is_coro(self) -> bool:
return iscoroutinefunction(self._orig_step)
def __call__(self, *args: Any, **kwargs: Any) -> Any:
return self._orig_step(*args, **kwargs)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self._orig_step!r})"
def __eq__(self, other: Any) -> bool:
return isinstance(other, self.__class__) and (self.__dict__ == other.__dict__)
| 27.555556 | 86 | 0.677419 | [
"Apache-2.0"
] | nikitanovosibirsk/vedro | vedro/_core/_virtual_step.py | 744 | Python |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "SeqAL"
copyright = "2020, Xu Liang"
author = "Xu Liang"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"myst_parser",
]
# The suffix of source filenames.
source_suffix = [".rst", ".md"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| 33.438596 | 79 | 0.659496 | [
"MIT"
] | BrambleXu/SeqAL | docs/source/conf.py | 1,906 | Python |
import unittest
from gen_bst_seq import gen_bst_seq, Node
class Test_Case_Gen_Bst_Seq(unittest.TestCase):
def test_gen_bst_seq(self):
root = Node(2)
root.left_child = Node(1)
root.right_child = Node(3)
root.right_child.left_child = Node(4)
ans = gen_bst_seq(root)
        self.assertListEqual(ans, [[2, 1, 3, 4], [2, 3, 4, 1]])
| 33.818182 | 63 | 0.647849 | [
"Apache-2.0"
] | angelusualle/algorithms | cracking_the_coding_interview_qs/4.9/get_bst_seq_test.py | 372 | Python |
# -*- coding: utf-8 -*-
if __name__ == "__main__":
raise Exception("Test files can't be run directly. Use `python -m pytest greenery`")
import pytest
from greenery.fsm import fsm, null, epsilon, anything_else
def test_addbug():
# Odd bug with fsm.__add__(), exposed by "[bc]*c"
int5A = fsm(
alphabet = {"a", "b", "c", anything_else},
states = {0, 1},
initial = 1,
finals = {1},
map = {
0: {anything_else: 0, "a": 0, "b": 0, "c": 0},
1: {anything_else: 0, "a": 0, "b": 1, "c": 1},
}
)
assert int5A.accepts("")
int5B = fsm(
alphabet = {"a", "b", "c", anything_else},
states = {0, 1, 2},
initial = 1,
finals = {0},
map = {
0: {anything_else: 2, "a": 2, "b": 2, "c": 2},
1: {anything_else: 2, "a": 2, "b": 2, "c": 0},
2: {anything_else: 2, "a": 2, "b": 2, "c": 2},
}
)
assert int5B.accepts("c")
int5C = int5A + int5B
assert int5C.accepts("c")
# assert int5C.initial == 0
def test_builtins():
assert not null("a").accepts("a")
assert epsilon("a").accepts("")
assert not epsilon("a").accepts("a")
@pytest.fixture
def a():
a = fsm(
alphabet = {"a", "b"},
states = {0, 1, "ob"},
initial = 0,
finals = {1},
map = {
0 : {"a" : 1 , "b" : "ob"},
1 : {"a" : "ob", "b" : "ob"},
"ob" : {"a" : "ob", "b" : "ob"},
},
)
return a
def test_a(a):
assert not a.accepts("")
assert a.accepts("a")
assert not a.accepts("b")
@pytest.fixture
def b():
b = fsm(
alphabet = {"a", "b"},
states = {0, 1, "ob"},
initial = 0,
finals = {1},
map = {
0 : {"a" : "ob", "b" : 1 },
1 : {"a" : "ob", "b" : "ob"},
"ob" : {"a" : "ob", "b" : "ob"},
},
)
return b
def test_b(b):
assert not b.accepts("")
assert not b.accepts("a")
assert b.accepts("b")
def test_concatenation_aa(a):
concAA = a + a
assert not concAA.accepts("")
assert not concAA.accepts("a")
assert concAA.accepts("aa")
assert not concAA.accepts("aaa")
concAA = epsilon({"a", "b"}) + a + a
assert not concAA.accepts("")
assert not concAA.accepts("a")
assert concAA.accepts("aa")
assert not concAA.accepts("aaa")
def test_concatenation_ab(a, b):
concAB = a + b
assert not concAB.accepts("")
assert not concAB.accepts("a")
assert not concAB.accepts("b")
assert not concAB.accepts("aa")
assert concAB.accepts("ab")
assert not concAB.accepts("ba")
assert not concAB.accepts("bb")
def test_alternation_a(a):
altA = a | null({"a", "b"})
assert not altA.accepts("")
assert altA.accepts("a")
def test_alternation_ab(a, b):
altAB = a | b
assert not altAB.accepts("")
assert altAB.accepts("a")
assert altAB.accepts("b")
assert not altAB.accepts("aa")
assert not altAB.accepts("ab")
assert not altAB.accepts("ba")
assert not altAB.accepts("bb")
def test_star(a):
starA = a.star()
assert starA.accepts("")
assert starA.accepts("a")
assert not starA.accepts("b")
assert starA.accepts("aaaaaaaaa")
def test_multiply_0(a):
zeroA = a * 0
assert zeroA.accepts("")
assert not zeroA.accepts("a")
def test_multiply_1(a):
oneA = a * 1
assert not oneA.accepts("")
assert oneA.accepts("a")
assert not oneA.accepts("aa")
def test_multiply_2(a):
twoA = a * 2
assert not twoA.accepts("")
assert not twoA.accepts("a")
assert twoA.accepts("aa")
assert not twoA.accepts("aaa")
def test_multiply_7(a):
sevenA = a * 7
assert not sevenA.accepts("aaaaaa")
assert sevenA.accepts("aaaaaaa")
assert not sevenA.accepts("aaaaaaaa")
def test_optional_mul(a, b):
unit = a + b
# accepts "ab"
optional = (epsilon(a.alphabet) | unit)
# accepts "(ab)?
assert optional.accepts([])
assert not optional.accepts(["a"])
assert not optional.accepts(["b"])
assert optional.accepts(["a", "b"])
assert not optional.accepts(["a", "a"])
optional = optional * 2
# accepts "(ab)?(ab)?"
assert optional.accepts([])
assert not optional.accepts(["a"])
assert not optional.accepts(["b"])
assert not optional.accepts(["a", "a"])
assert optional.accepts(["a", "b"])
assert not optional.accepts(["b", "a"])
assert not optional.accepts(["b", "b"])
assert not optional.accepts(["a", "a", "a"])
assert optional.accepts(["a", "b", "a", "b"])
def test_intersection_ab(a, b):
intAB = a & b
assert not intAB.accepts("")
assert not intAB.accepts("a")
assert not intAB.accepts("b")
def test_negation(a):
everythingbutA = a.everythingbut()
assert everythingbutA.accepts("")
assert not everythingbutA.accepts("a")
assert everythingbutA.accepts("b")
assert everythingbutA.accepts("aa")
assert everythingbutA.accepts("ab")
def test_crawl_reduction():
# this is "0*1" in heavy disguise. crawl should resolve this duplication
# Notice how states 2 and 3 behave identically. When resolved together,
# states 1 and 2&3 also behave identically, so they, too should be resolved
# (this is impossible to spot before 2 and 3 have been combined).
# Finally, the oblivion state should be omitted.
merged = fsm(
alphabet = {"0", "1"},
states = {1, 2, 3, 4, "oblivion"},
initial = 1,
finals = {4},
map = {
1 : {"0" : 2 , "1" : 4 },
2 : {"0" : 3 , "1" : 4 },
3 : {"0" : 3 , "1" : 4 },
4 : {"0" : "oblivion", "1" : "oblivion"},
"oblivion" : {"0" : "oblivion", "1" : "oblivion"},
}
).reduce()
assert len(merged.states) == 2
def test_bug_28():
# This is (ab*)* and it caused some defects.
abstar = fsm(
alphabet = {'a', 'b'},
states = {0, 1},
initial = 0,
finals = {1},
map = {
0: {'a': 1},
1: {'b': 1}
}
)
assert abstar.accepts("a")
assert not abstar.accepts("b")
assert abstar.accepts("ab")
assert abstar.accepts("abb")
abstarstar = abstar.star()
assert abstarstar.accepts("a")
assert not abstarstar.accepts("b")
assert abstarstar.accepts("ab")
assert not abstar.star().accepts("bb")
def test_star_advanced():
# This is (a*ba)*. Naively connecting the final states to the initial state
# gives the incorrect result here.
starred = fsm(
alphabet = {"a", "b"},
states = {0, 1, 2, "oblivion"},
initial = 0,
finals = {2},
map = {
0 : {"a" : 0 , "b" : 1 },
1 : {"a" : 2 , "b" : "oblivion"},
2 : {"a" : "oblivion", "b" : "oblivion"},
"oblivion" : {"a" : "oblivion", "b" : "oblivion"},
}
).star()
assert starred.alphabet == frozenset(["a", "b"])
assert starred.accepts("")
assert not starred.accepts("a")
assert not starred.accepts("b")
assert not starred.accepts("aa")
assert starred.accepts("ba")
assert starred.accepts("aba")
assert starred.accepts("aaba")
assert not starred.accepts("aabb")
assert starred.accepts("abababa")
def test_reduce():
# FSM accepts no strings but has 3 states, needs only 1
asdf = fsm(
alphabet = {None},
states = {0, 1, 2},
initial = 0,
finals = {1},
map = {
0 : {None : 2},
1 : {None : 2},
2 : {None : 2},
},
)
asdf = asdf.reduce()
assert len(asdf.states) == 1
def test_reverse_abc():
abc = fsm(
alphabet = {"a", "b", "c"},
states = {0, 1, 2, 3, None},
initial = 0,
finals = {3},
map = {
0 : {"a" : 1 , "b" : None, "c" : None},
1 : {"a" : None, "b" : 2 , "c" : None},
2 : {"a" : None, "b" : None, "c" : 3 },
3 : {"a" : None, "b" : None, "c" : None},
None : {"a" : None, "b" : None, "c" : None},
},
)
cba = reversed(abc)
assert cba.accepts("cba")
def test_reverse_brzozowski():
# This is (a|b)*a(a|b)
brzozowski = fsm(
alphabet = {"a", "b"},
states = {"A", "B", "C", "D", "E"},
initial = "A",
finals = {"C", "E"},
map = {
"A" : {"a" : "B", "b" : "D"},
"B" : {"a" : "C", "b" : "E"},
"C" : {"a" : "C", "b" : "E"},
"D" : {"a" : "B", "b" : "D"},
"E" : {"a" : "B", "b" : "D"},
},
)
assert brzozowski.accepts("aa")
assert brzozowski.accepts("ab")
assert brzozowski.accepts("aab")
assert brzozowski.accepts("bab")
assert brzozowski.accepts("abbbbbbbab")
assert not brzozowski.accepts("")
assert not brzozowski.accepts("a")
assert not brzozowski.accepts("b")
assert not brzozowski.accepts("ba")
assert not brzozowski.accepts("bb")
assert not brzozowski.accepts("bbbbbbbbbbbb")
# So this is (a|b)a(a|b)*
b2 = reversed(brzozowski)
assert b2.accepts("aa")
assert b2.accepts("ba")
assert b2.accepts("baa")
assert b2.accepts("bab")
assert b2.accepts("babbbbbbba")
assert not b2.accepts("")
assert not b2.accepts("a")
assert not b2.accepts("b")
assert not b2.accepts("ab")
assert not b2.accepts("bb")
assert not b2.accepts("bbbbbbbbbbbb")
# Test string generator functionality.
gen = b2.strings()
assert next(gen) == ["a", "a"]
assert next(gen) == ["b", "a"]
assert next(gen) == ["a", "a", "a"]
assert next(gen) == ["a", "a", "b"]
assert next(gen) == ["b", "a", "a"]
assert next(gen) == ["b", "a", "b"]
assert next(gen) == ["a", "a", "a", "a"]
def test_reverse_epsilon():
# epsilon reversed is epsilon
assert reversed(epsilon("a")).accepts("")
def test_binary_3():
# Binary numbers divisible by 3.
# Disallows the empty string
# Allows "0" on its own, but not leading zeroes.
div3 = fsm(
alphabet = {"0", "1"},
states = {"initial", "zero", 0, 1, 2, None},
initial = "initial",
finals = {"zero", 0},
map = {
"initial" : {"0" : "zero", "1" : 1 },
"zero" : {"0" : None , "1" : None},
0 : {"0" : 0 , "1" : 1 },
1 : {"0" : 2 , "1" : 0 },
2 : {"0" : 1 , "1" : 2 },
None : {"0" : None , "1" : None},
},
)
assert not div3.accepts("")
assert div3.accepts("0")
assert not div3.accepts("1")
assert not div3.accepts("00")
assert not div3.accepts("01")
assert not div3.accepts("10")
assert div3.accepts("11")
assert not div3.accepts("000")
assert not div3.accepts("001")
assert not div3.accepts("010")
assert not div3.accepts("011")
assert not div3.accepts("100")
assert not div3.accepts("101")
assert div3.accepts("110")
assert not div3.accepts("111")
assert not div3.accepts("0000")
assert not div3.accepts("0001")
assert not div3.accepts("0010")
assert not div3.accepts("0011")
assert not div3.accepts("0100")
assert not div3.accepts("0101")
assert not div3.accepts("0110")
assert not div3.accepts("0111")
assert not div3.accepts("1000")
assert div3.accepts("1001")
def test_invalid_fsms():
# initial state 1 is not a state
try:
fsm(
alphabet = {},
states = {},
initial = 1,
finals = set(),
map = {}
)
assert False
except AssertionError:
assert False
except Exception:
pass
# final state 2 not a state
try:
fsm(
alphabet = {},
states = {1},
initial = 1,
finals = {2},
map = {}
)
assert False
except AssertionError:
assert False
except Exception:
pass
# invalid transition for state 1, symbol "a"
try:
fsm(
alphabet = {"a"},
states = {1},
initial = 1,
finals = set(),
map = {
1 : {"a" : 2}
}
)
assert False
except AssertionError:
assert False
except Exception:
pass
def test_bad_multiplier(a):
try:
x = a * -1
assert False
except AssertionError:
assert False
except Exception:
pass
def test_anything_else_acceptance():
a = fsm(
alphabet = {"a", "b", "c", anything_else},
states = {1},
initial = 1,
finals = {1},
map = {
1 : {"a" : 1, "b" : 1, "c" : 1, anything_else : 1}
},
)
assert a.accepts("d")
def test_difference(a, b):
aorb = fsm(
alphabet = {"a", "b"},
states = {0, 1, None},
initial = 0,
finals = {1},
map = {
0 : {"a" : 1 , "b" : 1 },
1 : {"a" : None, "b" : None},
None : {"a" : None, "b" : None},
},
)
assert list((a ^ a).strings()) == []
assert list((b ^ b).strings()) == []
assert list((a ^ b).strings()) == [["a"], ["b"]]
assert list((aorb ^ a).strings()) == [["b"]]
def test_empty(a, b):
assert not a.empty()
assert not b.empty()
assert fsm(
alphabet = {},
states = {0, 1},
initial = 0,
finals = {1},
map = {0:{}, 1:{}},
).empty()
assert not fsm(
alphabet = {},
states = {0},
initial = 0,
finals = {0},
map = {0:{}},
).empty()
assert fsm(
alphabet = {"a", "b"},
states = {0, 1, None, 2},
initial = 0,
finals = {2},
map = {
0 : {"a" : 1 , "b" : 1 },
1 : {"a" : None, "b" : None},
None : {"a" : None, "b" : None},
2 : {"a" : None, "b" : None},
},
).empty()
def test_equivalent(a, b):
assert (a | b).equivalent(b | a)
def test_dead_default():
'''
You may now omit a transition, or even an entire state, from the map. This
affects every usage of `fsm.map`.
'''
blockquote = fsm(
alphabet = {"/", "*", anything_else},
states = {0, 1, 2, 3, 4, 5},
initial = 0,
finals = {4},
map = {
0 : {"/" : 1},
1 : {"*" : 2},
2 : {"/" : 2, anything_else : 2, "*" : 3},
3 : {"/" : 4, anything_else : 2, "*" : 3},
}
)
assert blockquote.accepts(["/", "*", "whatever", "*", "/"])
assert not blockquote.accepts(["*", "*", "whatever", "*", "/"])
str(blockquote) # test stringification
blockquote | blockquote
blockquote & blockquote
blockquote ^ blockquote
reversed(blockquote)
assert not blockquote.everythingbut().accepts(["/", "*", "whatever", "*", "/"])
assert blockquote.everythingbut().accepts(["*"]) # deliberately seek oblivion
assert blockquote.islive(3)
assert blockquote.islive(4)
assert not blockquote.islive(5)
gen = blockquote.strings()
assert next(gen) == ["/", "*", "*", "/"]
def test_alphabet_unions():
# Thanks to sparse maps it should now be possible to compute the union of FSMs
# with disagreeing alphabets!
a = fsm(
alphabet = {"a"},
states = {0, 1},
initial = 0,
finals = {1},
map = {
0 : {"a" : 1},
},
)
b = fsm(
alphabet = {"b"},
states = {0, 1},
initial = 0,
finals = {1},
map = {
0 : {"b" : 1},
},
)
assert (a | b).accepts(["a"])
assert (a | b).accepts(["b"])
assert (a & b).empty()
assert (a + b).accepts(["a", "b"])
assert (a ^ b).accepts(["a"])
assert (a ^ b).accepts(["b"])
def test_repr():
assert repr(anything_else) == "anything_else"
assert str(anything_else) == "anything_else"
def test_new_set_methods(a, b):
# A whole bunch of new methods were added to the FSM module to enable FSMs to
# function exactly as if they were sets of strings (symbol lists), see:
# https://docs.python.org/3/library/stdtypes.html#set-types-set-frozenset
# But do they work?
assert len(a) == 1
assert len((a | b) * 4) == 16
try:
len(a.star())
assert False
except OverflowError:
pass
# "in"
assert "a" in a
assert not "a" in b
assert "a" not in b
# List comprehension!
four = (a | b) * 2
for string in four:
assert string == ["a", "a"]
break
assert [s for s in four] == [["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]]
# set.union() imitation
assert fsm.union(a, b) == a.union(b)
assert len(fsm.union()) == 0
assert fsm.intersection(a, b) == a.intersection(b)
# This takes a little explaining. In general, `a & b & c` is equivalent to
# `EVERYTHING & a & b & c` where `EVERYTHING` is an FSM accepting every
# possible string. Similarly `a` is equivalent to `EVERYTHING & a`, and the
# intersection of no sets at all is... `EVERYTHING`.
# However, since we compute the union of alphabets, and there are no
# alphabets, the union is the empty set. So the only string which `EVERYTHING`
# actually recognises is the empty string, [] (or "" if you prefer).
int_none = fsm.intersection()
assert len(int_none) == 1
assert [] in int_none
assert (a | b).difference(a) == fsm.difference((a | b), a) == (a | b) - a == b
assert (a | b).difference(a, b) == fsm.difference((a | b), a, b) == (a | b) - a - b == null("ab")
assert a.symmetric_difference(b) == fsm.symmetric_difference(a, b) == a ^ b
assert a.isdisjoint(b)
assert a <= (a | b)
assert a < (a | b)
assert a != (a | b)
assert (a | b) > a
assert (a | b) >= a
assert list(a.concatenate(a, a).strings()) == [["a", "a", "a"]]
assert list(a.concatenate().strings()) == [["a"]]
assert list(fsm.concatenate(b, a, b).strings()) == [["b", "a", "b"]]
assert list(fsm.concatenate().strings()) == []
assert not a.copy() is a
def test_oblivion_crawl(a):
# When crawling a new FSM, we should avoid generating an oblivion state.
# `abc` has no oblivion state... all the results should not as well!
abc = fsm(
alphabet = {"a", "b", "c"},
states = {0, 1, 2, 3},
initial = 0,
finals = {3},
map = {
0 : {"a" : 1},
1 : {"b" : 2},
2 : {"c" : 3},
}
)
assert len((abc + abc).states) == 7
assert len(abc.star().states) == 3
assert len((abc * 3).states) == 10
assert len(reversed(abc).states) == 4
assert len((abc | abc).states) == 4
assert len((abc & abc).states) == 4
assert len((abc ^ abc).states) == 1
assert len((abc - abc).states) == 1
def test_concatenate_bug(a):
# This exposes a defect in fsm.concatenate.
assert fsm.concatenate(a, epsilon({"a"}), a).accepts("aa")
assert fsm.concatenate(a, epsilon({"a"}), epsilon({"a"}), a).accepts("aa")
def test_derive(a, b):
# Just some basic tests because this is mainly a regex thing.
assert a.derive("a") == epsilon({"a", "b"})
assert a.derive("b") == null({"a", "b"})
try:
a.derive("c")
assert False
except KeyError:
assert True
assert (a * 3).derive("a") == a * 2
assert (a.star() - epsilon({"a", "b"})).derive("a") == a.star()
def test_bug_36():
etc1 = fsm(
alphabet = {anything_else},
states = {0},
initial = 0,
finals = {0},
map = {
0: {
anything_else: 0
}
}
)
etc2 = fsm(
alphabet = {'s', anything_else},
states = {0, 1},
initial = 0,
finals = {1},
map = {
0: {
's': 1
},
1: {
's': 1,
anything_else: 1
}
}
)
both = etc1 & etc2
assert etc1.accepts(["s"])
assert etc2.accepts(["s"])
assert both.alphabet == {anything_else, "s"}
assert both.accepts(["s"])
def test_add_anything_else():
fsm1=fsm( # [^a]
alphabet={"a",anything_else},
states={0,1},
initial=0,
finals={1},
map={0:{anything_else:1}}
)
fsm2=fsm( # [^b]
alphabet={"b",anything_else},
states={0,1},
initial=0,
finals={1},
map={0:{anything_else:1}}
)
assert (fsm1+fsm2).accepts("ba")
| 24.898352 | 98 | 0.576906 | [
"MIT"
] | doni69/greenery | greenery/fsm_test.py | 18,126 | Python |
"""Tools for constructing domains for expressions. """
from sympy.core import sympify
from sympy.core.evalf import pure_complex
from sympy.polys.domains import ZZ, QQ, ZZ_I, QQ_I, EX
from sympy.polys.domains.complexfield import ComplexField
from sympy.polys.domains.realfield import RealField
from sympy.polys.polyoptions import build_options
from sympy.polys.polyutils import parallel_dict_from_basic
from sympy.utilities import public
def _construct_simple(coeffs, opt):
"""Handle simple domains, e.g.: ZZ, QQ, RR and algebraic domains. """
rationals = floats = complexes = algebraics = False
float_numbers = []
if opt.extension is True:
is_algebraic = lambda coeff: coeff.is_number and coeff.is_algebraic
else:
is_algebraic = lambda coeff: False
for coeff in coeffs:
if coeff.is_Rational:
if not coeff.is_Integer:
rationals = True
elif coeff.is_Float:
if algebraics:
# there are both reals and algebraics -> EX
return False
else:
floats = True
float_numbers.append(coeff)
else:
is_complex = pure_complex(coeff)
if is_complex:
complexes = True
x, y = is_complex
if x.is_Rational and y.is_Rational:
if not (x.is_Integer and y.is_Integer):
rationals = True
continue
else:
floats = True
if x.is_Float:
float_numbers.append(x)
if y.is_Float:
float_numbers.append(y)
if is_algebraic(coeff):
if floats:
# there are both algebraics and reals -> EX
return False
algebraics = True
else:
# this is a composite domain, e.g. ZZ[X], EX
return None
# Use the maximum precision of all coefficients for the RR or CC
# precision
max_prec = max(c._prec for c in float_numbers) if float_numbers else 53
if algebraics:
domain, result = _construct_algebraic(coeffs, opt)
else:
if floats and complexes:
domain = ComplexField(prec=max_prec)
elif floats:
domain = RealField(prec=max_prec)
elif rationals or opt.field:
domain = QQ_I if complexes else QQ
else:
domain = ZZ_I if complexes else ZZ
result = [domain.from_sympy(coeff) for coeff in coeffs]
return domain, result
def _construct_algebraic(coeffs, opt):
"""We know that coefficients are algebraic so construct the extension. """
from sympy.polys.numberfields import primitive_element
result, exts = [], set()
for coeff in coeffs:
if coeff.is_Rational:
coeff = (None, 0, QQ.from_sympy(coeff))
else:
a = coeff.as_coeff_add()[0]
coeff -= a
b = coeff.as_coeff_mul()[0]
coeff /= b
exts.add(coeff)
a = QQ.from_sympy(a)
b = QQ.from_sympy(b)
coeff = (coeff, b, a)
result.append(coeff)
exts = list(exts)
g, span, H = primitive_element(exts, ex=True, polys=True)
root = sum([ s*ext for s, ext in zip(span, exts) ])
domain, g = QQ.algebraic_field((g, root)), g.rep.rep
for i, (coeff, a, b) in enumerate(result):
if coeff is not None:
coeff = a*domain.dtype.from_list(H[exts.index(coeff)], g, QQ) + b
else:
coeff = domain.dtype.from_list([b], g, QQ)
result[i] = coeff
return domain, result
def _construct_composite(coeffs, opt):
"""Handle composite domains, e.g.: ZZ[X], QQ[X], ZZ(X), QQ(X). """
numers, denoms = [], []
for coeff in coeffs:
numer, denom = coeff.as_numer_denom()
numers.append(numer)
denoms.append(denom)
polys, gens = parallel_dict_from_basic(numers + denoms) # XXX: sorting
if not gens:
return None
if opt.composite is None:
if any(gen.is_number and gen.is_algebraic for gen in gens):
return None # generators are number-like so lets better use EX
all_symbols = set()
for gen in gens:
symbols = gen.free_symbols
if all_symbols & symbols:
return None # there could be algebraic relations between generators
else:
all_symbols |= symbols
n = len(gens)
k = len(polys)//2
numers = polys[:k]
denoms = polys[k:]
if opt.field:
fractions = True
else:
fractions, zeros = False, (0,)*n
for denom in denoms:
if len(denom) > 1 or zeros not in denom:
fractions = True
break
coeffs = set()
if not fractions:
for numer, denom in zip(numers, denoms):
denom = denom[zeros]
for monom, coeff in numer.items():
coeff /= denom
coeffs.add(coeff)
numer[monom] = coeff
else:
for numer, denom in zip(numers, denoms):
coeffs.update(list(numer.values()))
coeffs.update(list(denom.values()))
rationals = floats = complexes = False
float_numbers = []
for coeff in coeffs:
if coeff.is_Rational:
if not coeff.is_Integer:
rationals = True
elif coeff.is_Float:
floats = True
float_numbers.append(coeff)
else:
is_complex = pure_complex(coeff)
if is_complex is not None:
complexes = True
x, y = is_complex
if x.is_Rational and y.is_Rational:
if not (x.is_Integer and y.is_Integer):
rationals = True
else:
floats = True
if x.is_Float:
float_numbers.append(x)
if y.is_Float:
float_numbers.append(y)
max_prec = max(c._prec for c in float_numbers) if float_numbers else 53
if floats and complexes:
ground = ComplexField(prec=max_prec)
elif floats:
ground = RealField(prec=max_prec)
elif complexes:
if rationals:
ground = QQ_I
else:
ground = ZZ_I
elif rationals:
ground = QQ
else:
ground = ZZ
result = []
if not fractions:
domain = ground.poly_ring(*gens)
for numer in numers:
for monom, coeff in numer.items():
numer[monom] = ground.from_sympy(coeff)
result.append(domain(numer))
else:
domain = ground.frac_field(*gens)
for numer, denom in zip(numers, denoms):
for monom, coeff in numer.items():
numer[monom] = ground.from_sympy(coeff)
for monom, coeff in denom.items():
denom[monom] = ground.from_sympy(coeff)
result.append(domain((numer, denom)))
return domain, result
def _construct_expression(coeffs, opt):
"""The last resort case, i.e. use the expression domain. """
domain, result = EX, []
for coeff in coeffs:
result.append(domain.from_sympy(coeff))
return domain, result
@public
def construct_domain(obj, **args):
"""Construct a minimal domain for the list of coefficients. """
opt = build_options(args)
if hasattr(obj, '__iter__'):
if isinstance(obj, dict):
if not obj:
monoms, coeffs = [], []
else:
monoms, coeffs = list(zip(*list(obj.items())))
else:
coeffs = obj
else:
coeffs = [obj]
coeffs = list(map(sympify, coeffs))
result = _construct_simple(coeffs, opt)
if result is not None:
if result is not False:
domain, coeffs = result
else:
domain, coeffs = _construct_expression(coeffs, opt)
else:
if opt.composite is False:
result = None
else:
result = _construct_composite(coeffs, opt)
if result is not None:
domain, coeffs = result
else:
domain, coeffs = _construct_expression(coeffs, opt)
if hasattr(obj, '__iter__'):
if isinstance(obj, dict):
return domain, dict(list(zip(monoms, coeffs)))
else:
return domain, coeffs
else:
return domain, coeffs[0]
| 28.723333 | 83 | 0.555182 | [
"BSD-3-Clause"
] | ABKor752/sympy | sympy/polys/constructor.py | 8,617 | Python |
import keras
import os
from keras import losses
from keras.models import Model
from keras.layers import Input,merge, concatenate, Conv2D, MaxPooling2D, Activation, UpSampling2D,Dropout,Conv2DTranspose,add,multiply,Flatten,Dense
from keras.layers.normalization import BatchNormalization as bn
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.optimizers import RMSprop
from keras import regularizers
from keras import backend as K
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
import numpy as np
import cv2
class finalNetwork:
def __init__(self, images_dir, clustering_dir, classfication_dir, output_dir = None):
"""
        this function initializes the network class
        :param images_dir: directory holding the X_<i>.npy / y_<i>.npy image chunks
        :param clustering_dir: directory holding the clustering-stage train_X_<i>.npy features
        :param classfication_dir: directory holding the classification-stage train_X_<i>.npy features
        :param output_dir: optional directory for generated outputs (currently unused)
"""
self.images_dir = images_dir
self.clustering_dir = clustering_dir
self.classification_dir = classfication_dir
self.model = None
self.output_dir = output_dir
self.model_file_name = 'finalModel.h5'
def load_model(self):
"""
this function loads model from file
"""
if os.path.isfile(self.model_file_name):
self.model = keras.models.load_model(self.model_file_name)
def UNet(self,input_shape,learn_rate=1e-3):
l2_lambda = 0.0002
DropP = 0.3
kernel_size=3
inputs = Input(input_shape)
conv1a = Conv2D( 12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(inputs)
conv1a = bn()(conv1a)
conv1b = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(conv1a)
conv1b = bn()(conv1b)
merge1=concatenate([conv1a,conv1b])
conv1c = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge1)
conv1c = bn()(conv1c)
merge2=concatenate([conv1a,conv1b,conv1c])
conv1d = Conv2D(32, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge2)
conv1d = bn()(conv1d)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1d)
pool1 = Dropout(DropP)(pool1)
#############################
conv2a = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(pool1)
conv2a = bn()(conv2a)
conv2b = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(conv2a)
conv2b = bn()(conv2b)
merge1=concatenate([conv2a,conv2b])
conv2c = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge1)
conv2c = bn()(conv2c)
merge2=concatenate([conv2a,conv2b,conv2c])
conv2d = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge2)
conv2d = bn()(conv2d)
merge3=concatenate([conv2a,conv2b,conv2c,conv2d])
conv2e = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge3)
conv2e = bn()(conv2e)
merge4=concatenate([conv2a,conv2b,conv2c,conv2d,conv2e])
conv2f = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge4)
conv2f = bn()(conv2f)
merge5=concatenate([conv2a,conv2b,conv2c,conv2d,conv2e,conv2f])
conv2g = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge5)
conv2g = bn()(conv2g)
merge6=concatenate([conv2a,conv2b,conv2c,conv2d,conv2e,conv2f,conv2g])
conv2h = Conv2D(64, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge6)
conv2h = bn()(conv2h)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2h)
pool2 = Dropout(DropP)(pool2)
#############################
conv3a = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(pool2)
conv3a = bn()(conv3a)
conv3b = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(conv3a)
conv3b = bn()(conv3b)
merge1=concatenate([conv3a,conv3b])
conv3c = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge1)
conv3c = bn()(conv3c)
merge2=concatenate([conv3a,conv3b,conv3c])
conv3d = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge2)
conv3d = bn()(conv3d)
merge3=concatenate([conv3a,conv3b,conv3c,conv3d])
conv3e = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge3)
conv3e = bn()(conv3e)
merge4=concatenate([conv3a,conv3b,conv3c,conv3d,conv3e])
conv3f = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge4)
conv3f = bn()(conv3f)
merge5=concatenate([conv3a,conv3b,conv3c,conv3d,conv3e,conv3f])
conv3g = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge5)
conv3g = bn()(conv3g)
merge6=concatenate([conv3a,conv3b,conv3c,conv3d,conv3e,conv3f,conv3g])
conv3h = Conv2D(128, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge6)
conv3h = bn()(conv3h)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3h)
pool3 = Dropout(DropP)(pool3)
#############################
conv4a = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(pool3)
conv4a = bn()(conv4a)
conv4b = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(conv4a)
conv4b = bn()(conv4b)
merge1=concatenate([conv4a,conv4b])
conv4c = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge1)
conv4c = bn()(conv4c)
merge2=concatenate([conv4a,conv4b,conv4c])
conv4d = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge2)
conv4d = bn()(conv4d)
merge3=concatenate([conv4a,conv4b,conv4c,conv4d])
conv4e = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge3)
conv4e = bn()(conv4e)
merge4=concatenate([conv4a,conv4b,conv4c,conv4d,conv4e])
conv4f = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge4)
conv4f = bn()(conv4f)
merge5=concatenate([conv4a,conv4b,conv4c,conv4d,conv4e,conv4f])
conv4g = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge5)
conv4g = bn()(conv4g)
merge6=concatenate([conv4a,conv4b,conv4c,conv4d,conv4e,conv4f,conv4g])
conv4h = Conv2D(256, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge6)
conv4h = bn()(conv4h)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4h)
pool4 = Dropout(DropP)(pool4)
#############################
conv5a = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(pool4)
conv5a = bn()(conv5a)
conv5b = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(conv5a)
conv5b = bn()(conv5b)
merge1=concatenate([conv5a,conv5b])
conv5c = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge1)
conv5c = bn()(conv5c)
merge2=concatenate([conv5a,conv5b,conv5c])
conv5d = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge2)
conv5d = bn()(conv5d)
merge3=concatenate([conv5a,conv5b,conv5c,conv5d])
conv5e = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge3)
conv5e = bn()(conv5e)
merge4=concatenate([conv5a,conv5b,conv5c,conv5d,conv5e])
conv5f = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge4)
conv5f = bn()(conv5f)
merge5=concatenate([conv5a,conv5b,conv5c,conv5d,conv5e,conv5f])
conv5g = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge5)
conv5g = bn()(conv5g)
merge6=concatenate([conv5a,conv5b,conv5c,conv5d,conv5e,conv5f,conv5g])
conv5h = Conv2D(512, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge6)
conv5h = bn()(conv5h)
flatten_block=Flatten()(conv5h)
#####################################
#branch 2
inputtwo=Input(shape=(1,), dtype='float32',name='inputtwo')
#xmerge1=concatenate([flatten_block,inputtwo])
#####################################
#branch 3
xinputtwo=Input(shape=(1000,), dtype='float32',name='xinputtwo')
xlayerone=Dense(32, activation='relu' )(xinputtwo)
xlayertwo=Dense(64,activation='relu' )(xlayerone)
xlayerthree=Dense(128,activation='relu' )(xlayertwo)
xlayerfour=Dense(256,activation='relu' )(xlayerthree)
########################################
final_merge=concatenate([flatten_block,inputtwo,xlayerfour])
#mixing the input of the three branches
after_merger_layers_1=Dense(32,activation='relu' )(final_merge)
after_merger_layers_2=Dense(64,activation='relu' )(after_merger_layers_1)
after_merger_layers_3=Dense(128,activation='relu' )(after_merger_layers_2)
after_merger_layers_4=Dense(256,activation='relu' )(after_merger_layers_3)
final_op=Dense(15000, activation='softmax',name='final_op')(after_merger_layers_4)
model = Model(inputs=[inputs,inputtwo,xinputtwo], outputs=final_op)
model.compile(optimizer='adagrad',loss='categorical_crossentropy',metrics=['accuracy'])
model.summary()
return model
def train(self):
"""
this function trains the final network
:return:
"""
self.load_model()
if self.model is None:
self.model = self.UNet(input_shape=(64,64,3))
print(self.model.summary())
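        # The training data is stored as 14 chunked .npy files (raw images plus
        # clustering and classification features); make 4 passes over all chunks,
        # fitting one epoch per chunk.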
for k in range(0, 4):
for i in range(0, 14):
print(i)
X_train = np.load(os.path.join ( self.images_dir , "X_"+str(i)+".npy"))
X1_train = np.load(os.path.join(self.clustering_dir, "train_X_"+str(i)+".npy"))
X2_train = np.load(os.path.join(self.classification_dir, "train_X_"+str(i)+".npy"))
X_train = X_train.astype('float32')
X1_train = X1_train.astype('float32')
X2_train = X2_train.astype('float32')
#X_train=X_train.reshape(X_train.shape+(1,))
y_train = np.load(os.path.join(self.images_dir, "y_"+str(i)+".npy"))#.reshape(X_train.shape)
y_train = keras.utils.to_categorical(y_train, 15000)
self.model.fit([X_train, X1_train, X2_train], [y_train],
batch_size=64,
                               epochs=1,  # 'epochs' replaces the deprecated 'nb_epoch' keyword
shuffle=True)
self.model.save('final_net_dsp.h5')
def predict(self):
"""
this function runs the prediction over the sets
:return:
"""
if self.model is None:
self.load_model()
if self.model is None:
return None
i =0
X_train = np.load(os.path.join(self.images_dir, "X_" + str(i) + ".npy"))
X1_train = np.load(os.path.join(self.clustering_dir, "train_X_" + str(i) + ".npy"))
X2_train = np.load(os.path.join(self.classification_dir, "train_X_" + str(i) + ".npy"))
predicted = self.model.predict([X_train, X1_train, X2_train], batch_size=20)
return predicted
| 35.4975 | 148 | 0.610747 | [
"MIT"
] | dsp-uga/rope | src/models/final_model.py | 14,199 | Python |
import json
import kfp.dsl as _kfp_dsl
import kfp.components as _kfp_components
from collections import OrderedDict
from kubernetes import client as k8s_client
def step1():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_marshal([], ['_b', '_a'], _kale_pipeline_parameters, "/marshal")
def step1():
a = 1
b = 2
return a, b
step1()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
def step2():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.common.runutils import ttl as _kale_ttl
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_ttl(5)
@_kale_marshal(['_b', '_a'], ['_c'], _kale_pipeline_parameters, "/marshal")
def step2(a, b):
c = a + b
print(c)
return c
step2()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
def step3():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_marshal(['_a', '_c'], [], _kale_pipeline_parameters, "/marshal")
def step3(a, c):
d = c + a
print(d)
step3()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
_kale_step1_op = _kfp_components.func_to_container_op(step1)
_kale_step2_op = _kfp_components.func_to_container_op(step2)
_kale_step3_op = _kfp_components.func_to_container_op(step3)
@_kfp_dsl.pipeline(
name='test',
description=''
)
def auto_generated_pipeline():
_kale_pvolumes_dict = OrderedDict()
_kale_volume_step_names = []
_kale_volume_name_parameters = []
_kale_marshal_vop = _kfp_dsl.VolumeOp(
name="kale-marshal-volume",
resource_name="kale-marshal-pvc",
modes=['ReadWriteMany'],
size="1Gi"
)
_kale_volume_step_names.append(_kale_marshal_vop.name)
_kale_volume_name_parameters.append(
_kale_marshal_vop.outputs["name"].full_name)
_kale_pvolumes_dict['/marshal'] = _kale_marshal_vop.volume
_kale_volume_step_names.sort()
_kale_volume_name_parameters.sort()
_kale_step1_task = _kale_step1_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after()
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step1_task.add_pod_label(_kale_k, _kale_v)
_kale_step_limits = {'amd/gpu': '1'}
for _kale_k, _kale_v in _kale_step_limits.items():
_kale_step1_task.container.add_resource_limit(_kale_k, _kale_v)
_kale_step1_task.container.working_dir = "/test"
_kale_step1_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step1_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step1_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step1_task.dependent_names +
_kale_volume_step_names)
_kale_step1_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step1_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
_kale_step2_task = _kale_step2_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after(_kale_step1_task)
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step2_task.add_pod_label(_kale_k, _kale_v)
_kale_step2_task.set_retry_strategy(
num_retries=5,
retry_policy="Always",
backoff_duration="20",
backoff_factor=2,
backoff_max_duration=None)
_kale_step2_task.container.working_dir = "/test"
_kale_step2_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step2_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step2_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step2_task.dependent_names +
_kale_volume_step_names)
_kale_step2_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step2_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
_kale_step3_task = _kale_step3_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after(_kale_step2_task, _kale_step1_task)
_kale_step_annotations = {'step3-annotation': 'test'}
for _kale_k, _kale_v in _kale_step_annotations.items():
_kale_step3_task.add_pod_annotation(_kale_k, _kale_v)
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step3_task.add_pod_label(_kale_k, _kale_v)
_kale_step3_task.container.working_dir = "/test"
_kale_step3_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step3_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step3_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step3_task.dependent_names +
_kale_volume_step_names)
_kale_step3_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step3_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
if __name__ == "__main__":
pipeline_func = auto_generated_pipeline
pipeline_filename = pipeline_func.__name__ + '.pipeline.tar.gz'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
# Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment('test')
# Submit a pipeline run
from kale.common import kfputils
pipeline_id, version_id = kfputils.upload_pipeline(
pipeline_filename, "test")
run_result = kfputils.run_pipeline(
experiment_name=experiment.name, pipeline_id=pipeline_id, version_id=version_id)
| 34.480769 | 88 | 0.725739 | [
"Apache-2.0"
] | akravacyber/kale | backend/kale/tests/assets/kfp_dsl/simple_data_passing.py | 7,172 | Python |
import numpy as np
import matplotlib.pyplot as plt
from UTILS.Calculus import Calculus
from UTILS.SetAxisLimit import SetAxisLimit
from UTILS.Tools import Tools
from UTILS.Errors import Errors
import sys
# Theoretical background https://arxiv.org/abs/1401.5176
# Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field #
# Equations in Spherical Geometry and their Application to Turbulent Stellar #
# Convection Data #
class LuminosityEquation(Calculus, SetAxisLimit, Tools, Errors, object):
def __init__(self, filename, ig, ieos, fext, intc, tke_diss, bconv, tconv, data_prefix):
super(LuminosityEquation, self).__init__(ig)
# load data to structured array
eht = self.customLoad(filename)
# load grid
xzn0 = self.getRAdata(eht, 'xzn0')
yzn0 = self.getRAdata(eht, 'yzn0')
zzn0 = self.getRAdata(eht, 'zzn0')
nx = self.getRAdata(eht, 'nx')
# pick equation-specific Reynolds-averaged mean fields according to:
# https://github.com/mmicromegas/ransX/blob/master/DOCS/ransXimplementationGuide.pdf
dd = self.getRAdata(eht, 'dd')[intc]
ux = self.getRAdata(eht, 'ux')[intc]
pp = self.getRAdata(eht, 'pp')[intc]
tt = self.getRAdata(eht, 'tt')[intc]
cp = self.getRAdata(eht, 'cp')[intc]
gg = self.getRAdata(eht, 'gg')[intc]
abar = self.getRAdata(eht, 'abar')[intc]
ddux = self.getRAdata(eht, 'ddux')[intc]
dduy = self.getRAdata(eht, 'dduy')[intc]
dduz = self.getRAdata(eht, 'dduz')[intc]
ddttux = self.getRAdata(eht, 'ddttux')[intc]
dduxttx = self.getRAdata(eht, 'dduxttx')[intc]
dduytty = self.getRAdata(eht, 'dduytty')[intc]
dduzttz = self.getRAdata(eht, 'dduzttz')[intc]
eiuxddx = self.getRAdata(eht, 'eiuxddx')[intc]
eiuyddy = self.getRAdata(eht, 'eiuyddy')[intc]
eiuzddz = self.getRAdata(eht, 'eiuzddz')[intc]
dduxux = self.getRAdata(eht, 'dduxux')[intc]
dduyuy = self.getRAdata(eht, 'dduyuy')[intc]
dduzuz = self.getRAdata(eht, 'dduzuz')[intc]
dduxuy = self.getRAdata(eht, 'dduxuy')[intc]
dduxuz = self.getRAdata(eht, 'dduxuz')[intc]
ddekux = self.getRAdata(eht, 'ddekux')[intc]
ddek = self.getRAdata(eht, 'ddek')[intc]
ddei = self.getRAdata(eht, 'ddei')[intc]
ddeiux = self.getRAdata(eht, 'ddeiux')[intc]
eiux = self.getRAdata(eht, 'eiux')[intc]
ddetux = self.getRAdata(eht, 'ddetux')[intc]
divu = self.getRAdata(eht, 'divu')[intc]
ppdivu = self.getRAdata(eht, 'ppdivu')[intc]
dddivu = self.getRAdata(eht, 'dddivu')[intc]
uxdivu = self.getRAdata(eht, 'uxdivu')[intc]
ppux = self.getRAdata(eht, 'ppux')[intc]
ddenuc1 = self.getRAdata(eht, 'ddenuc1')[intc]
ddenuc2 = self.getRAdata(eht, 'ddenuc2')[intc]
chim = self.getRAdata(eht, 'chim')[intc]
chit = self.getRAdata(eht, 'chit')[intc]
chid = self.getRAdata(eht, 'chid')[intc]
gamma1 = self.getRAdata(eht, 'gamma1')[intc]
gascon = 8.3144629e7 # gas constant in cgs
# override gamma for ideal gas eos (need to be fixed in PROMPI later)
if ieos == 1:
cp = self.getRAdata(eht, 'cp')[intc]
cv = self.getRAdata(eht, 'cv')[intc]
gamma1 = cp / cv # gamma1,gamma2,gamma3 = gamma = cp/cv Cox & Giuli 2nd Ed. page 230, Eq.9.110
# print(gamma1)
# print("-----------")
# print((gamma1/(gamma1-1.))*gascon/abar)
# print("-----------")
# print(cp)
##########################
# HSSE LUMINOSITY EQUATION
##########################
# store time series for time derivatives
t_timec = self.getRAdata(eht, 'timec')
t_dd = self.getRAdata(eht, 'dd')
t_tt = self.getRAdata(eht, 'tt')
t_pp = self.getRAdata(eht, 'pp')
t_ddei = self.getRAdata(eht, 'ddei')
t_ddss = self.getRAdata(eht, 'ddss')
t_ddtt = self.getRAdata(eht, 'ddtt')
t_ddux = self.getRAdata(eht, 'ddux')
t_dduy = self.getRAdata(eht, 'dduy')
t_dduz = self.getRAdata(eht, 'dduz')
t_dduxux = self.getRAdata(eht, 'dduxux')
t_dduyuy = self.getRAdata(eht, 'dduyuy')
t_dduzuz = self.getRAdata(eht, 'dduzuz')
t_uxux = self.getRAdata(eht, 'uxux')
t_uyuy = self.getRAdata(eht, 'uyuy')
t_uzuz = self.getRAdata(eht, 'uzuz')
t_fht_ek = 0.5 * (t_dduxux + t_dduyuy + t_dduzuz) / t_dd
t_fht_ei = t_ddei / t_dd
t_fht_et = t_fht_ek + t_fht_ei
t_fht_ss = t_ddss / t_dd
t_fht_ux = t_ddux / t_dd
t_fht_uy = t_dduy / t_dd
t_fht_uz = t_dduz / t_dd
t_fht_ui_fht_ui = t_fht_ux * t_fht_ux + t_fht_uy * t_fht_uy + t_fht_uz * t_fht_uz
t_fht_tt = t_ddtt/t_dd
# t_mm = self.getRAdata(eht,'mm'))
# minus_dt_mm = -self.dt(t_mm,xzn0,t_timec,intc)
# fht_ux = minus_dt_mm/(4.*np.pi*(xzn0**2.)*dd)
# construct equation-specific mean fields
# fht_ek = 0.5*(dduxux + dduyuy + dduzuz)/dd
fht_ek = ddek / dd
fht_ux = ddux / dd
fht_uy = dduy / dd
fht_uz = dduz / dd
fht_ei = ddei / dd
fht_et = fht_ek + fht_ei
fht_enuc = (ddenuc1 + ddenuc2) / dd
fht_eiux = ddeiux/dd
fei = ddeiux - ddux * ddei / dd
fekx = ddekux - fht_ux * fht_ek
fpx = ppux - pp * ux
fht_ui_fht_ui = fht_ux * fht_ux + fht_uy * fht_uy + fht_uz * fht_uz
if self.ig == 1: # Kippenhahn and Weigert, page 38
alpha = 1.
delta = 1.
phi = 1.
elif self.ig == 2:
alpha = 1. / chid
delta = -chit / chid
phi = chid / chim
fht_rxx = dduxux - ddux * ddux / dd
fdil = (uxdivu - ux * divu)
gg = -gg
if self.ig == 1:
surface = (yzn0[-1] - yzn0[0]) * (zzn0[-1] - zzn0[0])
elif self.ig == 2:
# sphere surface
surface = +4. * np.pi * (xzn0 ** 2.)
else:
print("ERROR(Properties.py): " + self.errorGeometry(self.ig))
sys.exit()
####################################
# STANDARD LUMINOSITY EQUATION EXACT
####################################
self.minus_cp_rho_dTdt = -cp*(self.dt(t_ddtt, xzn0, t_timec, intc) + self.Div(ddttux,xzn0) - (dduxttx + dduytty + dduzttz))
self.plus_delta_dPdt = +delta * self.dt(t_pp, xzn0, t_timec, intc)
self.minus_dd_div_eiui = -(self.Div(ddeiux, xzn0) - (eiuxddx + eiuyddy + eiuzddz))
#self.minus_dd_div_eiui = -(self.Div(ddeiux, xzn0))
self.plus_tke_diss = +tke_diss
self.minus_resLumExactEquation = -(self.minus_cp_rho_dTdt+self.plus_delta_dPdt+self.minus_dd_div_eiui+self.plus_tke_diss)
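        # Schematic form of the balance assembled above:
        #     -c_P <rho dT/dt> + delta <dP/dt> - <div(rho eps_I u)> + eps_K  ~  N (residual)
        # i.e. the residual measures how far these four terms are from closing the budget.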
########################################
# END STANDARD LUMINOSITY EQUATION EXACT
########################################
# assign global data to be shared across whole class
self.data_prefix = data_prefix
self.xzn0 = xzn0
self.fht_et = fht_ei + fht_ek
self.nx = nx
self.bconv = bconv
self.tconv = tconv
self.fext = fext
def plot_et(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
"""Plot mean total energy stratification in the model"""
# load x GRID
grd1 = self.xzn0
# load DATA to plot
plt1 = self.fht_et
# create FIGURE
plt.figure(figsize=(7, 6))
# format AXIS, make sure it is exponential
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
# set plot boundaries
to_plot = [plt1]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
# plot DATA
plt.title(r'total energy')
plt.plot(grd1, plt1, color='brown', label=r'$\widetilde{\varepsilon}_t$')
# define and show x/y LABELS
if self.ig == 1:
setxlabel = r"x (cm)"
setylabel = r"$\widetilde{\varepsilon}_t$ (erg g$^{-1}$)"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif self.ig == 2:
setxlabel = r"r (cm)"
setylabel = r"$\widetilde{\varepsilon}_t$ (erg g$^{-1}$)"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
# show LEGEND
plt.legend(loc=ilg, prop={'size': 18})
# display PLOT
plt.show(block=False)
# save PLOT
if self.fext == 'png':
plt.savefig('RESULTS/' + self.data_prefix + 'mean_et.png')
elif self.fext == 'eps':
plt.savefig('RESULTS/' + self.data_prefix + 'mean_et.eps')
def plot_luminosity_equation_exact(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
"""Plot luminosity equation in the model"""
# load x GRID
grd1 = self.xzn0
rhs0 = self.minus_cp_rho_dTdt
rhs1 = self.plus_delta_dPdt
rhs2 = self.minus_dd_div_eiui
rhs3 = self.plus_tke_diss
res = self.minus_resLumExactEquation
# create FIGURE
plt.figure(figsize=(7, 6))
# format AXIS, make sure it is exponential
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
# set plot boundaries
to_plot = [rhs0, rhs1, res]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
self.bconv = 4.e8
self.tconv = 1.2e9
xlimitrange = np.where((grd1 > self.bconv) & (grd1 < self.tconv))
xlimitbottom = np.where(grd1 < self.bconv)
xlimittop = np.where(grd1 > self.tconv)
# plot DATA
plt.title("standard luminosity equation exact")
if self.ig == 1:
plt.plot(grd1[xlimitrange], rhs0[xlimitrange], color='#FF8C00', label=r"$-c_P \overline{\rho \partial_t T}$")
plt.plot(grd1[xlimitrange], rhs1[xlimitrange], color='y',label = r"$+\delta \overline{\partial_t P}$")
plt.plot(grd1[xlimitrange], rhs2[xlimitrange], color='r',label = r"$-\overline{\rho \nabla \cdot \epsilon_I {\bf u}}$")
plt.plot(grd1[xlimitrange], rhs3[xlimitrange], color='g',label = r"$+\varepsilon_K$")
plt.plot(grd1, res, color='k', linestyle='--', label=r"res $\sim N$")
zeros = np.zeros(self.nx)
plt.plot(grd1, zeros, color='k', linewidth=0.6, label="zero")
elif self.ig == 2:
plt.plot(grd1[xlimitrange], rhs0[xlimitrange], color='#FF8C00', label=r"$-c_P \rho dT/dt$")
plt.plot(grd1[xlimitrange], rhs1[xlimitrange], color='y',label = r"$+\delta dP/dt$")
plt.plot(grd1, res, color='k', linestyle='--', label=r"res $\sim N$")
zeros = np.zeros(self.nx)
plt.plot(grd1, zeros, color='k', linewidth=0.6, label="zero")
# convective boundary markers
plt.axvline(self.bconv, linestyle='--', linewidth=0.7, color='k')
plt.axvline(self.tconv, linestyle='--', linewidth=0.7, color='k')
if self.ig == 1:
setxlabel = r"x (cm)"
setylabel = r"erg g$^{-1}$ s$^{-1}$"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif self.ig == 2:
setxlabel = r"r (cm)"
setylabel = r"erg g$^{-1}$ s$^{-1}$"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
# show LEGEND
plt.legend(loc=ilg, prop={'size': 10}, ncol = 2)
# display PLOT
plt.show(block=False)
# save PLOT
if self.fext == 'png':
plt.savefig('RESULTS/' + self.data_prefix + 'standard_luminosity_exact_eq.png')
elif self.fext == 'eps':
plt.savefig('RESULTS/' + self.data_prefix + 'standard_luminosity_exact_eq.eps')
| 35.538922 | 131 | 0.563353 | [
"BSD-2-Clause"
] | mmicromegas/ransX | CANUTO1997/LuminosityEquation.py | 11,870 | Python |
import numpy as np
class Graph():
""" The Graph to model the skeletons extracted by the openpose
Args:
strategy (string): must be one of the follow candidates
- uniform: Uniform Labeling
- distance: Distance Partitioning
- spatial: Spatial Configuration
For more information, please refer to the section 'Partition Strategies'
in our paper (https://arxiv.org/abs/1801.07455).
layout (string): must be one of the follow candidates
- openpose: Is consists of 18 joints. For more information, please
refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose#output
- ntu-rgb+d: Is consists of 25 joints. For more information, please
refer to https://github.com/shahroudy/NTURGB-D
max_hop (int): the maximal distance between two connected nodes
dilation (int): controls the spacing between the kernel points
"""
def __init__(self,
layout='openpose',
strategy='uniform',
max_hop=1,
dilation=1):
self.max_hop = max_hop
self.dilation = dilation
self.get_edge(layout)
self.hop_dis = get_hop_distance(
self.num_node, self.edge, max_hop=max_hop)
self.get_adjacency(strategy)
def __str__(self):
return self.A
def get_edge(self, layout):
if layout == 'openpose':
self.num_node = 18
self_link = [(i, i) for i in range(self.num_node)]
neighbor_link = [(4, 3), (3, 2), (7, 6), (6, 5), (13, 12), (12,
11),
(10, 9), (9, 8), (11, 5), (8, 2), (5, 1), (2, 1),
(0, 1), (15, 0), (14, 0), (17, 15), (16, 14)]
self.edge = self_link + neighbor_link
self.center = 1
elif layout == 'ntu-rgb+d':
self.num_node = 25
self_link = [(i, i) for i in range(self.num_node)]
neighbor_1base = [(1, 2), (2, 21), (3, 21), (4, 3), (5, 21),
(6, 5), (7, 6), (8, 7), (9, 21), (10, 9),
(11, 10), (12, 11), (13, 1), (14, 13), (15, 14),
(16, 15), (17, 1), (18, 17), (19, 18), (20, 19),
(22, 23), (23, 8), (24, 25), (25, 12)]
neighbor_link = [(i - 1, j - 1) for (i, j) in neighbor_1base]
self.edge = self_link + neighbor_link
self.center = 21 - 1
elif layout == 'ntu_edge':
self.num_node = 24
self_link = [(i, i) for i in range(self.num_node)]
neighbor_1base = [(1, 2), (3, 2), (4, 3), (5, 2), (6, 5), (7, 6),
(8, 7), (9, 2), (10, 9), (11, 10), (12, 11),
(13, 1), (14, 13), (15, 14), (16, 15), (17, 1),
(18, 17), (19, 18), (20, 19), (21, 22), (22, 8),
(23, 24), (24, 12)]
neighbor_link = [(i - 1, j - 1) for (i, j) in neighbor_1base]
self.edge = self_link + neighbor_link
self.center = 2
# elif layout=='customer settings':
# pass
else:
raise ValueError("Do Not Exist This Layout.")
    # compute the adjacency matrix A
def get_adjacency(self, strategy):
valid_hop = range(0, self.max_hop + 1, self.dilation) #range(start,stop,step)
adjacency = np.zeros((self.num_node, self.num_node))
for hop in valid_hop:
adjacency[self.hop_dis == hop] = 1
normalize_adjacency = normalize_digraph(adjacency)
if strategy == 'uniform':
A = np.zeros((1, self.num_node, self.num_node))
A[0] = normalize_adjacency
self.A = A
elif strategy == 'distance':
A = np.zeros((len(valid_hop), self.num_node, self.num_node))
for i, hop in enumerate(valid_hop):
A[i][self.hop_dis == hop] = normalize_adjacency[self.hop_dis ==
hop]
self.A = A
elif strategy == 'spatial':
A = []
for hop in valid_hop:
a_root = np.zeros((self.num_node, self.num_node))
a_close = np.zeros((self.num_node, self.num_node))
a_further = np.zeros((self.num_node, self.num_node))
for i in range(self.num_node):
for j in range(self.num_node):
if self.hop_dis[j, i] == hop:
if self.hop_dis[j, self.center] == self.hop_dis[
i, self.center]:
a_root[j, i] = normalize_adjacency[j, i]
elif self.hop_dis[j, self.
center] > self.hop_dis[i, self.
center]:
a_close[j, i] = normalize_adjacency[j, i]
else:
a_further[j, i] = normalize_adjacency[j, i]
if hop == 0:
A.append(a_root)
else:
A.append(a_root + a_close)
A.append(a_further)
A = np.stack(A)
self.A = A
else:
raise ValueError("Do Not Exist This Strategy")
# The hop_dis returned by this function is the graph's connectivity expressed as a hop-distance matrix
def get_hop_distance(num_node, edge, max_hop=1):
A = np.zeros((num_node, num_node))
for i, j in edge:
A[j, i] = 1
A[i, j] = 1
# compute hop steps
    hop_dis = np.zeros((num_node, num_node)) + np.inf  # np.inf represents positive infinity ("not yet reachable")
    # np.linalg.matrix_power(A, d) raises A to the d-th power; transfer_mat collects
    # the max_hop + 1 powers I, A, A^2, ... of the adjacency matrix
    transfer_mat = [np.linalg.matrix_power(A, d) for d in range(max_hop + 1)]
    # (np.stack(transfer_mat) > 0) is True where an entry is positive and False otherwise,
    # so arrive_mat is a boolean array with the same shape as the stacked transfer_mat
    arrive_mat = (np.stack(transfer_mat) > 0)
    # range(start, stop, step) with step=-1 walks from max_hop down to 0
    for d in range(max_hop, -1, -1):
        # set hop_dis to d wherever arrive_mat[d] is True
hop_dis[arrive_mat[d]] = d
return hop_dis
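# Tiny worked example (illustrative): for a 3-node chain 0-1-2,
# get_hop_distance(3, [(0, 1), (1, 2)], max_hop=2) returns
#     [[0., 1., 2.],
#      [1., 0., 1.],
#      [2., 1., 0.]]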
# Normalize the digraph: divide every element of each column of A by that column's sum
def normalize_digraph(A):
    Dl = np.sum(A, 0)  # collapse A along axis 0, i.e. the column sums
num_node = A.shape[0]
Dn = np.zeros((num_node, num_node))
for i in range(num_node):
if Dl[i] > 0:
Dn[i, i] = Dl[i]**(-1)
AD = np.dot(A, Dn)
return AD
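# Tiny worked example for normalize_digraph (illustrative):
#     A = [[0, 1, 1],         column sums Dl = [2, 1, 1], so Dn = diag(1/2, 1, 1)
#          [1, 0, 0],   =>    AD = A @ Dn = [[0, 1, 1],
#          [1, 0, 0]]                        [0.5, 0, 0],
#                                            [0.5, 0, 0]]
# normalize_undigraph below is the symmetric variant D^(-1/2) A D^(-1/2).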
def normalize_undigraph(A):
Dl = np.sum(A, 0)
num_node = A.shape[0]
Dn = np.zeros((num_node, num_node))
for i in range(num_node):
if Dl[i] > 0:
Dn[i, i] = Dl[i]**(-0.5)
DAD = np.dot(np.dot(Dn, A), Dn)
    return DAD
| 41.91411 | 97 | 0.492096 | [
"MIT"
] | IkeaASM/ikea_asm_dataset | action/pose_based/net/utils/graph.py | 7,104 | Python |
import struct
class PEF:
MAGIC = b'Joy!'
CONT_HEAD_FMT = '>4s4s4s5I2HI'
CONT_HEAD_LEN = struct.calcsize(CONT_HEAD_FMT)
SEC_HEAD_FMT = '>i5I4B'
SEC_HED_LEN = struct.calcsize(SEC_HEAD_FMT)
@classmethod
def read_from(cls, path):
with open(path, 'rb') as f:
return cls(f.read())
def __init__(self, data):
(magic, fourcc, arch, ver,
timestamp, old_def_ver, old_imp_ver, cur_ver,
sec_count, inst_sec_count, reserv) = struct.unpack_from(self.CONT_HEAD_FMT, data)
sec_earliest = len(data)
sec_latest = 0
self.sections = []
self.sectypes = []
self.headeroffsets = []
self.code = None
for i in range(sec_count):
sh_offset = self.CONT_HEAD_LEN + self.SEC_HED_LEN*i
(sectionName, sectionAddress, execSize,
initSize, rawSize, containerOffset,
regionKind, shareKind, alignment, reserved) = struct.unpack_from(self.SEC_HEAD_FMT, data, sh_offset)
the_sec = data[containerOffset : containerOffset + rawSize]
if regionKind == 0 and execSize == initSize == rawSize:
the_sec = bytearray(the_sec)
self.code = the_sec
self.sections.append(the_sec)
self.sectypes.append(regionKind)
self.headeroffsets.append(sh_offset)
sec_earliest = min(sec_earliest, containerOffset)
sec_latest = max(sec_latest, containerOffset + rawSize)
if any(data[sec_latest:]):
print('nonzero trailing data from', hex(sec_latest), 'to', hex(len(data)), ' ... will cause incorrect output')
self.padmult = 1
while len(data) % (self.padmult * 2) == 0:
self.padmult *= 2
self.header = data[:sec_earliest]
def __bytes__(self):
accum = bytearray(self.header)
for i in range(len(self.sections)):
the_sec = self.sections[i]
hoff = self.headeroffsets[i]
while len(accum) % 16:
accum.append(0)
new_off = len(accum)
new_len = len(the_sec)
accum.extend(the_sec)
struct.pack_into('>I', accum, hoff + 20, new_off)
if the_sec is self.code:
for i in range(8, 20, 4):
struct.pack_into('>I', accum, hoff + i, new_len)
while len(accum) % self.padmult != 0:
accum.extend(b'\x00')
return bytes(accum)
def write_to(self, path):
with open(path, 'wb') as f:
f.write(bytes(self))
| 29.359551 | 122 | 0.574436 | [
"MIT"
] | elliotnunn/ToolboxToolbox | pefbinary.py | 2,613 | Python |
import os
from patricesorter._version import __version__
VERSION = __version__
COPYRIGHT = "Copyright (C) 2022"
LICENSE = "MIT"
| 16.25 | 46 | 0.776923 | [
"MIT"
] | PatriceJada/patricesorter | patricesorter/lib/defaults.py | 130 | Python |
from selenium.webdriver.support.select import Select
def get_selected_option(browser, css_selector):
# Takes a css selector for a <select> element and returns the value of
# the selected option
select = Select(browser.find_element_by_css_selector(css_selector))
return select.first_selected_option.get_attribute('value')
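# Illustrative usage (a sketch; assumes a live WebDriver instance and a page
# containing a <select id="country"> element):
#     selected_value = get_selected_option(browser, '#country')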
| 42.25 | 74 | 0.792899 | [
"MIT"
] | egineering-llc/egat_example_project | tests/test_helpers/selenium_helper.py | 338 | Python |
import sys
import numpy as np
from contextlib import contextmanager
from qtpy.QtGui import QOpenGLBuffer
def setup_vertex_buffer(gl, data, shader, shader_variable):
'Setup a vertex buffer with `data` vertices as `shader_variable` on shader'
vbo = QOpenGLBuffer(QOpenGLBuffer.VertexBuffer)
vbo.create()
with bind(vbo):
vertices = np.array(data, np.float32)
count, dim_vertex = vertices.shape
vbo.allocate(vertices.flatten(), vertices.nbytes)
attr_loc = shader.attributeLocation(shader_variable)
shader.enableAttributeArray(attr_loc)
shader.setAttributeBuffer(attr_loc, gl.GL_FLOAT, 0, dim_vertex)
return vbo
def update_vertex_buffer(vbo, data):
'Update a vertex buffer with `data` vertices'
vertices = np.asarray(data, np.float32)
count, dim_vertex = vertices.shape
with bind(vbo):
vbo.allocate(vertices.flatten(), vertices.nbytes)
def copy_data_to_pbo(pbo, data, *, mapped_array=None):
'Allocate or update data stored in a pixel buffer object'
width, height = data.shape
with bind(pbo):
if pbo.isCreated() and mapped_array is not None:
mapped_array[:] = data.reshape((width, height))
return mapped_array
full_size = data.nbytes
pointer_type = np.ctypeslib.ndpointer(
dtype=data.dtype, shape=(width, height), ndim=data.ndim)
pbo.create()
with bind(pbo):
pbo.allocate(data, full_size)
ptr = pbo.map(QOpenGLBuffer.WriteOnly)
assert ptr is not None, 'Failed to map pixel buffer array'
pointer_type = np.ctypeslib.ndpointer(
dtype=data.dtype, shape=(width, height), ndim=data.ndim)
mapped_array = np.ctypeslib.as_array(pointer_type(int(ptr)))
pbo.unmap()
mapped_array[:] = data.reshape((width, height))
return mapped_array
def update_pbo_texture(gl, pbo, texture, *, array_data, texture_format,
source_format, source_type):
'Update a texture associated with a PBO'
width, height = array_data.shape[:2]
if source_format == gl.GL_RGB:
height //= 3
with bind(pbo, texture):
# AreaDetector arrays are not strided
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
# AreaDetector arrays are big endian - so let OpenGL take care of
# byteswapping if that doesn't match up with the system/array
# endianness
# gl.glPixelStorei(gl.GL_UNPACK_SWAP_BYTES,
# int(not array_data.dtype.isnative))
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER,
gl.GL_LINEAR)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER,
gl.GL_LINEAR)
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, texture_format, width, height, 0,
source_format, source_type, None)
@contextmanager
def bind(*objs, args=None):
'Bind all objs (optionally with positional arguments); releases at cleanup'
if args is None:
args = (None for obj in objs)
for obj, arg in zip(objs, args):
if arg is not None:
obj.bind(arg)
else:
obj.bind()
yield
for obj in objs[::-1]:
obj.release()
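# Illustrative usage of bind() (a sketch; `vbo` and `texture` stand for any objects
# exposing bind()/release(), e.g. a QOpenGLBuffer and a texture wrapper):
#     with bind(vbo, texture):
#         ...  # issue GL calls while both are bound; released in reverse order on exit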
| 33.418367 | 79 | 0.651908 | [
"BSD-3-Clause"
] | klauer/caproto-image-viewer | caimageviewer/gl_util.py | 3,275 | Python |
import base64 as _base64
import hashlib as _hashlib
import http.server as _BaseHTTPServer
import os as _os
import re as _re
import urllib.parse as _urlparse
import webbrowser as _webbrowser
from http import HTTPStatus as _StatusCodes
from multiprocessing import get_context as _mp_get_context
from urllib.parse import urlencode as _urlencode
import keyring as _keyring
import requests as _requests
from flytekit.loggers import auth_logger
_code_verifier_length = 64
_random_seed_length = 40
_utf_8 = "utf-8"
# Identifies the service used for storing passwords in keyring
_keyring_service_name = "flyteauth"
# Identifies the key used for storing and fetching from keyring. In our case, instead of a username as the keyring docs
# suggest, we are storing a user's oidc.
_keyring_access_token_storage_key = "access_token"
_keyring_refresh_token_storage_key = "refresh_token"
def _generate_code_verifier():
"""
Generates a 'code_verifier' as described in https://tools.ietf.org/html/rfc7636#section-4.1
Adapted from https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py.
:return str:
"""
code_verifier = _base64.urlsafe_b64encode(_os.urandom(_code_verifier_length)).decode(_utf_8)
# Eliminate invalid characters.
code_verifier = _re.sub(r"[^a-zA-Z0-9_\-.~]+", "", code_verifier)
if len(code_verifier) < 43:
raise ValueError("Verifier too short. number of bytes must be > 30.")
elif len(code_verifier) > 128:
raise ValueError("Verifier too long. number of bytes must be < 97.")
return code_verifier
def _generate_state_parameter():
state = _base64.urlsafe_b64encode(_os.urandom(_random_seed_length)).decode(_utf_8)
# Eliminate invalid characters.
code_verifier = _re.sub("[^a-zA-Z0-9-_.,]+", "", state)
return code_verifier
def _create_code_challenge(code_verifier):
"""
Adapted from https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py.
:param str code_verifier: represents a code verifier generated by generate_code_verifier()
:return str: urlsafe base64-encoded sha256 hash digest
"""
code_challenge = _hashlib.sha256(code_verifier.encode(_utf_8)).digest()
code_challenge = _base64.urlsafe_b64encode(code_challenge).decode(_utf_8)
# Eliminate invalid characters
code_challenge = code_challenge.replace("=", "")
return code_challenge
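# The two helpers above form the PKCE pair used by AuthorizationClient below
# (illustrative flow):
#     verifier = _generate_code_verifier()
#     challenge = _create_code_challenge(verifier)
#     # `challenge` accompanies the authorization request; `verifier` is sent to the
#     # token endpoint later so the server can validate that the two belong together.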
class AuthorizationCode(object):
def __init__(self, code, state):
self._code = code
self._state = state
@property
def code(self):
return self._code
@property
def state(self):
return self._state
class OAuthCallbackHandler(_BaseHTTPServer.BaseHTTPRequestHandler):
"""
A simple wrapper around BaseHTTPServer.BaseHTTPRequestHandler that handles a callback URL that accepts an
authorization token.
"""
def do_GET(self):
url = _urlparse.urlparse(self.path)
if url.path.strip("/") == self.server.redirect_path.strip("/"):
self.send_response(_StatusCodes.OK)
self.end_headers()
self.handle_login(dict(_urlparse.parse_qsl(url.query)))
else:
self.send_response(_StatusCodes.NOT_FOUND)
def handle_login(self, data):
self.server.handle_authorization_code(AuthorizationCode(data["code"], data["state"]))
class OAuthHTTPServer(_BaseHTTPServer.HTTPServer):
"""
A simple wrapper around the BaseHTTPServer.HTTPServer implementation that binds an authorization_client for handling
authorization code callbacks.
"""
def __init__(
self,
server_address,
RequestHandlerClass,
bind_and_activate=True,
redirect_path=None,
queue=None,
):
_BaseHTTPServer.HTTPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
self._redirect_path = redirect_path
self._auth_code = None
self._queue = queue
@property
def redirect_path(self):
return self._redirect_path
def handle_authorization_code(self, auth_code):
self._queue.put(auth_code)
self.server_close()
def handle_request(self, queue=None):
self._queue = queue
return super().handle_request()
class Credentials(object):
def __init__(self, access_token=None):
self._access_token = access_token
@property
def access_token(self):
return self._access_token
class AuthorizationClient(object):
def __init__(
self,
auth_endpoint=None,
token_endpoint=None,
scopes=None,
client_id=None,
redirect_uri=None,
client_secret=None,
):
self._auth_endpoint = auth_endpoint
self._token_endpoint = token_endpoint
self._client_id = client_id
self._scopes = scopes
self._redirect_uri = redirect_uri
self._code_verifier = _generate_code_verifier()
code_challenge = _create_code_challenge(self._code_verifier)
self._code_challenge = code_challenge
state = _generate_state_parameter()
self._state = state
self._credentials = None
self._refresh_token = None
self._headers = {"content-type": "application/x-www-form-urlencoded"}
self._expired = False
self._client_secret = client_secret
self._params = {
"client_id": client_id, # This must match the Client ID of the OAuth application.
"response_type": "code", # Indicates the authorization code grant
"scope": " ".join(s.strip("' ") for s in scopes).strip(
"[]'"
), # ensures that the /token endpoint returns an ID and refresh token
# callback location where the user-agent will be directed to.
"redirect_uri": self._redirect_uri,
"state": state,
"code_challenge": code_challenge,
"code_challenge_method": "S256",
}
# Prefer to use already-fetched token values when they've been set globally.
self._refresh_token = _keyring.get_password(_keyring_service_name, _keyring_refresh_token_storage_key)
access_token = _keyring.get_password(_keyring_service_name, _keyring_access_token_storage_key)
if access_token:
self._credentials = Credentials(access_token=access_token)
def __repr__(self):
return f"AuthorizationClient({self._auth_endpoint}, {self._token_endpoint}, {self._client_id}, {self._scopes}, {self._redirect_uri})"
@property
def has_valid_credentials(self) -> bool:
return self._credentials is not None
@property
def can_refresh_token(self) -> bool:
return self._refresh_token is not None
def start_authorization_flow(self):
# In the absence of globally-set token values, initiate the token request flow
ctx = _mp_get_context("fork")
q = ctx.Queue()
# First prepare the callback server in the background
server = self._create_callback_server()
server_process = ctx.Process(target=server.handle_request, args=(q,))
server_process.daemon = True
server_process.start()
# Send the call to request the authorization code in the background
self._request_authorization_code()
# Request the access token once the auth code has been received.
auth_code = q.get()
server_process.terminate()
self.request_access_token(auth_code)
def _create_callback_server(self):
server_url = _urlparse.urlparse(self._redirect_uri)
server_address = (server_url.hostname, server_url.port)
return OAuthHTTPServer(server_address, OAuthCallbackHandler, redirect_path=server_url.path)
def _request_authorization_code(self):
scheme, netloc, path, _, _, _ = _urlparse.urlparse(self._auth_endpoint)
query = _urlencode(self._params)
endpoint = _urlparse.urlunparse((scheme, netloc, path, None, query, None))
auth_logger.debug(f"Requesting authorization code through {endpoint}")
_webbrowser.open_new_tab(endpoint)
def _initialize_credentials(self, auth_token_resp):
"""
The auth_token_resp body is of the form:
{
"access_token": "foo",
"refresh_token": "bar",
"token_type": "Bearer"
}
"""
response_body = auth_token_resp.json()
if "access_token" not in response_body:
raise ValueError('Expected "access_token" in response from oauth server')
if "refresh_token" in response_body:
self._refresh_token = response_body["refresh_token"]
access_token = response_body["access_token"]
refresh_token = response_body["refresh_token"]
_keyring.set_password(_keyring_service_name, _keyring_access_token_storage_key, access_token)
_keyring.set_password(_keyring_service_name, _keyring_refresh_token_storage_key, refresh_token)
self._credentials = Credentials(access_token=access_token)
def request_access_token(self, auth_code):
if self._state != auth_code.state:
raise ValueError(f"Unexpected state parameter [{auth_code.state}] passed")
self._params.update(
{
"code": auth_code.code,
"code_verifier": self._code_verifier,
"grant_type": "authorization_code",
}
)
resp = _requests.post(
url=self._token_endpoint,
data=self._params,
headers=self._headers,
allow_redirects=False,
)
if resp.status_code != _StatusCodes.OK:
# TODO: handle expected (?) error cases:
# https://auth0.com/docs/flows/guides/device-auth/call-api-device-auth#token-responses
raise Exception(
"Failed to request access token with response: [{}] {}".format(resp.status_code, resp.content)
)
self._initialize_credentials(resp)
def refresh_access_token(self):
if self._refresh_token is None:
raise ValueError("no refresh token available with which to refresh authorization credentials")
resp = _requests.post(
url=self._token_endpoint,
data={"grant_type": "refresh_token", "client_id": self._client_id, "refresh_token": self._refresh_token},
headers=self._headers,
allow_redirects=False,
)
if resp.status_code != _StatusCodes.OK:
self._expired = True
# In the absence of a successful response, assume the refresh token is expired. This should indicate
# to the caller that the AuthorizationClient is defunct and a new one needs to be re-initialized.
_keyring.delete_password(_keyring_service_name, _keyring_access_token_storage_key)
_keyring.delete_password(_keyring_service_name, _keyring_refresh_token_storage_key)
return
self._initialize_credentials(resp)
@property
def credentials(self):
"""
:return flytekit.clis.auth.auth.Credentials:
"""
return self._credentials
@property
def expired(self):
"""
:return bool:
"""
return self._expired
| 36.769481 | 141 | 0.680442 | [
"Apache-2.0"
] | AdrianoKF/flytekit | flytekit/clis/auth/auth.py | 11,325 | Python |
# Copyright (c) 2012-2016 Seafile Ltd.
import logging
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from seaserv import seafile_api, ccnet_api
from seahub.group.utils import get_group_member_info, is_group_member
from seahub.group.signals import add_user_to_group
from seahub.avatar.settings import AVATAR_DEFAULT_SIZE
from seahub.base.accounts import User
from seahub.base.templatetags.seahub_tags import email2nickname
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error
logger = logging.getLogger(__name__)
class AdminGroupMembers(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
throttle_classes = (UserRateThrottle,)
permission_classes = (IsAdminUser,)
def get(self, request, group_id, format=None):
""" List all group members
Permission checking:
1. only admin can perform this action.
"""
if not request.user.admin_permissions.can_manage_group():
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
group_id = int(group_id)
group = ccnet_api.get_group(group_id)
if not group:
error_msg = 'Group %d not found.' % group_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
avatar_size = int(request.GET.get('avatar_size',
AVATAR_DEFAULT_SIZE))
except ValueError:
avatar_size = AVATAR_DEFAULT_SIZE
try:
members = ccnet_api.get_group_members(group_id)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
group_members_info = []
for m in members:
member_info = get_group_member_info(request, group_id, m.user_name, avatar_size)
group_members_info.append(member_info)
group_members = {
'group_id': group_id,
'group_name': group.group_name,
'members': group_members_info
}
return Response(group_members)
def post(self, request, group_id):
"""
Bulk add group members.
Permission checking:
1. only admin can perform this action.
"""
if not request.user.admin_permissions.can_manage_group():
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
# argument check
group_id = int(group_id)
group = ccnet_api.get_group(group_id)
if not group:
error_msg = 'Group %d not found.' % group_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
emails = request.POST.getlist('email', '')
if not emails:
error_msg = 'Email invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
result = {}
result['failed'] = []
result['success'] = []
emails_need_add = []
for email in emails:
try:
User.objects.get(email=email)
except User.DoesNotExist:
result['failed'].append({
'email': email,
'error_msg': 'User %s not found.' % email
})
continue
if is_group_member(group_id, email, in_structure=False):
result['failed'].append({
'email': email,
'error_msg': 'User %s is already a group member.' % email2nickname(email)
})
continue
emails_need_add.append(email)
# Add user to group.
for email in emails_need_add:
try:
ccnet_api.group_add_member(group_id, group.creator_name, email)
member_info = get_group_member_info(request, group_id, email)
result['success'].append(member_info)
except Exception as e:
logger.error(e)
result['failed'].append({
'email': email,
'error_msg': 'Internal Server Error'
})
add_user_to_group.send(sender=None,
group_staff=request.user.username,
group_id=group_id,
added_user=email)
return Response(result)
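    # Illustrative response shape of the bulk-add endpoint above:
    #     {"success": [<member_info>, ...],
    #      "failed": [{"email": "...", "error_msg": "..."}, ...]}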
class AdminGroupMember(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
throttle_classes = (UserRateThrottle,)
permission_classes = (IsAdminUser,)
def put(self, request, group_id, email, format=None):
""" update role of a group member
Permission checking:
1. only admin can perform this action.
"""
if not request.user.admin_permissions.can_manage_group():
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
# argument check
group_id = int(group_id)
group = ccnet_api.get_group(group_id)
if not group:
error_msg = 'Group %d not found.' % group_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
User.objects.get(email=email)
except User.DoesNotExist:
error_msg = 'User %s not found.' % email
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
if not is_group_member(group_id, email):
error_msg = 'Email %s invalid.' % email
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
is_admin = request.data.get('is_admin', '')
try:
# set/unset a specific group member as admin
if is_admin.lower() == 'true':
ccnet_api.group_set_admin(group_id, email)
elif is_admin.lower() == 'false':
ccnet_api.group_unset_admin(group_id, email)
else:
error_msg = 'is_admin invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
member_info = get_group_member_info(request, group_id, email)
return Response(member_info)
def delete(self, request, group_id, email, format=None):
""" Delete an user from group
Permission checking:
1. only admin can perform this action.
"""
if not request.user.admin_permissions.can_manage_group():
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
# argument check
group_id = int(group_id)
group = ccnet_api.get_group(group_id)
if not group:
error_msg = 'Group %d not found.' % group_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# delete member from group
try:
if not is_group_member(group_id, email):
return Response({'success': True})
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
if group.creator_name == email:
error_msg = '%s is group owner, can not be removed.' % email
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
ccnet_api.group_remove_member(group_id, group.creator_name, email)
# remove repo-group share info of all 'email' owned repos
seafile_api.remove_group_repos_by_owner(group_id, email)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({'success': True})
| 35.405063 | 93 | 0.618401 | [
"Apache-2.0"
] | DMKun/seahub | seahub/api2/endpoints/admin/group_members.py | 8,391 | Python |
import os
import cv2
import time
import argparse
import multiprocessing
import numpy as np
import tools.find_mxnet
import mxnet as mx
import sys
from detect.image_detector import ImageDetector
from symbol.symbol_factory import get_symbol
from utils import WebcamVideoStream
class_names = 'Argentina, Australia, Bhutan, Brazil, Canada, China, Cuba, France, Germany, Greece, India, \
Kenya, Mexico, Norway, Portugal, Saudi Arabia, South Africa, Sri Lanka, Sweden, Thailand, \
Turkey, Ukraine, U.A.E., U.K., U.S.A.'
detector = None
def get_detector(net, prefix, epoch, data_shape, mean_pixels, ctx, class_names, thresh, plot_confidence,
nms_thresh=0.5, force_nms=True, nms_topk=400):
if net is not None:
net = get_symbol(net, data_shape, num_classes=len(class_names), nms_thresh=nms_thresh,
force_nms=force_nms, nms_topk=nms_topk)
detector = ImageDetector(net, prefix, epoch, data_shape, mean_pixels, class_names, thresh,\
plot_confidence, ctx=ctx)
return detector
def process_image(image_frame):
# run detection
detected_img = detector.detect_and_layover_image(image_frame, False)
return detected_img
def parse_args():
parser = argparse.ArgumentParser(description='Detect objects in the live video')
parser.add_argument('--network', dest='network', type=str, default='vgg16_reduced',
help='which network to use')
parser.add_argument('--epoch', dest='epoch', help='epoch of pretrained model',
default=1, type=int)
parser.add_argument('--prefix', dest='prefix', help='Trained model prefix',
default=os.path.join(os.getcwd(), 'model', 'ssd'), type=str)
parser.add_argument('--thresh', dest='thresh', help='Threshold of confidence level',
default=0.43, type=float)
parser.add_argument('--plot-prob', dest='plot_prob', help='Should probabilities be printed. (1 = Yes, 0 = No)',
default=1, type=int)
parser.add_argument('--nms', dest='nms_thresh', type=float, default=0.45,
help='non-maximum suppression threshold')
parser.add_argument('--mean-r', dest='mean_r', type=float, default=123,
help='red mean value')
parser.add_argument('--mean-g', dest='mean_g', type=float, default=117,
help='green mean value')
parser.add_argument('--mean-b', dest='mean_b', type=float, default=104,
help='blue mean value')
parser.add_argument('--data-shape', dest='data_shape', type=int, default=300,
help='set image shape')
parser.add_argument('--class-names', dest='class_names', type=str,
default = class_names, help='string of comma separated names')
parser.add_argument('--force', dest='force_nms', type=bool, default=True,
help='force non-maximum suppression on different class')
parser.add_argument('--has-gpu', dest='gpu', help='GPU device 1 if present else 0',
default=1, type=int)
parser.add_argument('-src', '--source', dest='video_source', type=int,
default=0, help='Device index of the camera.')
parser.add_argument('-wd', '--width', dest='width', type=int,
default=480, help='Width of the frames in the video stream.')
parser.add_argument('-ht', '--height', dest='height', type=int,
default=640, help='Height of the frames in the video stream.')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
color_subtract = (args.mean_r, args.mean_g, args.mean_b)
ctx = mx.gpu(0) if args.gpu == 1 else mx.cpu(0)
class_names = [class_name.strip() for class_name in args.class_names.split(',')]
detector = get_detector(args.network, args.prefix, args.epoch, args.data_shape, color_subtract, ctx,
class_names, args.thresh, args.plot_prob, args.nms_thresh, args.force_nms)
video_capture = WebcamVideoStream(src=args.video_source,
width=args.width,
height=args.height).start()
while True:
frame = video_capture.read()
detected_img = process_image(frame)
cv2.imshow('Video', detected_img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.stop()
cv2.destroyAllWindows()
| 41.556701 | 112 | 0.711734 | [
"MIT"
] | Prasad9/Detect-Flags-SSD | object_detection_app.py | 4,031 | Python |
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api as dash_api
from openstack_dashboard.contrib.trove import api
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
LOG = logging.getLogger(__name__)
class SetInstanceDetailsAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Instance Name"))
flavor = forms.ChoiceField(label=_("Flavor"),
help_text=_("Size of image to launch."))
volume = forms.IntegerField(label=_("Volume Size"),
min_value=0,
initial=1,
help_text=_("Size of the volume in GB."))
datastore = forms.ChoiceField(label=_("Datastore"),
help_text=_(
"Type and version of datastore."))
class Meta(object):
name = _("Details")
help_text_template = "project/databases/_launch_details_help.html"
def clean(self):
if self.data.get("datastore", None) == "select_datastore_type_version":
msg = _("You must select a datastore type and version.")
self._errors["datastore"] = self.error_class([msg])
return self.cleaned_data
@memoized.memoized_method
def flavors(self, request):
try:
return api.trove.flavor_list(request)
except Exception:
LOG.exception("Exception while obtaining flavors list")
redirect = reverse("horizon:project:databases:index")
exceptions.handle(request,
_('Unable to obtain flavors.'),
redirect=redirect)
def populate_flavor_choices(self, request, context):
flavors = self.flavors(request)
if flavors:
return instance_utils.sort_flavor_list(request, flavors)
return []
@memoized.memoized_method
def datastores(self, request):
try:
return api.trove.datastore_list(request)
except Exception:
LOG.exception("Exception while obtaining datastores list")
self._datastores = []
@memoized.memoized_method
def datastore_versions(self, request, datastore):
try:
return api.trove.datastore_version_list(request, datastore)
except Exception:
LOG.exception("Exception while obtaining datastore version list")
self._datastore_versions = []
def populate_datastore_choices(self, request, context):
choices = ()
set_initial = False
datastores = self.datastores(request)
if datastores is not None:
num_datastores_with_one_version = 0
for ds in datastores:
versions = self.datastore_versions(request, ds.name)
if not set_initial:
if len(versions) >= 2:
set_initial = True
elif len(versions) == 1:
num_datastores_with_one_version += 1
if num_datastores_with_one_version > 1:
set_initial = True
if versions:
# only add to choices if datastore has at least one version
version_choices = ()
for v in versions:
version_choices = (version_choices +
((ds.name + ',' + v.name, v.name),))
datastore_choices = (ds.name, version_choices)
choices = choices + (datastore_choices,)
if set_initial:
# prepend choice to force user to choose
initial = (('select_datastore_type_version',
_('Select datastore type and version')))
choices = (initial,) + choices
return choices
TROVE_ADD_USER_PERMS = getattr(settings, 'TROVE_ADD_USER_PERMS', [])
TROVE_ADD_DATABASE_PERMS = getattr(settings, 'TROVE_ADD_DATABASE_PERMS', [])
TROVE_ADD_PERMS = TROVE_ADD_USER_PERMS + TROVE_ADD_DATABASE_PERMS
class SetInstanceDetails(workflows.Step):
action_class = SetInstanceDetailsAction
contributes = ("name", "volume", "flavor", "datastore")
class SetNetworkAction(workflows.Action):
network = forms.MultipleChoiceField(label=_("Networks"),
widget=forms.CheckboxSelectMultiple(),
error_messages={
'required': _(
"At least one network must"
" be specified.")},
help_text=_("Launch instance with"
" these networks"))
def __init__(self, request, *args, **kwargs):
super(SetNetworkAction, self).__init__(request, *args, **kwargs)
network_list = self.fields["network"].choices
if len(network_list) == 1:
self.fields['network'].initial = [network_list[0][0]]
class Meta(object):
name = _("Networking")
permissions = ('openstack.services.network',)
help_text = _("Select networks for your instance.")
def populate_network_choices(self, request, context):
try:
tenant_id = self.request.user.tenant_id
networks = dash_api.neutron.network_list_for_tenant(request,
tenant_id)
network_list = [(network.id, network.name_or_id)
for network in networks]
except Exception:
network_list = []
exceptions.handle(request,
_('Unable to retrieve networks.'))
return network_list
class SetNetwork(workflows.Step):
action_class = SetNetworkAction
template_name = "project/databases/_launch_networks.html"
contributes = ("network_id",)
def contribute(self, data, context):
if data:
networks = self.workflow.request.POST.getlist("network")
# If no networks are explicitly specified, network list
# contains an empty string, so remove it.
networks = [n for n in networks if n != '']
if networks:
context['network_id'] = networks
return context
class AddDatabasesAction(workflows.Action):
"""Initialize the database with users/databases. This tab will honor
the settings which should be a list of permissions required:
* TROVE_ADD_USER_PERMS = []
* TROVE_ADD_DATABASE_PERMS = []
"""
databases = forms.CharField(label=_('Initial Databases'),
required=False,
help_text=_('Comma separated list of '
'databases to create'))
user = forms.CharField(label=_('Initial Admin User'),
required=False,
help_text=_("Initial admin user to add"))
password = forms.CharField(widget=forms.PasswordInput(),
label=_("Password"),
required=False)
host = forms.CharField(label=_("Allowed Host (optional)"),
required=False,
help_text=_("Host or IP that the user is allowed "
"to connect through."))
class Meta(object):
name = _("Initialize Databases")
permissions = TROVE_ADD_PERMS
help_text_template = "project/databases/_launch_initialize_help.html"
def clean(self):
cleaned_data = super(AddDatabasesAction, self).clean()
if cleaned_data.get('user'):
if not cleaned_data.get('password'):
msg = _('You must specify a password if you create a user.')
self._errors["password"] = self.error_class([msg])
if not cleaned_data.get('databases'):
msg = _('You must specify at least one database if '
'you create a user.')
self._errors["databases"] = self.error_class([msg])
return cleaned_data
class InitializeDatabase(workflows.Step):
action_class = AddDatabasesAction
contributes = ["databases", 'user', 'password', 'host']
class AdvancedAction(workflows.Action):
initial_state = forms.ChoiceField(
label=_('Source for Initial State'),
required=False,
help_text=_("Choose initial state."),
choices=[
('', _('None')),
('backup', _('Restore from Backup')),
('master', _('Replicate from Instance'))],
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'initial_state'
}))
backup = forms.ChoiceField(
label=_('Backup Name'),
required=False,
help_text=_('Select a backup to restore'),
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'initial_state',
'data-initial_state-backup': _('Backup Name')
}))
master = forms.ChoiceField(
label=_('Master Instance Name'),
required=False,
help_text=_('Select a master instance'),
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'initial_state',
'data-initial_state-master': _('Master Instance Name')
}))
class Meta(object):
name = _("Advanced")
help_text_template = "project/databases/_launch_advanced_help.html"
def populate_backup_choices(self, request, context):
try:
backups = api.trove.backup_list(request)
choices = [(b.id, b.name) for b in backups
if b.status == 'COMPLETED']
except Exception:
choices = []
if choices:
choices.insert(0, ("", _("Select backup")))
else:
choices.insert(0, ("", _("No backups available")))
return choices
def populate_master_choices(self, request, context):
try:
instances = api.trove.instance_list(request)
choices = [(i.id, i.name) for i in
instances if i.status == 'ACTIVE']
except Exception:
choices = []
if choices:
choices.insert(0, ("", _("Select instance")))
else:
choices.insert(0, ("", _("No instances available")))
return choices
def clean(self):
cleaned_data = super(AdvancedAction, self).clean()
initial_state = cleaned_data.get("initial_state")
if initial_state == 'backup':
backup = self.cleaned_data['backup']
if backup:
try:
bkup = api.trove.backup_get(self.request, backup)
self.cleaned_data['backup'] = bkup.id
except Exception:
raise forms.ValidationError(_("Unable to find backup!"))
else:
raise forms.ValidationError(_("A backup must be selected!"))
cleaned_data['master'] = None
elif initial_state == 'master':
master = self.cleaned_data['master']
if master:
try:
api.trove.instance_get(self.request, master)
except Exception:
raise forms.ValidationError(
_("Unable to find master instance!"))
else:
raise forms.ValidationError(
_("A master instance must be selected!"))
cleaned_data['backup'] = None
else:
cleaned_data['master'] = None
cleaned_data['backup'] = None
return cleaned_data
class Advanced(workflows.Step):
action_class = AdvancedAction
contributes = ['backup', 'master']
class LaunchInstance(workflows.Workflow):
slug = "launch_instance"
name = _("Launch Instance")
finalize_button_name = _("Launch")
success_message = _('Launched %(count)s named "%(name)s".')
failure_message = _('Unable to launch %(count)s named "%(name)s".')
success_url = "horizon:project:databases:index"
default_steps = (SetInstanceDetails,
SetNetwork,
InitializeDatabase,
Advanced)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
super(LaunchInstance, self).__init__(request, context_seed,
entry_point, *args, **kwargs)
self.attrs['autocomplete'] = (
settings.HORIZON_CONFIG.get('password_autocomplete'))
def format_status_message(self, message):
name = self.context.get('name', 'unknown instance')
return message % {"count": _("instance"), "name": name}
def _get_databases(self, context):
"""Returns the initial databases for this instance."""
databases = None
if context.get('databases'):
dbs = context['databases']
databases = [{'name': d.strip()} for d in dbs.split(',')]
return databases
def _get_users(self, context):
users = None
if context.get('user'):
user = {
'name': context['user'],
'password': context['password'],
'databases': self._get_databases(context),
}
if context['host']:
user['host'] = context['host']
users = [user]
return users
def _get_backup(self, context):
backup = None
if context.get('backup'):
backup = {'backupRef': context['backup']}
return backup
def _get_nics(self, context):
netids = context.get('network_id', None)
if netids:
return [{"net-id": netid, "v4-fixed-ip": ""}
for netid in netids]
else:
return None
def handle(self, request, context):
try:
datastore = self.context['datastore'].split(',')[0]
datastore_version = self.context['datastore'].split(',')[1]
LOG.info("Launching database instance with parameters "
"{name=%s, volume=%s, flavor=%s, "
"datastore=%s, datastore_version=%s, "
"dbs=%s, users=%s, "
"backups=%s, nics=%s, replica_of=%s}",
context['name'], context['volume'], context['flavor'],
datastore, datastore_version,
self._get_databases(context), self._get_users(context),
self._get_backup(context), self._get_nics(context),
context.get('master'))
api.trove.instance_create(request,
context['name'],
context['volume'],
context['flavor'],
datastore=datastore,
datastore_version=datastore_version,
databases=self._get_databases(context),
users=self._get_users(context),
restore_point=self._get_backup(context),
nics=self._get_nics(context),
replica_of=context.get('master'))
return True
except Exception:
exceptions.handle(request)
return False
| 39.541568 | 79 | 0.554634 | ["Apache-2.0"] | ChinaMassClouds/horizon | openstack_dashboard/contrib/trove/content/databases/workflows/create_instance.py | 16,647 | Python
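A minimal sketch of how the LaunchInstance helper methods above reshape the workflow context into the arguments handed to api.trove.instance_create. The context values are invented for illustration, and the helpers are restated as standalone functions rather than the workflow's actual methods:

# Hypothetical workflow context, mirroring what the steps above contribute.
context = {
    "databases": "orders, customers",
    "user": "admin",
    "password": "s3cret",
    "host": "192.168.0.0/24",
    "network_id": ["net-1234"],
}

def get_databases(ctx):
    """Comma-separated string -> list of {'name': ...} dicts (cf. _get_databases)."""
    if not ctx.get("databases"):
        return None
    return [{"name": d.strip()} for d in ctx["databases"].split(",")]

def get_users(ctx):
    """Wrap the optional initial admin user, attaching host and databases (cf. _get_users)."""
    if not ctx.get("user"):
        return None
    user = {"name": ctx["user"], "password": ctx["password"],
            "databases": get_databases(ctx)}
    if ctx.get("host"):
        user["host"] = ctx["host"]
    return [user]

def get_nics(ctx):
    """Network ids -> NIC dicts with an unset fixed IPv4 address (cf. _get_nics)."""
    netids = ctx.get("network_id")
    return [{"net-id": n, "v4-fixed-ip": ""} for n in netids] if netids else None

print(get_databases(context))  # [{'name': 'orders'}, {'name': 'customers'}]
print(get_users(context))      # single-item list with the user dict (password, host, databases)
print(get_nics(context))       # [{'net-id': 'net-1234', 'v4-fixed-ip': ''}]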
from typing import NamedTuple
from inari._internal import _format
from ward import each, test, using
class Fixtures(NamedTuple):
raw_description: str
attributes: str
result: str
simple = Fixtures(
raw_description="- var (`object`): Target object.",
attributes="",
result="- **var** (`object`): Target object.",
)
attr_and_emphasize = Fixtures(
raw_description="* variables (`list[VariableCollector]`):"
+ " Class properties. **IMPORTANT!**",
attributes="{: #ClassItself }",
result="- **variables**{: #ClassItself } (`list[VariableCollector]`): Class "
+ "properties. **IMPORTANT!**",
)
multiline = Fixtures(
raw_description="* var (`object`): Target object.\n"
+ "+ var (`object`): `backtick`\n\n"
+ "**Returns**\n"
+ "* `str`: Return type.\n",
attributes="",
result="- **var** (`object`): Target object.\n"
+ "- **var** (`object`): `backtick`\n\n"
+ "**Returns**\n"
+ "- `str`: Return type.\n",
)
no_description = Fixtures(
raw_description="* no_description(`str`)",
attributes="",
result="- **no_description** (`str`)",
)
@test(
"Description `` {raw_description} `` with `` {attributes} `` should be modified "
+ "into `` {result} `` ."
)
@using(
raw_description=each(
simple.raw_description,
attr_and_emphasize.raw_description,
multiline.raw_description,
no_description.raw_description,
),
attributes=each(
simple.attributes,
attr_and_emphasize.attributes,
multiline.attributes,
no_description.attributes,
),
result=each(
simple.result,
attr_and_emphasize.result,
multiline.result,
no_description.result,
),
)
def _(raw_description: str, attributes: str, result: str) -> None:
assert _format.modify_attrs(raw_description, attributes) == result
| 27.217391 | 85 | 0.622471 | ["MIT"] | tkamenoko/inari | tests/_internal/test_format.py | 1,878 | Python
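The fixtures above pin down what _format.modify_attrs is expected to produce: bullets normalised to "-", the variable name bolded, and the attribute string attached directly after the name. A rough stand-in that satisfies these four cases (illustrative only, not inari's actual implementation):

import re

_ITEM = re.compile(r"^[-*+] (\w+)\s*(\(`[^`]+`\))", flags=re.MULTILINE)

def modify_attrs_sketch(raw_description: str, attributes: str) -> str:
    """Bold each `name (`type`)` bullet, append the attribute string, normalise bullets."""
    bolded = _ITEM.sub(lambda m: f"- **{m.group(1)}**{attributes} {m.group(2)}",
                       raw_description)
    # Remaining '*'/'+' bullets (e.g. the return-type line) simply become '-' bullets.
    return re.sub(r"^[*+] ", "- ", bolded, flags=re.MULTILINE)

assert modify_attrs_sketch("- var (`object`): Target object.", "") == \
    "- **var** (`object`): Target object."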
from tkinter import *
from time import *
## Global variable declarations ##
fnameList = ["jeju1.gif", "jeju2.gif", "jeju3.gif", "jeju4.gif", "jeju5.gif", "jeju6.gif", "jeju7.gif", "jeju8.gif", "jeju9.gif", "jeju10.gif"]
photoList = [None] * 10   # one slot per image in fnameList
num1,num2,num3 = 0,1,2
## Function declarations ##
def clickNext() :
global num1,num2,num3
num1 += 1
num2 += 1
num3 += 1
if num1 > 9 :
num1 = 0
if num2 > 9 :
num2 = 0
if num3 > 9 :
num3 = 0
photo = PhotoImage(file="gif/" + fnameList[num1])
photo = photo.subsample(2, 2)
photo1 = PhotoImage(file="gif/" + fnameList[num2])
photo1 = photo1.subsample(4, 4)
photo2 = PhotoImage(file="gif/" + fnameList[num3])
photo2 = photo2.subsample(4, 4)
pLabel.configure(image = photo)
pLabel.image=photo
pLabel1.configure(image = photo1)
pLabel1.image=photo1
pLabel2.configure(image = photo2)
pLabel2.image=photo2
def clickPrev() :
global num1,num2,num3
num1 -= 1
num2 -= 1
num3 -= 1
if num1 < 0 :
num1 = 9
if num2 < 0 :
num2 = 9
if num3 < 0 :
num3 = 9
photo = PhotoImage(file="gif/" + fnameList[num1])
photo = photo.subsample(2, 2)
photo1 = PhotoImage(file="gif/" + fnameList[num2])
photo1 = photo1.subsample(4, 4)
photo2 = PhotoImage(file="gif/" + fnameList[num3])
photo2 = photo2.subsample(4, 4)
pLabel.configure(image = photo)
pLabel.image=photo
pLabel1.configure(image = photo1)
pLabel1.image=photo1
pLabel2.configure(image = photo2)
pLabel2.image=photo2
def clickFirst():
global num1,num2,num3
num1,num2,num3 = 0, 9 , 1
photo = PhotoImage(file="gif/" + fnameList[num1])
photo = photo.subsample(2, 2)
photo1 = PhotoImage(file="gif/" + fnameList[num2])
photo1 = photo1.subsample(4, 4)
photo2 = PhotoImage(file="gif/" + fnameList[num3])
photo2 = photo2.subsample(4, 4)
pLabel.configure(image=photo)
pLabel.image = photo
pLabel1.configure(image=photo1)
pLabel1.image = photo1
pLabel2.configure(image=photo2)
pLabel2.image = photo2
def clickEnd() :
global num1,num2,num3
num1,num2,num3 = 9, 8 ,0
photo = PhotoImage(file="gif/" + fnameList[num1])
photo = photo.subsample(2, 2)
photo1 = PhotoImage(file="gif/" + fnameList[num2])
photo1 = photo1.subsample(4, 4)
photo2 = PhotoImage(file="gif/" + fnameList[num3])
photo2 = photo2.subsample(4, 4)
pLabel.configure(image = photo)
pLabel.image=photo
pLabel1.configure(image = photo1)
pLabel1.image=photo1
pLabel2.configure(image = photo2)
pLabel2.image=photo2
## Main code ##
window = Tk()
window.geometry("730x330")
window.title("Photo Album Viewer")
window.configure(background="white")
btnPrev = Button(window, text = "<< Prev", command = clickPrev, width = 10, background="skyblue")
btnNext = Button(window, text = "Next >>", command = clickNext, width = 10, background="skyblue")
btnFirst = Button(window, text = "First", command = clickFirst, width = 10, background="skyblue")
btnEnd = Button(window, text = "Last", command = clickEnd, width = 10, background="skyblue")
photo = PhotoImage(file = "gif/" + fnameList[0])
photo = photo.subsample(2,2)
pLabel = Label(window, image = photo)
photo1 = PhotoImage(file = "gif/" + fnameList[9])
photo1 = photo1.subsample(4,4)
pLabel1 = Label(window, image = photo1)
photo2 = PhotoImage(file = "gif/" + fnameList[1])
photo2 = photo2.subsample(4,4)
pLabel2 = Label(window, image = photo2)
btnPrev.place(x = 280, y = 270)
btnNext.place(x = 380, y = 270)
btnFirst.place(x = 180, y = 270)
btnEnd.place(x = 480, y = 270)
pLabel1.place(x = 20, y = 50)
pLabel.place(x = 200, y = 10)
pLabel2.place(x = 545, y = 50)
window.mainloop()
| 27.696296 | 143 | 0.635999 | ["Apache-2.0"] | define16/Class | 4-1/WIndow Programing/20180502/p1.py | 3,809 | Python
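The four click handlers in the album viewer above repeat the same load-and-display block. A sketch of the same behaviour factored into one helper; it assumes the imports, fnameList, num1/num2/num3 and the pLabel widgets defined above rather than being a standalone program:

def show_photos(n1, n2, n3):
    """Reload the three labels: centre photo at half size, side photos at quarter size."""
    for label, index, factor in ((pLabel, n1, 2), (pLabel1, n2, 4), (pLabel2, n3, 4)):
        photo = PhotoImage(file="gif/" + fnameList[index]).subsample(factor, factor)
        label.configure(image=photo)
        label.image = photo  # keep a reference so Tk does not discard the image

def click_step(step):
    """Shared body for the Prev/Next buttons: shift all three indices and redraw."""
    global num1, num2, num3
    num1, num2, num3 = (num1 + step) % 10, (num2 + step) % 10, (num3 + step) % 10
    show_photos(num1, num2, num3)

# Wiring example: Button(window, text="Next >>", command=lambda: click_step(1))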
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import pytest
import asyncio
from unittest.mock import patch, call, MagicMock
from foglamp.common import logger
from foglamp.common.storage_client.storage_client import ReadingsStorageClient, StorageClient
from foglamp.common.statistics import Statistics
from foglamp.tasks.purge.purge import Purge
from foglamp.common.process import FoglampProcess
from foglamp.common.configuration_manager import ConfigurationManager
from foglamp.common.audit_logger import AuditLogger
from foglamp.common.storage_client.exceptions import *
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@pytest.allure.feature("unit")
@pytest.allure.story("tasks", "purge")
class TestPurge:
"""Test the units of purge.py"""
def test_init(self, event_loop):
"""Test that creating an instance of Purge calls init of FoglampProcess and creates loggers"""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, "__init__") as mock_process:
with patch.object(logger, "setup") as log:
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
assert isinstance(p, Purge)
assert isinstance(p._audit, AuditLogger)
log.assert_called_once_with("Data Purge")
mock_process.assert_called_once_with()
def test_write_statistics(self, event_loop):
"""Test that write_statistics calls update statistics with defined keys and value increments"""
@asyncio.coroutine
def mock_s_update():
return ""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(Statistics, 'update', return_value=mock_s_update()) as mock_stats_update:
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
p._storage = mockStorageClient
p.write_statistics(1, 2)
mock_stats_update.assert_has_calls([call('PURGED', 1), call('UNSNPURGED', 2)])
def test_set_configuration(self, event_loop):
"""Test that purge's set_configuration returns configuration item with key 'PURGE_READ' """
@asyncio.coroutine
def mock_cm_return():
return ""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
p._storage = MagicMock(spec=StorageClient)
mock_cm = ConfigurationManager(p._storage)
with patch.object(mock_cm, 'create_category', return_value=mock_cm_return()) as mock_create_cat:
with patch.object(mock_cm, 'get_category_all_items', return_value=mock_cm_return()) \
as mock_get_cat:
p.set_configuration()
mock_get_cat.assert_called_once_with('PURGE_READ')
args, kwargs = mock_create_cat.call_args
assert len(args) == 3
assert args[0] == 'PURGE_READ'
@pytest.fixture()
def store_purge(self, **kwargs):
if kwargs.get('age') == '-1' or kwargs.get('size') == '-1':
raise StorageServerError(400, "Bla", "Some Error")
return {"readings": 10, "removed": 1, "unsentPurged": 2, "unsentRetained": 7}
config = {"purgeAgeSize": {"retainUnsent": {"value": "False"}, "age": {"value": "72"}, "size": {"value": "20"}},
"purgeAge": {"retainUnsent": {"value": "False"}, "age": {"value": "72"}, "size": {"value": "0"}},
"purgeSize": {"retainUnsent": {"value": "False"}, "age": {"value": "0"}, "size": {"value": "100"}},
"retainAgeSize": {"retainUnsent": {"value": "True"}, "age": {"value": "72"}, "size": {"value": "20"}},
"retainAge": {"retainUnsent": {"value": "True"}, "age": {"value": "72"}, "size": {"value": "0"}},
"retainSize": {"retainUnsent": {"value": "True"}, "age": {"value": "0"}, "size": {"value": "100"}}}
@pytest.mark.parametrize("conf, expected_return, expected_calls", [
(config["purgeAgeSize"], (2, 4), {'sent_id': 0, 'size': '20', 'flag': 'purge'}),
(config["purgeAge"], (1, 2), {'sent_id': 0, 'age': '72', 'flag': 'purge'}),
(config["purgeSize"], (1, 2), {'sent_id': 0, 'size': '100', 'flag': 'purge'}),
(config["retainAgeSize"], (2, 4), {'sent_id': 0, 'size': '20', 'flag': 'retain'}),
(config["retainAge"], (1, 2), {'sent_id': 0, 'age': '72', 'flag': 'retain'}),
(config["retainSize"], (1, 2), {'sent_id': 0, 'size': '100', 'flag': 'retain'})
])
def test_purge_data(self, event_loop, conf, expected_return, expected_calls):
"""Test that purge_data calls Storage's purge with defined configuration"""
@asyncio.coroutine
def mock_audit_info():
return ""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
p._logger = logger
p._logger.info = MagicMock()
p._logger.error = MagicMock()
p._storage = MagicMock(spec=StorageClient)
p._readings_storage = MagicMock(spec=ReadingsStorageClient)
audit = p._audit
with patch.object(p._readings_storage, 'purge', side_effect=self.store_purge) as mock_storage_purge:
with patch.object(audit, 'information', return_value=mock_audit_info()) as audit_info:
# Test the positive case when all if conditions in purge_data pass
assert expected_return == p.purge_data(conf)
assert audit_info.called
args, kwargs = mock_storage_purge.call_args
assert kwargs == expected_calls
@pytest.mark.parametrize("conf, expected_return", [
({"retainUnsent": {"value": "False"}, "age": {"value": "0"}, "size": {"value": "0"}}, (0, 0)),
({"retainUnsent": {"value": "True"}, "age": {"value": "0"}, "size": {"value": "0"}}, (0, 0))
])
def test_purge_data_no_data_purged(self, event_loop, conf, expected_return):
"""Test that purge_data logs message when no data was purged"""
@asyncio.coroutine
def mock_audit_info():
return ""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
p._logger = logger
p._logger.info = MagicMock()
p._logger.error = MagicMock()
p._storage = MagicMock(spec=StorageClient)
p._readings_storage = MagicMock(spec=ReadingsStorageClient)
audit = p._audit
with patch.object(p._readings_storage, 'purge', side_effect=self.store_purge):
with patch.object(audit, 'information', return_value=mock_audit_info()):
assert expected_return == p.purge_data(conf)
p._logger.info.assert_called_once_with("No rows purged")
@pytest.mark.parametrize("conf, expected_return", [
({"retainUnsent": {"value": "True"}, "age": {"value": "-1"}, "size": {"value": "-1"}}, (0, 0))
])
def test_purge_error_storage_response(self, event_loop, conf, expected_return):
"""Test that purge_data logs error when storage purge returns an error response"""
@asyncio.coroutine
def mock_audit_info():
return ""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
p._logger = logger
p._logger.info = MagicMock()
p._logger.error = MagicMock()
p._storage = MagicMock(spec=StorageClient)
p._readings_storage = MagicMock(spec=ReadingsStorageClient)
audit = p._audit
with patch.object(p._readings_storage, 'purge', side_effect=self.store_purge):
with patch.object(audit, 'information', return_value=mock_audit_info()):
assert expected_return == p.purge_data(conf)
@pytest.mark.parametrize("conf, expected_error_key",
[({"retainUnsent": {"value": "True"}, "age": {"value": "bla"}, "size": {"value": "0"}},
"age"),
({"retainUnsent": {"value": "True"}, "age": {"value": "0"}, "size": {"value": "bla"}},
"size")])
def test_purge_data_invalid_conf(self, event_loop, conf, expected_error_key):
"""Test that purge_data raises exception when called with invalid configuration"""
@asyncio.coroutine
def mock_audit_info():
return ""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
p._logger = logger
p._logger.info = MagicMock()
p._logger.error = MagicMock()
p._storage = MagicMock(spec=StorageClient)
p._readings_storage = MagicMock(spec=ReadingsStorageClient)
audit = p._audit
with patch.object(p._readings_storage, 'purge', side_effect=self.store_purge):
with patch.object(audit, 'information', return_value=mock_audit_info()):
# Test the code block when purge failed because of invalid configuration
p.purge_data(conf)
p._logger.error.assert_called_with('Configuration item {} bla should be integer!'.
format(expected_error_key))
def test_run(self, event_loop):
"""Test that run calls all units of purge process"""
@asyncio.coroutine
def mock_audit_info():
return ""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
config = "Some config"
p._logger.exception = MagicMock()
with patch.object(p, 'set_configuration', return_value=config) as mock_set_config:
with patch.object(p, 'purge_data', return_value=(1, 2)) as mock_purge_data:
with patch.object(p, 'write_statistics') as mock_write_stats:
p.run()
# Test the positive case when no error in try block
mock_set_config.assert_called_once_with()
mock_purge_data.assert_called_once_with(config)
mock_write_stats.assert_called_once_with(1, 2)
def test_run_exception(self, event_loop):
"""Test that run calls all units of purge process and checks the exception handling"""
@asyncio.coroutine
def mock_audit_info():
return ""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
config = "Some config"
p._logger.exception = MagicMock()
with patch.object(p, 'set_configuration', return_value=config):
with patch.object(p, 'purge_data', return_value=Exception()):
with patch.object(p, 'write_statistics'):
p.run()
# Test the negative case when function purge_data raise some exception
p._logger.exception.assert_called_once_with("'Exception' object is not iterable")
| 50.726923 | 116 | 0.600804 | ["Apache-2.0"] | ThyagOSI/FogLAMP | tests/unit/python/foglamp/tasks/purge/test_purge.py | 13,189 | Python
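The parametrized cases above encode a small decision table: retainUnsent picks the flag, and a non-zero age and a non-zero size each trigger one purge call, so the expected counters double when both are set. An illustrative sketch of that mapping (not FogLAMP's implementation, just the relationship the test data implies):

def expected_purge_calls(config, sent_id=0):
    """Turn one test config into the list of kwargs the mocked purge should receive."""
    flag = 'retain' if config['retainUnsent']['value'] == 'True' else 'purge'
    calls = []
    if config['age']['value'] != '0':
        calls.append({'sent_id': sent_id, 'age': config['age']['value'], 'flag': flag})
    if config['size']['value'] != '0':
        calls.append({'sent_id': sent_id, 'size': config['size']['value'], 'flag': flag})
    return calls

purge_age_size = {"retainUnsent": {"value": "False"},
                  "age": {"value": "72"}, "size": {"value": "20"}}
print(expected_purge_calls(purge_age_size))
# -> two calls; the test only asserts on the kwargs of the last one (the size-based call)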
T = int(input())
if 1 <= T <= 100:
for i in range(T):
A = int(input())
B = int(input())
C = int(input())
if (1 <= A <= 10 ** 16) and (1 <= B <= 10 ** 16) and (1 <= C <= 10 ** 16):
for x in range(A, B + 1):
if x % C == 0:
print(x)
| 26.25 | 82 | 0.336508 | ["MIT"] | thisIsShikhar/subeen_52problemsSolve | Problem 33.py | 315 | Python
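With A and B allowed up to 10^16, scanning every integer in [A, B] as above cannot finish in reasonable time; stepping directly between multiples of C produces the same output. A sketch assuming the same one-number-per-line input format:

T = int(input())
for _ in range(T):
    A = int(input())
    B = int(input())
    C = int(input())
    first = ((A + C - 1) // C) * C  # smallest multiple of C that is >= A
    for multiple in range(first, B + 1, C):
        print(multiple)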